Diffstat (limited to 'drivers/net')
338 files changed, 277953 insertions, 0 deletions
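The patch body below is reproduced verbatim. It begins with drivers/net/Makefile and then imports the AF_PACKET PMD (drivers/net/af_packet/rte_eth_af_packet.c), which registers the virtual device "eth_af_packet" and is configured through the kvargs it parses: iface, qpairs, blocksz, framesz and framecnt. As orientation only, here is a minimal sketch of how an application might bring such a vdev up; it is not part of the patch, and the interface name, port id and mempool sizing are illustrative assumptions.

/*
 * Minimal sketch, not part of this patch: bring up the af_packet vdev that
 * rte_eth_af_packet.c registers as "eth_af_packet". Assumes the vdev is
 * passed on the EAL command line, e.g.
 *   --vdev=eth_af_packet0,iface=eth0,qpairs=1,blocksz=4096,framesz=2048,framecnt=512
 * The interface name "eth0", port id 0 and pool sizing are illustrative.
 */
#include <string.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>

int
main(int argc, char **argv)
{
	struct rte_eth_conf port_conf;
	struct rte_mempool *mp;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	memset(&port_conf, 0, sizeof(port_conf)); /* default port config */

	/* RTE_MBUF_DEFAULT_BUF_SIZE leaves more than ETH_FRAME_LEN of data
	 * room after the headroom, which the PMD's eth_rx_queue_setup()
	 * checks before accepting the mempool. */
	mp = rte_pktmbuf_pool_create("af_packet_pool", 8192, 256, 0,
				     RTE_MBUF_DEFAULT_BUF_SIZE,
				     rte_socket_id());
	if (mp == NULL)
		return -1;

	/* port 0 assumed to be the af_packet vdev: one RX/TX queue pair,
	 * then start the port (which sets the link status UP in this PMD) */
	if (rte_eth_dev_configure(0, 1, 1, &port_conf) < 0 ||
	    rte_eth_rx_queue_setup(0, 0, 512, rte_socket_id(), NULL, mp) < 0 ||
	    rte_eth_tx_queue_setup(0, 0, 512, rte_socket_id(), NULL) < 0)
		return -1;

	return rte_eth_dev_start(0);
}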
diff --git a/drivers/net/Makefile b/drivers/net/Makefile new file mode 100644 index 00000000..3386a673 --- /dev/null +++ b/drivers/net/Makefile @@ -0,0 +1,60 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2015 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +include $(RTE_SDK)/mk/rte.vars.mk + +DIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += af_packet +DIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x +DIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += bonding +DIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe +DIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000 +DIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena +DIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic +DIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k +DIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e +DIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe +DIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4 +DIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5 +DIRS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += mpipe +DIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp +DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += null +DIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += pcap +DIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += ring +DIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += szedata2 +DIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio +DIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3 +DIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += xenvirt + +ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y) +DIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += vhost +endif # $(CONFIG_RTE_LIBRTE_VHOST) + +include $(RTE_SDK)/mk/rte.subdir.mk diff --git a/drivers/net/af_packet/Makefile b/drivers/net/af_packet/Makefile new file mode 100644 index 00000000..cb1a7aea --- /dev/null +++ b/drivers/net/af_packet/Makefile @@ -0,0 +1,58 @@ +# BSD LICENSE +# +# Copyright(c) 2014 John W. Linville <linville@redhat.com> +# Copyright(c) 2010-2014 Intel Corporation. All rights reserved. +# Copyright(c) 2014 6WIND S.A. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_af_packet.a + +EXPORT_MAP := rte_pmd_af_packet_version.map + +LIBABIVER := 1 + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += rte_eth_af_packet.c + +# this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_kvargs + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c new file mode 100644 index 00000000..f17bd7e7 --- /dev/null +++ b/drivers/net/af_packet/rte_eth_af_packet.c @@ -0,0 +1,881 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014 John W. Linville <linville@tuxdriver.com> + * + * Originally based upon librte_pmd_pcap code: + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * Copyright(c) 2014 6WIND S.A. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <rte_mbuf.h> +#include <rte_ethdev.h> +#include <rte_malloc.h> +#include <rte_kvargs.h> +#include <rte_dev.h> + +#include <linux/if_ether.h> +#include <linux/if_packet.h> +#include <arpa/inet.h> +#include <net/if.h> +#include <sys/types.h> +#include <sys/socket.h> +#include <sys/ioctl.h> +#include <sys/mman.h> +#include <unistd.h> +#include <poll.h> + +#define ETH_AF_PACKET_IFACE_ARG "iface" +#define ETH_AF_PACKET_NUM_Q_ARG "qpairs" +#define ETH_AF_PACKET_BLOCKSIZE_ARG "blocksz" +#define ETH_AF_PACKET_FRAMESIZE_ARG "framesz" +#define ETH_AF_PACKET_FRAMECOUNT_ARG "framecnt" + +#define DFLT_BLOCK_SIZE (1 << 12) +#define DFLT_FRAME_SIZE (1 << 11) +#define DFLT_FRAME_COUNT (1 << 9) + +#define RTE_PMD_AF_PACKET_MAX_RINGS 16 + +struct pkt_rx_queue { + int sockfd; + + struct iovec *rd; + uint8_t *map; + unsigned int framecount; + unsigned int framenum; + + struct rte_mempool *mb_pool; + uint8_t in_port; + + volatile unsigned long rx_pkts; + volatile unsigned long err_pkts; +}; + +struct pkt_tx_queue { + int sockfd; + + struct iovec *rd; + uint8_t *map; + unsigned int framecount; + unsigned int framenum; + + volatile unsigned long tx_pkts; + volatile unsigned long err_pkts; +}; + +struct pmd_internals { + unsigned nb_queues; + + int if_index; + struct ether_addr eth_addr; + + struct tpacket_req req; + + struct pkt_rx_queue rx_queue[RTE_PMD_AF_PACKET_MAX_RINGS]; + struct pkt_tx_queue tx_queue[RTE_PMD_AF_PACKET_MAX_RINGS]; +}; + +static const char *valid_arguments[] = { + ETH_AF_PACKET_IFACE_ARG, + ETH_AF_PACKET_NUM_Q_ARG, + ETH_AF_PACKET_BLOCKSIZE_ARG, + ETH_AF_PACKET_FRAMESIZE_ARG, + ETH_AF_PACKET_FRAMECOUNT_ARG, + NULL +}; + +static const char *drivername = "AF_PACKET PMD"; + +static struct rte_eth_link pmd_link = { + .link_speed = ETH_SPEED_NUM_10G, + .link_duplex = ETH_LINK_FULL_DUPLEX, + .link_status = ETH_LINK_DOWN, + .link_autoneg = ETH_LINK_SPEED_AUTONEG +}; + +static uint16_t +eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + unsigned i; + struct tpacket2_hdr *ppd; + struct rte_mbuf *mbuf; + uint8_t *pbuf; + struct pkt_rx_queue *pkt_q = queue; + uint16_t num_rx = 0; + unsigned int framecount, framenum; + + if (unlikely(nb_pkts == 0)) + return 0; + + /* + * Reads the given number of packets from the AF_PACKET socket one by + * one and copies the packet data into a newly allocated mbuf. 
+ */ + framecount = pkt_q->framecount; + framenum = pkt_q->framenum; + for (i = 0; i < nb_pkts; i++) { + /* point at the next incoming frame */ + ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base; + if ((ppd->tp_status & TP_STATUS_USER) == 0) + break; + + /* allocate the next mbuf */ + mbuf = rte_pktmbuf_alloc(pkt_q->mb_pool); + if (unlikely(mbuf == NULL)) + break; + + /* packet will fit in the mbuf, go ahead and receive it */ + rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf) = ppd->tp_snaplen; + pbuf = (uint8_t *) ppd + ppd->tp_mac; + memcpy(rte_pktmbuf_mtod(mbuf, void *), pbuf, rte_pktmbuf_data_len(mbuf)); + + /* release incoming frame and advance ring buffer */ + ppd->tp_status = TP_STATUS_KERNEL; + if (++framenum >= framecount) + framenum = 0; + mbuf->port = pkt_q->in_port; + + /* account for the receive frame */ + bufs[i] = mbuf; + num_rx++; + } + pkt_q->framenum = framenum; + pkt_q->rx_pkts += num_rx; + return num_rx; +} + +/* + * Callback to handle sending packets through a real NIC. + */ +static uint16_t +eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + struct tpacket2_hdr *ppd; + struct rte_mbuf *mbuf; + uint8_t *pbuf; + unsigned int framecount, framenum; + struct pollfd pfd; + struct pkt_tx_queue *pkt_q = queue; + uint16_t num_tx = 0; + int i; + + if (unlikely(nb_pkts == 0)) + return 0; + + memset(&pfd, 0, sizeof(pfd)); + pfd.fd = pkt_q->sockfd; + pfd.events = POLLOUT; + pfd.revents = 0; + + framecount = pkt_q->framecount; + framenum = pkt_q->framenum; + ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base; + for (i = 0; i < nb_pkts; i++) { + /* point at the next incoming frame */ + if ((ppd->tp_status != TP_STATUS_AVAILABLE) && + (poll(&pfd, 1, -1) < 0)) + continue; + + /* copy the tx frame data */ + mbuf = bufs[num_tx]; + pbuf = (uint8_t *) ppd + TPACKET2_HDRLEN - + sizeof(struct sockaddr_ll); + memcpy(pbuf, rte_pktmbuf_mtod(mbuf, void*), rte_pktmbuf_data_len(mbuf)); + ppd->tp_len = ppd->tp_snaplen = rte_pktmbuf_data_len(mbuf); + + /* release incoming frame and advance ring buffer */ + ppd->tp_status = TP_STATUS_SEND_REQUEST; + if (++framenum >= framecount) + framenum = 0; + ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base; + + num_tx++; + rte_pktmbuf_free(mbuf); + } + + /* kick-off transmits */ + if (sendto(pkt_q->sockfd, NULL, 0, MSG_DONTWAIT, NULL, 0) == -1) + return 0; /* error sending -- no packets transmitted */ + + pkt_q->framenum = framenum; + pkt_q->tx_pkts += num_tx; + pkt_q->err_pkts += nb_pkts - num_tx; + return num_tx; +} + +static int +eth_dev_start(struct rte_eth_dev *dev) +{ + dev->data->dev_link.link_status = ETH_LINK_UP; + return 0; +} + +/* + * This function gets called when the current port gets stopped. 
+ */ +static void +eth_dev_stop(struct rte_eth_dev *dev) +{ + unsigned i; + int sockfd; + struct pmd_internals *internals = dev->data->dev_private; + + for (i = 0; i < internals->nb_queues; i++) { + sockfd = internals->rx_queue[i].sockfd; + if (sockfd != -1) + close(sockfd); + sockfd = internals->tx_queue[i].sockfd; + if (sockfd != -1) + close(sockfd); + } + + dev->data->dev_link.link_status = ETH_LINK_DOWN; +} + +static int +eth_dev_configure(struct rte_eth_dev *dev __rte_unused) +{ + return 0; +} + +static void +eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct pmd_internals *internals = dev->data->dev_private; + + dev_info->driver_name = drivername; + dev_info->if_index = internals->if_index; + dev_info->max_mac_addrs = 1; + dev_info->max_rx_pktlen = (uint32_t)ETH_FRAME_LEN; + dev_info->max_rx_queues = (uint16_t)internals->nb_queues; + dev_info->max_tx_queues = (uint16_t)internals->nb_queues; + dev_info->min_rx_bufsize = 0; + dev_info->pci_dev = NULL; +} + +static void +eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats) +{ + unsigned i, imax; + unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0; + const struct pmd_internals *internal = dev->data->dev_private; + + imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ? + internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS); + for (i = 0; i < imax; i++) { + igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts; + rx_total += igb_stats->q_ipackets[i]; + } + + imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ? + internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS); + for (i = 0; i < imax; i++) { + igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts; + igb_stats->q_errors[i] = internal->tx_queue[i].err_pkts; + tx_total += igb_stats->q_opackets[i]; + tx_err_total += igb_stats->q_errors[i]; + } + + igb_stats->ipackets = rx_total; + igb_stats->opackets = tx_total; + igb_stats->oerrors = tx_err_total; +} + +static void +eth_stats_reset(struct rte_eth_dev *dev) +{ + unsigned i; + struct pmd_internals *internal = dev->data->dev_private; + + for (i = 0; i < internal->nb_queues; i++) + internal->rx_queue[i].rx_pkts = 0; + + for (i = 0; i < internal->nb_queues; i++) { + internal->tx_queue[i].tx_pkts = 0; + internal->tx_queue[i].err_pkts = 0; + } +} + +static void +eth_dev_close(struct rte_eth_dev *dev __rte_unused) +{ +} + +static void +eth_queue_release(void *q __rte_unused) +{ +} + +static int +eth_link_update(struct rte_eth_dev *dev __rte_unused, + int wait_to_complete __rte_unused) +{ + return 0; +} + +static int +eth_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t rx_queue_id, + uint16_t nb_rx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mb_pool) +{ + struct pmd_internals *internals = dev->data->dev_private; + struct pkt_rx_queue *pkt_q = &internals->rx_queue[rx_queue_id]; + uint16_t buf_size; + + pkt_q->mb_pool = mb_pool; + + /* Now get the space available for data in the mbuf */ + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(pkt_q->mb_pool) - + RTE_PKTMBUF_HEADROOM); + + if (ETH_FRAME_LEN > buf_size) { + RTE_LOG(ERR, PMD, + "%s: %d bytes will not fit in mbuf (%d bytes)\n", + dev->data->name, ETH_FRAME_LEN, buf_size); + return -ENOMEM; + } + + dev->data->rx_queues[rx_queue_id] = pkt_q; + pkt_q->in_port = dev->data->port_id; + + return 0; +} + +static int +eth_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t tx_queue_id, + uint16_t nb_tx_desc __rte_unused, + unsigned int 
socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + + struct pmd_internals *internals = dev->data->dev_private; + + dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id]; + return 0; +} + +static const struct eth_dev_ops ops = { + .dev_start = eth_dev_start, + .dev_stop = eth_dev_stop, + .dev_close = eth_dev_close, + .dev_configure = eth_dev_configure, + .dev_infos_get = eth_dev_info, + .rx_queue_setup = eth_rx_queue_setup, + .tx_queue_setup = eth_tx_queue_setup, + .rx_queue_release = eth_queue_release, + .tx_queue_release = eth_queue_release, + .link_update = eth_link_update, + .stats_get = eth_stats_get, + .stats_reset = eth_stats_reset, +}; + +/* + * Opens an AF_PACKET socket + */ +static int +open_packet_iface(const char *key __rte_unused, + const char *value __rte_unused, + void *extra_args) +{ + int *sockfd = extra_args; + + /* Open an AF_PACKET socket... */ + *sockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); + if (*sockfd == -1) { + RTE_LOG(ERR, PMD, "Could not open AF_PACKET socket\n"); + return -1; + } + + return 0; +} + +static int +rte_pmd_init_internals(const char *name, + const int sockfd, + const unsigned nb_queues, + unsigned int blocksize, + unsigned int blockcnt, + unsigned int framesize, + unsigned int framecnt, + const unsigned numa_node, + struct pmd_internals **internals, + struct rte_eth_dev **eth_dev, + struct rte_kvargs *kvlist) +{ + struct rte_eth_dev_data *data = NULL; + struct rte_kvargs_pair *pair = NULL; + struct ifreq ifr; + size_t ifnamelen; + unsigned k_idx; + struct sockaddr_ll sockaddr; + struct tpacket_req *req; + struct pkt_rx_queue *rx_queue; + struct pkt_tx_queue *tx_queue; + int rc, tpver, discard; + int qsockfd = -1; + unsigned int i, q, rdsize; + int fanout_arg __rte_unused, bypass __rte_unused; + + for (k_idx = 0; k_idx < kvlist->count; k_idx++) { + pair = &kvlist->pairs[k_idx]; + if (strstr(pair->key, ETH_AF_PACKET_IFACE_ARG) != NULL) + break; + } + if (pair == NULL) { + RTE_LOG(ERR, PMD, + "%s: no interface specified for AF_PACKET ethdev\n", + name); + goto error_early; + } + + RTE_LOG(INFO, PMD, + "%s: creating AF_PACKET-backed ethdev on numa socket %u\n", + name, numa_node); + + /* + * now do all data allocation - for eth_dev structure, dummy pci driver + * and internal (private) data + */ + data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node); + if (data == NULL) + goto error_early; + + *internals = rte_zmalloc_socket(name, sizeof(**internals), + 0, numa_node); + if (*internals == NULL) + goto error_early; + + for (q = 0; q < nb_queues; q++) { + (*internals)->rx_queue[q].map = MAP_FAILED; + (*internals)->tx_queue[q].map = MAP_FAILED; + } + + req = &((*internals)->req); + + req->tp_block_size = blocksize; + req->tp_block_nr = blockcnt; + req->tp_frame_size = framesize; + req->tp_frame_nr = framecnt; + + ifnamelen = strlen(pair->value); + if (ifnamelen < sizeof(ifr.ifr_name)) { + memcpy(ifr.ifr_name, pair->value, ifnamelen); + ifr.ifr_name[ifnamelen] = '\0'; + } else { + RTE_LOG(ERR, PMD, + "%s: I/F name too long (%s)\n", + name, pair->value); + goto error_early; + } + if (ioctl(sockfd, SIOCGIFINDEX, &ifr) == -1) { + RTE_LOG(ERR, PMD, + "%s: ioctl failed (SIOCGIFINDEX)\n", + name); + goto error_early; + } + (*internals)->if_index = ifr.ifr_ifindex; + + if (ioctl(sockfd, SIOCGIFHWADDR, &ifr) == -1) { + RTE_LOG(ERR, PMD, + "%s: ioctl failed (SIOCGIFHWADDR)\n", + name); + goto error_early; + } + memcpy(&(*internals)->eth_addr, ifr.ifr_hwaddr.sa_data, ETH_ALEN); + + memset(&sockaddr, 0, 
sizeof(sockaddr)); + sockaddr.sll_family = AF_PACKET; + sockaddr.sll_protocol = htons(ETH_P_ALL); + sockaddr.sll_ifindex = (*internals)->if_index; + +#if defined(PACKET_FANOUT) + fanout_arg = (getpid() ^ (*internals)->if_index) & 0xffff; + fanout_arg |= (PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_DEFRAG) << 16; +#if defined(PACKET_FANOUT_FLAG_ROLLOVER) + fanout_arg |= PACKET_FANOUT_FLAG_ROLLOVER << 16; +#endif +#endif + + for (q = 0; q < nb_queues; q++) { + /* Open an AF_PACKET socket for this queue... */ + qsockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); + if (qsockfd == -1) { + RTE_LOG(ERR, PMD, + "%s: could not open AF_PACKET socket\n", + name); + return -1; + } + + tpver = TPACKET_V2; + rc = setsockopt(qsockfd, SOL_PACKET, PACKET_VERSION, + &tpver, sizeof(tpver)); + if (rc == -1) { + RTE_LOG(ERR, PMD, + "%s: could not set PACKET_VERSION on AF_PACKET " + "socket for %s\n", name, pair->value); + goto error; + } + + discard = 1; + rc = setsockopt(qsockfd, SOL_PACKET, PACKET_LOSS, + &discard, sizeof(discard)); + if (rc == -1) { + RTE_LOG(ERR, PMD, + "%s: could not set PACKET_LOSS on " + "AF_PACKET socket for %s\n", name, pair->value); + goto error; + } + +#if defined(PACKET_QDISC_BYPASS) + bypass = 1; + rc = setsockopt(qsockfd, SOL_PACKET, PACKET_QDISC_BYPASS, + &bypass, sizeof(bypass)); + if (rc == -1) { + RTE_LOG(ERR, PMD, + "%s: could not set PACKET_QDISC_BYPASS " + "on AF_PACKET socket for %s\n", name, + pair->value); + goto error; + } +#endif + + rc = setsockopt(qsockfd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req)); + if (rc == -1) { + RTE_LOG(ERR, PMD, + "%s: could not set PACKET_RX_RING on AF_PACKET " + "socket for %s\n", name, pair->value); + goto error; + } + + rc = setsockopt(qsockfd, SOL_PACKET, PACKET_TX_RING, req, sizeof(*req)); + if (rc == -1) { + RTE_LOG(ERR, PMD, + "%s: could not set PACKET_TX_RING on AF_PACKET " + "socket for %s\n", name, pair->value); + goto error; + } + + rx_queue = &((*internals)->rx_queue[q]); + rx_queue->framecount = req->tp_frame_nr; + + rx_queue->map = mmap(NULL, 2 * req->tp_block_size * req->tp_block_nr, + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED, + qsockfd, 0); + if (rx_queue->map == MAP_FAILED) { + RTE_LOG(ERR, PMD, + "%s: call to mmap failed on AF_PACKET socket for %s\n", + name, pair->value); + goto error; + } + + /* rdsize is same for both Tx and Rx */ + rdsize = req->tp_frame_nr * sizeof(*(rx_queue->rd)); + + rx_queue->rd = rte_zmalloc_socket(name, rdsize, 0, numa_node); + if (rx_queue->rd == NULL) + goto error; + for (i = 0; i < req->tp_frame_nr; ++i) { + rx_queue->rd[i].iov_base = rx_queue->map + (i * framesize); + rx_queue->rd[i].iov_len = req->tp_frame_size; + } + rx_queue->sockfd = qsockfd; + + tx_queue = &((*internals)->tx_queue[q]); + tx_queue->framecount = req->tp_frame_nr; + + tx_queue->map = rx_queue->map + req->tp_block_size * req->tp_block_nr; + + tx_queue->rd = rte_zmalloc_socket(name, rdsize, 0, numa_node); + if (tx_queue->rd == NULL) + goto error; + for (i = 0; i < req->tp_frame_nr; ++i) { + tx_queue->rd[i].iov_base = tx_queue->map + (i * framesize); + tx_queue->rd[i].iov_len = req->tp_frame_size; + } + tx_queue->sockfd = qsockfd; + + rc = bind(qsockfd, (const struct sockaddr*)&sockaddr, sizeof(sockaddr)); + if (rc == -1) { + RTE_LOG(ERR, PMD, + "%s: could not bind AF_PACKET socket to %s\n", + name, pair->value); + goto error; + } + +#if defined(PACKET_FANOUT) + rc = setsockopt(qsockfd, SOL_PACKET, PACKET_FANOUT, + &fanout_arg, sizeof(fanout_arg)); + if (rc == -1) { + RTE_LOG(ERR, PMD, + "%s: could not set 
PACKET_FANOUT on AF_PACKET socket " + "for %s\n", name, pair->value); + goto error; + } +#endif + } + + /* reserve an ethdev entry */ + *eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL); + if (*eth_dev == NULL) + goto error; + + /* + * now put it all together + * - store queue data in internals, + * - store numa_node in eth_dev + * - point eth_dev_data to internals + * - and point eth_dev structure to new eth_dev_data structure + */ + + (*internals)->nb_queues = nb_queues; + + data->dev_private = *internals; + data->port_id = (*eth_dev)->data->port_id; + data->nb_rx_queues = (uint16_t)nb_queues; + data->nb_tx_queues = (uint16_t)nb_queues; + data->dev_link = pmd_link; + data->mac_addrs = &(*internals)->eth_addr; + strncpy(data->name, + (*eth_dev)->data->name, strlen((*eth_dev)->data->name)); + + (*eth_dev)->data = data; + (*eth_dev)->dev_ops = &ops; + (*eth_dev)->driver = NULL; + (*eth_dev)->data->dev_flags = RTE_ETH_DEV_DETACHABLE; + (*eth_dev)->data->drv_name = drivername; + (*eth_dev)->data->kdrv = RTE_KDRV_NONE; + (*eth_dev)->data->numa_node = numa_node; + + return 0; + +error: + if (qsockfd != -1) + close(qsockfd); + for (q = 0; q < nb_queues; q++) { + munmap((*internals)->rx_queue[q].map, + 2 * req->tp_block_size * req->tp_block_nr); + + rte_free((*internals)->rx_queue[q].rd); + rte_free((*internals)->tx_queue[q].rd); + if (((*internals)->rx_queue[q].sockfd != 0) && + ((*internals)->rx_queue[q].sockfd != qsockfd)) + close((*internals)->rx_queue[q].sockfd); + } + rte_free(*internals); +error_early: + rte_free(data); + return -1; +} + +static int +rte_eth_from_packet(const char *name, + int const *sockfd, + const unsigned numa_node, + struct rte_kvargs *kvlist) +{ + struct pmd_internals *internals = NULL; + struct rte_eth_dev *eth_dev = NULL; + struct rte_kvargs_pair *pair = NULL; + unsigned k_idx; + unsigned int blockcount; + unsigned int blocksize = DFLT_BLOCK_SIZE; + unsigned int framesize = DFLT_FRAME_SIZE; + unsigned int framecount = DFLT_FRAME_COUNT; + unsigned int qpairs = 1; + + /* do some parameter checking */ + if (*sockfd < 0) + return -1; + + /* + * Walk arguments for configurable settings + */ + for (k_idx = 0; k_idx < kvlist->count; k_idx++) { + pair = &kvlist->pairs[k_idx]; + if (strstr(pair->key, ETH_AF_PACKET_NUM_Q_ARG) != NULL) { + qpairs = atoi(pair->value); + if (qpairs < 1 || + qpairs > RTE_PMD_AF_PACKET_MAX_RINGS) { + RTE_LOG(ERR, PMD, + "%s: invalid qpairs value\n", + name); + return -1; + } + continue; + } + if (strstr(pair->key, ETH_AF_PACKET_BLOCKSIZE_ARG) != NULL) { + blocksize = atoi(pair->value); + if (!blocksize) { + RTE_LOG(ERR, PMD, + "%s: invalid blocksize value\n", + name); + return -1; + } + continue; + } + if (strstr(pair->key, ETH_AF_PACKET_FRAMESIZE_ARG) != NULL) { + framesize = atoi(pair->value); + if (!framesize) { + RTE_LOG(ERR, PMD, + "%s: invalid framesize value\n", + name); + return -1; + } + continue; + } + if (strstr(pair->key, ETH_AF_PACKET_FRAMECOUNT_ARG) != NULL) { + framecount = atoi(pair->value); + if (!framecount) { + RTE_LOG(ERR, PMD, + "%s: invalid framecount value\n", + name); + return -1; + } + continue; + } + } + + if (framesize > blocksize) { + RTE_LOG(ERR, PMD, + "%s: AF_PACKET MMAP frame size exceeds block size!\n", + name); + return -1; + } + + blockcount = framecount / (blocksize / framesize); + if (!blockcount) { + RTE_LOG(ERR, PMD, + "%s: invalid AF_PACKET MMAP parameters\n", name); + return -1; + } + + RTE_LOG(INFO, PMD, "%s: AF_PACKET MMAP parameters:\n", name); + RTE_LOG(INFO, PMD, "%s:\tblock size %d\n", name, 
blocksize); + RTE_LOG(INFO, PMD, "%s:\tblock count %d\n", name, blockcount); + RTE_LOG(INFO, PMD, "%s:\tframe size %d\n", name, framesize); + RTE_LOG(INFO, PMD, "%s:\tframe count %d\n", name, framecount); + + if (rte_pmd_init_internals(name, *sockfd, qpairs, + blocksize, blockcount, + framesize, framecount, + numa_node, &internals, ð_dev, + kvlist) < 0) + return -1; + + eth_dev->rx_pkt_burst = eth_af_packet_rx; + eth_dev->tx_pkt_burst = eth_af_packet_tx; + + return 0; +} + +static int +rte_pmd_af_packet_devinit(const char *name, const char *params) +{ + unsigned numa_node; + int ret = 0; + struct rte_kvargs *kvlist; + int sockfd = -1; + + RTE_LOG(INFO, PMD, "Initializing pmd_af_packet for %s\n", name); + + numa_node = rte_socket_id(); + + kvlist = rte_kvargs_parse(params, valid_arguments); + if (kvlist == NULL) { + ret = -1; + goto exit; + } + + /* + * If iface argument is passed we open the NICs and use them for + * reading / writing + */ + if (rte_kvargs_count(kvlist, ETH_AF_PACKET_IFACE_ARG) == 1) { + + ret = rte_kvargs_process(kvlist, ETH_AF_PACKET_IFACE_ARG, + &open_packet_iface, &sockfd); + if (ret < 0) + goto exit; + } + + ret = rte_eth_from_packet(name, &sockfd, numa_node, kvlist); + close(sockfd); /* no longer needed */ + +exit: + rte_kvargs_free(kvlist); + return ret; +} + +static int +rte_pmd_af_packet_devuninit(const char *name) +{ + struct rte_eth_dev *eth_dev = NULL; + struct pmd_internals *internals; + unsigned q; + + RTE_LOG(INFO, PMD, "Closing AF_PACKET ethdev on numa socket %u\n", + rte_socket_id()); + + if (name == NULL) + return -1; + + /* find the ethdev entry */ + eth_dev = rte_eth_dev_allocated(name); + if (eth_dev == NULL) + return -1; + + internals = eth_dev->data->dev_private; + for (q = 0; q < internals->nb_queues; q++) { + rte_free(internals->rx_queue[q].rd); + rte_free(internals->tx_queue[q].rd); + } + + rte_free(eth_dev->data->dev_private); + rte_free(eth_dev->data); + + rte_eth_dev_release_port(eth_dev); + + return 0; +} + +static struct rte_driver pmd_af_packet_drv = { + .name = "eth_af_packet", + .type = PMD_VDEV, + .init = rte_pmd_af_packet_devinit, + .uninit = rte_pmd_af_packet_devuninit, +}; + +PMD_REGISTER_DRIVER(pmd_af_packet_drv); diff --git a/drivers/net/af_packet/rte_pmd_af_packet_version.map b/drivers/net/af_packet/rte_pmd_af_packet_version.map new file mode 100644 index 00000000..ef353984 --- /dev/null +++ b/drivers/net/af_packet/rte_pmd_af_packet_version.map @@ -0,0 +1,4 @@ +DPDK_2.0 { + + local: *; +}; diff --git a/drivers/net/bnx2x/LICENSE.bnx2x_pmd b/drivers/net/bnx2x/LICENSE.bnx2x_pmd new file mode 100644 index 00000000..96c7c1e1 --- /dev/null +++ b/drivers/net/bnx2x/LICENSE.bnx2x_pmd @@ -0,0 +1,28 @@ +/* + * BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Broadcom Corporation nor the name of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written consent. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile new file mode 100644 index 00000000..6f1f385d --- /dev/null +++ b/drivers/net/bnx2x/Makefile @@ -0,0 +1,33 @@ +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_bnx2x.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -DZLIB_CONST +LDLIBS += -lz + +EXPORT_MAP := rte_pmd_bnx2x_version.map + +LIBABIVER := 1 + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x.c +SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x_stats.c +SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += ecore_sp.c +SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += elink.c +SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x_vfpf.c +SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_DEBUG_PERIODIC) += debug.c + +# this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += lib/librte_eal lib/librte_ether lib/librte_hash +DEPDIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += lib/librte_mempool lib/librte_mbuf + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c new file mode 100644 index 00000000..6edb2f95 --- /dev/null +++ b/drivers/net/bnx2x/bnx2x.c @@ -0,0 +1,11851 @@ +/*- + * Copyright (c) 2007-2013 Broadcom Corporation. + * + * Eric Davis <edavis@broadcom.com> + * David Christensen <davidch@broadcom.com> + * Gary Zambrano <zambrano@broadcom.com> + * + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.bnx2x_pmd for copyright and licensing details. + */ + +#define BNX2X_DRIVER_VERSION "1.78.18" + +#include "bnx2x.h" +#include "bnx2x_vfpf.h" +#include "ecore_sp.h" +#include "ecore_init.h" +#include "ecore_init_ops.h" + +#include "rte_version.h" +#include "rte_pci_dev_ids.h" + +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <zlib.h> + +#define BNX2X_PMD_VER_PREFIX "BNX2X PMD" +#define BNX2X_PMD_VERSION_MAJOR 1 +#define BNX2X_PMD_VERSION_MINOR 0 +#define BNX2X_PMD_VERSION_PATCH 0 + +static inline const char * +bnx2x_pmd_version(void) +{ + static char version[32]; + + snprintf(version, sizeof(version), "%s %s_%d.%d.%d", + BNX2X_PMD_VER_PREFIX, + BNX2X_DRIVER_VERSION, + BNX2X_PMD_VERSION_MAJOR, + BNX2X_PMD_VERSION_MINOR, + BNX2X_PMD_VERSION_PATCH); + + return version; +} + +static z_stream zlib_stream; + +#define EVL_VLID_MASK 0x0FFF + +#define BNX2X_DEF_SB_ATT_IDX 0x0001 +#define BNX2X_DEF_SB_IDX 0x0002 + +/* + * FLR Support - bnx2x_pf_flr_clnup() is called during nic_load in the per + * function HW initialization. 
+ */ +#define FLR_WAIT_USEC 10000 /* 10 msecs */ +#define FLR_WAIT_INTERVAL 50 /* usecs */ +#define FLR_POLL_CNT (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */ + +struct pbf_pN_buf_regs { + int pN; + uint32_t init_crd; + uint32_t crd; + uint32_t crd_freed; +}; + +struct pbf_pN_cmd_regs { + int pN; + uint32_t lines_occup; + uint32_t lines_freed; +}; + +/* resources needed for unloading a previously loaded device */ + +#define BNX2X_PREV_WAIT_NEEDED 1 +rte_spinlock_t bnx2x_prev_mtx; +struct bnx2x_prev_list_node { + LIST_ENTRY(bnx2x_prev_list_node) node; + uint8_t bus; + uint8_t slot; + uint8_t path; + uint8_t aer; + uint8_t undi; +}; + +static LIST_HEAD(, bnx2x_prev_list_node) bnx2x_prev_list + = LIST_HEAD_INITIALIZER(bnx2x_prev_list); + +static int load_count[2][3] = { { 0 } }; + /* per-path: 0-common, 1-port0, 2-port1 */ + +static void bnx2x_cmng_fns_init(struct bnx2x_softc *sc, uint8_t read_cfg, + uint8_t cmng_type); +static int bnx2x_get_cmng_fns_mode(struct bnx2x_softc *sc); +static void storm_memset_cmng(struct bnx2x_softc *sc, struct cmng_init *cmng, + uint8_t port); +static void bnx2x_set_reset_global(struct bnx2x_softc *sc); +static void bnx2x_set_reset_in_progress(struct bnx2x_softc *sc); +static uint8_t bnx2x_reset_is_done(struct bnx2x_softc *sc, int engine); +static uint8_t bnx2x_clear_pf_load(struct bnx2x_softc *sc); +static uint8_t bnx2x_chk_parity_attn(struct bnx2x_softc *sc, uint8_t * global, + uint8_t print); +static void bnx2x_int_disable(struct bnx2x_softc *sc); +static int bnx2x_release_leader_lock(struct bnx2x_softc *sc); +static void bnx2x_pf_disable(struct bnx2x_softc *sc); +static void bnx2x_update_rx_prod(struct bnx2x_softc *sc, + struct bnx2x_fastpath *fp, + uint16_t rx_bd_prod, uint16_t rx_cq_prod); +static void bnx2x_link_report(struct bnx2x_softc *sc); +void bnx2x_link_status_update(struct bnx2x_softc *sc); +static int bnx2x_alloc_mem(struct bnx2x_softc *sc); +static void bnx2x_free_mem(struct bnx2x_softc *sc); +static int bnx2x_alloc_fw_stats_mem(struct bnx2x_softc *sc); +static void bnx2x_free_fw_stats_mem(struct bnx2x_softc *sc); +static __attribute__ ((noinline)) +int bnx2x_nic_load(struct bnx2x_softc *sc); + +static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc); +static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp); +static void bnx2x_periodic_stop(struct bnx2x_softc *sc); +static void bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id, + uint8_t storm, uint16_t index, uint8_t op, + uint8_t update); + +int bnx2x_test_bit(int nr, volatile unsigned long *addr) +{ + int res; + + mb(); + res = ((*addr) & (1UL << nr)) != 0; + mb(); + return res; +} + +void bnx2x_set_bit(unsigned int nr, volatile unsigned long *addr) +{ + __sync_fetch_and_or(addr, (1UL << nr)); +} + +void bnx2x_clear_bit(int nr, volatile unsigned long *addr) +{ + __sync_fetch_and_and(addr, ~(1UL << nr)); +} + +int bnx2x_test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = (1UL << nr); + return __sync_fetch_and_and(addr, ~mask) & mask; +} + +int bnx2x_cmpxchg(volatile int *addr, int old, int new) +{ + return __sync_val_compare_and_swap(addr, old, new); +} + +int +bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, struct bnx2x_dma *dma, + const char *msg, uint32_t align) +{ + char mz_name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *z; + + dma->sc = sc; + if (IS_PF(sc)) + sprintf(mz_name, "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg, + rte_get_timer_cycles()); + else + sprintf(mz_name, "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg, + 
rte_get_timer_cycles()); + + /* Caller must take care that strlen(mz_name) < RTE_MEMZONE_NAMESIZE */ + z = rte_memzone_reserve_aligned(mz_name, (uint64_t) (size), + rte_lcore_to_socket_id(rte_lcore_id()), + 0, align); + if (z == NULL) { + PMD_DRV_LOG(ERR, "DMA alloc failed for %s", msg); + return -ENOMEM; + } + dma->paddr = (uint64_t) z->phys_addr; + dma->vaddr = z->addr; + + PMD_DRV_LOG(DEBUG, "%s: virt=%p phys=%" PRIx64, msg, dma->vaddr, dma->paddr); + + return 0; +} + +static int bnx2x_acquire_hw_lock(struct bnx2x_softc *sc, uint32_t resource) +{ + uint32_t lock_status; + uint32_t resource_bit = (1 << resource); + int func = SC_FUNC(sc); + uint32_t hw_lock_control_reg; + int cnt; + + PMD_INIT_FUNC_TRACE(); + + /* validate the resource is within range */ + if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { + PMD_DRV_LOG(NOTICE, + "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE", + resource); + return -1; + } + + if (func <= 5) { + hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8)); + } else { + hw_lock_control_reg = + (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8)); + } + + /* validate the resource is not already taken */ + lock_status = REG_RD(sc, hw_lock_control_reg); + if (lock_status & resource_bit) { + PMD_DRV_LOG(NOTICE, + "resource in use (status 0x%x bit 0x%x)", + lock_status, resource_bit); + return -1; + } + + /* try every 5ms for 5 seconds */ + for (cnt = 0; cnt < 1000; cnt++) { + REG_WR(sc, (hw_lock_control_reg + 4), resource_bit); + lock_status = REG_RD(sc, hw_lock_control_reg); + if (lock_status & resource_bit) { + return 0; + } + DELAY(5000); + } + + PMD_DRV_LOG(NOTICE, "Resource lock timeout!"); + return -1; +} + +static int bnx2x_release_hw_lock(struct bnx2x_softc *sc, uint32_t resource) +{ + uint32_t lock_status; + uint32_t resource_bit = (1 << resource); + int func = SC_FUNC(sc); + uint32_t hw_lock_control_reg; + + PMD_INIT_FUNC_TRACE(); + + /* validate the resource is within range */ + if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { + PMD_DRV_LOG(NOTICE, + "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE", + resource); + return -1; + } + + if (func <= 5) { + hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8)); + } else { + hw_lock_control_reg = + (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8)); + } + + /* validate the resource is currently taken */ + lock_status = REG_RD(sc, hw_lock_control_reg); + if (!(lock_status & resource_bit)) { + PMD_DRV_LOG(NOTICE, + "resource not in use (status 0x%x bit 0x%x)", + lock_status, resource_bit); + return -1; + } + + REG_WR(sc, hw_lock_control_reg, resource_bit); + return 0; +} + +/* copy command into DMAE command memory and set DMAE command Go */ +void bnx2x_post_dmae(struct bnx2x_softc *sc, struct dmae_command *dmae, int idx) +{ + uint32_t cmd_offset; + uint32_t i; + + cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_command) * idx)); + for (i = 0; i < ((sizeof(struct dmae_command) / 4)); i++) { + REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *) dmae) + i)); + } + + REG_WR(sc, dmae_reg_go_c[idx], 1); +} + +uint32_t bnx2x_dmae_opcode_add_comp(uint32_t opcode, uint8_t comp_type) +{ + return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) | + DMAE_COMMAND_C_TYPE_ENABLE); +} + +uint32_t bnx2x_dmae_opcode_clr_src_reset(uint32_t opcode) +{ + return opcode & ~DMAE_COMMAND_SRC_RESET; +} + +uint32_t +bnx2x_dmae_opcode(struct bnx2x_softc * sc, uint8_t src_type, uint8_t dst_type, + uint8_t with_comp, uint8_t comp_type) +{ + uint32_t opcode = 0; + + opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) | + (dst_type << 
DMAE_COMMAND_DST_SHIFT)); + + opcode |= (DMAE_COMMAND_SRC_RESET | DMAE_COMMAND_DST_RESET); + + opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); + + opcode |= ((SC_VN(sc) << DMAE_COMMAND_E1HVN_SHIFT) | + (SC_VN(sc) << DMAE_COMMAND_DST_VN_SHIFT)); + + opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT); + +#ifdef __BIG_ENDIAN + opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP; +#else + opcode |= DMAE_CMD_ENDIANITY_DW_SWAP; +#endif + + if (with_comp) { + opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type); + } + + return opcode; +} + +static void +bnx2x_prep_dmae_with_comp(struct bnx2x_softc *sc, struct dmae_command *dmae, + uint8_t src_type, uint8_t dst_type) +{ + memset(dmae, 0, sizeof(struct dmae_command)); + + /* set the opcode */ + dmae->opcode = bnx2x_dmae_opcode(sc, src_type, dst_type, + TRUE, DMAE_COMP_PCI); + + /* fill in the completion parameters */ + dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, wb_comp)); + dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, wb_comp)); + dmae->comp_val = DMAE_COMP_VAL; +} + +/* issue a DMAE command over the init channel and wait for completion */ +static int +bnx2x_issue_dmae_with_comp(struct bnx2x_softc *sc, struct dmae_command *dmae) +{ + uint32_t *wb_comp = BNX2X_SP(sc, wb_comp); + int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000; + + /* reset completion */ + *wb_comp = 0; + + /* post the command on the channel used for initializations */ + bnx2x_post_dmae(sc, dmae, INIT_DMAE_C(sc)); + + /* wait for completion */ + DELAY(500); + + while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { + if (!timeout || + (sc->recovery_state != BNX2X_RECOVERY_DONE && + sc->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { + PMD_DRV_LOG(INFO, "DMAE timeout!"); + return DMAE_TIMEOUT; + } + + timeout--; + DELAY(50); + } + + if (*wb_comp & DMAE_PCI_ERR_FLAG) { + PMD_DRV_LOG(INFO, "DMAE PCI error!"); + return DMAE_PCI_ERROR; + } + + return 0; +} + +void bnx2x_read_dmae(struct bnx2x_softc *sc, uint32_t src_addr, uint32_t len32) +{ + struct dmae_command dmae; + uint32_t *data; + uint32_t i; + int rc; + + if (!sc->dmae_ready) { + data = BNX2X_SP(sc, wb_data[0]); + + for (i = 0; i < len32; i++) { + data[i] = REG_RD(sc, (src_addr + (i * 4))); + } + + return; + } + + /* set opcode and fixed command fields */ + bnx2x_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI); + + /* fill in addresses and len */ + dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */ + dmae.src_addr_hi = 0; + dmae.dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, wb_data)); + dmae.dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, wb_data)); + dmae.len = len32; + + /* issue the command and wait for completion */ + if ((rc = bnx2x_issue_dmae_with_comp(sc, &dmae)) != 0) { + rte_panic("DMAE failed (%d)", rc); + }; +} + +void +bnx2x_write_dmae(struct bnx2x_softc *sc, phys_addr_t dma_addr, uint32_t dst_addr, + uint32_t len32) +{ + struct dmae_command dmae; + int rc; + + if (!sc->dmae_ready) { + ecore_init_str_wr(sc, dst_addr, BNX2X_SP(sc, wb_data[0]), len32); + return; + } + + /* set opcode and fixed command fields */ + bnx2x_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC); + + /* fill in addresses and len */ + dmae.src_addr_lo = U64_LO(dma_addr); + dmae.src_addr_hi = U64_HI(dma_addr); + dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */ + dmae.dst_addr_hi = 0; + dmae.len = len32; + + /* issue the command and wait for completion */ + if ((rc = bnx2x_issue_dmae_with_comp(sc, &dmae)) != 0) { + rte_panic("DMAE failed (%d)", rc); + } +} + +static 
void +bnx2x_write_dmae_phys_len(struct bnx2x_softc *sc, phys_addr_t phys_addr, + uint32_t addr, uint32_t len) +{ + uint32_t dmae_wr_max = DMAE_LEN32_WR_MAX(sc); + uint32_t offset = 0; + + while (len > dmae_wr_max) { + bnx2x_write_dmae(sc, (phys_addr + offset), /* src DMA address */ + (addr + offset), /* dst GRC address */ + dmae_wr_max); + offset += (dmae_wr_max * 4); + len -= dmae_wr_max; + } + + bnx2x_write_dmae(sc, (phys_addr + offset), /* src DMA address */ + (addr + offset), /* dst GRC address */ + len); +} + +void +bnx2x_set_ctx_validation(struct bnx2x_softc *sc, struct eth_context *cxt, + uint32_t cid) +{ + /* ustorm cxt validation */ + cxt->ustorm_ag_context.cdu_usage = + CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), + CDU_REGION_NUMBER_UCM_AG, + ETH_CONNECTION_TYPE); + /* xcontext validation */ + cxt->xstorm_ag_context.cdu_reserved = + CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), + CDU_REGION_NUMBER_XCM_AG, + ETH_CONNECTION_TYPE); +} + +static void +bnx2x_storm_memset_hc_timeout(struct bnx2x_softc *sc, uint8_t fw_sb_id, + uint8_t sb_index, uint8_t ticks) +{ + uint32_t addr = + (BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index)); + + REG_WR8(sc, addr, ticks); +} + +static void +bnx2x_storm_memset_hc_disable(struct bnx2x_softc *sc, uint16_t fw_sb_id, + uint8_t sb_index, uint8_t disable) +{ + uint32_t enable_flag = + (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); + uint32_t addr = + (BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index)); + uint8_t flags; + + /* clear and set */ + flags = REG_RD8(sc, addr); + flags &= ~HC_INDEX_DATA_HC_ENABLED; + flags |= enable_flag; + REG_WR8(sc, addr, flags); +} + +void +bnx2x_update_coalesce_sb_index(struct bnx2x_softc *sc, uint8_t fw_sb_id, + uint8_t sb_index, uint8_t disable, uint16_t usec) +{ + uint8_t ticks = (usec / 4); + + bnx2x_storm_memset_hc_timeout(sc, fw_sb_id, sb_index, ticks); + + disable = (disable) ? 1 : ((usec) ? 0 : 1); + bnx2x_storm_memset_hc_disable(sc, fw_sb_id, sb_index, disable); +} + +uint32_t elink_cb_reg_read(struct bnx2x_softc *sc, uint32_t reg_addr) +{ + return REG_RD(sc, reg_addr); +} + +void elink_cb_reg_write(struct bnx2x_softc *sc, uint32_t reg_addr, uint32_t val) +{ + REG_WR(sc, reg_addr, val); +} + +void +elink_cb_event_log(__rte_unused struct bnx2x_softc *sc, + __rte_unused const elink_log_id_t elink_log_id, ...) 
+{ + PMD_DRV_LOG(DEBUG, "ELINK EVENT LOG (%d)", elink_log_id); +} + +static int bnx2x_set_spio(struct bnx2x_softc *sc, int spio, uint32_t mode) +{ + uint32_t spio_reg; + + /* Only 2 SPIOs are configurable */ + if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { + PMD_DRV_LOG(NOTICE, "Invalid SPIO 0x%x", spio); + return -1; + } + + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); + + /* read SPIO and mask except the float bits */ + spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT); + + switch (mode) { + case MISC_SPIO_OUTPUT_LOW: + /* clear FLOAT and set CLR */ + spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); + spio_reg |= (spio << MISC_SPIO_CLR_POS); + break; + + case MISC_SPIO_OUTPUT_HIGH: + /* clear FLOAT and set SET */ + spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); + spio_reg |= (spio << MISC_SPIO_SET_POS); + break; + + case MISC_SPIO_INPUT_HI_Z: + /* set FLOAT */ + spio_reg |= (spio << MISC_SPIO_FLOAT_POS); + break; + + default: + break; + } + + REG_WR(sc, MISC_REG_SPIO, spio_reg); + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); + + return 0; +} + +static int bnx2x_gpio_read(struct bnx2x_softc *sc, int gpio_num, uint8_t port) +{ + /* The GPIO should be swapped if swap register is set and active */ + int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && + REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); + int gpio_shift = gpio_num; + if (gpio_port) + gpio_shift += MISC_REGISTERS_GPIO_PORT_SHIFT; + + uint32_t gpio_mask = (1 << gpio_shift); + uint32_t gpio_reg; + + if (gpio_num > MISC_REGISTERS_GPIO_3) { + PMD_DRV_LOG(NOTICE, "Invalid GPIO %d", gpio_num); + return -1; + } + + /* read GPIO value */ + gpio_reg = REG_RD(sc, MISC_REG_GPIO); + + /* get the requested pin value */ + return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0; +} + +static int +bnx2x_gpio_write(struct bnx2x_softc *sc, int gpio_num, uint32_t mode, uint8_t port) +{ + /* The GPIO should be swapped if swap register is set and active */ + int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && + REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); + int gpio_shift = gpio_num; + if (gpio_port) + gpio_shift += MISC_REGISTERS_GPIO_PORT_SHIFT; + + uint32_t gpio_mask = (1 << gpio_shift); + uint32_t gpio_reg; + + if (gpio_num > MISC_REGISTERS_GPIO_3) { + PMD_DRV_LOG(NOTICE, "Invalid GPIO %d", gpio_num); + return -1; + } + + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); + + /* read GPIO and mask except the float bits */ + gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); + + switch (mode) { + case MISC_REGISTERS_GPIO_OUTPUT_LOW: + /* clear FLOAT and set CLR */ + gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); + break; + + case MISC_REGISTERS_GPIO_OUTPUT_HIGH: + /* clear FLOAT and set SET */ + gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); + break; + + case MISC_REGISTERS_GPIO_INPUT_HI_Z: + /* set FLOAT */ + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); + break; + + default: + break; + } + + REG_WR(sc, MISC_REG_GPIO, gpio_reg); + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); + + return 0; +} + +static int +bnx2x_gpio_mult_write(struct bnx2x_softc *sc, uint8_t pins, uint32_t mode) +{ + uint32_t gpio_reg; + + /* any port swapping should be handled by caller */ + + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); + + /* read GPIO and mask except the float bits */ + gpio_reg = REG_RD(sc, MISC_REG_GPIO); + gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); + 
gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); + gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); + + switch (mode) { + case MISC_REGISTERS_GPIO_OUTPUT_LOW: + /* set CLR */ + gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); + break; + + case MISC_REGISTERS_GPIO_OUTPUT_HIGH: + /* set SET */ + gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); + break; + + case MISC_REGISTERS_GPIO_INPUT_HI_Z: + /* set FLOAT */ + gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); + break; + + default: + PMD_DRV_LOG(NOTICE, "Invalid GPIO mode assignment %d", mode); + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); + return -1; + } + + REG_WR(sc, MISC_REG_GPIO, gpio_reg); + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); + + return 0; +} + +static int +bnx2x_gpio_int_write(struct bnx2x_softc *sc, int gpio_num, uint32_t mode, + uint8_t port) +{ + /* The GPIO should be swapped if swap register is set and active */ + int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && + REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); + int gpio_shift = gpio_num; + if (gpio_port) + gpio_shift += MISC_REGISTERS_GPIO_PORT_SHIFT; + + uint32_t gpio_mask = (1 << gpio_shift); + uint32_t gpio_reg; + + if (gpio_num > MISC_REGISTERS_GPIO_3) { + PMD_DRV_LOG(NOTICE, "Invalid GPIO %d", gpio_num); + return -1; + } + + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); + + /* read GPIO int */ + gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT); + + switch (mode) { + case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: + /* clear SET and set CLR */ + gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); + break; + + case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: + /* clear CLR and set SET */ + gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); + break; + + default: + break; + } + + REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg); + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); + + return 0; +} + +uint32_t +elink_cb_gpio_read(struct bnx2x_softc * sc, uint16_t gpio_num, uint8_t port) +{ + return bnx2x_gpio_read(sc, gpio_num, port); +} + +uint8_t elink_cb_gpio_write(struct bnx2x_softc * sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */ + uint8_t port) +{ + return bnx2x_gpio_write(sc, gpio_num, mode, port); +} + +uint8_t +elink_cb_gpio_mult_write(struct bnx2x_softc * sc, uint8_t pins, + uint8_t mode /* 0=low 1=high */ ) +{ + return bnx2x_gpio_mult_write(sc, pins, mode); +} + +uint8_t elink_cb_gpio_int_write(struct bnx2x_softc * sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */ + uint8_t port) +{ + return bnx2x_gpio_int_write(sc, gpio_num, mode, port); +} + +void elink_cb_notify_link_changed(struct bnx2x_softc *sc) +{ + REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 + + (SC_FUNC(sc) * sizeof(uint32_t))), 1); +} + +/* send the MCP a request, block until there is a reply */ +uint32_t +elink_cb_fw_command(struct bnx2x_softc *sc, uint32_t command, uint32_t param) +{ + int mb_idx = SC_FW_MB_IDX(sc); + uint32_t seq; + uint32_t rc = 0; + uint32_t cnt = 1; + uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10; + + seq = ++sc->fw_seq; + SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param); + SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq)); + + PMD_DRV_LOG(DEBUG, + "wrote command 0x%08x to FW MB param 0x%08x", + (command | seq), param); + + /* Let the FW do it's magic. GIve it up to 5 seconds... 
*/ + do { + DELAY(delay * 1000); + rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header); + } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); + + /* is this a reply to our command? */ + if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { + rc &= FW_MSG_CODE_MASK; + } else { + /* Ruh-roh! */ + PMD_DRV_LOG(NOTICE, "FW failed to respond!"); + rc = 0; + } + + return rc; +} + +static uint32_t +bnx2x_fw_command(struct bnx2x_softc *sc, uint32_t command, uint32_t param) +{ + return elink_cb_fw_command(sc, command, param); +} + +static void +__storm_memset_dma_mapping(struct bnx2x_softc *sc, uint32_t addr, + phys_addr_t mapping) +{ + REG_WR(sc, addr, U64_LO(mapping)); + REG_WR(sc, (addr + 4), U64_HI(mapping)); +} + +static void +storm_memset_spq_addr(struct bnx2x_softc *sc, phys_addr_t mapping, + uint16_t abs_fid) +{ + uint32_t addr = (XSEM_REG_FAST_MEMORY + + XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid)); + __storm_memset_dma_mapping(sc, addr, mapping); +} + +static void +storm_memset_vf_to_pf(struct bnx2x_softc *sc, uint16_t abs_fid, uint16_t pf_id) +{ + REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), + pf_id); + REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), + pf_id); + REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), + pf_id); + REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), + pf_id); +} + +static void +storm_memset_func_en(struct bnx2x_softc *sc, uint16_t abs_fid, uint8_t enable) +{ + REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), + enable); + REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), + enable); + REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), + enable); + REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), + enable); +} + +static void +storm_memset_eq_data(struct bnx2x_softc *sc, struct event_ring_data *eq_data, + uint16_t pfid) +{ + uint32_t addr; + size_t size; + + addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid)); + size = sizeof(struct event_ring_data); + ecore_storm_memset_struct(sc, addr, size, (uint32_t *) eq_data); +} + +static void +storm_memset_eq_prod(struct bnx2x_softc *sc, uint16_t eq_prod, uint16_t pfid) +{ + uint32_t addr = (BAR_CSTRORM_INTMEM + + CSTORM_EVENT_RING_PROD_OFFSET(pfid)); + REG_WR16(sc, addr, eq_prod); +} + +/* + * Post a slowpath command. + * + * A slowpath command is used to propogate a configuration change through + * the controller in a controlled manner, allowing each STORM processor and + * other H/W blocks to phase in the change. The commands sent on the + * slowpath are referred to as ramrods. Depending on the ramrod used the + * completion of the ramrod will occur in different ways. Here's a + * breakdown of ramrods and how they complete: + * + * RAMROD_CMD_ID_ETH_PORT_SETUP + * Used to setup the leading connection on a port. Completes on the + * Receive Completion Queue (RCQ) of that port (typically fp[0]). + * + * RAMROD_CMD_ID_ETH_CLIENT_SETUP + * Used to setup an additional connection on a port. Completes on the + * RCQ of the multi-queue/RSS connection being initialized. + * + * RAMROD_CMD_ID_ETH_STAT_QUERY + * Used to force the storm processors to update the statistics database + * in host memory. This ramrod is send on the leading connection CID and + * completes as an index increment of the CSTORM on the default status + * block. + * + * RAMROD_CMD_ID_ETH_UPDATE + * Used to update the state of the leading connection, usually to udpate + * the RSS indirection table. 
Completes on the RCQ of the leading + * connection. (Not currently used under FreeBSD until OS support becomes + * available.) + * + * RAMROD_CMD_ID_ETH_HALT + * Used when tearing down a connection prior to driver unload. Completes + * on the RCQ of the multi-queue/RSS connection being torn down. Don't + * use this on the leading connection. + * + * RAMROD_CMD_ID_ETH_SET_MAC + * Sets the Unicast/Broadcast/Multicast used by the port. Completes on + * the RCQ of the leading connection. + * + * RAMROD_CMD_ID_ETH_CFC_DEL + * Used when tearing down a conneciton prior to driver unload. Completes + * on the RCQ of the leading connection (since the current connection + * has been completely removed from controller memory). + * + * RAMROD_CMD_ID_ETH_PORT_DEL + * Used to tear down the leading connection prior to driver unload, + * typically fp[0]. Completes as an index increment of the CSTORM on the + * default status block. + * + * RAMROD_CMD_ID_ETH_FORWARD_SETUP + * Used for connection offload. Completes on the RCQ of the multi-queue + * RSS connection that is being offloaded. (Not currently used under + * FreeBSD.) + * + * There can only be one command pending per function. + * + * Returns: + * 0 = Success, !0 = Failure. + */ + +/* must be called under the spq lock */ +static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x_softc *sc) +{ + struct eth_spe *next_spe = sc->spq_prod_bd; + + if (sc->spq_prod_bd == sc->spq_last_bd) { + /* wrap back to the first eth_spq */ + sc->spq_prod_bd = sc->spq; + sc->spq_prod_idx = 0; + } else { + sc->spq_prod_bd++; + sc->spq_prod_idx++; + } + + return next_spe; +} + +/* must be called under the spq lock */ +static void bnx2x_sp_prod_update(struct bnx2x_softc *sc) +{ + int func = SC_FUNC(sc); + + /* + * Make sure that BD data is updated before writing the producer. + * BD data is written to the memory, the producer is read from the + * memory, thus we need a full memory barrier to ensure the ordering. + */ + mb(); + + REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)), + sc->spq_prod_idx); + + mb(); +} + +/** + * bnx2x_is_contextless_ramrod - check if the current command ends on EQ + * + * @cmd: command to check + * @cmd_type: command type + */ +static int bnx2x_is_contextless_ramrod(int cmd, int cmd_type) +{ + if ((cmd_type == NONE_CONNECTION_TYPE) || + (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || + (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || + (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || + (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || + (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || + (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) { + return TRUE; + } else { + return FALSE; + } +} + +/** + * bnx2x_sp_post - place a single command on an SP ring + * + * @sc: driver handle + * @command: command to place (e.g. SETUP, FILTER_RULES, etc.) + * @cid: SW CID the command is related to + * @data_hi: command private data address (high 32 bits) + * @data_lo: command private data address (low 32 bits) + * @cmd_type: command type (e.g. NONE, ETH) + * + * SP data is handled as if it's always an address pair, thus data fields are + * not swapped to little endian in upper functions. Instead this function swaps + * data as if it's two uint32 fields. 
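bnx2x_sp_get_next() above hands out slowpath entries from a fixed-size ring, wrapping the producer back to the first element once the last BD has been used, and bnx2x_sp_prod_update() then publishes the producer index only after a full memory barrier. A reduced sketch of that producer handling; the struct layout and element size here are assumptions for illustration, not the driver's types:

#include <stddef.h>
#include <stdint.h>

struct spq_ring {
        void     *base;        /* first element                    */
        void     *last;        /* last usable element              */
        void     *prod_bd;     /* next element to hand out         */
        uint16_t  prod_idx;    /* producer index written to the HW */
        size_t    elem_size;
};

/* Return the current slot and advance the producer, wrapping at the end. */
static void *spq_get_next(struct spq_ring *r)
{
        void *cur = r->prod_bd;

        if (r->prod_bd == r->last) {
                r->prod_bd = r->base;   /* wrap back to the first entry */
                r->prod_idx = 0;
        } else {
                r->prod_bd = (char *)r->prod_bd + r->elem_size;
                r->prod_idx++;
        }
        return cur;
}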
+ */ +int +bnx2x_sp_post(struct bnx2x_softc *sc, int command, int cid, uint32_t data_hi, + uint32_t data_lo, int cmd_type) +{ + struct eth_spe *spe; + uint16_t type; + int common; + + common = bnx2x_is_contextless_ramrod(command, cmd_type); + + if (common) { + if (!atomic_load_acq_long(&sc->eq_spq_left)) { + PMD_DRV_LOG(INFO, "EQ ring is full!"); + return -1; + } + } else { + if (!atomic_load_acq_long(&sc->cq_spq_left)) { + PMD_DRV_LOG(INFO, "SPQ ring is full!"); + return -1; + } + } + + spe = bnx2x_sp_get_next(sc); + + /* CID needs port number to be encoded int it */ + spe->hdr.conn_and_cmd_data = + htole32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(sc, cid)); + + type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; + + /* TBD: Check if it works for VFs */ + type |= ((SC_FUNC(sc) << SPE_HDR_FUNCTION_ID_SHIFT) & + SPE_HDR_FUNCTION_ID); + + spe->hdr.type = htole16(type); + + spe->data.update_data_addr.hi = htole32(data_hi); + spe->data.update_data_addr.lo = htole32(data_lo); + + /* + * It's ok if the actual decrement is issued towards the memory + * somewhere between the lock and unlock. Thus no more explict + * memory barrier is needed. + */ + if (common) { + atomic_subtract_acq_long(&sc->eq_spq_left, 1); + } else { + atomic_subtract_acq_long(&sc->cq_spq_left, 1); + } + + PMD_DRV_LOG(DEBUG, + "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x" + "data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)", + sc->spq_prod_idx, + (uint32_t) U64_HI(sc->spq_dma.paddr), + (uint32_t) (U64_LO(sc->spq_dma.paddr) + + (uint8_t *) sc->spq_prod_bd - + (uint8_t *) sc->spq), command, common, + HW_CID(sc, cid), data_hi, data_lo, type, + atomic_load_acq_long(&sc->cq_spq_left), + atomic_load_acq_long(&sc->eq_spq_left)); + + bnx2x_sp_prod_update(sc); + + return 0; +} + +static void bnx2x_drv_pulse(struct bnx2x_softc *sc) +{ + SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb, + sc->fw_drv_pulse_wr_seq); +} + +static int bnx2x_tx_queue_has_work(const struct bnx2x_fastpath *fp) +{ + uint16_t hw_cons; + struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index]; + + if (unlikely(!txq)) { + PMD_TX_LOG(ERR, "ERROR: TX queue is NULL"); + return 0; + } + + mb(); /* status block fields can change */ + hw_cons = le16toh(*fp->tx_cons_sb); + return hw_cons != txq->tx_pkt_head; +} + +static uint8_t bnx2x_has_tx_work(struct bnx2x_fastpath *fp) +{ + /* expand this for multi-cos if ever supported */ + return bnx2x_tx_queue_has_work(fp); +} + +static int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) +{ + uint16_t rx_cq_cons_sb; + struct bnx2x_rx_queue *rxq; + rxq = fp->sc->rx_queues[fp->index]; + if (unlikely(!rxq)) { + PMD_RX_LOG(ERR, "ERROR: RX queue is NULL"); + return 0; + } + + mb(); /* status block fields can change */ + rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb); + if (unlikely((rx_cq_cons_sb & MAX_RCQ_ENTRIES(rxq)) == + MAX_RCQ_ENTRIES(rxq))) + rx_cq_cons_sb++; + return rxq->rx_cq_head != rx_cq_cons_sb; +} + +static void +bnx2x_sp_event(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, + union eth_rx_cqe *rr_cqe) +{ +#ifdef RTE_LIBRTE_BNX2X_DEBUG + int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); +#endif + int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); + enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX; + struct ecore_queue_sp_obj *q_obj = &BNX2X_SP_OBJ(sc, fp).q_obj; + + PMD_DRV_LOG(DEBUG, + "fp=%d cid=%d got ramrod #%d state is %x type is %d", + fp->index, cid, command, sc->state, + rr_cqe->ramrod_cqe.ramrod_type); + + switch (command) { + case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): + 
PMD_DRV_LOG(DEBUG, "got UPDATE ramrod. CID %d", cid); + drv_cmd = ECORE_Q_CMD_UPDATE; + break; + + case (RAMROD_CMD_ID_ETH_CLIENT_SETUP): + PMD_DRV_LOG(DEBUG, "got MULTI[%d] setup ramrod", cid); + drv_cmd = ECORE_Q_CMD_SETUP; + break; + + case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP): + PMD_DRV_LOG(DEBUG, "got MULTI[%d] tx-only setup ramrod", cid); + drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY; + break; + + case (RAMROD_CMD_ID_ETH_HALT): + PMD_DRV_LOG(DEBUG, "got MULTI[%d] halt ramrod", cid); + drv_cmd = ECORE_Q_CMD_HALT; + break; + + case (RAMROD_CMD_ID_ETH_TERMINATE): + PMD_DRV_LOG(DEBUG, "got MULTI[%d] teminate ramrod", cid); + drv_cmd = ECORE_Q_CMD_TERMINATE; + break; + + case (RAMROD_CMD_ID_ETH_EMPTY): + PMD_DRV_LOG(DEBUG, "got MULTI[%d] empty ramrod", cid); + drv_cmd = ECORE_Q_CMD_EMPTY; + break; + + default: + PMD_DRV_LOG(DEBUG, + "ERROR: unexpected MC reply (%d)" + "on fp[%d]", command, fp->index); + return; + } + + if ((drv_cmd != ECORE_Q_CMD_MAX) && + q_obj->complete_cmd(sc, q_obj, drv_cmd)) { + /* + * q_obj->complete_cmd() failure means that this was + * an unexpected completion. + * + * In this case we don't want to increase the sc->spq_left + * because apparently we haven't sent this command the first + * place. + */ + // rte_panic("Unexpected SP completion"); + return; + } + + atomic_add_acq_long(&sc->cq_spq_left, 1); + + PMD_DRV_LOG(DEBUG, "sc->cq_spq_left 0x%lx", + atomic_load_acq_long(&sc->cq_spq_left)); +} + +static uint8_t bnx2x_rxeof(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp) +{ + struct bnx2x_rx_queue *rxq; + uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; + uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod; + + rxq = sc->rx_queues[fp->index]; + if (!rxq) { + PMD_RX_LOG(ERR, "RX queue %d is NULL", fp->index); + return 0; + } + + /* CQ "next element" is of the size of the regular element */ + hw_cq_cons = le16toh(*fp->rx_cq_cons_sb); + if (unlikely((hw_cq_cons & USABLE_RCQ_ENTRIES_PER_PAGE) == + USABLE_RCQ_ENTRIES_PER_PAGE)) { + hw_cq_cons++; + } + + bd_cons = rxq->rx_bd_head; + bd_prod = rxq->rx_bd_tail; + bd_prod_fw = bd_prod; + sw_cq_cons = rxq->rx_cq_head; + sw_cq_prod = rxq->rx_cq_tail; + + /* + * Memory barrier necessary as speculative reads of the rx + * buffer can be ahead of the index in the status block + */ + rmb(); + + while (sw_cq_cons != hw_cq_cons) { + union eth_rx_cqe *cqe; + struct eth_fast_path_rx_cqe *cqe_fp; + uint8_t cqe_fp_flags; + enum eth_rx_cqe_type cqe_fp_type; + + comp_ring_cons = RCQ_ENTRY(sw_cq_cons, rxq); + bd_prod = RX_BD(bd_prod, rxq); + bd_cons = RX_BD(bd_cons, rxq); + + cqe = &rxq->cq_ring[comp_ring_cons]; + cqe_fp = &cqe->fast_path_cqe; + cqe_fp_flags = cqe_fp->type_error_flags; + cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; + + /* is this a slowpath msg? */ + if (CQE_TYPE_SLOW(cqe_fp_type)) { + bnx2x_sp_event(sc, fp, cqe); + goto next_cqe; + } + + /* is this an error packet? 
*/ + if (unlikely(cqe_fp_flags & + ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) { + PMD_RX_LOG(DEBUG, "flags 0x%x rx packet %u", + cqe_fp_flags, sw_cq_cons); + goto next_rx; + } + + PMD_RX_LOG(DEBUG, "Dropping fastpath called from attn poller!"); + +next_rx: + bd_cons = NEXT_RX_BD(bd_cons); + bd_prod = NEXT_RX_BD(bd_prod); + bd_prod_fw = NEXT_RX_BD(bd_prod_fw); + +next_cqe: + sw_cq_prod = NEXT_RCQ_IDX(sw_cq_prod); + sw_cq_cons = NEXT_RCQ_IDX(sw_cq_cons); + + } /* while work to do */ + + rxq->rx_bd_head = bd_cons; + rxq->rx_bd_tail = bd_prod_fw; + rxq->rx_cq_head = sw_cq_cons; + rxq->rx_cq_tail = sw_cq_prod; + + /* Update producers */ + bnx2x_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod); + + return sw_cq_cons != hw_cq_cons; +} + +static uint16_t +bnx2x_free_tx_pkt(__rte_unused struct bnx2x_fastpath *fp, struct bnx2x_tx_queue *txq, + uint16_t pkt_idx, uint16_t bd_idx) +{ + struct eth_tx_start_bd *tx_start_bd = + &txq->tx_ring[TX_BD(bd_idx, txq)].start_bd; + uint16_t nbd = rte_le_to_cpu_16(tx_start_bd->nbd); + struct rte_mbuf *tx_mbuf = txq->sw_ring[TX_BD(pkt_idx, txq)]; + + if (likely(tx_mbuf != NULL)) { + rte_pktmbuf_free(tx_mbuf); + } else { + PMD_RX_LOG(ERR, "fp[%02d] lost mbuf %lu", + fp->index, (unsigned long)TX_BD(pkt_idx, txq)); + } + + txq->sw_ring[TX_BD(pkt_idx, txq)] = NULL; + txq->nb_tx_avail += nbd; + + while (nbd--) + bd_idx = NEXT_TX_BD(bd_idx); + + return bd_idx; +} + +/* processes transmit completions */ +uint8_t bnx2x_txeof(__rte_unused struct bnx2x_softc * sc, struct bnx2x_fastpath * fp) +{ + uint16_t bd_cons, hw_cons, sw_cons; + __rte_unused uint16_t tx_bd_avail; + + struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index]; + + if (unlikely(!txq)) { + PMD_TX_LOG(ERR, "ERROR: TX queue is NULL"); + return 0; + } + + bd_cons = txq->tx_bd_head; + hw_cons = rte_le_to_cpu_16(*fp->tx_cons_sb); + sw_cons = txq->tx_pkt_head; + + while (sw_cons != hw_cons) { + bd_cons = bnx2x_free_tx_pkt(fp, txq, sw_cons, bd_cons); + sw_cons++; + } + + txq->tx_pkt_head = sw_cons; + txq->tx_bd_head = bd_cons; + + tx_bd_avail = txq->nb_tx_avail; + + PMD_TX_LOG(DEBUG, "fp[%02d] avail=%u cons_sb=%u, " + "pkt_head=%u pkt_tail=%u bd_head=%u bd_tail=%u", + fp->index, tx_bd_avail, hw_cons, + txq->tx_pkt_head, txq->tx_pkt_tail, + txq->tx_bd_head, txq->tx_bd_tail); + return TRUE; +} + +static void bnx2x_drain_tx_queues(struct bnx2x_softc *sc) +{ + struct bnx2x_fastpath *fp; + int i, count; + + /* wait until all TX fastpath tasks have completed */ + for (i = 0; i < sc->num_queues; i++) { + fp = &sc->fp[i]; + + count = 1000; + + while (bnx2x_has_tx_work(fp)) { + bnx2x_txeof(sc, fp); + + if (count == 0) { + PMD_TX_LOG(ERR, + "Timeout waiting for fp[%d] " + "transmits to complete!", i); + rte_panic("tx drain failure"); + return; + } + + count--; + DELAY(1000); + rmb(); + } + } + + return; +} + +static int +bnx2x_del_all_macs(struct bnx2x_softc *sc, struct ecore_vlan_mac_obj *mac_obj, + int mac_type, uint8_t wait_for_comp) +{ + unsigned long ramrod_flags = 0, vlan_mac_flags = 0; + int rc; + + /* wait for completion of requested */ + if (wait_for_comp) { + bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + } + + /* Set the mac type of addresses we want to clear */ + bnx2x_set_bit(mac_type, &vlan_mac_flags); + + rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); + if (rc < 0) + PMD_DRV_LOG(ERR, "Failed to delete MACs (%d)", rc); + + return rc; +} + +int +bnx2x_fill_accept_flags(struct bnx2x_softc *sc, uint32_t rx_mode, + unsigned long *rx_accept_flags, + unsigned long *tx_accept_flags) +{ + /* 
Clear the flags first */ + *rx_accept_flags = 0; + *tx_accept_flags = 0; + + switch (rx_mode) { + case BNX2X_RX_MODE_NONE: + /* + * 'drop all' supersedes any accept flags that may have been + * passed to the function. + */ + break; + + case BNX2X_RX_MODE_NORMAL: + bnx2x_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); + + /* internal switching mode */ + bnx2x_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); + + break; + + case BNX2X_RX_MODE_ALLMULTI: + bnx2x_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); + + /* internal switching mode */ + bnx2x_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); + + break; + + case BNX2X_RX_MODE_PROMISC: + /* + * According to deffinition of SI mode, iface in promisc mode + * should receive matched and unmatched (in resolution of port) + * unicast packets. + */ + bnx2x_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); + + /* internal switching mode */ + bnx2x_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); + + if (IS_MF_SI(sc)) { + bnx2x_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags); + } else { + bnx2x_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); + } + + break; + + default: + PMD_RX_LOG(ERR, "Unknown rx_mode (%d)", rx_mode); + return -1; + } + + /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ + if (rx_mode != BNX2X_RX_MODE_NONE) { + bnx2x_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags); + bnx2x_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags); + } + + return 0; +} + +static int +bnx2x_set_q_rx_mode(struct bnx2x_softc *sc, uint8_t cl_id, + unsigned long rx_mode_flags, + unsigned long rx_accept_flags, + unsigned long tx_accept_flags, unsigned long ramrod_flags) +{ + struct ecore_rx_mode_ramrod_params ramrod_param; + int rc; + + memset(&ramrod_param, 0, sizeof(ramrod_param)); + + /* Prepare ramrod parameters */ + ramrod_param.cid = 0; + ramrod_param.cl_id = cl_id; + ramrod_param.rx_mode_obj = &sc->rx_mode_obj; + ramrod_param.func_id = SC_FUNC(sc); + + ramrod_param.pstate = &sc->sp_state; + ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING; + + ramrod_param.rdata = BNX2X_SP(sc, rx_mode_rdata); + ramrod_param.rdata_mapping = + (phys_addr_t)BNX2X_SP_MAPPING(sc, rx_mode_rdata), + bnx2x_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); + + ramrod_param.ramrod_flags = ramrod_flags; + ramrod_param.rx_mode_flags = rx_mode_flags; + + ramrod_param.rx_accept_flags = rx_accept_flags; + ramrod_param.tx_accept_flags = tx_accept_flags; + + rc = ecore_config_rx_mode(sc, &ramrod_param); + if (rc < 0) { + PMD_RX_LOG(ERR, "Set rx_mode %d failed", sc->rx_mode); + return rc; + } + + return 0; +} + +int bnx2x_set_storm_rx_mode(struct bnx2x_softc *sc) +{ + unsigned long rx_mode_flags = 0, ramrod_flags = 0; + unsigned long rx_accept_flags = 0, tx_accept_flags = 0; + int rc; + + rc = bnx2x_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags, + &tx_accept_flags); + 
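bnx2x_fill_accept_flags() above translates the PMD-level rx_mode into separate RX and TX accept masks; for example, BNX2X_RX_MODE_ALLMULTI yields unicast, all-multicast and broadcast, plus ECORE_ACCEPT_ANY_VLAN since VLAN filtering is not enabled. A hedged usage sketch, not an excerpt from the driver, showing how the resulting masks could be inspected with the helpers already present in this file (error handling elided):

/* Illustrative only. */
unsigned long rx_flags = 0, tx_flags = 0;

if (bnx2x_fill_accept_flags(sc, BNX2X_RX_MODE_ALLMULTI,
                            &rx_flags, &tx_flags) == 0) {
        /* Expect ECORE_ACCEPT_UNICAST, ECORE_ACCEPT_ALL_MULTICAST,
         * ECORE_ACCEPT_BROADCAST and ECORE_ACCEPT_ANY_VLAN in rx_flags. */
        if (bnx2x_test_bit(ECORE_ACCEPT_ALL_MULTICAST, &rx_flags))
                PMD_DRV_LOG(DEBUG, "all-multicast accepted");
}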
if (rc) { + return rc; + } + + bnx2x_set_bit(RAMROD_RX, &ramrod_flags); + bnx2x_set_bit(RAMROD_TX, &ramrod_flags); + bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + + return bnx2x_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags, + rx_accept_flags, tx_accept_flags, + ramrod_flags); +} + +/* returns the "mcp load_code" according to global load_count array */ +static int bnx2x_nic_load_no_mcp(struct bnx2x_softc *sc) +{ + int path = SC_PATH(sc); + int port = SC_PORT(sc); + + PMD_DRV_LOG(INFO, "NO MCP - load counts[%d] %d, %d, %d", + path, load_count[path][0], load_count[path][1], + load_count[path][2]); + + load_count[path][0]++; + load_count[path][1 + port]++; + PMD_DRV_LOG(INFO, "NO MCP - new load counts[%d] %d, %d, %d", + path, load_count[path][0], load_count[path][1], + load_count[path][2]); + if (load_count[path][0] == 1) + return FW_MSG_CODE_DRV_LOAD_COMMON; + else if (load_count[path][1 + port] == 1) + return FW_MSG_CODE_DRV_LOAD_PORT; + else + return FW_MSG_CODE_DRV_LOAD_FUNCTION; +} + +/* returns the "mcp load_code" according to global load_count array */ +static int bnx2x_nic_unload_no_mcp(struct bnx2x_softc *sc) +{ + int port = SC_PORT(sc); + int path = SC_PATH(sc); + + PMD_DRV_LOG(INFO, "NO MCP - load counts[%d] %d, %d, %d", + path, load_count[path][0], load_count[path][1], + load_count[path][2]); + load_count[path][0]--; + load_count[path][1 + port]--; + PMD_DRV_LOG(INFO, "NO MCP - new load counts[%d] %d, %d, %d", + path, load_count[path][0], load_count[path][1], + load_count[path][2]); + if (load_count[path][0] == 0) { + return FW_MSG_CODE_DRV_UNLOAD_COMMON; + } else if (load_count[path][1 + port] == 0) { + return FW_MSG_CODE_DRV_UNLOAD_PORT; + } else { + return FW_MSG_CODE_DRV_UNLOAD_FUNCTION; + } +} + +/* request unload mode from the MCP: COMMON, PORT or FUNCTION */ +static uint32_t bnx2x_send_unload_req(struct bnx2x_softc *sc, int unload_mode) +{ + uint32_t reset_code = 0; + + /* Select the UNLOAD request mode */ + if (unload_mode == UNLOAD_NORMAL) { + reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; + } else { + reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; + } + + /* Send the request to the MCP */ + if (!BNX2X_NOMCP(sc)) { + reset_code = bnx2x_fw_command(sc, reset_code, 0); + } else { + reset_code = bnx2x_nic_unload_no_mcp(sc); + } + + return reset_code; +} + +/* send UNLOAD_DONE command to the MCP */ +static void bnx2x_send_unload_done(struct bnx2x_softc *sc, uint8_t keep_link) +{ + uint32_t reset_param = + keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; + + /* Report UNLOAD_DONE to MCP */ + if (!BNX2X_NOMCP(sc)) { + bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param); + } +} + +static int bnx2x_func_wait_started(struct bnx2x_softc *sc) +{ + int tout = 50; + + if (!sc->port.pmf) { + return 0; + } + + /* + * (assumption: No Attention from MCP at this stage) + * PMF probably in the middle of TX disable/enable transaction + * 1. Sync IRS for default SB + * 2. Sync SP queue - this guarantees us that attention handling started + * 3. Wait, that TX disable/enable transaction completes + * + * 1+2 guarantee that if DCBX attention was scheduled it already changed + * pending bit of transaction from STARTED-->TX_STOPPED, if we already + * received completion for the transaction the state is TX_STOPPED. + * State will return to STARTED after completion of TX_STOPPED-->STARTED + * transaction. 
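When no MCP is present, bnx2x_nic_load_no_mcp() above derives the load code purely from the global load_count bookkeeping: the first function on a path initializes the common blocks, the first function on a port initializes the port, and every other function only initializes itself. A worked sketch of that decision; the helper name is hypothetical and the FW_MSG_CODE constants come from the firmware interface headers used above:

/* path_cnt = functions on this path, port_cnt = functions on this port;
 * both are the counts *after* this function registered itself. */
static int no_mcp_load_code(unsigned int path_cnt, unsigned int port_cnt)
{
        if (path_cnt == 1)
                return FW_MSG_CODE_DRV_LOAD_COMMON;    /* first on the path */
        else if (port_cnt == 1)
                return FW_MSG_CODE_DRV_LOAD_PORT;      /* first on the port */
        else
                return FW_MSG_CODE_DRV_LOAD_FUNCTION;  /* everything else   */
}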
+ */ + + while (ecore_func_get_state(sc, &sc->func_obj) != + ECORE_F_STATE_STARTED && tout--) { + DELAY(20000); + } + + if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) { + /* + * Failed to complete the transaction in a "good way" + * Force both transactions with CLR bit. + */ + struct ecore_func_state_params func_params = { NULL }; + + PMD_DRV_LOG(NOTICE, "Unexpected function state! " + "Forcing STARTED-->TX_STOPPED-->STARTED"); + + func_params.f_obj = &sc->func_obj; + bnx2x_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); + + /* STARTED-->TX_STOPPED */ + func_params.cmd = ECORE_F_CMD_TX_STOP; + ecore_func_state_change(sc, &func_params); + + /* TX_STOPPED-->STARTED */ + func_params.cmd = ECORE_F_CMD_TX_START; + return ecore_func_state_change(sc, &func_params); + } + + return 0; +} + +static int bnx2x_stop_queue(struct bnx2x_softc *sc, int index) +{ + struct bnx2x_fastpath *fp = &sc->fp[index]; + struct ecore_queue_state_params q_params = { NULL }; + int rc; + + PMD_DRV_LOG(DEBUG, "stopping queue %d cid %d", index, fp->index); + + q_params.q_obj = &sc->sp_objs[fp->index].q_obj; + /* We want to wait for completion in this context */ + bnx2x_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + + /* Stop the primary connection: */ + + /* ...halt the connection */ + q_params.cmd = ECORE_Q_CMD_HALT; + rc = ecore_queue_state_change(sc, &q_params); + if (rc) { + return rc; + } + + /* ...terminate the connection */ + q_params.cmd = ECORE_Q_CMD_TERMINATE; + memset(&q_params.params.terminate, 0, + sizeof(q_params.params.terminate)); + q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; + rc = ecore_queue_state_change(sc, &q_params); + if (rc) { + return rc; + } + + /* ...delete cfc entry */ + q_params.cmd = ECORE_Q_CMD_CFC_DEL; + memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del)); + q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; + return ecore_queue_state_change(sc, &q_params); +} + +/* wait for the outstanding SP commands */ +static uint8_t bnx2x_wait_sp_comp(struct bnx2x_softc *sc, unsigned long mask) +{ + unsigned long tmp; + int tout = 5000; /* wait for 5 secs tops */ + + while (tout--) { + mb(); + if (!(atomic_load_acq_long(&sc->sp_state) & mask)) { + return TRUE; + } + + DELAY(1000); + } + + mb(); + + tmp = atomic_load_acq_long(&sc->sp_state); + if (tmp & mask) { + PMD_DRV_LOG(INFO, "Filtering completion timed out: " + "sp_state 0x%lx, mask 0x%lx", tmp, mask); + return FALSE; + } + + return FALSE; +} + +static int bnx2x_func_stop(struct bnx2x_softc *sc) +{ + struct ecore_func_state_params func_params = { NULL }; + int rc; + + /* prepare parameters for function state transitions */ + bnx2x_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + func_params.f_obj = &sc->func_obj; + func_params.cmd = ECORE_F_CMD_STOP; + + /* + * Try to stop the function the 'good way'. If it fails (in case + * of a parity error during bnx2x_chip_cleanup()) and we are + * not in a debug mode, perform a state transaction in order to + * enable further HW_RESET transaction. + */ + rc = ecore_func_state_change(sc, &func_params); + if (rc) { + PMD_DRV_LOG(NOTICE, "FUNC_STOP ramrod failed. 
" + "Running a dry transaction"); + bnx2x_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); + return ecore_func_state_change(sc, &func_params); + } + + return 0; +} + +static int bnx2x_reset_hw(struct bnx2x_softc *sc, uint32_t load_code) +{ + struct ecore_func_state_params func_params = { NULL }; + + /* Prepare parameters for function state transitions */ + bnx2x_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + + func_params.f_obj = &sc->func_obj; + func_params.cmd = ECORE_F_CMD_HW_RESET; + + func_params.params.hw_init.load_phase = load_code; + + return ecore_func_state_change(sc, &func_params); +} + +static void bnx2x_int_disable_sync(struct bnx2x_softc *sc, int disable_hw) +{ + if (disable_hw) { + /* prevent the HW from sending interrupts */ + bnx2x_int_disable(sc); + } +} + +static void +bnx2x_chip_cleanup(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link) +{ + int port = SC_PORT(sc); + struct ecore_mcast_ramrod_params rparam = { NULL }; + uint32_t reset_code; + int i, rc = 0; + + bnx2x_drain_tx_queues(sc); + + /* give HW time to discard old tx messages */ + DELAY(1000); + + /* Clean all ETH MACs */ + rc = bnx2x_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, + FALSE); + if (rc < 0) { + PMD_DRV_LOG(NOTICE, "Failed to delete all ETH MACs (%d)", rc); + } + + /* Clean up UC list */ + rc = bnx2x_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, + TRUE); + if (rc < 0) { + PMD_DRV_LOG(NOTICE, "Failed to delete UC MACs list (%d)", rc); + } + + /* Disable LLH */ + REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 0); + + /* Set "drop all" to stop Rx */ + + /* + * We need to take the if_maddr_lock() here in order to prevent + * a race between the completion code and this code. + */ + + if (bnx2x_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { + bnx2x_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); + } else { + bnx2x_set_storm_rx_mode(sc); + } + + /* Clean up multicast configuration */ + rparam.mcast_obj = &sc->mcast_obj; + rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); + if (rc < 0) { + PMD_DRV_LOG(NOTICE, + "Failed to send DEL MCAST command (%d)", rc); + } + + /* + * Send the UNLOAD_REQUEST to the MCP. This will return if + * this function should perform FUNCTION, PORT, or COMMON HW + * reset. + */ + reset_code = bnx2x_send_unload_req(sc, unload_mode); + + /* + * (assumption: No Attention from MCP at this stage) + * PMF probably in the middle of TX disable/enable transaction + */ + rc = bnx2x_func_wait_started(sc); + if (rc) { + PMD_DRV_LOG(NOTICE, "bnx2x_func_wait_started failed"); + } + + /* + * Close multi and leading connections + * Completions for ramrods are collected in a synchronous way + */ + for (i = 0; i < sc->num_queues; i++) { + if (bnx2x_stop_queue(sc, i)) { + goto unload_error; + } + } + + /* + * If SP settings didn't get completed so far - something + * very wrong has happen. 
+ */ + if (!bnx2x_wait_sp_comp(sc, ~0x0UL)) { + PMD_DRV_LOG(NOTICE, "Common slow path ramrods got stuck!"); + } + +unload_error: + + rc = bnx2x_func_stop(sc); + if (rc) { + PMD_DRV_LOG(NOTICE, "Function stop failed!"); + } + + /* disable HW interrupts */ + bnx2x_int_disable_sync(sc, TRUE); + + /* Reset the chip */ + rc = bnx2x_reset_hw(sc, reset_code); + if (rc) { + PMD_DRV_LOG(NOTICE, "Hardware reset failed"); + } + + /* Report UNLOAD_DONE to MCP */ + bnx2x_send_unload_done(sc, keep_link); +} + +static void bnx2x_disable_close_the_gate(struct bnx2x_softc *sc) +{ + uint32_t val; + + PMD_DRV_LOG(DEBUG, "Disabling 'close the gates'"); + + val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK); + val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | + MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); + REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val); +} + +/* + * Cleans the object that have internal lists without sending + * ramrods. Should be run when interrutps are disabled. + */ +static void bnx2x_squeeze_objects(struct bnx2x_softc *sc) +{ + unsigned long ramrod_flags = 0, vlan_mac_flags = 0; + struct ecore_mcast_ramrod_params rparam = { NULL }; + struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; + int rc; + + /* Cleanup MACs' object first... */ + + /* Wait for completion of requested */ + bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + /* Perform a dry cleanup */ + bnx2x_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags); + + /* Clean ETH primary MAC */ + bnx2x_set_bit(ECORE_ETH_MAC, &vlan_mac_flags); + rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags, + &ramrod_flags); + if (rc != 0) { + PMD_DRV_LOG(NOTICE, "Failed to clean ETH MACs (%d)", rc); + } + + /* Cleanup UC list */ + vlan_mac_flags = 0; + bnx2x_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags); + rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); + if (rc != 0) { + PMD_DRV_LOG(NOTICE, "Failed to clean UC list MACs (%d)", rc); + } + + /* Now clean mcast object... */ + + rparam.mcast_obj = &sc->mcast_obj; + bnx2x_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); + + /* Add a DEL command... */ + rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); + if (rc < 0) { + PMD_DRV_LOG(NOTICE, + "Failed to send DEL MCAST command (%d)", rc); + } + + /* now wait until all pending commands are cleared */ + + rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); + while (rc != 0) { + if (rc < 0) { + PMD_DRV_LOG(NOTICE, + "Failed to clean MCAST object (%d)", rc); + return; + } + + rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); + } +} + +/* stop the controller */ +__attribute__ ((noinline)) +int +bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link) +{ + uint8_t global = FALSE; + uint32_t val; + + PMD_DRV_LOG(DEBUG, "Starting NIC unload..."); + + /* stop the periodic callout */ + bnx2x_periodic_stop(sc); + + /* mark driver as unloaded in shmem2 */ + if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { + val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); + SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], + val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); + } + + if (IS_PF(sc) && sc->recovery_state != BNX2X_RECOVERY_DONE && + (sc->state == BNX2X_STATE_CLOSED || sc->state == BNX2X_STATE_ERROR)) { + /* + * We can get here if the driver has been unloaded + * during parity error recovery and is either waiting for a + * leader to complete or for other functions to unload and + * then ifconfig down has been issued. 
In this case we want to + * unload and let other functions to complete a recovery + * process. + */ + sc->recovery_state = BNX2X_RECOVERY_DONE; + sc->is_leader = 0; + bnx2x_release_leader_lock(sc); + mb(); + + PMD_DRV_LOG(NOTICE, "Can't unload in closed or error state"); + return -1; + } + + /* + * Nothing to do during unload if previous bnx2x_nic_load() + * did not completed succesfully - all resourses are released. + */ + if ((sc->state == BNX2X_STATE_CLOSED) || (sc->state == BNX2X_STATE_ERROR)) { + return 0; + } + + sc->state = BNX2X_STATE_CLOSING_WAITING_HALT; + mb(); + + sc->rx_mode = BNX2X_RX_MODE_NONE; + bnx2x_set_rx_mode(sc); + mb(); + + if (IS_PF(sc)) { + /* set ALWAYS_ALIVE bit in shmem */ + sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; + + bnx2x_drv_pulse(sc); + + bnx2x_stats_handle(sc, STATS_EVENT_STOP); + bnx2x_save_statistics(sc); + } + + /* wait till consumers catch up with producers in all queues */ + bnx2x_drain_tx_queues(sc); + + /* if VF indicate to PF this function is going down (PF will delete sp + * elements and clear initializations + */ + if (IS_VF(sc)) { + bnx2x_vf_unload(sc); + } else if (unload_mode != UNLOAD_RECOVERY) { + /* if this is a normal/close unload need to clean up chip */ + bnx2x_chip_cleanup(sc, unload_mode, keep_link); + } else { + /* Send the UNLOAD_REQUEST to the MCP */ + bnx2x_send_unload_req(sc, unload_mode); + + /* + * Prevent transactions to host from the functions on the + * engine that doesn't reset global blocks in case of global + * attention once gloabl blocks are reset and gates are opened + * (the engine which leader will perform the recovery + * last). + */ + if (!CHIP_IS_E1x(sc)) { + bnx2x_pf_disable(sc); + } + + /* disable HW interrupts */ + bnx2x_int_disable_sync(sc, TRUE); + + /* Report UNLOAD_DONE to MCP */ + bnx2x_send_unload_done(sc, FALSE); + } + + /* + * At this stage no more interrupts will arrive so we may safely clean + * the queue'able objects here in case they failed to get cleaned so far. + */ + if (IS_PF(sc)) { + bnx2x_squeeze_objects(sc); + } + + /* There should be no more pending SP commands at this stage */ + sc->sp_state = 0; + + sc->port.pmf = 0; + + if (IS_PF(sc)) { + bnx2x_free_mem(sc); + } + + bnx2x_free_fw_stats_mem(sc); + + sc->state = BNX2X_STATE_CLOSED; + + /* + * Check if there are pending parity attentions. If there are - set + * RECOVERY_IN_PROGRESS. + */ + if (IS_PF(sc) && bnx2x_chk_parity_attn(sc, &global, FALSE)) { + bnx2x_set_reset_in_progress(sc); + + /* Set RESET_IS_GLOBAL if needed */ + if (global) { + bnx2x_set_reset_global(sc); + } + } + + /* + * The last driver must disable a "close the gate" if there is no + * parity attention or "process kill" pending. + */ + if (IS_PF(sc) && !bnx2x_clear_pf_load(sc) && + bnx2x_reset_is_done(sc, SC_PATH(sc))) { + bnx2x_disable_close_the_gate(sc); + } + + PMD_DRV_LOG(DEBUG, "Ended NIC unload"); + + return 0; +} + +/* + * Encapsulte an mbuf cluster into the tx bd chain and makes the memory + * visible to the controller. + * + * If an mbuf is submitted to this routine and cannot be given to the + * controller (e.g. it has too many fragments) then the function may free + * the mbuf and return to the caller. + * + * Returns: + * 0 = Success, !0 = Failure + * Note the side effect that an mbuf may be freed if it causes a problem. 
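For VFs, bnx2x_tx_encap() below copies the destination and source MAC addresses into the parsing BD as three 16-bit words (hi/mid/lo), each converted to big-endian. A reduced sketch of that split; the struct is an illustrative stand-in, not the firmware layout, and rte_cpu_to_be_16() is the DPDK byte-order helper the driver itself uses:

#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>

struct mac_words {                /* illustrative stand-in for the parse BD */
        uint16_t hi, mid, lo;     /* bytes 0-1, 2-3, 4-5 of the address     */
};

static void mac_to_words(const uint8_t addr[6], struct mac_words *w)
{
        uint16_t tmp;

        memcpy(&tmp, &addr[0], 2); w->hi  = rte_cpu_to_be_16(tmp);
        memcpy(&tmp, &addr[2], 2); w->mid = rte_cpu_to_be_16(tmp);
        memcpy(&tmp, &addr[4], 2); w->lo  = rte_cpu_to_be_16(tmp);
}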
+ */ +int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf **m_head, int m_pkts) +{ + struct rte_mbuf *m0; + struct eth_tx_start_bd *tx_start_bd; + uint16_t bd_prod, pkt_prod; + int m_tx; + struct bnx2x_softc *sc; + uint32_t nbds = 0; + struct bnx2x_fastpath *fp; + + sc = txq->sc; + fp = &sc->fp[txq->queue_id]; + + bd_prod = txq->tx_bd_tail; + pkt_prod = txq->tx_pkt_tail; + + for (m_tx = 0; m_tx < m_pkts; m_tx++) { + + m0 = *m_head++; + + if (unlikely(txq->nb_tx_avail < 3)) { + PMD_TX_LOG(ERR, "no enough bds %d/%d", + bd_prod, txq->nb_tx_avail); + return -ENOMEM; + } + + txq->sw_ring[TX_BD(pkt_prod, txq)] = m0; + + tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd; + + tx_start_bd->addr = + rte_cpu_to_le_64(rte_mbuf_data_dma_addr(m0)); + tx_start_bd->nbytes = rte_cpu_to_le_16(m0->data_len); + tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; + tx_start_bd->general_data = + (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); + + tx_start_bd->nbd = rte_cpu_to_le_16(2); + + if (m0->ol_flags & PKT_TX_VLAN_PKT) { + tx_start_bd->vlan_or_ethertype = + rte_cpu_to_le_16(m0->vlan_tci); + tx_start_bd->bd_flags.as_bitfield |= + (X_ETH_OUTBAND_VLAN << + ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); + } else { + if (IS_PF(sc)) + tx_start_bd->vlan_or_ethertype = + rte_cpu_to_le_16(pkt_prod); + else { + struct ether_hdr *eh + = rte_pktmbuf_mtod(m0, struct ether_hdr *); + + tx_start_bd->vlan_or_ethertype + = rte_cpu_to_le_16(rte_be_to_cpu_16(eh->ether_type)); + } + } + + bd_prod = NEXT_TX_BD(bd_prod); + if (IS_VF(sc)) { + struct eth_tx_parse_bd_e2 *tx_parse_bd; + const struct ether_hdr *eh = rte_pktmbuf_mtod(m0, struct ether_hdr *); + uint8_t mac_type = UNICAST_ADDRESS; + + tx_parse_bd = + &txq->tx_ring[TX_BD(bd_prod, txq)].parse_bd_e2; + if (is_multicast_ether_addr(&eh->d_addr)) { + if (is_broadcast_ether_addr(&eh->d_addr)) + mac_type = BROADCAST_ADDRESS; + else + mac_type = MULTICAST_ADDRESS; + } + tx_parse_bd->parsing_data = + (mac_type << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT); + + rte_memcpy(&tx_parse_bd->data.mac_addr.dst_hi, + &eh->d_addr.addr_bytes[0], 2); + rte_memcpy(&tx_parse_bd->data.mac_addr.dst_mid, + &eh->d_addr.addr_bytes[2], 2); + rte_memcpy(&tx_parse_bd->data.mac_addr.dst_lo, + &eh->d_addr.addr_bytes[4], 2); + rte_memcpy(&tx_parse_bd->data.mac_addr.src_hi, + &eh->s_addr.addr_bytes[0], 2); + rte_memcpy(&tx_parse_bd->data.mac_addr.src_mid, + &eh->s_addr.addr_bytes[2], 2); + rte_memcpy(&tx_parse_bd->data.mac_addr.src_lo, + &eh->s_addr.addr_bytes[4], 2); + + tx_parse_bd->data.mac_addr.dst_hi = + rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_hi); + tx_parse_bd->data.mac_addr.dst_mid = + rte_cpu_to_be_16(tx_parse_bd->data. + mac_addr.dst_mid); + tx_parse_bd->data.mac_addr.dst_lo = + rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_lo); + tx_parse_bd->data.mac_addr.src_hi = + rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_hi); + tx_parse_bd->data.mac_addr.src_mid = + rte_cpu_to_be_16(tx_parse_bd->data. 
+ mac_addr.src_mid); + tx_parse_bd->data.mac_addr.src_lo = + rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_lo); + + PMD_TX_LOG(DEBUG, + "PBD dst %x %x %x src %x %x %x p_data %x", + tx_parse_bd->data.mac_addr.dst_hi, + tx_parse_bd->data.mac_addr.dst_mid, + tx_parse_bd->data.mac_addr.dst_lo, + tx_parse_bd->data.mac_addr.src_hi, + tx_parse_bd->data.mac_addr.src_mid, + tx_parse_bd->data.mac_addr.src_lo, + tx_parse_bd->parsing_data); + } + + PMD_TX_LOG(DEBUG, + "start bd: nbytes %d flags %x vlan %x\n", + tx_start_bd->nbytes, + tx_start_bd->bd_flags.as_bitfield, + tx_start_bd->vlan_or_ethertype); + + bd_prod = NEXT_TX_BD(bd_prod); + pkt_prod++; + + if (TX_IDX(bd_prod) < 2) { + nbds++; + } + } + + txq->nb_tx_avail -= m_pkts << 1; + txq->tx_bd_tail = bd_prod; + txq->tx_pkt_tail = pkt_prod; + + mb(); + fp->tx_db.data.prod += (m_pkts << 1) + nbds; + DOORBELL(sc, txq->queue_id, fp->tx_db.raw); + mb(); + + return 0; +} + +static uint16_t bnx2x_cid_ilt_lines(struct bnx2x_softc *sc) +{ + return L2_ILT_LINES(sc); +} + +static void bnx2x_ilt_set_info(struct bnx2x_softc *sc) +{ + struct ilt_client_info *ilt_client; + struct ecore_ilt *ilt = sc->ilt; + uint16_t line = 0; + + PMD_INIT_FUNC_TRACE(); + + ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc)); + + /* CDU */ + ilt_client = &ilt->clients[ILT_CLIENT_CDU]; + ilt_client->client_num = ILT_CLIENT_CDU; + ilt_client->page_size = CDU_ILT_PAGE_SZ; + ilt_client->flags = ILT_CLIENT_SKIP_MEM; + ilt_client->start = line; + line += bnx2x_cid_ilt_lines(sc); + + if (CNIC_SUPPORT(sc)) { + line += CNIC_ILT_LINES; + } + + ilt_client->end = (line - 1); + + /* QM */ + if (QM_INIT(sc->qm_cid_count)) { + ilt_client = &ilt->clients[ILT_CLIENT_QM]; + ilt_client->client_num = ILT_CLIENT_QM; + ilt_client->page_size = QM_ILT_PAGE_SZ; + ilt_client->flags = 0; + ilt_client->start = line; + + /* 4 bytes for each cid */ + line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4, + QM_ILT_PAGE_SZ); + + ilt_client->end = (line - 1); + } + + if (CNIC_SUPPORT(sc)) { + /* SRC */ + ilt_client = &ilt->clients[ILT_CLIENT_SRC]; + ilt_client->client_num = ILT_CLIENT_SRC; + ilt_client->page_size = SRC_ILT_PAGE_SZ; + ilt_client->flags = 0; + ilt_client->start = line; + line += SRC_ILT_LINES; + ilt_client->end = (line - 1); + + /* TM */ + ilt_client = &ilt->clients[ILT_CLIENT_TM]; + ilt_client->client_num = ILT_CLIENT_TM; + ilt_client->page_size = TM_ILT_PAGE_SZ; + ilt_client->flags = 0; + ilt_client->start = line; + line += TM_ILT_LINES; + ilt_client->end = (line - 1); + } + + assert((line <= ILT_MAX_LINES)); +} + +static void bnx2x_set_fp_rx_buf_size(struct bnx2x_softc *sc) +{ + int i; + + for (i = 0; i < sc->num_queues; i++) { + /* get the Rx buffer size for RX frames */ + sc->fp[i].rx_buf_size = + (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu); + } +} + +int bnx2x_alloc_ilt_mem(struct bnx2x_softc *sc) +{ + + sc->ilt = rte_malloc("", sizeof(struct ecore_ilt), RTE_CACHE_LINE_SIZE); + + return sc->ilt == NULL; +} + +static int bnx2x_alloc_ilt_lines_mem(struct bnx2x_softc *sc) +{ + sc->ilt->lines = rte_calloc("", + sizeof(struct ilt_line), ILT_MAX_LINES, + RTE_CACHE_LINE_SIZE); + return sc->ilt->lines == NULL; +} + +void bnx2x_free_ilt_mem(struct bnx2x_softc *sc) +{ + rte_free(sc->ilt); + sc->ilt = NULL; +} + +static void bnx2x_free_ilt_lines_mem(struct bnx2x_softc *sc) +{ + if (sc->ilt->lines != NULL) { + rte_free(sc->ilt->lines); + sc->ilt->lines = NULL; + } +} + +static void bnx2x_free_mem(struct bnx2x_softc *sc) +{ + uint32_t i; + + for (i = 0; i < L2_ILT_LINES(sc); i++) { + 
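bnx2x_alloc_mem() further below carves the total CDU context area into CDU_ILT_PAGE_SZ-sized DMA chunks, with the final chunk trimmed to whatever remains. A minimal sketch of that chunking arithmetic only; the actual DMA allocation is left out and the helper is hypothetical:

#include <stddef.h>

/* Split 'total' bytes into page-sized chunks; the last chunk may be short.
 * Returns how many chunks a caller would have to allocate. */
static int count_context_chunks(size_t total, size_t page_sz, size_t *last_sz)
{
        int chunks = 0;
        size_t allocated = 0;

        while (allocated < total) {
                size_t sz = (total - allocated) < page_sz ?
                            (total - allocated) : page_sz;
                allocated += sz;
                *last_sz = sz;
                chunks++;
        }
        return chunks;
}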
sc->context[i].vcxt = NULL; + sc->context[i].size = 0; + } + + ecore_ilt_mem_op(sc, ILT_MEMOP_FREE); + + bnx2x_free_ilt_lines_mem(sc); +} + +static int bnx2x_alloc_mem(struct bnx2x_softc *sc) +{ + int context_size; + int allocated; + int i; + char cdu_name[RTE_MEMZONE_NAMESIZE]; + + /* + * Allocate memory for CDU context: + * This memory is allocated separately and not in the generic ILT + * functions because CDU differs in few aspects: + * 1. There can be multiple entities allocating memory for context - + * regular L2, CNIC, and SRIOV drivers. Each separately controls + * its own ILT lines. + * 2. Since CDU page-size is not a single 4KB page (which is the case + * for the other ILT clients), to be efficient we want to support + * allocation of sub-page-size in the last entry. + * 3. Context pointers are used by the driver to pass to FW / update + * the context (for the other ILT clients the pointers are used just to + * free the memory during unload). + */ + context_size = (sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(sc)); + for (i = 0, allocated = 0; allocated < context_size; i++) { + sc->context[i].size = min(CDU_ILT_PAGE_SZ, + (context_size - allocated)); + + snprintf(cdu_name, sizeof(cdu_name), "cdu_%d", i); + if (bnx2x_dma_alloc(sc, sc->context[i].size, + &sc->context[i].vcxt_dma, + cdu_name, BNX2X_PAGE_SIZE) != 0) { + bnx2x_free_mem(sc); + return -1; + } + + sc->context[i].vcxt = + (union cdu_context *)sc->context[i].vcxt_dma.vaddr; + + allocated += sc->context[i].size; + } + + bnx2x_alloc_ilt_lines_mem(sc); + + if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) { + PMD_DRV_LOG(NOTICE, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed"); + bnx2x_free_mem(sc); + return -1; + } + + return 0; +} + +static void bnx2x_free_fw_stats_mem(struct bnx2x_softc *sc) +{ + sc->fw_stats_num = 0; + + sc->fw_stats_req_size = 0; + sc->fw_stats_req = NULL; + sc->fw_stats_req_mapping = 0; + + sc->fw_stats_data_size = 0; + sc->fw_stats_data = NULL; + sc->fw_stats_data_mapping = 0; +} + +static int bnx2x_alloc_fw_stats_mem(struct bnx2x_softc *sc) +{ + uint8_t num_queue_stats; + int num_groups, vf_headroom = 0; + + /* number of queues for statistics is number of eth queues */ + num_queue_stats = BNX2X_NUM_ETH_QUEUES(sc); + + /* + * Total number of FW statistics requests = + * 1 for port stats + 1 for PF stats + num of queues + */ + sc->fw_stats_num = (2 + num_queue_stats); + + /* + * Request is built from stats_query_header and an array of + * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT + * rules. The real number or requests is configured in the + * stats_query_header. + */ + num_groups = (sc->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT; + if ((sc->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) + num_groups++; + + sc->fw_stats_req_size = + (sizeof(struct stats_query_header) + + (num_groups * sizeof(struct stats_query_cmd_group))); + + /* + * Data for statistics requests + stats_counter. + * stats_counter holds per-STORM counters that are incremented when + * STORM has finished with the current request. Memory for FCoE + * offloaded statistics are counted anyway, even if they will not be sent. + * VF stats are not accounted for here as the data of VF stats is stored + * in memory allocated by the VF, not here. 
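The statistics memory sizing below groups one port request, one PF request and one request per ethernet queue into stats_query_cmd_group blocks of STATS_QUERY_CMD_COUNT commands each, i.e. num_groups = ceil(fw_stats_num / STATS_QUERY_CMD_COUNT). A worked sketch; the example queue count and group size in the comment are assumptions, not values taken from the headers:

/* e.g. 4 ethernet queues -> fw_stats_num = 2 + 4 = 6; if a command group
 * held 16 entries, those 6 requests would need ceil(6 / 16) = 1 group. */
static int stats_query_groups(int fw_stats_num, int cmds_per_group)
{
        return (fw_stats_num + cmds_per_group - 1) / cmds_per_group;
}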
+ */ + sc->fw_stats_data_size = + (sizeof(struct stats_counter) + + sizeof(struct per_port_stats) + sizeof(struct per_pf_stats) + + /* sizeof(struct fcoe_statistics_params) + */ + (sizeof(struct per_queue_stats) * num_queue_stats)); + + if (bnx2x_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size), + &sc->fw_stats_dma, "fw_stats", + RTE_CACHE_LINE_SIZE) != 0) { + bnx2x_free_fw_stats_mem(sc); + return -1; + } + + /* set up the shortcuts */ + + sc->fw_stats_req = (struct bnx2x_fw_stats_req *)sc->fw_stats_dma.vaddr; + sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr; + + sc->fw_stats_data = + (struct bnx2x_fw_stats_data *)((uint8_t *) sc->fw_stats_dma.vaddr + + sc->fw_stats_req_size); + sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr + + sc->fw_stats_req_size); + + return 0; +} + +/* + * Bits map: + * 0-7 - Engine0 load counter. + * 8-15 - Engine1 load counter. + * 16 - Engine0 RESET_IN_PROGRESS bit. + * 17 - Engine1 RESET_IN_PROGRESS bit. + * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active + * function on the engine + * 19 - Engine1 ONE_IS_LOADED. + * 20 - Chip reset flow bit. When set none-leader must wait for both engines + * leader to complete (check for both RESET_IN_PROGRESS bits and not + * for just the one belonging to its engine). + */ +#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 +#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff +#define BNX2X_PATH0_LOAD_CNT_SHIFT 0 +#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00 +#define BNX2X_PATH1_LOAD_CNT_SHIFT 8 +#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000 +#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000 +#define BNX2X_GLOBAL_RESET_BIT 0x00040000 + +/* set the GLOBAL_RESET bit, should be run under rtnl lock */ +static void bnx2x_set_reset_global(struct bnx2x_softc *sc) +{ + uint32_t val; + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); + val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); + REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT); + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); +} + +/* clear the GLOBAL_RESET bit, should be run under rtnl lock */ +static void bnx2x_clear_reset_global(struct bnx2x_softc *sc) +{ + uint32_t val; + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); + val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); + REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT)); + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); +} + +/* checks the GLOBAL_RESET bit, should be run under rtnl lock */ +static uint8_t bnx2x_reset_is_global(struct bnx2x_softc *sc) +{ + return REG_RD(sc, BNX2X_RECOVERY_GLOB_REG) & BNX2X_GLOBAL_RESET_BIT; +} + +/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */ +static void bnx2x_set_reset_done(struct bnx2x_softc *sc) +{ + uint32_t val; + uint32_t bit = SC_PATH(sc) ? BNX2X_PATH1_RST_IN_PROG_BIT : + BNX2X_PATH0_RST_IN_PROG_BIT; + + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); + + val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); + /* Clear the bit */ + val &= ~bit; + REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val); + + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); +} + +/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */ +static void bnx2x_set_reset_in_progress(struct bnx2x_softc *sc) +{ + uint32_t val; + uint32_t bit = SC_PATH(sc) ? 
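The recovery-register helpers above and below all follow the same read-modify-write pattern on BNX2X_RECOVERY_GLOB_REG: extract the per-path load counter with its mask and shift, flip the bit belonging to this function, and write the field back. A condensed sketch using the path-0 constants defined above, with the register access itself left abstract:

#include <stdint.h>

/* Set this PF's bit inside the path-0 load counter field of 'reg'. */
static uint32_t mark_pf_loaded(uint32_t reg, unsigned int abs_func)
{
        uint32_t cnt = (reg & BNX2X_PATH0_LOAD_CNT_MASK) >>
                       BNX2X_PATH0_LOAD_CNT_SHIFT;       /* current bitmap  */

        cnt |= (1u << abs_func);                          /* add this PF     */

        reg &= ~BNX2X_PATH0_LOAD_CNT_MASK;                /* clear old field */
        reg |= (cnt << BNX2X_PATH0_LOAD_CNT_SHIFT) &
               BNX2X_PATH0_LOAD_CNT_MASK;                 /* insert new one  */
        return reg;
}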
BNX2X_PATH1_RST_IN_PROG_BIT : + BNX2X_PATH0_RST_IN_PROG_BIT; + + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); + + val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); + /* Set the bit */ + val |= bit; + REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val); + + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); +} + +/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */ +static uint8_t bnx2x_reset_is_done(struct bnx2x_softc *sc, int engine) +{ + uint32_t val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); + uint32_t bit = engine ? BNX2X_PATH1_RST_IN_PROG_BIT : + BNX2X_PATH0_RST_IN_PROG_BIT; + + /* return false if bit is set */ + return (val & bit) ? FALSE : TRUE; +} + +/* get the load status for an engine, should be run under rtnl lock */ +static uint8_t bnx2x_get_load_status(struct bnx2x_softc *sc, int engine) +{ + uint32_t mask = engine ? BNX2X_PATH1_LOAD_CNT_MASK : + BNX2X_PATH0_LOAD_CNT_MASK; + uint32_t shift = engine ? BNX2X_PATH1_LOAD_CNT_SHIFT : + BNX2X_PATH0_LOAD_CNT_SHIFT; + uint32_t val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); + + val = ((val & mask) >> shift); + + return val != 0; +} + +/* set pf load mark */ +static void bnx2x_set_pf_load(struct bnx2x_softc *sc) +{ + uint32_t val; + uint32_t val1; + uint32_t mask = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_MASK : + BNX2X_PATH0_LOAD_CNT_MASK; + uint32_t shift = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_SHIFT : + BNX2X_PATH0_LOAD_CNT_SHIFT; + + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); + + PMD_INIT_FUNC_TRACE(); + + val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); + + /* get the current counter value */ + val1 = ((val & mask) >> shift); + + /* set bit of this PF */ + val1 |= (1 << SC_ABS_FUNC(sc)); + + /* clear the old value */ + val &= ~mask; + + /* set the new one */ + val |= ((val1 << shift) & mask); + + REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val); + + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); +} + +/* clear pf load mark */ +static uint8_t bnx2x_clear_pf_load(struct bnx2x_softc *sc) +{ + uint32_t val1, val; + uint32_t mask = SC_PATH(sc) ? BNX2X_PATH1_LOAD_CNT_MASK : + BNX2X_PATH0_LOAD_CNT_MASK; + uint32_t shift = SC_PATH(sc) ? 
BNX2X_PATH1_LOAD_CNT_SHIFT : + BNX2X_PATH0_LOAD_CNT_SHIFT; + + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); + val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG); + + /* get the current counter value */ + val1 = (val & mask) >> shift; + + /* clear bit of that PF */ + val1 &= ~(1 << SC_ABS_FUNC(sc)); + + /* clear the old value */ + val &= ~mask; + + /* set the new one */ + val |= ((val1 << shift) & mask); + + REG_WR(sc, BNX2X_RECOVERY_GLOB_REG, val); + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); + return val1 != 0; +} + +/* send load requrest to mcp and analyze response */ +static int bnx2x_nic_load_request(struct bnx2x_softc *sc, uint32_t * load_code) +{ + PMD_INIT_FUNC_TRACE(); + + /* init fw_seq */ + sc->fw_seq = + (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + + PMD_DRV_LOG(DEBUG, "initial fw_seq 0x%04x", sc->fw_seq); + +#ifdef BNX2X_PULSE + /* get the current FW pulse sequence */ + sc->fw_drv_pulse_wr_seq = + (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) & + DRV_PULSE_SEQ_MASK); +#else + /* set ALWAYS_ALIVE bit in shmem */ + sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; + bnx2x_drv_pulse(sc); +#endif + + /* load request */ + (*load_code) = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, + DRV_MSG_CODE_LOAD_REQ_WITH_LFA); + + /* if the MCP fails to respond we must abort */ + if (!(*load_code)) { + PMD_DRV_LOG(NOTICE, "MCP response failure!"); + return -1; + } + + /* if MCP refused then must abort */ + if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) { + PMD_DRV_LOG(NOTICE, "MCP refused load request"); + return -1; + } + + return 0; +} + +/* + * Check whether another PF has already loaded FW to chip. In virtualized + * environments a pf from anoth VM may have already initialized the device + * including loading FW. + */ +static int bnx2x_nic_load_analyze_req(struct bnx2x_softc *sc, uint32_t load_code) +{ + uint32_t my_fw, loaded_fw; + + /* is another pf loaded on this engine? */ + if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && + (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { + /* build my FW version dword */ + my_fw = (BNX2X_5710_FW_MAJOR_VERSION + + (BNX2X_5710_FW_MINOR_VERSION << 8) + + (BNX2X_5710_FW_REVISION_VERSION << 16) + + (BNX2X_5710_FW_ENGINEERING_VERSION << 24)); + + /* read loaded FW from chip */ + loaded_fw = REG_RD(sc, XSEM_REG_PRAM); + PMD_DRV_LOG(DEBUG, "loaded FW 0x%08x / my FW 0x%08x", + loaded_fw, my_fw); + + /* abort nic load if version mismatch */ + if (my_fw != loaded_fw) { + PMD_DRV_LOG(NOTICE, + "FW 0x%08x already loaded (mine is 0x%08x)", + loaded_fw, my_fw); + return -1; + } + } + + return 0; +} + +/* mark PMF if applicable */ +static void bnx2x_nic_load_pmf(struct bnx2x_softc *sc, uint32_t load_code) +{ + uint32_t ncsi_oem_data_addr; + + PMD_INIT_FUNC_TRACE(); + + if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || + (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || + (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { + /* + * Barrier here for ordering between the writing to sc->port.pmf here + * and reading it from the periodic task. 
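bnx2x_nic_load_analyze_req() above packs the driver's firmware version into a single dword, one byte per component: major in the low byte, then minor, revision and engineering version. A small sketch of that packing; the 7.13.11.0 version used in the comment is a hypothetical example, not a statement about the bundled firmware:

#include <stdint.h>

/* Pack major.minor.rev.eng into the dword layout compared above. */
static uint32_t fw_version_dword(uint8_t major, uint8_t minor,
                                 uint8_t rev, uint8_t eng)
{
        return (uint32_t)major | ((uint32_t)minor << 8) |
               ((uint32_t)rev << 16) | ((uint32_t)eng << 24);
}

/* fw_version_dword(7, 13, 11, 0) == 0x000b0d07 */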
+ */ + sc->port.pmf = 1; + mb(); + } else { + sc->port.pmf = 0; + } + + PMD_DRV_LOG(DEBUG, "pmf %d", sc->port.pmf); + + if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) { + if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) { + ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr); + if (ncsi_oem_data_addr) { + REG_WR(sc, + (ncsi_oem_data_addr + + offsetof(struct glob_ncsi_oem_data, + driver_version)), 0); + } + } + } +} + +static void bnx2x_read_mf_cfg(struct bnx2x_softc *sc) +{ + int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1); + int abs_func; + int vn; + + if (BNX2X_NOMCP(sc)) { + return; /* what should be the default bvalue in this case */ + } + + /* + * The formula for computing the absolute function number is... + * For 2 port configuration (4 functions per port): + * abs_func = 2 * vn + SC_PORT + SC_PATH + * For 4 port configuration (2 functions per port): + * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH + */ + for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { + abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc)); + if (abs_func >= E1H_FUNC_MAX) { + break; + } + sc->devinfo.mf_info.mf_config[vn] = + MFCFG_RD(sc, func_mf_config[abs_func].config); + } + + if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & + FUNC_MF_CFG_FUNC_DISABLED) { + PMD_DRV_LOG(DEBUG, "mf_cfg function disabled"); + sc->flags |= BNX2X_MF_FUNC_DIS; + } else { + PMD_DRV_LOG(DEBUG, "mf_cfg function enabled"); + sc->flags &= ~BNX2X_MF_FUNC_DIS; + } +} + +/* acquire split MCP access lock register */ +static int bnx2x_acquire_alr(struct bnx2x_softc *sc) +{ + uint32_t j, val; + + for (j = 0; j < 1000; j++) { + val = (1UL << 31); + REG_WR(sc, GRCBASE_MCP + 0x9c, val); + val = REG_RD(sc, GRCBASE_MCP + 0x9c); + if (val & (1L << 31)) + break; + + DELAY(5000); + } + + if (!(val & (1L << 31))) { + PMD_DRV_LOG(NOTICE, "Cannot acquire MCP access lock register"); + return -1; + } + + return 0; +} + +/* release split MCP access lock register */ +static void bnx2x_release_alr(struct bnx2x_softc *sc) +{ + REG_WR(sc, GRCBASE_MCP + 0x9c, 0); +} + +static void bnx2x_fan_failure(struct bnx2x_softc *sc) +{ + int port = SC_PORT(sc); + uint32_t ext_phy_config; + + /* mark the failure */ + ext_phy_config = + SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); + + ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; + ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; + SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config, + ext_phy_config); + + /* log the failure */ + PMD_DRV_LOG(INFO, + "Fan Failure has caused the driver to shutdown " + "the card to prevent permanent damage. 
" + "Please contact OEM Support for assistance"); + + rte_panic("Schedule task to handle fan failure"); +} + +/* this function is called upon a link interrupt */ +static void bnx2x_link_attn(struct bnx2x_softc *sc) +{ + uint32_t pause_enabled = 0; + struct host_port_stats *pstats; + int cmng_fns; + + /* Make sure that we are synced with the current statistics */ + bnx2x_stats_handle(sc, STATS_EVENT_STOP); + + elink_link_update(&sc->link_params, &sc->link_vars); + + if (sc->link_vars.link_up) { + + /* dropless flow control */ + if (sc->dropless_fc) { + pause_enabled = 0; + + if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { + pause_enabled = 1; + } + + REG_WR(sc, + (BAR_USTRORM_INTMEM + + USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))), + pause_enabled); + } + + if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) { + pstats = BNX2X_SP(sc, port_stats); + /* reset old mac stats */ + memset(&(pstats->mac_stx[0]), 0, + sizeof(struct mac_stx)); + } + + if (sc->state == BNX2X_STATE_OPEN) { + bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); + } + } + + if (sc->link_vars.link_up && sc->link_vars.line_speed) { + cmng_fns = bnx2x_get_cmng_fns_mode(sc); + + if (cmng_fns != CMNG_FNS_NONE) { + bnx2x_cmng_fns_init(sc, FALSE, cmng_fns); + storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); + } + } + + bnx2x_link_report(sc); + + if (IS_MF(sc)) { + bnx2x_link_sync_notify(sc); + } +} + +static void bnx2x_attn_int_asserted(struct bnx2x_softc *sc, uint32_t asserted) +{ + int port = SC_PORT(sc); + uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : + MISC_REG_AEU_MASK_ATTN_FUNC_0; + uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : + NIG_REG_MASK_INTERRUPT_PORT0; + uint32_t aeu_mask; + uint32_t nig_mask = 0; + uint32_t reg_addr; + uint32_t igu_acked; + uint32_t cnt; + + if (sc->attn_state & asserted) { + PMD_DRV_LOG(ERR, "IGU ERROR attn=0x%08x", asserted); + } + + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + + aeu_mask = REG_RD(sc, aeu_addr); + + aeu_mask &= ~(asserted & 0x3ff); + + REG_WR(sc, aeu_addr, aeu_mask); + + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + + sc->attn_state |= asserted; + + if (asserted & ATTN_HARD_WIRED_MASK) { + if (asserted & ATTN_NIG_FOR_FUNC) { + + /* save nig interrupt mask */ + nig_mask = REG_RD(sc, nig_int_mask_addr); + + /* If nig_mask is not set, no need to call the update function */ + if (nig_mask) { + REG_WR(sc, nig_int_mask_addr, 0); + + bnx2x_link_attn(sc); + } + + /* handle unicore attn? 
*/ + } + + if (asserted & ATTN_SW_TIMER_4_FUNC) { + PMD_DRV_LOG(DEBUG, "ATTN_SW_TIMER_4_FUNC!"); + } + + if (asserted & GPIO_2_FUNC) { + PMD_DRV_LOG(DEBUG, "GPIO_2_FUNC!"); + } + + if (asserted & GPIO_3_FUNC) { + PMD_DRV_LOG(DEBUG, "GPIO_3_FUNC!"); + } + + if (asserted & GPIO_4_FUNC) { + PMD_DRV_LOG(DEBUG, "GPIO_4_FUNC!"); + } + + if (port == 0) { + if (asserted & ATTN_GENERAL_ATTN_1) { + PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_1!"); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_2) { + PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_2!"); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_3) { + PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_3!"); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); + } + } else { + if (asserted & ATTN_GENERAL_ATTN_4) { + PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_4!"); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_5) { + PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_5!"); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_6) { + PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_6!"); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); + } + } + } + /* hardwired */ + if (sc->devinfo.int_block == INT_BLOCK_HC) { + reg_addr = + (HC_REG_COMMAND_REG + port * 32 + + COMMAND_REG_ATTN_BITS_SET); + } else { + reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER * 8); + } + + PMD_DRV_LOG(DEBUG, "about to mask 0x%08x at %s addr 0x%08x", + asserted, + (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", + reg_addr); + REG_WR(sc, reg_addr, asserted); + + /* now set back the mask */ + if (asserted & ATTN_NIG_FOR_FUNC) { + /* + * Verify that IGU ack through BAR was written before restoring + * NIG mask. This loop should exit after 2-3 iterations max. + */ + if (sc->devinfo.int_block != INT_BLOCK_HC) { + cnt = 0; + + do { + igu_acked = + REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS); + } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) + && (++cnt < MAX_IGU_ATTN_ACK_TO)); + + if (!igu_acked) { + PMD_DRV_LOG(ERR, + "Failed to verify IGU ack on time"); + } + + mb(); + } + + REG_WR(sc, nig_int_mask_addr, nig_mask); + + } +} + +static void +bnx2x_print_next_block(__rte_unused struct bnx2x_softc *sc, __rte_unused int idx, + __rte_unused const char *blk) +{ + PMD_DRV_LOG(INFO, "%s%s", idx ? 
", " : "", blk); +} + +static int +bnx2x_check_blocks_with_parity0(struct bnx2x_softc *sc, uint32_t sig, int par_num, + uint8_t print) +{ + uint32_t cur_bit = 0; + int i = 0; + + for (i = 0; sig; i++) { + cur_bit = ((uint32_t) 0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "BRB"); + break; + case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "PARSER"); + break; + case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "TSDM"); + break; + case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "SEARCHER"); + break; + case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "TCM"); + break; + case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "TSEMI"); + break; + case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "XPB"); + break; + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return par_num; +} + +static int +bnx2x_check_blocks_with_parity1(struct bnx2x_softc *sc, uint32_t sig, int par_num, + uint8_t * global, uint8_t print) +{ + int i = 0; + uint32_t cur_bit = 0; + for (i = 0; sig; i++) { + cur_bit = ((uint32_t) 0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "PBF"); + break; + case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "QM"); + break; + case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "TM"); + break; + case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "XSDM"); + break; + case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "XCM"); + break; + case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "XSEMI"); + break; + case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "DOORBELLQ"); + break; + case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "NIG"); + break; + case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "VAUX PCI CORE"); + *global = TRUE; + break; + case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "DEBUG"); + break; + case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "USDM"); + break; + case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "UCM"); + break; + case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "USEMI"); + break; + case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "UPB"); + break; + case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "CSDM"); + break; + case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "CCM"); + break; + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return par_num; +} + +static int 
+bnx2x_check_blocks_with_parity2(struct bnx2x_softc *sc, uint32_t sig, int par_num, + uint8_t print) +{ + uint32_t cur_bit = 0; + int i = 0; + + for (i = 0; sig; i++) { + cur_bit = ((uint32_t) 0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "CSEMI"); + break; + case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "PXP"); + break; + case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "PXPPCICLOCKCLIENT"); + break; + case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "CFC"); + break; + case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "CDU"); + break; + case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "DMAE"); + break; + case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "IGU"); + break; + case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "MISC"); + break; + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return par_num; +} + +static int +bnx2x_check_blocks_with_parity3(struct bnx2x_softc *sc, uint32_t sig, int par_num, + uint8_t * global, uint8_t print) +{ + uint32_t cur_bit = 0; + int i = 0; + + for (i = 0; sig; i++) { + cur_bit = ((uint32_t) 0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: + if (print) + bnx2x_print_next_block(sc, par_num++, + "MCP ROM"); + *global = TRUE; + break; + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: + if (print) + bnx2x_print_next_block(sc, par_num++, + "MCP UMP RX"); + *global = TRUE; + break; + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: + if (print) + bnx2x_print_next_block(sc, par_num++, + "MCP UMP TX"); + *global = TRUE; + break; + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: + if (print) + bnx2x_print_next_block(sc, par_num++, + "MCP SCPAD"); + *global = TRUE; + break; + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return par_num; +} + +static int +bnx2x_check_blocks_with_parity4(struct bnx2x_softc *sc, uint32_t sig, int par_num, + uint8_t print) +{ + uint32_t cur_bit = 0; + int i = 0; + + for (i = 0; sig; i++) { + cur_bit = ((uint32_t) 0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "PGLUE_B"); + break; + case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: + if (print) + bnx2x_print_next_block(sc, par_num++, + "ATC"); + break; + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return par_num; +} + +static uint8_t +bnx2x_parity_attn(struct bnx2x_softc *sc, uint8_t * global, uint8_t print, + uint32_t * sig) +{ + int par_num = 0; + + if ((sig[0] & HW_PRTY_ASSERT_SET_0) || + (sig[1] & HW_PRTY_ASSERT_SET_1) || + (sig[2] & HW_PRTY_ASSERT_SET_2) || + (sig[3] & HW_PRTY_ASSERT_SET_3) || + (sig[4] & HW_PRTY_ASSERT_SET_4)) { + PMD_DRV_LOG(ERR, + "Parity error: HW block parity attention:" + "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x", + (uint32_t) (sig[0] & HW_PRTY_ASSERT_SET_0), + (uint32_t) (sig[1] & HW_PRTY_ASSERT_SET_1), + (uint32_t) (sig[2] & HW_PRTY_ASSERT_SET_2), + (uint32_t) (sig[3] & HW_PRTY_ASSERT_SET_3), + (uint32_t) (sig[4] & HW_PRTY_ASSERT_SET_4)); + + if (print) + 
PMD_DRV_LOG(INFO, "Parity errors detected in blocks: "); + + par_num = + bnx2x_check_blocks_with_parity0(sc, sig[0] & + HW_PRTY_ASSERT_SET_0, + par_num, print); + par_num = + bnx2x_check_blocks_with_parity1(sc, sig[1] & + HW_PRTY_ASSERT_SET_1, + par_num, global, print); + par_num = + bnx2x_check_blocks_with_parity2(sc, sig[2] & + HW_PRTY_ASSERT_SET_2, + par_num, print); + par_num = + bnx2x_check_blocks_with_parity3(sc, sig[3] & + HW_PRTY_ASSERT_SET_3, + par_num, global, print); + par_num = + bnx2x_check_blocks_with_parity4(sc, sig[4] & + HW_PRTY_ASSERT_SET_4, + par_num, print); + + if (print) + PMD_DRV_LOG(INFO, ""); + + return TRUE; + } + + return FALSE; +} + +static uint8_t +bnx2x_chk_parity_attn(struct bnx2x_softc *sc, uint8_t * global, uint8_t print) +{ + struct attn_route attn = { {0} }; + int port = SC_PORT(sc); + + attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port * 4); + attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port * 4); + attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port * 4); + attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port * 4); + + if (!CHIP_IS_E1x(sc)) + attn.sig[4] = + REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port * 4); + + return bnx2x_parity_attn(sc, global, print, attn.sig); +} + +static void bnx2x_attn_int_deasserted4(struct bnx2x_softc *sc, uint32_t attn) +{ + uint32_t val; + + if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { + val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); + PMD_DRV_LOG(INFO, "ERROR: PGLUE hw attention 0x%08x", val); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) + PMD_DRV_LOG(INFO, + "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) + PMD_DRV_LOG(INFO, + "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) + PMD_DRV_LOG(INFO, + "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) + PMD_DRV_LOG(INFO, + "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN"); + if (val & + PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) + PMD_DRV_LOG(INFO, + "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN"); + if (val & + PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) + PMD_DRV_LOG(INFO, + "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) + PMD_DRV_LOG(INFO, + "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) + PMD_DRV_LOG(INFO, + "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) + PMD_DRV_LOG(INFO, + "ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW"); + } + + if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { + val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR); + PMD_DRV_LOG(INFO, "ERROR: ATC hw attention 0x%08x", val); + if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) + PMD_DRV_LOG(INFO, + "ERROR: ATC_ATC_INT_STS_REG_ADDRESS_ERROR"); + if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) + PMD_DRV_LOG(INFO, + "ERROR: ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND"); + if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) + PMD_DRV_LOG(INFO, + "ERROR: ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS"); + if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) + PMD_DRV_LOG(INFO, + "ERROR: ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT"); + if (val & 
ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) + PMD_DRV_LOG(INFO, + "ERROR: ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR"); + if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) + PMD_DRV_LOG(INFO, + "ERROR: ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU"); + } + + if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | + AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { + PMD_DRV_LOG(INFO, + "ERROR: FATAL parity attention set4 0x%08x", + (uint32_t) (attn & + (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR + | + AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); + } +} + +static void bnx2x_e1h_disable(struct bnx2x_softc *sc) +{ + int port = SC_PORT(sc); + + REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 0); +} + +static void bnx2x_e1h_enable(struct bnx2x_softc *sc) +{ + int port = SC_PORT(sc); + + REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 1); +} + +/* + * called due to MCP event (on pmf): + * reread new bandwidth configuration + * configure FW + * notify others function about the change + */ +static void bnx2x_config_mf_bw(struct bnx2x_softc *sc) +{ + if (sc->link_vars.link_up) { + bnx2x_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX); + bnx2x_link_sync_notify(sc); + } + + storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); +} + +static void bnx2x_set_mf_bw(struct bnx2x_softc *sc) +{ + bnx2x_config_mf_bw(sc); + bnx2x_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0); +} + +static void bnx2x_handle_eee_event(struct bnx2x_softc *sc) +{ + bnx2x_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); +} + +#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 + +static void bnx2x_drv_info_ether_stat(struct bnx2x_softc *sc) +{ + struct eth_stats_info *ether_stat = &sc->sp->drv_info_to_mcp.ether_stat; + + strncpy(ether_stat->version, BNX2X_DRIVER_VERSION, + ETH_STAT_INFO_VERSION_LEN); + + sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj, + DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, + ether_stat->mac_local + MAC_PAD, + MAC_PAD, ETH_ALEN); + + ether_stat->mtu_size = sc->mtu; + + ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; + ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 
1 : 0; + + ether_stat->txq_size = sc->tx_ring_size; + ether_stat->rxq_size = sc->rx_ring_size; +} + +static void bnx2x_handle_drv_info_req(struct bnx2x_softc *sc) +{ + enum drv_info_opcode op_code; + uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control); + + /* if drv_info version supported by MFW doesn't match - send NACK */ + if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { + bnx2x_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); + return; + } + + op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> + DRV_INFO_CONTROL_OP_CODE_SHIFT); + + memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); + + switch (op_code) { + case ETH_STATS_OPCODE: + bnx2x_drv_info_ether_stat(sc); + break; + case FCOE_STATS_OPCODE: + case ISCSI_STATS_OPCODE: + default: + /* if op code isn't supported - send NACK */ + bnx2x_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); + return; + } + + /* + * If we got drv_info attn from MFW then these fields are defined in + * shmem2 for sure + */ + SHMEM2_WR(sc, drv_info_host_addr_lo, + U64_LO(BNX2X_SP_MAPPING(sc, drv_info_to_mcp))); + SHMEM2_WR(sc, drv_info_host_addr_hi, + U64_HI(BNX2X_SP_MAPPING(sc, drv_info_to_mcp))); + + bnx2x_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0); +} + +static void bnx2x_dcc_event(struct bnx2x_softc *sc, uint32_t dcc_event) +{ + if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { +/* + * This is the only place besides the function initialization + * where the sc->flags can change so it is done without any + * locks + */ + if (sc->devinfo. + mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { + PMD_DRV_LOG(DEBUG, "mf_cfg function disabled"); + sc->flags |= BNX2X_MF_FUNC_DIS; + bnx2x_e1h_disable(sc); + } else { + PMD_DRV_LOG(DEBUG, "mf_cfg function enabled"); + sc->flags &= ~BNX2X_MF_FUNC_DIS; + bnx2x_e1h_enable(sc); + } + dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; + } + + if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { + bnx2x_config_mf_bw(sc); + dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; + } + + /* Report results to MCP */ + if (dcc_event) + bnx2x_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0); + else + bnx2x_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0); +} + +static void bnx2x_pmf_update(struct bnx2x_softc *sc) +{ + int port = SC_PORT(sc); + uint32_t val; + + sc->port.pmf = 1; + + /* + * We need the mb() to ensure the ordering between the writing to + * sc->port.pmf here and reading it from the bnx2x_periodic_task(). 
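+ * (A full memory barrier is used here rather than a compiler-only barrier,
+ * presumably so the store to sc->port.pmf is visible before the NIG
+ * attention enable writes below.)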
+ */ + mb(); + + /* enable nig attention */ + val = (0xff0f | (1 << (SC_VN(sc) + 4))); + if (sc->devinfo.int_block == INT_BLOCK_HC) { + REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, val); + REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, val); + } else if (!CHIP_IS_E1x(sc)) { + REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); + REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); + } + + bnx2x_stats_handle(sc, STATS_EVENT_PMF); +} + +static int bnx2x_mc_assert(struct bnx2x_softc *sc) +{ + char last_idx; + int i, rc = 0; + __rte_unused uint32_t row0, row1, row2, row3; + + /* XSTORM */ + last_idx = + REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET); + if (last_idx) + PMD_DRV_LOG(ERR, "XSTORM_ASSERT_LIST_INDEX 0x%x", last_idx); + + /* print the asserts */ + for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { + + row0 = + REG_RD(sc, + BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i)); + row1 = + REG_RD(sc, + BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + + 4); + row2 = + REG_RD(sc, + BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + + 8); + row3 = + REG_RD(sc, + BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + + 12); + + if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { + PMD_DRV_LOG(ERR, + "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x", + i, row3, row2, row1, row0); + rc++; + } else { + break; + } + } + + /* TSTORM */ + last_idx = + REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET); + if (last_idx) { + PMD_DRV_LOG(ERR, "TSTORM_ASSERT_LIST_INDEX 0x%x", last_idx); + } + + /* print the asserts */ + for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { + + row0 = + REG_RD(sc, + BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i)); + row1 = + REG_RD(sc, + BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + + 4); + row2 = + REG_RD(sc, + BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + + 8); + row3 = + REG_RD(sc, + BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + + 12); + + if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { + PMD_DRV_LOG(ERR, + "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x", + i, row3, row2, row1, row0); + rc++; + } else { + break; + } + } + + /* CSTORM */ + last_idx = + REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET); + if (last_idx) { + PMD_DRV_LOG(ERR, "CSTORM_ASSERT_LIST_INDEX 0x%x", last_idx); + } + + /* print the asserts */ + for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { + + row0 = + REG_RD(sc, + BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i)); + row1 = + REG_RD(sc, + BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + + 4); + row2 = + REG_RD(sc, + BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + + 8); + row3 = + REG_RD(sc, + BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + + 12); + + if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { + PMD_DRV_LOG(ERR, + "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x", + i, row3, row2, row1, row0); + rc++; + } else { + break; + } + } + + /* USTORM */ + last_idx = + REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET); + if (last_idx) { + PMD_DRV_LOG(ERR, "USTORM_ASSERT_LIST_INDEX 0x%x", last_idx); + } + + /* print the asserts */ + for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { + + row0 = + REG_RD(sc, + BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i)); + row1 = + REG_RD(sc, + BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + + 4); + row2 = + REG_RD(sc, + BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + + 8); + row3 = + REG_RD(sc, + BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + + 12); + + if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { + 
PMD_DRV_LOG(ERR, + "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x", + i, row3, row2, row1, row0); + rc++; + } else { + break; + } + } + + return rc; +} + +static void bnx2x_attn_int_deasserted3(struct bnx2x_softc *sc, uint32_t attn) +{ + int func = SC_FUNC(sc); + uint32_t val; + + if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { + + if (attn & BNX2X_PMF_LINK_ASSERT(sc)) { + + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0); + bnx2x_read_mf_cfg(sc); + sc->devinfo.mf_info.mf_config[SC_VN(sc)] = + MFCFG_RD(sc, + func_mf_config[SC_ABS_FUNC(sc)].config); + val = + SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status); + + if (val & DRV_STATUS_DCC_EVENT_MASK) + bnx2x_dcc_event(sc, + (val & + DRV_STATUS_DCC_EVENT_MASK)); + + if (val & DRV_STATUS_SET_MF_BW) + bnx2x_set_mf_bw(sc); + + if (val & DRV_STATUS_DRV_INFO_REQ) + bnx2x_handle_drv_info_req(sc); + + if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF)) + bnx2x_pmf_update(sc); + + if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) + bnx2x_handle_eee_event(sc); + + if (sc->link_vars.periodic_flags & + ELINK_PERIODIC_FLAGS_LINK_EVENT) { + /* sync with link */ + sc->link_vars.periodic_flags &= + ~ELINK_PERIODIC_FLAGS_LINK_EVENT; + if (IS_MF(sc)) { + bnx2x_link_sync_notify(sc); + } + bnx2x_link_report(sc); + } + + /* + * Always call it here: bnx2x_link_report() will + * prevent the link indication duplication. + */ + bnx2x_link_status_update(sc); + + } else if (attn & BNX2X_MC_ASSERT_BITS) { + + PMD_DRV_LOG(ERR, "MC assert!"); + bnx2x_mc_assert(sc); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0); + rte_panic("MC assert!"); + + } else if (attn & BNX2X_MCP_ASSERT) { + + PMD_DRV_LOG(ERR, "MCP assert!"); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0); + + } else { + PMD_DRV_LOG(ERR, + "Unknown HW assert! 
(attn 0x%08x)", attn); + } + } + + if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { + PMD_DRV_LOG(ERR, "LATCHED attention 0x%08x (masked)", attn); + if (attn & BNX2X_GRC_TIMEOUT) { + val = REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN); + PMD_DRV_LOG(ERR, "GRC time-out 0x%08x", val); + } + if (attn & BNX2X_GRC_RSV) { + val = REG_RD(sc, MISC_REG_GRC_RSV_ATTN); + PMD_DRV_LOG(ERR, "GRC reserved 0x%08x", val); + } + REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); + } +} + +static void bnx2x_attn_int_deasserted2(struct bnx2x_softc *sc, uint32_t attn) +{ + int port = SC_PORT(sc); + int reg_offset; + uint32_t val0, mask0, val1, mask1; + uint32_t val; + + if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { + val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR); + PMD_DRV_LOG(ERR, "CFC hw attention 0x%08x", val); +/* CFC error attention */ + if (val & 0x2) { + PMD_DRV_LOG(ERR, "FATAL error from CFC"); + } + } + + if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { + val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0); + PMD_DRV_LOG(ERR, "PXP hw attention-0 0x%08x", val); +/* RQ_USDMDP_FIFO_OVERFLOW */ + if (val & 0x18000) { + PMD_DRV_LOG(ERR, "FATAL error from PXP"); + } + + if (!CHIP_IS_E1x(sc)) { + val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1); + PMD_DRV_LOG(ERR, "PXP hw attention-1 0x%08x", val); + } + } +#define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR +#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT + + if (attn & AEU_PXP2_HW_INT_BIT) { +/* CQ47854 workaround do not panic on + * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR + */ + if (!CHIP_IS_E1x(sc)) { + mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0); + val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1); + mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1); + val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0); + /* + * If the olny PXP2_EOP_ERROR_BIT is set in + * STS0 and STS1 - clear it + * + * probably we lose additional attentions between + * STS0 and STS_CLR0, in this case user will not + * be notified about them + */ + if (val0 & mask0 & PXP2_EOP_ERROR_BIT && + !(val1 & mask1)) + val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); + + /* print the register, since no one can restore it */ + PMD_DRV_LOG(ERR, + "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x", val0); + + /* + * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR + * then notify + */ + if (val0 & PXP2_EOP_ERROR_BIT) { + PMD_DRV_LOG(ERR, "PXP2_WR_PGLUE_EOP_ERROR"); + + /* + * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is + * set then clear attention from PXP2 block without panic + */ + if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) && + ((val1 & mask1) == 0)) + attn &= ~AEU_PXP2_HW_INT_BIT; + } + } + } + + if (attn & HW_INTERRUT_ASSERT_SET_2) { + reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); + + val = REG_RD(sc, reg_offset); + val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); + REG_WR(sc, reg_offset, val); + + PMD_DRV_LOG(ERR, + "FATAL HW block attention set2 0x%x", + (uint32_t) (attn & HW_INTERRUT_ASSERT_SET_2)); + rte_panic("HW block attention set2"); + } +} + +static void bnx2x_attn_int_deasserted1(struct bnx2x_softc *sc, uint32_t attn) +{ + int port = SC_PORT(sc); + int reg_offset; + uint32_t val; + + if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { + val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR); + PMD_DRV_LOG(ERR, "DB hw attention 0x%08x", val); +/* DORQ discard attention */ + if (val & 0x2) { + PMD_DRV_LOG(ERR, "FATAL error from DORQ"); + } + } + + if (attn & HW_INTERRUT_ASSERT_SET_1) { + reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); + + val = REG_RD(sc, reg_offset); + val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); + REG_WR(sc, reg_offset, val); + + PMD_DRV_LOG(ERR, + "FATAL HW block attention set1 0x%08x", + (uint32_t) (attn & HW_INTERRUT_ASSERT_SET_1)); + rte_panic("HW block attention set1"); + } +} + +static void bnx2x_attn_int_deasserted0(struct bnx2x_softc *sc, uint32_t attn) +{ + int port = SC_PORT(sc); + int reg_offset; + uint32_t val; + + reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; + + if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { + val = REG_RD(sc, reg_offset); + val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; + REG_WR(sc, reg_offset, val); + + PMD_DRV_LOG(WARNING, "SPIO5 hw attention"); + +/* Fan failure attention */ + elink_hw_reset_phy(&sc->link_params); + bnx2x_fan_failure(sc); + } + + if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) { + elink_handle_module_detect_int(&sc->link_params); + } + + if (attn & HW_INTERRUT_ASSERT_SET_0) { + val = REG_RD(sc, reg_offset); + val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); + REG_WR(sc, reg_offset, val); + + rte_panic("FATAL HW block attention set0 0x%lx", + (attn & HW_INTERRUT_ASSERT_SET_0)); + } +} + +static void bnx2x_attn_int_deasserted(struct bnx2x_softc *sc, uint32_t deasserted) +{ + struct attn_route attn; + struct attn_route *group_mask; + int port = SC_PORT(sc); + int index; + uint32_t reg_addr; + uint32_t val; + uint32_t aeu_mask; + uint8_t global = FALSE; + + /* + * Need to take HW lock because MCP or other port might also + * try to handle this event. + */ + bnx2x_acquire_alr(sc); + + if (bnx2x_chk_parity_attn(sc, &global, TRUE)) { + sc->recovery_state = BNX2X_RECOVERY_INIT; + +/* disable HW interrupts */ + bnx2x_int_disable(sc); + bnx2x_release_alr(sc); + return; + } + + attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port * 4); + attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port * 4); + attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port * 4); + attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port * 4); + if (!CHIP_IS_E1x(sc)) { + attn.sig[4] = + REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port * 4); + } else { + attn.sig[4] = 0; + } + + for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { + if (deasserted & (1 << index)) { + group_mask = &sc->attn_group[index]; + + bnx2x_attn_int_deasserted4(sc, + attn. + sig[4] & group_mask->sig[4]); + bnx2x_attn_int_deasserted3(sc, + attn. + sig[3] & group_mask->sig[3]); + bnx2x_attn_int_deasserted1(sc, + attn. + sig[1] & group_mask->sig[1]); + bnx2x_attn_int_deasserted2(sc, + attn. + sig[2] & group_mask->sig[2]); + bnx2x_attn_int_deasserted0(sc, + attn. + sig[0] & group_mask->sig[0]); + } + } + + bnx2x_release_alr(sc); + + if (sc->devinfo.int_block == INT_BLOCK_HC) { + reg_addr = (HC_REG_COMMAND_REG + port * 32 + + COMMAND_REG_ATTN_BITS_CLR); + } else { + reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER * 8); + } + + val = ~deasserted; + PMD_DRV_LOG(DEBUG, + "about to mask 0x%08x at %s addr 0x%08x", val, + (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", + reg_addr); + REG_WR(sc, reg_addr, val); + + if (~sc->attn_state & deasserted) { + PMD_DRV_LOG(ERR, "IGU error"); + } + + reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : + MISC_REG_AEU_MASK_ATTN_FUNC_0; + + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + + aeu_mask = REG_RD(sc, reg_addr); + + aeu_mask |= (deasserted & 0x3ff); + + REG_WR(sc, reg_addr, aeu_mask); + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + + sc->attn_state &= ~deasserted; +} + +static void bnx2x_attn_int(struct bnx2x_softc *sc) +{ + /* read local copy of bits */ + uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits); + uint32_t attn_ack = + le32toh(sc->def_sb->atten_status_block.attn_bits_ack); + uint32_t attn_state = sc->attn_state; + + /* look for changed bits */ + uint32_t asserted = attn_bits & ~attn_ack & ~attn_state; + uint32_t deasserted = ~attn_bits & attn_ack & attn_state; + + PMD_DRV_LOG(DEBUG, + "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x", + attn_bits, attn_ack, asserted, deasserted); + + if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) { + PMD_DRV_LOG(ERR, "BAD attention state"); + } + + /* handle bits that were raised */ + if (asserted) { + bnx2x_attn_int_asserted(sc, asserted); + } + + if (deasserted) { + bnx2x_attn_int_deasserted(sc, deasserted); + } +} + +static uint16_t bnx2x_update_dsb_idx(struct bnx2x_softc *sc) +{ + struct host_sp_status_block *def_sb = sc->def_sb; + uint16_t rc = 0; + + mb(); /* status block is written to by the chip */ + + if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) { + sc->def_att_idx = def_sb->atten_status_block.attn_bits_index; + rc |= BNX2X_DEF_SB_ATT_IDX; + } + + if (sc->def_idx != def_sb->sp_sb.running_index) { + sc->def_idx = def_sb->sp_sb.running_index; + rc |= BNX2X_DEF_SB_IDX; + } + + mb(); + + return rc; +} + +static struct ecore_queue_sp_obj *bnx2x_cid_to_q_obj(struct bnx2x_softc *sc, + uint32_t cid) +{ + return &sc->sp_objs[CID_TO_FP(cid, sc)].q_obj; +} + +static void bnx2x_handle_mcast_eqe(struct bnx2x_softc *sc) +{ + struct ecore_mcast_ramrod_params rparam; + int rc; + + memset(&rparam, 0, sizeof(rparam)); + + rparam.mcast_obj = &sc->mcast_obj; + + /* clear pending state for the last command */ + sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw); + + /* if there are pending mcast commands - send them */ + if (sc->mcast_obj.check_pending(&sc->mcast_obj)) { + rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); + if (rc < 0) { + PMD_DRV_LOG(INFO, + "Failed to send pending mcast commands (%d)", + rc); + } + } +} + +static void +bnx2x_handle_classification_eqe(struct bnx2x_softc *sc, union event_ring_elem *elem) +{ + unsigned long ramrod_flags = 0; + int rc = 0; + uint32_t cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; + struct ecore_vlan_mac_obj *vlan_mac_obj; + + /* always push next commands out, don't wait here */ + bnx2x_set_bit(RAMROD_CONT, &ramrod_flags); + + switch (le32toh(elem->message.data.eth_event.echo) >> BNX2X_SWCID_SHIFT) { + case ECORE_FILTER_MAC_PENDING: + PMD_DRV_LOG(DEBUG, "Got SETUP_MAC completions"); + vlan_mac_obj = &sc->sp_objs[cid].mac_obj; + break; + + case ECORE_FILTER_MCAST_PENDING: + PMD_DRV_LOG(DEBUG, "Got SETUP_MCAST completions"); + bnx2x_handle_mcast_eqe(sc); + return; + + default: + PMD_DRV_LOG(NOTICE, "Unsupported classification command: %d", + elem->message.data.eth_event.echo); + return; + } + + rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags); + + if (rc < 0) { + PMD_DRV_LOG(NOTICE, "Failed to schedule new commands (%d)", rc); + } else if (rc > 0) { + PMD_DRV_LOG(DEBUG, "Scheduled next pending commands..."); + } +} + 
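+/*
+ * Called when a rx_mode (FILTERS_RULES) ramrod completes: clear the
+ * pending bit and, if another rx_mode change was scheduled in the
+ * meantime, re-issue it via bnx2x_set_storm_rx_mode().
+ */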
+static void bnx2x_handle_rx_mode_eqe(struct bnx2x_softc *sc) +{ + bnx2x_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); + + /* send rx_mode command again if was requested */ + if (bnx2x_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state)) { + bnx2x_set_storm_rx_mode(sc); + } +} + +static void bnx2x_update_eq_prod(struct bnx2x_softc *sc, uint16_t prod) +{ + storm_memset_eq_prod(sc, prod, SC_FUNC(sc)); + wmb(); /* keep prod updates ordered */ +} + +static void bnx2x_eq_int(struct bnx2x_softc *sc) +{ + uint16_t hw_cons, sw_cons, sw_prod; + union event_ring_elem *elem; + uint8_t echo; + uint32_t cid; + uint8_t opcode; + int spqe_cnt = 0; + struct ecore_queue_sp_obj *q_obj; + struct ecore_func_sp_obj *f_obj = &sc->func_obj; + struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw; + + hw_cons = le16toh(*sc->eq_cons_sb); + + /* + * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256. + * when we get to the next-page we need to adjust so the loop + * condition below will be met. The next element is the size of a + * regular element and hence incrementing by 1 + */ + if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) { + hw_cons++; + } + + /* + * This function may never run in parallel with itself for a + * specific sc and no need for a read memory barrier here. + */ + sw_cons = sc->eq_cons; + sw_prod = sc->eq_prod; + + for (; + sw_cons != hw_cons; + sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { + + elem = &sc->eq[EQ_DESC(sw_cons)]; + +/* elem CID originates from FW, actually LE */ + cid = SW_CID(elem->message.data.cfc_del_event.cid); + opcode = elem->message.opcode; + +/* handle eq element */ + switch (opcode) { + case EVENT_RING_OPCODE_STAT_QUERY: + PMD_DEBUG_PERIODIC_LOG(DEBUG, "got statistics completion event %d", + sc->stats_comp++); + /* nothing to do with stats comp */ + goto next_spqe; + + case EVENT_RING_OPCODE_CFC_DEL: + /* handle according to cid range */ + /* we may want to verify here that the sc state is HALTING */ + PMD_DRV_LOG(DEBUG, "got delete ramrod for MULTI[%d]", + cid); + q_obj = bnx2x_cid_to_q_obj(sc, cid); + if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) { + break; + } + goto next_spqe; + + case EVENT_RING_OPCODE_STOP_TRAFFIC: + PMD_DRV_LOG(DEBUG, "got STOP TRAFFIC"); + if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) { + break; + } + goto next_spqe; + + case EVENT_RING_OPCODE_START_TRAFFIC: + PMD_DRV_LOG(DEBUG, "got START TRAFFIC"); + if (f_obj->complete_cmd + (sc, f_obj, ECORE_F_CMD_TX_START)) { + break; + } + goto next_spqe; + + case EVENT_RING_OPCODE_FUNCTION_UPDATE: + echo = elem->message.data.function_update_event.echo; + if (echo == SWITCH_UPDATE) { + PMD_DRV_LOG(DEBUG, + "got FUNC_SWITCH_UPDATE ramrod"); + if (f_obj->complete_cmd(sc, f_obj, + ECORE_F_CMD_SWITCH_UPDATE)) + { + break; + } + } else { + PMD_DRV_LOG(DEBUG, + "AFEX: ramrod completed FUNCTION_UPDATE"); + f_obj->complete_cmd(sc, f_obj, + ECORE_F_CMD_AFEX_UPDATE); + } + goto next_spqe; + + case EVENT_RING_OPCODE_FORWARD_SETUP: + q_obj = &bnx2x_fwd_sp_obj(sc, q_obj); + if (q_obj->complete_cmd(sc, q_obj, + ECORE_Q_CMD_SETUP_TX_ONLY)) { + break; + } + goto next_spqe; + + case EVENT_RING_OPCODE_FUNCTION_START: + PMD_DRV_LOG(DEBUG, "got FUNC_START ramrod"); + if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) { + break; + } + goto next_spqe; + + case EVENT_RING_OPCODE_FUNCTION_STOP: + PMD_DRV_LOG(DEBUG, "got FUNC_STOP ramrod"); + if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) { + break; + } + goto next_spqe; + } + + switch (opcode 
| sc->state) { + case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BNX2X_STATE_OPENING_WAITING_PORT): + cid = + elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; + PMD_DRV_LOG(DEBUG, "got RSS_UPDATE ramrod. CID %d", + cid); + rss_raw->clear_pending(rss_raw); + break; + + case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_CLOSING_WAITING_HALT): + case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_CLOSING_WAITING_HALT): + PMD_DRV_LOG(DEBUG, + "got (un)set mac ramrod"); + bnx2x_handle_classification_eqe(sc, elem); + break; + + case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_CLOSING_WAITING_HALT): + PMD_DRV_LOG(DEBUG, + "got mcast ramrod"); + bnx2x_handle_mcast_eqe(sc); + break; + + case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_CLOSING_WAITING_HALT): + PMD_DRV_LOG(DEBUG, + "got rx_mode ramrod"); + bnx2x_handle_rx_mode_eqe(sc); + break; + + default: + /* unknown event log error and continue */ + PMD_DRV_LOG(INFO, "Unknown EQ event %d, sc->state 0x%x", + elem->message.opcode, sc->state); + } + +next_spqe: + spqe_cnt++; + } /* for */ + + mb(); + atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt); + + sc->eq_cons = sw_cons; + sc->eq_prod = sw_prod; + + /* make sure that above mem writes were issued towards the memory */ + wmb(); + + /* update producer */ + bnx2x_update_eq_prod(sc, sc->eq_prod); +} + +static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc) +{ + uint16_t status; + int rc = 0; + + /* what work needs to be performed? */ + status = bnx2x_update_dsb_idx(sc); + + /* HW attentions */ + if (status & BNX2X_DEF_SB_ATT_IDX) { + PMD_DRV_LOG(DEBUG, "---> ATTN INTR <---"); + bnx2x_attn_int(sc); + status &= ~BNX2X_DEF_SB_ATT_IDX; + rc = 1; + } + + /* SP events: STAT_QUERY and others */ + if (status & BNX2X_DEF_SB_IDX) { +/* handle EQ completions */ + PMD_DEBUG_PERIODIC_LOG(DEBUG, "---> EQ INTR <---"); + bnx2x_eq_int(sc); + bnx2x_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, + le16toh(sc->def_idx), IGU_INT_NOP, 1); + status &= ~BNX2X_DEF_SB_IDX; + } + + /* if status is non zero then something went wrong */ + if (unlikely(status)) { + PMD_DRV_LOG(INFO, + "Got an unknown SP interrupt! (0x%04x)", status); + } + + /* ack status block only if something was actually handled */ + bnx2x_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, + le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1); + + return rc; +} + +static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp) +{ + struct bnx2x_softc *sc = fp->sc; + uint8_t more_rx = FALSE; + + /* update the fastpath index */ + bnx2x_update_fp_sb_idx(fp); + + if (scan_fp) { + if (bnx2x_has_rx_work(fp)) { + more_rx = bnx2x_rxeof(sc, fp); + } + + if (more_rx) { + /* still more work to do */ + bnx2x_handle_fp_tq(fp, scan_fp); + return; + } + } + + bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID, + le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); +} + +/* + * Legacy interrupt entry point. 
+ * + * Verifies that the controller generated the interrupt and + * then calls a separate routine to handle the various + * interrupt causes: link, RX, and TX. + */ +int bnx2x_intr_legacy(struct bnx2x_softc *sc, int scan_fp) +{ + struct bnx2x_fastpath *fp; + uint32_t status, mask; + int i, rc = 0; + + /* + * 0 for ustorm, 1 for cstorm + * the bits returned from ack_int() are 0-15 + * bit 0 = attention status block + * bit 1 = fast path status block + * a mask of 0x2 or more = tx/rx event + * a mask of 1 = slow path event + */ + + status = bnx2x_ack_int(sc); + + /* the interrupt is not for us */ + if (unlikely(status == 0)) { + return 0; + } + + PMD_DEBUG_PERIODIC_LOG(DEBUG, "Interrupt status 0x%04x", status); + //bnx2x_dump_status_block(sc); + + FOR_EACH_ETH_QUEUE(sc, i) { + fp = &sc->fp[i]; + mask = (0x2 << (fp->index + CNIC_SUPPORT(sc))); + if (status & mask) { + bnx2x_handle_fp_tq(fp, scan_fp); + status &= ~mask; + } + } + + if (unlikely(status & 0x1)) { + rc = bnx2x_handle_sp_tq(sc); + status &= ~0x1; + } + + if (unlikely(status)) { + PMD_DRV_LOG(WARNING, + "Unexpected fastpath status (0x%08x)!", status); + } + + return rc; +} + +static int bnx2x_init_hw_common_chip(struct bnx2x_softc *sc); +static int bnx2x_init_hw_common(struct bnx2x_softc *sc); +static int bnx2x_init_hw_port(struct bnx2x_softc *sc); +static int bnx2x_init_hw_func(struct bnx2x_softc *sc); +static void bnx2x_reset_common(struct bnx2x_softc *sc); +static void bnx2x_reset_port(struct bnx2x_softc *sc); +static void bnx2x_reset_func(struct bnx2x_softc *sc); +static int bnx2x_init_firmware(struct bnx2x_softc *sc); +static void bnx2x_release_firmware(struct bnx2x_softc *sc); + +static struct +ecore_func_sp_drv_ops bnx2x_func_sp_drv = { + .init_hw_cmn_chip = bnx2x_init_hw_common_chip, + .init_hw_cmn = bnx2x_init_hw_common, + .init_hw_port = bnx2x_init_hw_port, + .init_hw_func = bnx2x_init_hw_func, + + .reset_hw_cmn = bnx2x_reset_common, + .reset_hw_port = bnx2x_reset_port, + .reset_hw_func = bnx2x_reset_func, + + .init_fw = bnx2x_init_firmware, + .release_fw = bnx2x_release_firmware, +}; + +static void bnx2x_init_func_obj(struct bnx2x_softc *sc) +{ + sc->dmae_ready = 0; + + PMD_INIT_FUNC_TRACE(); + + ecore_init_func_obj(sc, + &sc->func_obj, + BNX2X_SP(sc, func_rdata), + (phys_addr_t)BNX2X_SP_MAPPING(sc, func_rdata), + BNX2X_SP(sc, func_afex_rdata), + (phys_addr_t)BNX2X_SP_MAPPING(sc, func_afex_rdata), + &bnx2x_func_sp_drv); +} + +static int bnx2x_init_hw(struct bnx2x_softc *sc, uint32_t load_code) +{ + struct ecore_func_state_params func_params = { NULL }; + int rc; + + PMD_INIT_FUNC_TRACE(); + + /* prepare the parameters for function state transitions */ + bnx2x_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + + func_params.f_obj = &sc->func_obj; + func_params.cmd = ECORE_F_CMD_HW_INIT; + + func_params.params.hw_init.load_phase = load_code; + + /* + * Via a plethora of function pointers, we will eventually reach + * bnx2x_init_hw_common(), bnx2x_init_hw_port(), or bnx2x_init_hw_func(). 
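+ * Which of those is reached is determined by the load_phase (load_code)
+ * recorded in func_params and by the hooks registered in
+ * bnx2x_func_sp_drv above.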
+ */ + rc = ecore_func_state_change(sc, &func_params); + + return rc; +} + +static void +bnx2x_fill(struct bnx2x_softc *sc, uint32_t addr, int fill, uint32_t len) +{ + uint32_t i; + + if (!(len % 4) && !(addr % 4)) { + for (i = 0; i < len; i += 4) { + REG_WR(sc, (addr + i), fill); + } + } else { + for (i = 0; i < len; i++) { + REG_WR8(sc, (addr + i), fill); + } + } +} + +/* writes FP SP data to FW - data_size in dwords */ +static void +bnx2x_wr_fp_sb_data(struct bnx2x_softc *sc, int fw_sb_id, uint32_t * sb_data_p, + uint32_t data_size) +{ + uint32_t index; + + for (index = 0; index < data_size; index++) { + REG_WR(sc, + (BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + + (sizeof(uint32_t) * index)), *(sb_data_p + index)); + } +} + +static void bnx2x_zero_fp_sb(struct bnx2x_softc *sc, int fw_sb_id) +{ + struct hc_status_block_data_e2 sb_data_e2; + struct hc_status_block_data_e1x sb_data_e1x; + uint32_t *sb_data_p; + uint32_t data_size = 0; + + if (!CHIP_IS_E1x(sc)) { + memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); + sb_data_e2.common.state = SB_DISABLED; + sb_data_e2.common.p_func.vf_valid = FALSE; + sb_data_p = (uint32_t *) & sb_data_e2; + data_size = (sizeof(struct hc_status_block_data_e2) / + sizeof(uint32_t)); + } else { + memset(&sb_data_e1x, 0, + sizeof(struct hc_status_block_data_e1x)); + sb_data_e1x.common.state = SB_DISABLED; + sb_data_e1x.common.p_func.vf_valid = FALSE; + sb_data_p = (uint32_t *) & sb_data_e1x; + data_size = (sizeof(struct hc_status_block_data_e1x) / + sizeof(uint32_t)); + } + + bnx2x_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); + + bnx2x_fill(sc, + (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)), 0, + CSTORM_STATUS_BLOCK_SIZE); + bnx2x_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)), + 0, CSTORM_SYNC_BLOCK_SIZE); +} + +static void +bnx2x_wr_sp_sb_data(struct bnx2x_softc *sc, + struct hc_sp_status_block_data *sp_sb_data) +{ + uint32_t i; + + for (i = 0; + i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t)); + i++) { + REG_WR(sc, + (BAR_CSTRORM_INTMEM + + CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) + + (i * sizeof(uint32_t))), + *((uint32_t *) sp_sb_data + i)); + } +} + +static void bnx2x_zero_sp_sb(struct bnx2x_softc *sc) +{ + struct hc_sp_status_block_data sp_sb_data; + + memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); + + sp_sb_data.state = SB_DISABLED; + sp_sb_data.p_func.vf_valid = FALSE; + + bnx2x_wr_sp_sb_data(sc, &sp_sb_data); + + bnx2x_fill(sc, + (BAR_CSTRORM_INTMEM + + CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))), + 0, CSTORM_SP_STATUS_BLOCK_SIZE); + bnx2x_fill(sc, + (BAR_CSTRORM_INTMEM + + CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))), + 0, CSTORM_SP_SYNC_BLOCK_SIZE); +} + +static void +bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, int igu_sb_id, + int igu_seg_id) +{ + hc_sm->igu_sb_id = igu_sb_id; + hc_sm->igu_seg_id = igu_seg_id; + hc_sm->timer_value = 0xFF; + hc_sm->time_to_expire = 0xFFFFFFFF; +} + +static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) +{ + /* zero out state machine indices */ + + /* rx indices */ + index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; + + /* tx indices */ + index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; + + /* map indices */ + + 
/* rx indices */ + index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= + (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT); + + /* tx indices */ + index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= + (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= + (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= + (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= + (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); +} + +static void +bnx2x_init_sb(struct bnx2x_softc *sc, phys_addr_t busaddr, int vfid, + uint8_t vf_valid, int fw_sb_id, int igu_sb_id) +{ + struct hc_status_block_data_e2 sb_data_e2; + struct hc_status_block_data_e1x sb_data_e1x; + struct hc_status_block_sm *hc_sm_p; + uint32_t *sb_data_p; + int igu_seg_id; + int data_size; + + if (CHIP_INT_MODE_IS_BC(sc)) { + igu_seg_id = HC_SEG_ACCESS_NORM; + } else { + igu_seg_id = IGU_SEG_ACCESS_NORM; + } + + bnx2x_zero_fp_sb(sc, fw_sb_id); + + if (!CHIP_IS_E1x(sc)) { + memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); + sb_data_e2.common.state = SB_ENABLED; + sb_data_e2.common.p_func.pf_id = SC_FUNC(sc); + sb_data_e2.common.p_func.vf_id = vfid; + sb_data_e2.common.p_func.vf_valid = vf_valid; + sb_data_e2.common.p_func.vnic_id = SC_VN(sc); + sb_data_e2.common.same_igu_sb_1b = TRUE; + sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr); + sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr); + hc_sm_p = sb_data_e2.common.state_machine; + sb_data_p = (uint32_t *) & sb_data_e2; + data_size = (sizeof(struct hc_status_block_data_e2) / + sizeof(uint32_t)); + bnx2x_map_sb_state_machines(sb_data_e2.index_data); + } else { + memset(&sb_data_e1x, 0, + sizeof(struct hc_status_block_data_e1x)); + sb_data_e1x.common.state = SB_ENABLED; + sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc); + sb_data_e1x.common.p_func.vf_id = 0xff; + sb_data_e1x.common.p_func.vf_valid = FALSE; + sb_data_e1x.common.p_func.vnic_id = SC_VN(sc); + sb_data_e1x.common.same_igu_sb_1b = TRUE; + sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr); + sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr); + hc_sm_p = sb_data_e1x.common.state_machine; + sb_data_p = (uint32_t *) & sb_data_e1x; + data_size = (sizeof(struct hc_status_block_data_e1x) / + sizeof(uint32_t)); + bnx2x_map_sb_state_machines(sb_data_e1x.index_data); + } + + bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id); + bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id); + + /* write indices to HW - PCI guarantees endianity of regpairs */ + bnx2x_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); +} + +static uint8_t bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp) +{ + if (CHIP_IS_E1x(fp->sc)) { + return fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H; + } else { + return fp->cl_id; + } +} + +static uint32_t +bnx2x_rx_ustorm_prods_offset(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp) +{ + uint32_t offset = BAR_USTRORM_INTMEM; + + if (IS_VF(sc)) { + return PXP_VF_ADDR_USDM_QUEUES_START + + (sc->acquire_resp.resc.hw_qid[fp->index] * + sizeof(struct ustorm_queue_zone_data)); + } else if (!CHIP_IS_E1x(sc)) { + offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); + } else { + offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id); + } + + return offset; +} + +static void bnx2x_init_eth_fp(struct bnx2x_softc *sc, int idx) +{ + struct bnx2x_fastpath *fp = &sc->fp[idx]; + uint32_t cids[ECORE_MULTI_TX_COS] = { 0 }; + unsigned long q_type = 0; + int cos; + + fp->sc = sc; + 
fp->index = idx; + + fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc)); + fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc)); + + if (CHIP_IS_E1x(sc)) + fp->cl_id = SC_L_ID(sc) + idx; + else +/* want client ID same as IGU SB ID for non-E1 */ + fp->cl_id = fp->igu_sb_id; + fp->cl_qzone_id = bnx2x_fp_qzone_id(fp); + + /* setup sb indices */ + if (!CHIP_IS_E1x(sc)) { + fp->sb_index_values = fp->status_block.e2_sb->sb.index_values; + fp->sb_running_index = fp->status_block.e2_sb->sb.running_index; + } else { + fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values; + fp->sb_running_index = + fp->status_block.e1x_sb->sb.running_index; + } + + /* init shortcut */ + fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(sc, fp); + + fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]; + + for (cos = 0; cos < sc->max_cos; cos++) { + cids[cos] = idx; + } + fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0]; + + /* nothing more for a VF to do */ + if (IS_VF(sc)) { + return; + } + + bnx2x_init_sb(sc, fp->sb_dma.paddr, BNX2X_VF_ID_INVALID, FALSE, + fp->fw_sb_id, fp->igu_sb_id); + + bnx2x_update_fp_sb_idx(fp); + + /* Configure Queue State object */ + bnx2x_set_bit(ECORE_Q_TYPE_HAS_RX, &q_type); + bnx2x_set_bit(ECORE_Q_TYPE_HAS_TX, &q_type); + + ecore_init_queue_obj(sc, + &sc->sp_objs[idx].q_obj, + fp->cl_id, + cids, + sc->max_cos, + SC_FUNC(sc), + BNX2X_SP(sc, q_rdata), + (phys_addr_t)BNX2X_SP_MAPPING(sc, q_rdata), + q_type); + + /* configure classification DBs */ + ecore_init_mac_obj(sc, + &sc->sp_objs[idx].mac_obj, + fp->cl_id, + idx, + SC_FUNC(sc), + BNX2X_SP(sc, mac_rdata), + (phys_addr_t)BNX2X_SP_MAPPING(sc, mac_rdata), + ECORE_FILTER_MAC_PENDING, &sc->sp_state, + ECORE_OBJ_TYPE_RX_TX, &sc->macs_pool); +} + +static void +bnx2x_update_rx_prod(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, + uint16_t rx_bd_prod, uint16_t rx_cq_prod) +{ + union ustorm_eth_rx_producers rx_prods; + uint32_t i; + + /* update producers */ + rx_prods.prod.bd_prod = rx_bd_prod; + rx_prods.prod.cqe_prod = rx_cq_prod; + rx_prods.prod.reserved = 0; + + /* + * Make sure that the BD and SGE data is updated before updating the + * producers since FW might read the BD/SGE right after the producer + * is updated. + * This is only applicable for weak-ordered memory model archs such + * as IA-64. The following barrier is also mandatory since FW will + * assumes BDs must have buffers. + */ + wmb(); + + for (i = 0; i < (sizeof(rx_prods) / 4); i++) { + REG_WR(sc, + (fp->ustorm_rx_prods_offset + (i * 4)), + rx_prods.raw_data[i]); + } + + wmb(); /* keep prod updates ordered */ +} + +static void bnx2x_init_rx_rings(struct bnx2x_softc *sc) +{ + struct bnx2x_fastpath *fp; + int i; + struct bnx2x_rx_queue *rxq; + + for (i = 0; i < sc->num_queues; i++) { + fp = &sc->fp[i]; + rxq = sc->rx_queues[fp->index]; + if (!rxq) { + PMD_RX_LOG(ERR, "RX queue is NULL"); + return; + } + + rxq->rx_bd_head = 0; + rxq->rx_bd_tail = rxq->nb_rx_desc; + rxq->rx_cq_head = 0; + rxq->rx_cq_tail = TOTAL_RCQ_ENTRIES(rxq); + *fp->rx_cq_cons_sb = 0; + + /* + * Activate the BD ring... 
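+ * by publishing the initial BD and CQE producers via bnx2x_update_rx_prod().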
+ * Warning, this will generate an interrupt (to the TSTORM) + * so this can only be done after the chip is initialized + */ + bnx2x_update_rx_prod(sc, fp, rxq->rx_bd_tail, rxq->rx_cq_tail); + + if (i != 0) { + continue; + } + } +} + +static void bnx2x_init_tx_ring_one(struct bnx2x_fastpath *fp) +{ + struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index]; + + fp->tx_db.data.header.header = 1 << DOORBELL_HDR_DB_TYPE_SHIFT; + fp->tx_db.data.zero_fill1 = 0; + fp->tx_db.data.prod = 0; + + if (!txq) { + PMD_TX_LOG(ERR, "ERROR: TX queue is NULL"); + return; + } + + txq->tx_pkt_tail = 0; + txq->tx_pkt_head = 0; + txq->tx_bd_tail = 0; + txq->tx_bd_head = 0; +} + +static void bnx2x_init_tx_rings(struct bnx2x_softc *sc) +{ + int i; + + for (i = 0; i < sc->num_queues; i++) { + bnx2x_init_tx_ring_one(&sc->fp[i]); + } +} + +static void bnx2x_init_def_sb(struct bnx2x_softc *sc) +{ + struct host_sp_status_block *def_sb = sc->def_sb; + phys_addr_t mapping = sc->def_sb_dma.paddr; + int igu_sp_sb_index; + int igu_seg_id; + int port = SC_PORT(sc); + int func = SC_FUNC(sc); + int reg_offset, reg_offset_en5; + uint64_t section; + int index, sindex; + struct hc_sp_status_block_data sp_sb_data; + + memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); + + if (CHIP_INT_MODE_IS_BC(sc)) { + igu_sp_sb_index = DEF_SB_IGU_ID; + igu_seg_id = HC_SEG_ACCESS_DEF; + } else { + igu_sp_sb_index = sc->igu_dsb_id; + igu_seg_id = IGU_SEG_ACCESS_DEF; + } + + /* attentions */ + section = ((uint64_t) mapping + + offsetof(struct host_sp_status_block, atten_status_block)); + def_sb->atten_status_block.status_block_id = igu_sp_sb_index; + sc->attn_state = 0; + + reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; + + reg_offset_en5 = (port) ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : + MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0; + + for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { +/* take care of sig[0]..sig[4] */ + for (sindex = 0; sindex < 4; sindex++) { + sc->attn_group[index].sig[sindex] = + REG_RD(sc, + (reg_offset + (sindex * 0x4) + + (0x10 * index))); + } + + if (!CHIP_IS_E1x(sc)) { + /* + * enable5 is separate from the rest of the registers, + * and the address skip is 4 and not 16 between the + * different groups + */ + sc->attn_group[index].sig[4] = + REG_RD(sc, (reg_offset_en5 + (0x4 * index))); + } else { + sc->attn_group[index].sig[4] = 0; + } + } + + if (sc->devinfo.int_block == INT_BLOCK_HC) { + reg_offset = + port ? 
HC_REG_ATTN_MSG1_ADDR_L : HC_REG_ATTN_MSG0_ADDR_L; + REG_WR(sc, reg_offset, U64_LO(section)); + REG_WR(sc, (reg_offset + 4), U64_HI(section)); + } else if (!CHIP_IS_E1x(sc)) { + REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); + REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); + } + + section = ((uint64_t) mapping + + offsetof(struct host_sp_status_block, sp_sb)); + + bnx2x_zero_sp_sb(sc); + + /* PCI guarantees endianity of regpair */ + sp_sb_data.state = SB_ENABLED; + sp_sb_data.host_sb_addr.lo = U64_LO(section); + sp_sb_data.host_sb_addr.hi = U64_HI(section); + sp_sb_data.igu_sb_id = igu_sp_sb_index; + sp_sb_data.igu_seg_id = igu_seg_id; + sp_sb_data.p_func.pf_id = func; + sp_sb_data.p_func.vnic_id = SC_VN(sc); + sp_sb_data.p_func.vf_id = 0xff; + + bnx2x_wr_sp_sb_data(sc, &sp_sb_data); + + bnx2x_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); +} + +static void bnx2x_init_sp_ring(struct bnx2x_softc *sc) +{ + atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING); + sc->spq_prod_idx = 0; + sc->dsb_sp_prod = + &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS]; + sc->spq_prod_bd = sc->spq; + sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT); +} + +static void bnx2x_init_eq_ring(struct bnx2x_softc *sc) +{ + union event_ring_elem *elem; + int i; + + for (i = 1; i <= NUM_EQ_PAGES; i++) { + elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1]; + + elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr + + BNX2X_PAGE_SIZE * + (i % NUM_EQ_PAGES))); + elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr + + BNX2X_PAGE_SIZE * + (i % NUM_EQ_PAGES))); + } + + sc->eq_cons = 0; + sc->eq_prod = NUM_EQ_DESC; + sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS]; + + atomic_store_rel_long(&sc->eq_spq_left, + (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING), + NUM_EQ_DESC) - 1)); +} + +static void bnx2x_init_internal_common(struct bnx2x_softc *sc) +{ + int i; + + if (IS_MF_SI(sc)) { +/* + * In switch independent mode, the TSTORM needs to accept + * packets that failed classification, since approximate match + * mac addresses aren't written to NIG LLH. + */ + REG_WR8(sc, + (BAR_TSTRORM_INTMEM + + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET), 2); + } else + REG_WR8(sc, + (BAR_TSTRORM_INTMEM + + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET), 0); + + /* + * Zero this manually as its initialization is currently missing + * in the initTool. + */ + for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) { + REG_WR(sc, + (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)), + 0); + } + + if (!CHIP_IS_E1x(sc)) { + REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET), + CHIP_INT_MODE_IS_BC(sc) ? 
HC_IGU_BC_MODE : + HC_IGU_NBC_MODE); + } +} + +static void bnx2x_init_internal(struct bnx2x_softc *sc, uint32_t load_code) +{ + switch (load_code) { + case FW_MSG_CODE_DRV_LOAD_COMMON: + case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: + bnx2x_init_internal_common(sc); + /* no break */ + + case FW_MSG_CODE_DRV_LOAD_PORT: + /* nothing to do */ + /* no break */ + + case FW_MSG_CODE_DRV_LOAD_FUNCTION: + /* internal memory per function is initialized inside bnx2x_pf_init */ + break; + + default: + PMD_DRV_LOG(NOTICE, "Unknown load_code (0x%x) from MCP", + load_code); + break; + } +} + +static void +storm_memset_func_cfg(struct bnx2x_softc *sc, + struct tstorm_eth_function_common_config *tcfg, + uint16_t abs_fid) +{ + uint32_t addr; + size_t size; + + addr = (BAR_TSTRORM_INTMEM + + TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid)); + size = sizeof(struct tstorm_eth_function_common_config); + ecore_storm_memset_struct(sc, addr, size, (uint32_t *) tcfg); +} + +static void bnx2x_func_init(struct bnx2x_softc *sc, struct bnx2x_func_init_params *p) +{ + struct tstorm_eth_function_common_config tcfg = { 0 }; + + if (CHIP_IS_E1x(sc)) { + storm_memset_func_cfg(sc, &tcfg, p->func_id); + } + + /* Enable the function in the FW */ + storm_memset_vf_to_pf(sc, p->func_id, p->pf_id); + storm_memset_func_en(sc, p->func_id, 1); + + /* spq */ + if (p->func_flgs & FUNC_FLG_SPQ) { + storm_memset_spq_addr(sc, p->spq_map, p->func_id); + REG_WR(sc, + (XSEM_REG_FAST_MEMORY + + XSTORM_SPQ_PROD_OFFSET(p->func_id)), p->spq_prod); + } +} + +/* + * Calculates the sum of vn_min_rates. + * It's needed for further normalizing of the min_rates. + * Returns: + * sum of vn_min_rates. + * or + * 0 - if all the min_rates are 0. + * In the later case fainess algorithm should be deactivated. + * If all min rates are not zero then those that are zeroes will be set to 1. 
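+ * Note: in this implementation nothing is returned; the per-VN minimum
+ * rates are stored in input->vnic_min_rate[] (zero rates are raised to
+ * DEF_MIN_RATE) and fairness is disabled via cmng_enables when all
+ * configured min rates are zero.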
+ */ +static void bnx2x_calc_vn_min(struct bnx2x_softc *sc, struct cmng_init_input *input) +{ + uint32_t vn_cfg; + uint32_t vn_min_rate; + int all_zero = 1; + int vn; + + for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { + vn_cfg = sc->devinfo.mf_info.mf_config[vn]; + vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> + FUNC_MF_CFG_MIN_BW_SHIFT) * 100); + + if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { + /* skip hidden VNs */ + vn_min_rate = 0; + } else if (!vn_min_rate) { + /* If min rate is zero - set it to 100 */ + vn_min_rate = DEF_MIN_RATE; + } else { + all_zero = 0; + } + + input->vnic_min_rate[vn] = vn_min_rate; + } + + /* if ETS or all min rates are zeros - disable fairness */ + if (all_zero) { + input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; + } else { + input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN; + } +} + +static uint16_t +bnx2x_extract_max_cfg(__rte_unused struct bnx2x_softc *sc, uint32_t mf_cfg) +{ + uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> + FUNC_MF_CFG_MAX_BW_SHIFT); + + if (!max_cfg) { + PMD_DRV_LOG(DEBUG, + "Max BW configured to 0 - using 100 instead"); + max_cfg = 100; + } + + return max_cfg; +} + +static void +bnx2x_calc_vn_max(struct bnx2x_softc *sc, int vn, struct cmng_init_input *input) +{ + uint16_t vn_max_rate; + uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn]; + uint32_t max_cfg; + + if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { + vn_max_rate = 0; + } else { + max_cfg = bnx2x_extract_max_cfg(sc, vn_cfg); + + if (IS_MF_SI(sc)) { + /* max_cfg in percents of linkspeed */ + vn_max_rate = + ((sc->link_vars.line_speed * max_cfg) / 100); + } else { /* SD modes */ + /* max_cfg is absolute in 100Mb units */ + vn_max_rate = (max_cfg * 100); + } + } + + input->vnic_max_rate[vn] = vn_max_rate; +} + +static void +bnx2x_cmng_fns_init(struct bnx2x_softc *sc, uint8_t read_cfg, uint8_t cmng_type) +{ + struct cmng_init_input input; + int vn; + + memset(&input, 0, sizeof(struct cmng_init_input)); + + input.port_rate = sc->link_vars.line_speed; + + if (cmng_type == CMNG_FNS_MINMAX) { +/* read mf conf from shmem */ + if (read_cfg) { + bnx2x_read_mf_cfg(sc); + } + +/* get VN min rate and enable fairness if not 0 */ + bnx2x_calc_vn_min(sc, &input); + +/* get VN max rate */ + if (sc->port.pmf) { + for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { + bnx2x_calc_vn_max(sc, vn, &input); + } + } + +/* always enable rate shaping and fairness */ + input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; + + ecore_init_cmng(&input, &sc->cmng); + return; + } +} + +static int bnx2x_get_cmng_fns_mode(struct bnx2x_softc *sc) +{ + if (CHIP_REV_IS_SLOW(sc)) { + return CMNG_FNS_NONE; + } + + if (IS_MF(sc)) { + return CMNG_FNS_MINMAX; + } + + return CMNG_FNS_NONE; +} + +static void +storm_memset_cmng(struct bnx2x_softc *sc, struct cmng_init *cmng, uint8_t port) +{ + int vn; + int func; + uint32_t addr; + size_t size; + + addr = (BAR_XSTRORM_INTMEM + XSTORM_CMNG_PER_PORT_VARS_OFFSET(port)); + size = sizeof(struct cmng_struct_per_port); + ecore_storm_memset_struct(sc, addr, size, (uint32_t *) & cmng->port); + + for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { + func = func_by_vn(sc, vn); + + addr = (BAR_XSTRORM_INTMEM + + XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func)); + size = sizeof(struct rate_shaping_vars_per_vn); + ecore_storm_memset_struct(sc, addr, size, + (uint32_t *) & cmng-> + vnic.vnic_max_rate[vn]); + + addr = (BAR_XSTRORM_INTMEM + + XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func)); + size = sizeof(struct fairness_vars_per_vn); + ecore_storm_memset_struct(sc, 
addr, size, + (uint32_t *) & cmng-> + vnic.vnic_min_rate[vn]); + } +} + +static void bnx2x_pf_init(struct bnx2x_softc *sc) +{ + struct bnx2x_func_init_params func_init; + struct event_ring_data eq_data; + uint16_t flags; + + memset(&eq_data, 0, sizeof(struct event_ring_data)); + memset(&func_init, 0, sizeof(struct bnx2x_func_init_params)); + + if (!CHIP_IS_E1x(sc)) { +/* reset IGU PF statistics: MSIX + ATTN */ +/* PF */ + REG_WR(sc, + (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + + (BNX2X_IGU_STAS_MSG_VF_CNT * 4) + + ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * + 4)), 0); +/* ATTN */ + REG_WR(sc, + (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + + (BNX2X_IGU_STAS_MSG_VF_CNT * 4) + + (BNX2X_IGU_STAS_MSG_PF_CNT * 4) + + ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * + 4)), 0); + } + + /* function setup flags */ + flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); + + func_init.func_flgs = flags; + func_init.pf_id = SC_FUNC(sc); + func_init.func_id = SC_FUNC(sc); + func_init.spq_map = sc->spq_dma.paddr; + func_init.spq_prod = sc->spq_prod_idx; + + bnx2x_func_init(sc, &func_init); + + memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port)); + + /* + * Congestion management values depend on the link rate. + * There is no active link so initial link rate is set to 10Gbps. + * When the link comes up the congestion management values are + * re-calculated according to the actual link rate. + */ + sc->link_vars.line_speed = SPEED_10000; + bnx2x_cmng_fns_init(sc, TRUE, bnx2x_get_cmng_fns_mode(sc)); + + /* Only the PMF sets the HW */ + if (sc->port.pmf) { + storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); + } + + /* init Event Queue - PCI bus guarantees correct endainity */ + eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr); + eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr); + eq_data.producer = sc->eq_prod; + eq_data.index_id = HC_SP_INDEX_EQ_CONS; + eq_data.sb_id = DEF_SB_ID; + storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc)); +} + +static void bnx2x_hc_int_enable(struct bnx2x_softc *sc) +{ + int port = SC_PORT(sc); + uint32_t addr = (port) ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0; + uint32_t val = REG_RD(sc, addr); + uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) + || (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX); + uint8_t single_msix = (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX); + uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI); + + if (msix) { + val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_INT_LINE_EN_0); + val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + if (single_msix) { + val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0; + } + } else if (msi) { + val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; + val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + } else { + val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | + HC_CONFIG_0_REG_INT_LINE_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + + REG_WR(sc, addr, val); + + val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; + } + + REG_WR(sc, addr, val); + + /* ensure that HC_CONFIG is written before leading/trailing edge config */ + mb(); + + /* init leading/trailing edge */ + if (IS_MF(sc)) { + val = (0xee0f | (1 << (SC_VN(sc) + 4))); + if (sc->port.pmf) { + /* enable nig and gpio3 attention */ + val |= 0x1100; + } + } else { + val = 0xffff; + } + + REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port * 8), val); + REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port * 8), val); + + /* make sure that interrupts are indeed enabled from here on */ + mb(); +} + +static void bnx2x_igu_int_enable(struct bnx2x_softc *sc) +{ + uint32_t val; + uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) + || (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX); + uint8_t single_msix = (sc->interrupt_mode == INTR_MODE_SINGLE_MSIX); + uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI); + + val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); + + if (msix) { + val &= ~(IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_SINGLE_ISR_EN); + val |= (IGU_PF_CONF_MSI_MSIX_EN | IGU_PF_CONF_ATTN_BIT_EN); + if (single_msix) { + val |= IGU_PF_CONF_SINGLE_ISR_EN; + } + } else if (msi) { + val &= ~IGU_PF_CONF_INT_LINE_EN; + val |= (IGU_PF_CONF_MSI_MSIX_EN | + IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN); + } else { + val &= ~IGU_PF_CONF_MSI_MSIX_EN; + val |= (IGU_PF_CONF_INT_LINE_EN | + IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN); + } + + /* clean previous status - need to configure igu prior to ack */ + if ((!msix) || single_msix) { + REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); + bnx2x_ack_int(sc); + } + + val |= IGU_PF_CONF_FUNC_EN; + + PMD_DRV_LOG(DEBUG, "write 0x%x to IGU mode %s", + val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx"))); + + REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); + + mb(); + + /* init leading/trailing edge */ + if (IS_MF(sc)) { + val = (0xee0f | (1 << (SC_VN(sc) + 4))); + if (sc->port.pmf) { + /* enable nig and gpio3 attention */ + val |= 0x1100; + } + } else { + val = 0xffff; + } + + REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); + REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); + + /* make sure that interrupts are indeed enabled from here on */ + mb(); +} + +static void bnx2x_int_enable(struct bnx2x_softc *sc) +{ + if (sc->devinfo.int_block == INT_BLOCK_HC) { + bnx2x_hc_int_enable(sc); + } else { + bnx2x_igu_int_enable(sc); + } +} + +static void bnx2x_hc_int_disable(struct bnx2x_softc *sc) +{ + int port = SC_PORT(sc); + uint32_t addr = (port) ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0; + uint32_t val = REG_RD(sc, addr); + + val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | + HC_CONFIG_0_REG_INT_LINE_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); + /* flush all outstanding writes */ + mb(); + + REG_WR(sc, addr, val); + if (REG_RD(sc, addr) != val) { + PMD_DRV_LOG(ERR, "proper val not read from HC IGU!"); + } +} + +static void bnx2x_igu_int_disable(struct bnx2x_softc *sc) +{ + uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); + + val &= ~(IGU_PF_CONF_MSI_MSIX_EN | + IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_ATTN_BIT_EN); + + PMD_DRV_LOG(DEBUG, "write %x to IGU", val); + + /* flush all outstanding writes */ + mb(); + + REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); + if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) { + PMD_DRV_LOG(ERR, "proper val not read from IGU!"); + } +} + +static void bnx2x_int_disable(struct bnx2x_softc *sc) +{ + if (sc->devinfo.int_block == INT_BLOCK_HC) { + bnx2x_hc_int_disable(sc); + } else { + bnx2x_igu_int_disable(sc); + } +} + +static void bnx2x_nic_init(struct bnx2x_softc *sc, int load_code) +{ + int i; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < sc->num_queues; i++) { + bnx2x_init_eth_fp(sc, i); + } + + rmb(); /* ensure status block indices were read */ + + bnx2x_init_rx_rings(sc); + bnx2x_init_tx_rings(sc); + + if (IS_VF(sc)) { + bnx2x_memset_stats(sc); + return; + } + + /* initialize MOD_ABS interrupts */ + elink_init_mod_abs_int(sc, &sc->link_vars, + sc->devinfo.chip_id, + sc->devinfo.shmem_base, + sc->devinfo.shmem2_base, SC_PORT(sc)); + + bnx2x_init_def_sb(sc); + bnx2x_update_dsb_idx(sc); + bnx2x_init_sp_ring(sc); + bnx2x_init_eq_ring(sc); + bnx2x_init_internal(sc, load_code); + bnx2x_pf_init(sc); + bnx2x_stats_init(sc); + + /* flush all before enabling interrupts */ + mb(); + + bnx2x_int_enable(sc); + + /* check for SPIO5 */ + bnx2x_attn_int_deasserted0(sc, + REG_RD(sc, + (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + + SC_PORT(sc) * 4)) & + AEU_INPUTS_ATTN_BITS_SPIO5); +} + +static void bnx2x_init_objs(struct bnx2x_softc *sc) +{ + /* mcast rules must be added to tx if tx switching is enabled */ + ecore_obj_type o_type; + if (sc->flags & BNX2X_TX_SWITCHING) + o_type = ECORE_OBJ_TYPE_RX_TX; + else + o_type = ECORE_OBJ_TYPE_RX; + + /* RX_MODE controlling object */ + ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj); + + /* multicast configuration controlling object */ + ecore_init_mcast_obj(sc, + &sc->mcast_obj, + sc->fp[0].cl_id, + sc->fp[0].index, + SC_FUNC(sc), + SC_FUNC(sc), + BNX2X_SP(sc, mcast_rdata), + (phys_addr_t)BNX2X_SP_MAPPING(sc, mcast_rdata), + ECORE_FILTER_MCAST_PENDING, + &sc->sp_state, o_type); + + /* Setup CAM credit pools */ + ecore_init_mac_credit_pool(sc, + &sc->macs_pool, + SC_FUNC(sc), + CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : + VNICS_PER_PATH(sc)); + + ecore_init_vlan_credit_pool(sc, + &sc->vlans_pool, + SC_ABS_FUNC(sc) >> 1, + CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : + VNICS_PER_PATH(sc)); + + /* RSS configuration object */ + ecore_init_rss_config_obj(&sc->rss_conf_obj, + sc->fp[0].cl_id, + sc->fp[0].index, + SC_FUNC(sc), + SC_FUNC(sc), + BNX2X_SP(sc, rss_rdata), + (phys_addr_t)BNX2X_SP_MAPPING(sc, rss_rdata), + ECORE_FILTER_RSS_CONF_PENDING, + &sc->sp_state, ECORE_OBJ_TYPE_RX); +} + +/* + * Initialize the function. This must be called before sending CLIENT_SETUP + * for the first client. 
+ */ +static int bnx2x_func_start(struct bnx2x_softc *sc) +{ + struct ecore_func_state_params func_params = { NULL }; + struct ecore_func_start_params *start_params = + &func_params.params.start; + + /* Prepare parameters for function state transitions */ + bnx2x_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + + func_params.f_obj = &sc->func_obj; + func_params.cmd = ECORE_F_CMD_START; + + /* Function parameters */ + start_params->mf_mode = sc->devinfo.mf_info.mf_mode; + start_params->sd_vlan_tag = OVLAN(sc); + + if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { + start_params->network_cos_mode = STATIC_COS; + } else { /* CHIP_IS_E1X */ + start_params->network_cos_mode = FW_WRR; + } + + start_params->gre_tunnel_mode = 0; + start_params->gre_tunnel_rss = 0; + + return ecore_func_state_change(sc, &func_params); +} + +static int bnx2x_set_power_state(struct bnx2x_softc *sc, uint8_t state) +{ + uint16_t pmcsr; + + /* If there is no power capability, silently succeed */ + if (!(sc->devinfo.pcie_cap_flags & BNX2X_PM_CAPABLE_FLAG)) { + PMD_DRV_LOG(WARNING, "No power capability"); + return 0; + } + + pci_read(sc, (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), &pmcsr, + 2); + + switch (state) { + case PCI_PM_D0: + pci_write_word(sc, + (sc->devinfo.pcie_pm_cap_reg + + PCIR_POWER_STATUS), + ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME)); + + if (pmcsr & PCIM_PSTAT_DMASK) { + /* delay required during transition out of D3hot */ + DELAY(20000); + } + + break; + + case PCI_PM_D3hot: + /* don't shut down the power for emulation and FPGA */ + if (CHIP_REV_IS_SLOW(sc)) { + return 0; + } + + pmcsr &= ~PCIM_PSTAT_DMASK; + pmcsr |= PCIM_PSTAT_D3; + + if (sc->wol) { + pmcsr |= PCIM_PSTAT_PMEENABLE; + } + + pci_write_long(sc, + (sc->devinfo.pcie_pm_cap_reg + + PCIR_POWER_STATUS), pmcsr); + + /* + * No more memory access after this point until device is brought back + * to D0 state. + */ + break; + + default: + PMD_DRV_LOG(NOTICE, "Can't support PCI power state = %d", + state); + return -1; + } + + return 0; +} + +/* return true if succeeded to acquire the lock */ +static uint8_t bnx2x_trylock_hw_lock(struct bnx2x_softc *sc, uint32_t resource) +{ + uint32_t lock_status; + uint32_t resource_bit = (1 << resource); + int func = SC_FUNC(sc); + uint32_t hw_lock_control_reg; + + /* Validating that the resource is within range */ + if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { + PMD_DRV_LOG(INFO, + "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)", + resource, HW_LOCK_MAX_RESOURCE_VALUE); + return FALSE; + } + + if (func <= 5) { + hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func * 8); + } else { + hw_lock_control_reg = + (MISC_REG_DRIVER_CONTROL_7 + (func - 6) * 8); + } + + /* try to acquire the lock */ + REG_WR(sc, hw_lock_control_reg + 4, resource_bit); + lock_status = REG_RD(sc, hw_lock_control_reg); + if (lock_status & resource_bit) { + return TRUE; + } + + PMD_DRV_LOG(NOTICE, "Failed to get a resource lock 0x%x", resource); + + return FALSE; +} + +/* + * Get the recovery leader resource id according to the engine this function + * belongs to. Currently only only 2 engines is supported. 
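The try-lock above reduces to a set-and-verify sequence on the lock control register: write the resource bit to the "set" register (control register + 4), read the control register back, and the lock is held only if the bit stuck. A minimal, self-contained sketch of that pattern, with the register accessors passed in as callbacks because the sketch has no device context and the resource index assumed below 32:

    #include <stdint.h>

    static int ex_trylock(uint32_t (*rd)(uint32_t addr),
                          void (*wr)(uint32_t addr, uint32_t val),
                          uint32_t ctl_reg, uint32_t resource)
    {
            uint32_t bit = 1u << resource;      /* resource assumed < 32 */

            wr(ctl_reg + 4, bit);               /* request via the set register */
            return (rd(ctl_reg) & bit) != 0;    /* success iff the bit is now set */
    }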
+ */ +static int bnx2x_get_leader_lock_resource(struct bnx2x_softc *sc) +{ + if (SC_PATH(sc)) { + return HW_LOCK_RESOURCE_RECOVERY_LEADER_1; + } else { + return HW_LOCK_RESOURCE_RECOVERY_LEADER_0; + } +} + +/* try to acquire a leader lock for current engine */ +static uint8_t bnx2x_trylock_leader_lock(struct bnx2x_softc *sc) +{ + return bnx2x_trylock_hw_lock(sc, bnx2x_get_leader_lock_resource(sc)); +} + +static int bnx2x_release_leader_lock(struct bnx2x_softc *sc) +{ + return bnx2x_release_hw_lock(sc, bnx2x_get_leader_lock_resource(sc)); +} + +/* close gates #2, #3 and #4 */ +static void bnx2x_set_234_gates(struct bnx2x_softc *sc, uint8_t close) +{ + uint32_t val; + + /* gates #2 and #4a are closed/opened */ + /* #4 */ + REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, ! !close); + /* #2 */ + REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, ! !close); + + /* #3 */ + if (CHIP_IS_E1x(sc)) { +/* prevent interrupts from HC on both ports */ + val = REG_RD(sc, HC_REG_CONFIG_1); + if (close) + REG_WR(sc, HC_REG_CONFIG_1, (val & ~(uint32_t) + HC_CONFIG_1_REG_BLOCK_DISABLE_1)); + else + REG_WR(sc, HC_REG_CONFIG_1, + (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1)); + + val = REG_RD(sc, HC_REG_CONFIG_0); + if (close) + REG_WR(sc, HC_REG_CONFIG_0, (val & ~(uint32_t) + HC_CONFIG_0_REG_BLOCK_DISABLE_0)); + else + REG_WR(sc, HC_REG_CONFIG_0, + (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0)); + + } else { +/* Prevent incomming interrupts in IGU */ + val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); + + if (close) + REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, + (val & ~(uint32_t) + IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); + else + REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, + (val | + IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); + } + + wmb(); +} + +/* poll for pending writes bit, it should get cleared in no more than 1s */ +static int bnx2x_er_poll_igu_vq(struct bnx2x_softc *sc) +{ + uint32_t cnt = 1000; + uint32_t pend_bits = 0; + + do { + pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS); + + if (pend_bits == 0) { + break; + } + + DELAY(1000); + } while (cnt-- > 0); + + if (cnt <= 0) { + PMD_DRV_LOG(NOTICE, "Still pending IGU requests bits=0x%08x!", + pend_bits); + return -1; + } + + return 0; +} + +#define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */ + +static void bnx2x_clp_reset_prep(struct bnx2x_softc *sc, uint32_t * magic_val) +{ + /* Do some magic... */ + uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); + *magic_val = val & SHARED_MF_CLP_MAGIC; + MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); +} + +/* restore the value of the 'magic' bit */ +static void bnx2x_clp_reset_done(struct bnx2x_softc *sc, uint32_t magic_val) +{ + /* Restore the 'magic' bit value... 
*/ + uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); + MFCFG_WR(sc, shared_mf_config.clp_mb, + (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); +} + +/* prepare for MCP reset, takes care of CLP configurations */ +static void bnx2x_reset_mcp_prep(struct bnx2x_softc *sc, uint32_t * magic_val) +{ + uint32_t shmem; + uint32_t validity_offset; + + /* set `magic' bit in order to save MF config */ + bnx2x_clp_reset_prep(sc, magic_val); + + /* get shmem offset */ + shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); + validity_offset = + offsetof(struct shmem_region, validity_map[SC_PORT(sc)]); + + /* Clear validity map flags */ + if (shmem > 0) { + REG_WR(sc, shmem + validity_offset, 0); + } +} + +#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ +#define MCP_ONE_TIMEOUT 100 /* 100 ms */ + +static void bnx2x_mcp_wait_one(struct bnx2x_softc *sc) +{ + /* special handling for emulation and FPGA (10 times longer) */ + if (CHIP_REV_IS_SLOW(sc)) { + DELAY((MCP_ONE_TIMEOUT * 10) * 1000); + } else { + DELAY((MCP_ONE_TIMEOUT) * 1000); + } +} + +/* initialize shmem_base and waits for validity signature to appear */ +static int bnx2x_init_shmem(struct bnx2x_softc *sc) +{ + int cnt = 0; + uint32_t val = 0; + + do { + sc->devinfo.shmem_base = + sc->link_params.shmem_base = + REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); + + if (sc->devinfo.shmem_base) { + val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); + if (val & SHR_MEM_VALIDITY_MB) + return 0; + } + + bnx2x_mcp_wait_one(sc); + + } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); + + PMD_DRV_LOG(NOTICE, "BAD MCP validity signature"); + + return -1; +} + +static int bnx2x_reset_mcp_comp(struct bnx2x_softc *sc, uint32_t magic_val) +{ + int rc = bnx2x_init_shmem(sc); + + /* Restore the `magic' bit value */ + bnx2x_clp_reset_done(sc, magic_val); + + return rc; +} + +static void bnx2x_pxp_prep(struct bnx2x_softc *sc) +{ + REG_WR(sc, PXP2_REG_RD_START_INIT, 0); + REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0); + wmb(); +} + +/* + * Reset the whole chip except for: + * - PCIE core + * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit) + * - IGU + * - MISC (including AEU) + * - GRC + * - RBCN, RBCP + */ +static void bnx2x_process_kill_chip_reset(struct bnx2x_softc *sc, uint8_t global) +{ + uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; + uint32_t global_bits2, stay_reset2; + + /* + * Bits that have to be set in reset_mask2 if we want to reset 'global' + * (per chip) blocks. + */ + global_bits2 = + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; + + /* + * Don't reset the following blocks. + * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be + * reset, as in 4 port device they might still be owned + * by the MCP (there is only one leader per path). 
+ */ + not_reset_mask1 = + MISC_REGISTERS_RESET_REG_1_RST_HC | + MISC_REGISTERS_RESET_REG_1_RST_PXPV | + MISC_REGISTERS_RESET_REG_1_RST_PXP; + + not_reset_mask2 = + MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | + MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | + MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | + MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | + MISC_REGISTERS_RESET_REG_2_RST_RBCN | + MISC_REGISTERS_RESET_REG_2_RST_GRC | + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | + MISC_REGISTERS_RESET_REG_2_RST_ATC | + MISC_REGISTERS_RESET_REG_2_PGLC | + MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | + MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | + MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | + MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | + MISC_REGISTERS_RESET_REG_2_UMAC0 | MISC_REGISTERS_RESET_REG_2_UMAC1; + + /* + * Keep the following blocks in reset: + * - all xxMACs are handled by the elink code. + */ + stay_reset2 = + MISC_REGISTERS_RESET_REG_2_XMAC | + MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; + + /* Full reset masks according to the chip */ + reset_mask1 = 0xffffffff; + + if (CHIP_IS_E1H(sc)) + reset_mask2 = 0x1ffff; + else if (CHIP_IS_E2(sc)) + reset_mask2 = 0xfffff; + else /* CHIP_IS_E3 */ + reset_mask2 = 0x3ffffff; + + /* Don't reset global blocks unless we need to */ + if (!global) + reset_mask2 &= ~global_bits2; + + /* + * In case of attention in the QM, we need to reset PXP + * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM + * because otherwise QM reset would release 'close the gates' shortly + * before resetting the PXP, then the PSWRQ would send a write + * request to PGLUE. Then when PXP is reset, PGLUE would try to + * read the payload data from PSWWR, but PSWWR would not + * respond. The write queue in PGLUE would stuck, dmae commands + * would not return. Therefore it's important to reset the second + * reset register (containing the + * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the + * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM + * bit). + */ + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + reset_mask2 & (~not_reset_mask2)); + + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, + reset_mask1 & (~not_reset_mask1)); + + mb(); + wmb(); + + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + reset_mask2 & (~stay_reset2)); + + mb(); + wmb(); + + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); + wmb(); +} + +static int bnx2x_process_kill(struct bnx2x_softc *sc, uint8_t global) +{ + int cnt = 1000; + uint32_t val = 0; + uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; + uint32_t tags_63_32 = 0; + + /* Empty the Tetris buffer, wait for 1s */ + do { + sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT); + blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT); + port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0); + port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1); + pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2); + if (CHIP_IS_E3(sc)) { + tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32); + } + + if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && + ((port_is_idle_0 & 0x1) == 0x1) && + ((port_is_idle_1 & 0x1) == 0x1) && + (pgl_exp_rom2 == 0xffffffff) && + (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff))) + break; + DELAY(1000); + } while (cnt-- > 0); + + if (cnt <= 0) { + PMD_DRV_LOG(NOTICE, + "ERROR: Tetris buffer didn't get empty or there " + "are still outstanding read requests after 1s! 
" + "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, " + "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x", + sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, + pgl_exp_rom2); + return -1; + } + + mb(); + + /* Close gates #2, #3 and #4 */ + bnx2x_set_234_gates(sc, TRUE); + + /* Poll for IGU VQs for 57712 and newer chips */ + if (!CHIP_IS_E1x(sc) && bnx2x_er_poll_igu_vq(sc)) { + return -1; + } + + /* clear "unprepared" bit */ + REG_WR(sc, MISC_REG_UNPREPARED, 0); + mb(); + + /* Make sure all is written to the chip before the reset */ + wmb(); + + /* + * Wait for 1ms to empty GLUE and PCI-E core queues, + * PSWHST, GRC and PSWRD Tetris buffer. + */ + DELAY(1000); + + /* Prepare to chip reset: */ + /* MCP */ + if (global) { + bnx2x_reset_mcp_prep(sc, &val); + } + + /* PXP */ + bnx2x_pxp_prep(sc); + mb(); + + /* reset the chip */ + bnx2x_process_kill_chip_reset(sc, global); + mb(); + + /* Recover after reset: */ + /* MCP */ + if (global && bnx2x_reset_mcp_comp(sc, val)) { + return -1; + } + + /* Open the gates #2, #3 and #4 */ + bnx2x_set_234_gates(sc, FALSE); + + return 0; +} + +static int bnx2x_leader_reset(struct bnx2x_softc *sc) +{ + int rc = 0; + uint8_t global = bnx2x_reset_is_global(sc); + uint32_t load_code; + + /* + * If not going to reset MCP, load "fake" driver to reset HW while + * driver is owner of the HW. + */ + if (!global && !BNX2X_NOMCP(sc)) { + load_code = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, + DRV_MSG_CODE_LOAD_REQ_WITH_LFA); + if (!load_code) { + PMD_DRV_LOG(NOTICE, "MCP response failure, aborting"); + rc = -1; + goto exit_leader_reset; + } + + if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && + (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { + PMD_DRV_LOG(NOTICE, + "MCP unexpected response, aborting"); + rc = -1; + goto exit_leader_reset2; + } + + load_code = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); + if (!load_code) { + PMD_DRV_LOG(NOTICE, "MCP response failure, aborting"); + rc = -1; + goto exit_leader_reset2; + } + } + + /* try to recover after the failure */ + if (bnx2x_process_kill(sc, global)) { + PMD_DRV_LOG(NOTICE, "Something bad occurred on engine %d!", + SC_PATH(sc)); + rc = -1; + goto exit_leader_reset2; + } + + /* + * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver + * state. + */ + bnx2x_set_reset_done(sc); + if (global) { + bnx2x_clear_reset_global(sc); + } + +exit_leader_reset2: + + /* unload "fake driver" if it was loaded */ + if (!global &&!BNX2X_NOMCP(sc)) { + bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); + bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); + } + +exit_leader_reset: + + sc->is_leader = 0; + bnx2x_release_leader_lock(sc); + + mb(); + return rc; +} + +/* + * prepare INIT transition, parameters configured: + * - HC configuration + * - Queue's CDU context + */ +static void +bnx2x_pf_q_prep_init(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, + struct ecore_queue_init_params *init_params) +{ + uint8_t cos; + int cxt_index, cxt_offset; + + bnx2x_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags); + bnx2x_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags); + + bnx2x_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags); + bnx2x_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags); + + /* HC rate */ + init_params->rx.hc_rate = + sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0; + init_params->tx.hc_rate = + sc->hc_tx_ticks ? 
(1000000 / sc->hc_tx_ticks) : 0; + + /* FW SB ID */ + init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id; + + /* CQ index among the SB indices */ + init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; + init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; + + /* set maximum number of COSs supported by this queue */ + init_params->max_cos = sc->max_cos; + + /* set the context pointers queue object */ + for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { + cxt_index = fp->index / ILT_PAGE_CIDS; + cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS); + init_params->cxts[cos] = + &sc->context[cxt_index].vcxt[cxt_offset].eth; + } +} + +/* set flags that are common for the Tx-only and not normal connections */ +static unsigned long +bnx2x_get_common_flags(struct bnx2x_softc *sc, uint8_t zero_stats) +{ + unsigned long flags = 0; + + /* PF driver will always initialize the Queue to an ACTIVE state */ + bnx2x_set_bit(ECORE_Q_FLG_ACTIVE, &flags); + + /* + * tx only connections collect statistics (on the same index as the + * parent connection). The statistics are zeroed when the parent + * connection is initialized. + */ + + bnx2x_set_bit(ECORE_Q_FLG_STATS, &flags); + if (zero_stats) { + bnx2x_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags); + } + + /* + * tx only connections can support tx-switching, though their + * CoS-ness doesn't survive the loopback + */ + if (sc->flags & BNX2X_TX_SWITCHING) { + bnx2x_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags); + } + + bnx2x_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags); + + return flags; +} + +static unsigned long bnx2x_get_q_flags(struct bnx2x_softc *sc, uint8_t leading) +{ + unsigned long flags = 0; + + if (IS_MF_SD(sc)) { + bnx2x_set_bit(ECORE_Q_FLG_OV, &flags); + } + + if (leading) { + bnx2x_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags); + bnx2x_set_bit(ECORE_Q_FLG_MCAST, &flags); + } + + bnx2x_set_bit(ECORE_Q_FLG_VLAN, &flags); + + /* merge with common flags */ + return flags | bnx2x_get_common_flags(sc, TRUE); +} + +static void +bnx2x_pf_q_prep_general(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, + struct ecore_general_setup_params *gen_init, uint8_t cos) +{ + gen_init->stat_id = bnx2x_stats_id(fp); + gen_init->spcl_id = fp->cl_id; + gen_init->mtu = sc->mtu; + gen_init->cos = cos; +} + +static void +bnx2x_pf_rx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, + struct rxq_pause_params *pause, + struct ecore_rxq_setup_params *rxq_init) +{ + struct bnx2x_rx_queue *rxq; + + rxq = sc->rx_queues[fp->index]; + if (!rxq) { + PMD_RX_LOG(ERR, "RX queue is NULL"); + return; + } + /* pause */ + pause->bd_th_lo = BD_TH_LO(sc); + pause->bd_th_hi = BD_TH_HI(sc); + + pause->rcq_th_lo = RCQ_TH_LO(sc); + pause->rcq_th_hi = RCQ_TH_HI(sc); + + /* validate rings have enough entries to cross high thresholds */ + if (sc->dropless_fc && + pause->bd_th_hi + FW_PREFETCH_CNT > sc->rx_ring_size) { + PMD_DRV_LOG(WARNING, "rx bd ring threshold limit"); + } + + if (sc->dropless_fc && + pause->rcq_th_hi + FW_PREFETCH_CNT > USABLE_RCQ_ENTRIES(rxq)) { + PMD_DRV_LOG(WARNING, "rcq ring threshold limit"); + } + + pause->pri_map = 1; + + /* rxq setup */ + rxq_init->dscr_map = (phys_addr_t)rxq->rx_ring_phys_addr; + rxq_init->rcq_map = (phys_addr_t)rxq->cq_ring_phys_addr; + rxq_init->rcq_np_map = (phys_addr_t)(rxq->cq_ring_phys_addr + + BNX2X_PAGE_SIZE); + + /* + * This should be a maximum number of data bytes that may be + * placed on the BD (not including paddings). 
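A few lines up, rx.hc_rate and tx.hc_rate are derived from the host-coalescing tick settings; the ticks are a period in microseconds, so the programmed value works out to events per second (for example, 25 us gives 40000). A minimal sketch of that conversion, with an illustrative helper name:

    #include <stdint.h>

    static inline uint32_t ex_hc_rate(uint32_t tick_period_us)
    {
            /* a tick value of 0 simply yields a rate of 0, as in the
             * expression above */
            return tick_period_us ? (1000000u / tick_period_us) : 0;
    }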
+ */ + rxq_init->buf_sz = (fp->rx_buf_size - IP_HEADER_ALIGNMENT_PADDING); + + rxq_init->cl_qzone_id = fp->cl_qzone_id; + rxq_init->rss_engine_id = SC_FUNC(sc); + rxq_init->mcast_engine_id = SC_FUNC(sc); + + rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; + rxq_init->fw_sb_id = fp->fw_sb_id; + + rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; + + /* + * configure silent vlan removal + * if multi function mode is afex, then mask default vlan + */ + if (IS_MF_AFEX(sc)) { + rxq_init->silent_removal_value = + sc->devinfo.mf_info.afex_def_vlan_tag; + rxq_init->silent_removal_mask = EVL_VLID_MASK; + } +} + +static void +bnx2x_pf_tx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, + struct ecore_txq_setup_params *txq_init, uint8_t cos) +{ + struct bnx2x_tx_queue *txq = fp->sc->tx_queues[fp->index]; + + if (!txq) { + PMD_TX_LOG(ERR, "ERROR: TX queue is NULL"); + return; + } + txq_init->dscr_map = (phys_addr_t)txq->tx_ring_phys_addr; + txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; + txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; + txq_init->fw_sb_id = fp->fw_sb_id; + + /* + * set the TSS leading client id for TX classfication to the + * leading RSS client id + */ + txq_init->tss_leading_cl_id = BNX2X_FP(sc, 0, cl_id); +} + +/* + * This function performs 2 steps in a queue state machine: + * 1) RESET->INIT + * 2) INIT->SETUP + */ +static int +bnx2x_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, uint8_t leading) +{ + struct ecore_queue_state_params q_params = { NULL }; + struct ecore_queue_setup_params *setup_params = &q_params.params.setup; + int rc; + + PMD_DRV_LOG(DEBUG, "setting up queue %d", fp->index); + + bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); + + q_params.q_obj = &BNX2X_SP_OBJ(sc, fp).q_obj; + + /* we want to wait for completion in this context */ + bnx2x_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + + /* prepare the INIT parameters */ + bnx2x_pf_q_prep_init(sc, fp, &q_params.params.init); + + /* Set the command */ + q_params.cmd = ECORE_Q_CMD_INIT; + + /* Change the state to INIT */ + rc = ecore_queue_state_change(sc, &q_params); + if (rc) { + PMD_DRV_LOG(NOTICE, "Queue(%d) INIT failed", fp->index); + return rc; + } + + PMD_DRV_LOG(DEBUG, "init complete"); + + /* now move the Queue to the SETUP state */ + memset(setup_params, 0, sizeof(*setup_params)); + + /* set Queue flags */ + setup_params->flags = bnx2x_get_q_flags(sc, leading); + + /* set general SETUP parameters */ + bnx2x_pf_q_prep_general(sc, fp, &setup_params->gen_params, + FIRST_TX_COS_INDEX); + + bnx2x_pf_rx_q_prep(sc, fp, + &setup_params->pause_params, + &setup_params->rxq_params); + + bnx2x_pf_tx_q_prep(sc, fp, &setup_params->txq_params, FIRST_TX_COS_INDEX); + + /* Set the command */ + q_params.cmd = ECORE_Q_CMD_SETUP; + + /* change the state to SETUP */ + rc = ecore_queue_state_change(sc, &q_params); + if (rc) { + PMD_DRV_LOG(NOTICE, "Queue(%d) SETUP failed", fp->index); + return rc; + } + + return rc; +} + +static int bnx2x_setup_leading(struct bnx2x_softc *sc) +{ + if (IS_PF(sc)) + return bnx2x_setup_queue(sc, &sc->fp[0], TRUE); + else /* VF */ + return bnx2x_vf_setup_queue(sc, &sc->fp[0], TRUE); +} + +static int +bnx2x_config_rss_pf(struct bnx2x_softc *sc, struct ecore_rss_config_obj *rss_obj, + uint8_t config_hash) +{ + struct ecore_config_rss_params params = { NULL }; + uint32_t i; + + /* + * Although RSS is meaningless when there is a single HW queue we + * still need it enabled in order to have HW Rx hash generated. 
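The indirection table copied into the ramrod parameters below is populated round-robin over the ethernet queues (see bnx2x_init_rss_pf() further down). A minimal standalone sketch of that fill, with the table size and client-id base passed as parameters since the real values live in the softc, and at least one queue assumed:

    #include <stddef.h>
    #include <stdint.h>

    static void ex_fill_ind_table(uint8_t *table, size_t table_len,
                                  uint8_t base_cl_id, unsigned int num_queues)
    {
            size_t i;

            /* spread RX queues evenly across the table entries */
            for (i = 0; i < table_len; i++)
                    table[i] = base_cl_id + (uint8_t)(i % num_queues);
    }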
+ */ + + params.rss_obj = rss_obj; + + bnx2x_set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); + + bnx2x_set_bit(ECORE_RSS_MODE_REGULAR, ¶ms.rss_flags); + + /* RSS configuration */ + bnx2x_set_bit(ECORE_RSS_IPV4, ¶ms.rss_flags); + bnx2x_set_bit(ECORE_RSS_IPV4_TCP, ¶ms.rss_flags); + bnx2x_set_bit(ECORE_RSS_IPV6, ¶ms.rss_flags); + bnx2x_set_bit(ECORE_RSS_IPV6_TCP, ¶ms.rss_flags); + if (rss_obj->udp_rss_v4) { + bnx2x_set_bit(ECORE_RSS_IPV4_UDP, ¶ms.rss_flags); + } + if (rss_obj->udp_rss_v6) { + bnx2x_set_bit(ECORE_RSS_IPV6_UDP, ¶ms.rss_flags); + } + + /* Hash bits */ + params.rss_result_mask = MULTI_MASK; + + (void)rte_memcpy(params.ind_table, rss_obj->ind_table, + sizeof(params.ind_table)); + + if (config_hash) { +/* RSS keys */ + for (i = 0; i < sizeof(params.rss_key) / 4; i++) { + params.rss_key[i] = (uint32_t) rte_rand(); + } + + bnx2x_set_bit(ECORE_RSS_SET_SRCH, ¶ms.rss_flags); + } + + if (IS_PF(sc)) + return ecore_config_rss(sc, ¶ms); + else + return bnx2x_vf_config_rss(sc, ¶ms); +} + +static int bnx2x_config_rss_eth(struct bnx2x_softc *sc, uint8_t config_hash) +{ + return bnx2x_config_rss_pf(sc, &sc->rss_conf_obj, config_hash); +} + +static int bnx2x_init_rss_pf(struct bnx2x_softc *sc) +{ + uint8_t num_eth_queues = BNX2X_NUM_ETH_QUEUES(sc); + uint32_t i; + + /* + * Prepare the initial contents of the indirection table if + * RSS is enabled + */ + for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) { + sc->rss_conf_obj.ind_table[i] = + (sc->fp->cl_id + (i % num_eth_queues)); + } + + if (sc->udp_rss) { + sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1; + } + + /* + * For 57711 SEARCHER configuration (rss_keys) is + * per-port, so if explicit configuration is needed, do it only + * for a PMF. + * + * For 57712 and newer it's a per-function configuration. + */ + return bnx2x_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)); +} + +static int +bnx2x_set_mac_one(struct bnx2x_softc *sc, uint8_t * mac, + struct ecore_vlan_mac_obj *obj, uint8_t set, int mac_type, + unsigned long *ramrod_flags) +{ + struct ecore_vlan_mac_ramrod_params ramrod_param; + int rc; + + memset(&ramrod_param, 0, sizeof(ramrod_param)); + + /* fill in general parameters */ + ramrod_param.vlan_mac_obj = obj; + ramrod_param.ramrod_flags = *ramrod_flags; + + /* fill a user request section if needed */ + if (!bnx2x_test_bit(RAMROD_CONT, ramrod_flags)) { + (void)rte_memcpy(ramrod_param.user_req.u.mac.mac, mac, + ETH_ALEN); + + bnx2x_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); + +/* Set the command: ADD or DEL */ + ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD : + ECORE_VLAN_MAC_DEL; + } + + rc = ecore_config_vlan_mac(sc, &ramrod_param); + + if (rc == ECORE_EXISTS) { + PMD_DRV_LOG(INFO, "Failed to schedule ADD operations (EEXIST)"); +/* do not treat adding same MAC as error */ + rc = 0; + } else if (rc < 0) { + PMD_DRV_LOG(ERR, + "%s MAC failed (%d)", (set ? 
"Set" : "Delete"), rc); + } + + return rc; +} + +static int bnx2x_set_eth_mac(struct bnx2x_softc *sc, uint8_t set) +{ + unsigned long ramrod_flags = 0; + + PMD_DRV_LOG(DEBUG, "Adding Ethernet MAC"); + + bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + + /* Eth MAC is set on RSS leading client (fp[0]) */ + return bnx2x_set_mac_one(sc, sc->link_params.mac_addr, + &sc->sp_objs->mac_obj, + set, ECORE_ETH_MAC, &ramrod_flags); +} + +static int bnx2x_get_cur_phy_idx(struct bnx2x_softc *sc) +{ + uint32_t sel_phy_idx = 0; + + if (sc->link_params.num_phys <= 1) { + return ELINK_INT_PHY; + } + + if (sc->link_vars.link_up) { + sel_phy_idx = ELINK_EXT_PHY1; +/* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */ + if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) && + (sc->link_params.phy[ELINK_EXT_PHY2].supported & + ELINK_SUPPORTED_FIBRE)) + sel_phy_idx = ELINK_EXT_PHY2; + } else { + switch (elink_phy_selection(&sc->link_params)) { + case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: + sel_phy_idx = ELINK_EXT_PHY1; + break; + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: + sel_phy_idx = ELINK_EXT_PHY2; + break; + } + } + + return sel_phy_idx; +} + +static int bnx2x_get_link_cfg_idx(struct bnx2x_softc *sc) +{ + uint32_t sel_phy_idx = bnx2x_get_cur_phy_idx(sc); + + /* + * The selected activated PHY is always after swapping (in case PHY + * swapping is enabled). So when swapping is enabled, we need to reverse + * the configuration + */ + + if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { + if (sel_phy_idx == ELINK_EXT_PHY1) + sel_phy_idx = ELINK_EXT_PHY2; + else if (sel_phy_idx == ELINK_EXT_PHY2) + sel_phy_idx = ELINK_EXT_PHY1; + } + + return ELINK_LINK_CONFIG_IDX(sel_phy_idx); +} + +static void bnx2x_set_requested_fc(struct bnx2x_softc *sc) +{ + /* + * Initialize link parameters structure variables + * It is recommended to turn off RX FC for jumbo frames + * for better performance + */ + if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) { + sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX; + } else { + sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH; + } +} + +static void bnx2x_calc_fc_adv(struct bnx2x_softc *sc) +{ + uint8_t cfg_idx = bnx2x_get_link_cfg_idx(sc); + switch (sc->link_vars.ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { + case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: + default: + sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | + ADVERTISED_Pause); + break; + + case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: + sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | + ADVERTISED_Pause); + break; + + case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: + sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; + break; + } +} + +static uint16_t bnx2x_get_mf_speed(struct bnx2x_softc *sc) +{ + uint16_t line_speed = sc->link_vars.line_speed; + if (IS_MF(sc)) { + uint16_t maxCfg = bnx2x_extract_max_cfg(sc, + sc->devinfo. 
+ mf_info.mf_config[SC_VN + (sc)]); + +/* calculate the current MAX line speed limit for the MF devices */ + if (IS_MF_SI(sc)) { + line_speed = (line_speed * maxCfg) / 100; + } else { /* SD mode */ + uint16_t vn_max_rate = maxCfg * 100; + + if (vn_max_rate < line_speed) { + line_speed = vn_max_rate; + } + } + } + + return line_speed; +} + +static void +bnx2x_fill_report_data(struct bnx2x_softc *sc, struct bnx2x_link_report_data *data) +{ + uint16_t line_speed = bnx2x_get_mf_speed(sc); + + memset(data, 0, sizeof(*data)); + + /* fill the report data with the effective line speed */ + data->line_speed = line_speed; + + /* Link is down */ + if (!sc->link_vars.link_up || (sc->flags & BNX2X_MF_FUNC_DIS)) { + bnx2x_set_bit(BNX2X_LINK_REPORT_LINK_DOWN, + &data->link_report_flags); + } + + /* Full DUPLEX */ + if (sc->link_vars.duplex == DUPLEX_FULL) { + bnx2x_set_bit(BNX2X_LINK_REPORT_FULL_DUPLEX, + &data->link_report_flags); + } + + /* Rx Flow Control is ON */ + if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) { + bnx2x_set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags); + } + + /* Tx Flow Control is ON */ + if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { + bnx2x_set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags); + } +} + +/* report link status to OS, should be called under phy_lock */ +static void bnx2x_link_report(struct bnx2x_softc *sc) +{ + struct bnx2x_link_report_data cur_data; + + /* reread mf_cfg */ + if (IS_PF(sc)) { + bnx2x_read_mf_cfg(sc); + } + + /* Read the current link report info */ + bnx2x_fill_report_data(sc, &cur_data); + + /* Don't report link down or exactly the same link status twice */ + if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) || + (bnx2x_test_bit(BNX2X_LINK_REPORT_LINK_DOWN, + &sc->last_reported_link.link_report_flags) && + bnx2x_test_bit(BNX2X_LINK_REPORT_LINK_DOWN, + &cur_data.link_report_flags))) { + return; + } + + sc->link_cnt++; + + /* report new link params and remember the state for the next time */ + (void)rte_memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data)); + + if (bnx2x_test_bit(BNX2X_LINK_REPORT_LINK_DOWN, + &cur_data.link_report_flags)) { + PMD_DRV_LOG(INFO, "NIC Link is Down"); + } else { + __rte_unused const char *duplex; + __rte_unused const char *flow; + + if (bnx2x_test_and_clear_bit(BNX2X_LINK_REPORT_FULL_DUPLEX, + &cur_data.link_report_flags)) { + duplex = "full"; + } else { + duplex = "half"; + } + +/* + * Handle the FC at the end so that only these flags would be + * possibly set. This way we may easily check if there is no FC + * enabled. + */ + if (cur_data.link_report_flags) { + if (bnx2x_test_bit(BNX2X_LINK_REPORT_RX_FC_ON, + &cur_data.link_report_flags) && + bnx2x_test_bit(BNX2X_LINK_REPORT_TX_FC_ON, + &cur_data.link_report_flags)) { + flow = "ON - receive & transmit"; + } else if (bnx2x_test_bit(BNX2X_LINK_REPORT_RX_FC_ON, + &cur_data.link_report_flags) && + !bnx2x_test_bit(BNX2X_LINK_REPORT_TX_FC_ON, + &cur_data.link_report_flags)) { + flow = "ON - receive"; + } else if (!bnx2x_test_bit(BNX2X_LINK_REPORT_RX_FC_ON, + &cur_data.link_report_flags) && + bnx2x_test_bit(BNX2X_LINK_REPORT_TX_FC_ON, + &cur_data.link_report_flags)) { + flow = "ON - transmit"; + } else { + flow = "none"; /* possible? 
*/ + } + } else { + flow = "none"; + } + + PMD_DRV_LOG(INFO, + "NIC Link is Up, %d Mbps %s duplex, Flow control: %s", + cur_data.line_speed, duplex, flow); + } +} + +void bnx2x_link_status_update(struct bnx2x_softc *sc) +{ + if (sc->state != BNX2X_STATE_OPEN) { + return; + } + + if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) { + elink_link_status_update(&sc->link_params, &sc->link_vars); + } else { + sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half | + ELINK_SUPPORTED_10baseT_Full | + ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_2500baseX_Full | + ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_TP | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause); + sc->port.advertising[0] = sc->port.supported[0]; + + sc->link_params.sc = sc; + sc->link_params.port = SC_PORT(sc); + sc->link_params.req_duplex[0] = DUPLEX_FULL; + sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE; + sc->link_params.req_line_speed[0] = SPEED_10000; + sc->link_params.speed_cap_mask[0] = 0x7f0000; + sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G; + + if (CHIP_REV_IS_FPGA(sc)) { + sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC; + sc->link_vars.line_speed = ELINK_SPEED_1000; + sc->link_vars.link_status = (LINK_STATUS_LINK_UP | + LINK_STATUS_SPEED_AND_DUPLEX_1000TFD); + } else { + sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC; + sc->link_vars.line_speed = ELINK_SPEED_10000; + sc->link_vars.link_status = (LINK_STATUS_LINK_UP | + LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); + } + + sc->link_vars.link_up = 1; + + sc->link_vars.duplex = DUPLEX_FULL; + sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE; + + if (IS_PF(sc)) { + REG_WR(sc, + NIG_REG_EGRESS_DRAIN0_MODE + + sc->link_params.port * 4, 0); + bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); + bnx2x_link_report(sc); + } + } + + if (IS_PF(sc)) { + if (sc->link_vars.link_up) { + bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); + } else { + bnx2x_stats_handle(sc, STATS_EVENT_STOP); + } + bnx2x_link_report(sc); + } else { + bnx2x_link_report(sc); + bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); + } +} + +static void bnx2x_periodic_start(struct bnx2x_softc *sc) +{ + atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO); +} + +static void bnx2x_periodic_stop(struct bnx2x_softc *sc) +{ + atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP); +} + +static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode) +{ + int rc, cfg_idx = bnx2x_get_link_cfg_idx(sc); + uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx]; + struct elink_params *lp = &sc->link_params; + + bnx2x_set_requested_fc(sc); + + if (CHIP_REV_IS_SLOW(sc)) { + uint32_t bond = CHIP_BOND_ID(sc); + uint32_t feat = 0; + + if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) { + feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; + } else if (bond & 0x4) { + if (CHIP_IS_E3(sc)) { + feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC; + } else { + feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; + } + } else if (bond & 0x8) { + if (CHIP_IS_E3(sc)) { + feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC; + } else { + feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; + } + } + +/* disable EMAC for E3 and above */ + if (bond & 0x2) { + feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; + } + + sc->link_params.feature_config_flags |= feat; + } + + if (load_mode == LOAD_DIAG) { + lp->loopback_mode = ELINK_LOOPBACK_XGXS; +/* Prefer doing PHY loopback at 10G speed, if possible */ + if 
(lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) { + if (lp->speed_cap_mask[cfg_idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { + lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000; + } else { + lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000; + } + } + } + + if (load_mode == LOAD_LOOPBACK_EXT) { + lp->loopback_mode = ELINK_LOOPBACK_EXT; + } + + rc = elink_phy_init(&sc->link_params, &sc->link_vars); + + bnx2x_calc_fc_adv(sc); + + if (sc->link_vars.link_up) { + bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP); + bnx2x_link_report(sc); + } + + if (!CHIP_REV_IS_SLOW(sc)) { + bnx2x_periodic_start(sc); + } + + sc->link_params.req_line_speed[cfg_idx] = req_line_speed; + return rc; +} + +/* update flags in shmem */ +static void +bnx2x_update_drv_flags(struct bnx2x_softc *sc, uint32_t flags, uint32_t set) +{ + uint32_t drv_flags; + + if (SHMEM2_HAS(sc, drv_flags)) { + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); + drv_flags = SHMEM2_RD(sc, drv_flags); + + if (set) { + drv_flags |= flags; + } else { + drv_flags &= ~flags; + } + + SHMEM2_WR(sc, drv_flags, drv_flags); + + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); + } +} + +/* periodic timer callout routine, only runs when the interface is up */ +void bnx2x_periodic_callout(struct bnx2x_softc *sc) +{ + if ((sc->state != BNX2X_STATE_OPEN) || + (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) { + PMD_DRV_LOG(WARNING, "periodic callout exit (state=0x%x)", + sc->state); + return; + } + if (!CHIP_REV_IS_SLOW(sc)) { +/* + * This barrier is needed to ensure the ordering between the writing + * to the sc->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and + * the reading here. + */ + mb(); + if (sc->port.pmf) { + elink_period_func(&sc->link_params, &sc->link_vars); + } + } +#ifdef BNX2X_PULSE + if (IS_PF(sc) && !BNX2X_NOMCP(sc)) { + int mb_idx = SC_FW_MB_IDX(sc); + uint32_t drv_pulse; + uint32_t mcp_pulse; + + ++sc->fw_drv_pulse_wr_seq; + sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; + + drv_pulse = sc->fw_drv_pulse_wr_seq; + bnx2x_drv_pulse(sc); + + mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) & + MCP_PULSE_SEQ_MASK); + +/* + * The delta between driver pulse and mcp response should + * be 1 (before mcp response) or 0 (after mcp response). + */ + if ((drv_pulse != mcp_pulse) && + (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { + /* someone lost a heartbeat... 
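The check that leads to this branch can be read as a small predicate: the heartbeat is healthy when the MCP echo equals the driver sequence, or lags it by exactly one, modulo the sequence mask. A minimal sketch with illustrative names:

    #include <stdint.h>

    static inline int ex_pulse_ok(uint32_t drv_pulse, uint32_t mcp_pulse,
                                  uint32_t seq_mask)
    {
            /* delta of 0 (MCP already answered) or 1 (answer pending) is fine */
            return (drv_pulse == mcp_pulse) ||
                   (drv_pulse == ((mcp_pulse + 1) & seq_mask));
    }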
*/ + PMD_DRV_LOG(ERR, + "drv_pulse (0x%x) != mcp_pulse (0x%x)", + drv_pulse, mcp_pulse); + } + } +#endif +} + +/* start the controller */ +static __attribute__ ((noinline)) +int bnx2x_nic_load(struct bnx2x_softc *sc) +{ + uint32_t val; + uint32_t load_code = 0; + int i, rc = 0; + + PMD_INIT_FUNC_TRACE(); + + sc->state = BNX2X_STATE_OPENING_WAITING_LOAD; + + if (IS_PF(sc)) { +/* must be called before memory allocation and HW init */ + bnx2x_ilt_set_info(sc); + } + + bnx2x_set_fp_rx_buf_size(sc); + + if (IS_PF(sc)) { + if (bnx2x_alloc_mem(sc) != 0) { + sc->state = BNX2X_STATE_CLOSED; + rc = -ENOMEM; + goto bnx2x_nic_load_error0; + } + } + + if (bnx2x_alloc_fw_stats_mem(sc) != 0) { + sc->state = BNX2X_STATE_CLOSED; + rc = -ENOMEM; + goto bnx2x_nic_load_error0; + } + + if (IS_VF(sc)) { + rc = bnx2x_vf_init(sc); + if (rc) { + sc->state = BNX2X_STATE_ERROR; + goto bnx2x_nic_load_error0; + } + } + + if (IS_PF(sc)) { +/* set pf load just before approaching the MCP */ + bnx2x_set_pf_load(sc); + +/* if MCP exists send load request and analyze response */ + if (!BNX2X_NOMCP(sc)) { + /* attempt to load pf */ + if (bnx2x_nic_load_request(sc, &load_code) != 0) { + sc->state = BNX2X_STATE_CLOSED; + rc = -ENXIO; + goto bnx2x_nic_load_error1; + } + + /* what did the MCP say? */ + if (bnx2x_nic_load_analyze_req(sc, load_code) != 0) { + bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); + sc->state = BNX2X_STATE_CLOSED; + rc = -ENXIO; + goto bnx2x_nic_load_error2; + } + } else { + PMD_DRV_LOG(INFO, "Device has no MCP!"); + load_code = bnx2x_nic_load_no_mcp(sc); + } + +/* mark PMF if applicable */ + bnx2x_nic_load_pmf(sc, load_code); + +/* Init Function state controlling object */ + bnx2x_init_func_obj(sc); + +/* Initialize HW */ + if (bnx2x_init_hw(sc, load_code) != 0) { + PMD_DRV_LOG(NOTICE, "HW init failed"); + bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); + sc->state = BNX2X_STATE_CLOSED; + rc = -ENXIO; + goto bnx2x_nic_load_error2; + } + } + + bnx2x_nic_init(sc, load_code); + + /* Init per-function objects */ + if (IS_PF(sc)) { + bnx2x_init_objs(sc); + +/* set AFEX default VLAN tag to an invalid value */ + sc->devinfo.mf_info.afex_def_vlan_tag = -1; + + sc->state = BNX2X_STATE_OPENING_WAITING_PORT; + rc = bnx2x_func_start(sc); + if (rc) { + PMD_DRV_LOG(NOTICE, "Function start failed!"); + bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); + sc->state = BNX2X_STATE_ERROR; + goto bnx2x_nic_load_error3; + } + +/* send LOAD_DONE command to MCP */ + if (!BNX2X_NOMCP(sc)) { + load_code = + bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); + if (!load_code) { + PMD_DRV_LOG(NOTICE, + "MCP response failure, aborting"); + sc->state = BNX2X_STATE_ERROR; + rc = -ENXIO; + goto bnx2x_nic_load_error3; + } + } + } + + rc = bnx2x_setup_leading(sc); + if (rc) { + PMD_DRV_LOG(NOTICE, "Setup leading failed!"); + sc->state = BNX2X_STATE_ERROR; + goto bnx2x_nic_load_error3; + } + + FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) { + if (IS_PF(sc)) + rc = bnx2x_setup_queue(sc, &sc->fp[i], FALSE); + else /* IS_VF(sc) */ + rc = bnx2x_vf_setup_queue(sc, &sc->fp[i], FALSE); + + if (rc) { + PMD_DRV_LOG(NOTICE, "Queue(%d) setup failed", i); + sc->state = BNX2X_STATE_ERROR; + goto bnx2x_nic_load_error3; + } + } + + rc = bnx2x_init_rss_pf(sc); + if (rc) { + PMD_DRV_LOG(NOTICE, "PF RSS init failed"); + sc->state = BNX2X_STATE_ERROR; + goto bnx2x_nic_load_error3; + } + + /* now when Clients are configured we are ready to work */ + sc->state = BNX2X_STATE_OPEN; + + /* Configure a ucast MAC */ + if (IS_PF(sc)) { + rc = bnx2x_set_eth_mac(sc, TRUE); + } 
else { /* IS_VF(sc) */ + rc = bnx2x_vf_set_mac(sc, TRUE); + } + + if (rc) { + PMD_DRV_LOG(NOTICE, "Setting Ethernet MAC failed"); + sc->state = BNX2X_STATE_ERROR; + goto bnx2x_nic_load_error3; + } + + if (sc->port.pmf) { + rc = bnx2x_initial_phy_init(sc, LOAD_OPEN); + if (rc) { + sc->state = BNX2X_STATE_ERROR; + goto bnx2x_nic_load_error3; + } + } + + sc->link_params.feature_config_flags &= + ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN; + + /* start the Tx */ + switch (LOAD_OPEN) { + case LOAD_NORMAL: + case LOAD_OPEN: + break; + + case LOAD_DIAG: + case LOAD_LOOPBACK_EXT: + sc->state = BNX2X_STATE_DIAG; + break; + + default: + break; + } + + if (sc->port.pmf) { + bnx2x_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0); + } else { + bnx2x_link_status_update(sc); + } + + if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { +/* mark driver is loaded in shmem2 */ + val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); + SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], + (val | + DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | + DRV_FLAGS_CAPABILITIES_LOADED_L2)); + } + + /* start fast path */ + /* Initialize Rx filter */ + bnx2x_set_rx_mode(sc); + + /* wait for all pending SP commands to complete */ + if (IS_PF(sc) && !bnx2x_wait_sp_comp(sc, ~0x0UL)) { + PMD_DRV_LOG(NOTICE, "Timeout waiting for all SPs to complete!"); + bnx2x_periodic_stop(sc); + bnx2x_nic_unload(sc, UNLOAD_CLOSE, FALSE); + return -ENXIO; + } + + PMD_DRV_LOG(DEBUG, "NIC successfully loaded"); + + return 0; + +bnx2x_nic_load_error3: + + if (IS_PF(sc)) { + bnx2x_int_disable_sync(sc, 1); + +/* clean out queued objects */ + bnx2x_squeeze_objects(sc); + } + +bnx2x_nic_load_error2: + + if (IS_PF(sc) && !BNX2X_NOMCP(sc)) { + bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); + bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); + } + + sc->port.pmf = 0; + +bnx2x_nic_load_error1: + + /* clear pf_load status, as it was already set */ + if (IS_PF(sc)) { + bnx2x_clear_pf_load(sc); + } + +bnx2x_nic_load_error0: + + bnx2x_free_fw_stats_mem(sc); + bnx2x_free_mem(sc); + + return rc; +} + +/* +* Handles controller initialization. +*/ +int bnx2x_init(struct bnx2x_softc *sc) +{ + int other_engine = SC_PATH(sc) ? 0 : 1; + uint8_t other_load_status, load_status; + uint8_t global = FALSE; + int rc; + + /* Check if the driver is still running and bail out if it is. */ + if (sc->state != BNX2X_STATE_CLOSED) { + PMD_DRV_LOG(DEBUG, "Init called while driver is running!"); + rc = 0; + goto bnx2x_init_done; + } + + bnx2x_set_power_state(sc, PCI_PM_D0); + + /* + * If parity occurred during the unload, then attentions and/or + * RECOVERY_IN_PROGRESS may still be set. If so we want the first function + * loaded on the current engine to complete the recovery. Parity recovery + * is only relevant for PF driver. + */ + if (IS_PF(sc)) { + other_load_status = bnx2x_get_load_status(sc, other_engine); + load_status = bnx2x_get_load_status(sc, SC_PATH(sc)); + + if (!bnx2x_reset_is_done(sc, SC_PATH(sc)) || + bnx2x_chk_parity_attn(sc, &global, TRUE)) { + do { + /* + * If there are attentions and they are in global blocks, set + * the GLOBAL_RESET bit regardless whether it will be this + * function that will complete the recovery or not. + */ + if (global) { + bnx2x_set_reset_global(sc); + } + + /* + * Only the first function on the current engine should try + * to recover in open. In case of attentions in global blocks + * only the first in the chip should try to recover. 
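The load-status part of that rule reduces to a small predicate: recover only if no other function is loaded on this engine and, for global attentions, nothing is loaded on the other engine either. A minimal sketch with illustrative parameter names (the leader-lock and reset steps that follow in the real condition are left out):

    static inline int ex_may_lead_recovery(int load_status,
                                           int other_load_status, int global)
    {
            return !load_status && (!global || !other_load_status);
    }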
+ */
+ if ((!load_status
+ && (!global ||!other_load_status))
+ && bnx2x_trylock_leader_lock(sc)
+ && !bnx2x_leader_reset(sc)) {
+ PMD_DRV_LOG(INFO,
+ "Recovered during init");
+ break;
+ }
+
+ /* recovery has failed... */
+ bnx2x_set_power_state(sc, PCI_PM_D3hot);
+
+ sc->recovery_state = BNX2X_RECOVERY_FAILED;
+
+ PMD_DRV_LOG(NOTICE,
+ "Recovery flow hasn't properly "
+ "completed yet, try again later. "
+ "If you still see this message after a "
+ "few retries then power cycle is required.");
+
+ rc = -ENXIO;
+ goto bnx2x_init_done;
+ } while (0);
+ }
+ }
+
+ sc->recovery_state = BNX2X_RECOVERY_DONE;
+
+ rc = bnx2x_nic_load(sc);
+
+bnx2x_init_done:
+
+ if (rc) {
+ PMD_DRV_LOG(NOTICE, "Initialization failed, "
+ "stack notified driver is NOT running!");
+ }
+
+ return rc;
+}
+
+static void bnx2x_get_function_num(struct bnx2x_softc *sc)
+{
+ uint32_t val = 0;
+
+ /*
+ * Read the ME register to get the function number. The ME register
+ * holds the relative-function number and absolute-function number. The
+ * absolute-function number appears only in E2 and above. Before that
+ * these bits always contained zero, therefore we cannot blindly use them.
+ */
+
+ val = REG_RD(sc, BAR_ME_REGISTER);
+
+ sc->pfunc_rel =
+ (uint8_t) ((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
+ sc->path_id =
+ (uint8_t) ((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) &
+ 1;
+
+ if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
+ sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
+ } else {
+ sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
+ }
+
+ PMD_DRV_LOG(DEBUG,
+ "Relative function %d, Absolute function %d, Path %d",
+ sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
+}
+
+static uint32_t bnx2x_get_shmem_mf_cfg_base(struct bnx2x_softc *sc)
+{
+ uint32_t shmem2_size;
+ uint32_t offset;
+ uint32_t mf_cfg_offset_value;
+
+ /* Non 57712 */
+ offset = (SHMEM_ADDR(sc, func_mb) +
+ (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
+
+ /* 57712 plus */
+ if (sc->devinfo.shmem2_base != 0) {
+ shmem2_size = SHMEM2_RD(sc, size);
+ if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
+ mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
+ if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
+ offset = mf_cfg_offset_value;
+ }
+ }
+ }
+
+ return offset;
+}
+
+static uint32_t bnx2x_pcie_capability_read(struct bnx2x_softc *sc, int reg)
+{
+ uint32_t ret;
+ struct bnx2x_pci_cap *caps;
+
+ /* ensure PCIe capability is enabled */
+ caps = pci_find_cap(sc, PCIY_EXPRESS, BNX2X_PCI_CAP);
+ if (NULL != caps) {
+ PMD_DRV_LOG(DEBUG, "Found PCIe capability: "
+ "id=0x%04X type=0x%04X addr=0x%08X",
+ caps->id, caps->type, caps->addr);
+ pci_read(sc, (caps->addr + reg), &ret, 2);
+ return ret;
+ }
+
+ PMD_DRV_LOG(WARNING, "PCIe capability NOT FOUND!!!");
+
+ return 0;
+}
+
+static uint8_t bnx2x_is_pcie_pending(struct bnx2x_softc *sc)
+{
+ return bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA) &
+ PCIM_EXP_STA_TRANSACTION_PND;
+}
+
+/*
+* Walk the PCI capabilities list for the device to find what features are
+* supported. These capabilities may be enabled/disabled by firmware so it's
+* best to walk the list rather than make assumptions.
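+* (PM, PCIe, MSI and MSI-X are the capabilities probed below).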
+*/ +static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc) +{ + PMD_INIT_FUNC_TRACE(); + + struct bnx2x_pci_cap *caps; + uint16_t link_status; +#ifdef RTE_LIBRTE_BNX2X_DEBUG + int reg = 0; +#endif + + /* check if PCI Power Management is enabled */ + caps = pci_find_cap(sc, PCIY_PMG, BNX2X_PCI_CAP); + if (NULL != caps) { + PMD_DRV_LOG(DEBUG, "Found PM capability: " + "id=0x%04X type=0x%04X addr=0x%08X", + caps->id, caps->type, caps->addr); + + sc->devinfo.pcie_cap_flags |= BNX2X_PM_CAPABLE_FLAG; + sc->devinfo.pcie_pm_cap_reg = caps->addr; + } + + link_status = bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA); + + sc->devinfo.pcie_link_speed = (link_status & PCIM_LINK_STA_SPEED); + sc->devinfo.pcie_link_width = + ((link_status & PCIM_LINK_STA_WIDTH) >> 4); + + PMD_DRV_LOG(DEBUG, "PCIe link speed=%d width=%d", + sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width); + + sc->devinfo.pcie_cap_flags |= BNX2X_PCIE_CAPABLE_FLAG; + + /* check if MSI capability is enabled */ + caps = pci_find_cap(sc, PCIY_MSI, BNX2X_PCI_CAP); + if (NULL != caps) { + PMD_DRV_LOG(DEBUG, "Found MSI capability at 0x%04x", reg); + + sc->devinfo.pcie_cap_flags |= BNX2X_MSI_CAPABLE_FLAG; + sc->devinfo.pcie_msi_cap_reg = caps->addr; + } + + /* check if MSI-X capability is enabled */ + caps = pci_find_cap(sc, PCIY_MSIX, BNX2X_PCI_CAP); + if (NULL != caps) { + PMD_DRV_LOG(DEBUG, "Found MSI-X capability at 0x%04x", reg); + + sc->devinfo.pcie_cap_flags |= BNX2X_MSIX_CAPABLE_FLAG; + sc->devinfo.pcie_msix_cap_reg = caps->addr; + } +} + +static int bnx2x_get_shmem_mf_cfg_info_sd(struct bnx2x_softc *sc) +{ + struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info; + uint32_t val; + + /* get the outer vlan if we're in switch-dependent mode */ + + val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); + mf_info->ext_id = (uint16_t) val; + + mf_info->multi_vnics_mode = 1; + + if (!VALID_OVLAN(mf_info->ext_id)) { + PMD_DRV_LOG(NOTICE, "Invalid VLAN (%d)", mf_info->ext_id); + return 1; + } + + /* get the capabilities */ + if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == + FUNC_MF_CFG_PROTOCOL_ISCSI) { + mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI; + } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) + == FUNC_MF_CFG_PROTOCOL_FCOE) { + mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE; + } else { + mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET; + } + + mf_info->vnics_per_port = + (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; + + return 0; +} + +static uint32_t bnx2x_get_shmem_ext_proto_support_flags(struct bnx2x_softc *sc) +{ + uint32_t retval = 0; + uint32_t val; + + val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); + + if (val & MACP_FUNC_CFG_FLAGS_ENABLED) { + if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) { + retval |= MF_PROTO_SUPPORT_ETHERNET; + } + if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { + retval |= MF_PROTO_SUPPORT_ISCSI; + } + if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { + retval |= MF_PROTO_SUPPORT_FCOE; + } + } + + return retval; +} + +static int bnx2x_get_shmem_mf_cfg_info_si(struct bnx2x_softc *sc) +{ + struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info; + uint32_t val; + + /* + * There is no outer vlan if we're in switch-independent mode. + * If the mac is valid then assume multi-function. 
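+ * (the code below treats any set MACP function-config flag as multi-function).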
+ */
+
+ val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
+
+ mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
+
+ mf_info->mf_protos_supported =
+ bnx2x_get_shmem_ext_proto_support_flags(sc);
+
+ mf_info->vnics_per_port =
+ (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
+
+ return 0;
+}
+
+static int bnx2x_get_shmem_mf_cfg_info_niv(struct bnx2x_softc *sc)
+{
+ struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info;
+ uint32_t e1hov_tag;
+ uint32_t func_config;
+ uint32_t niv_config;
+
+ mf_info->multi_vnics_mode = 1;
+
+ e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
+ func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
+ niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
+
+ mf_info->ext_id =
+ (uint16_t) ((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
+ FUNC_MF_CFG_E1HOV_TAG_SHIFT);
+
+ mf_info->default_vlan =
+ (uint16_t) ((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
+ FUNC_MF_CFG_AFEX_VLAN_SHIFT);
+
+ mf_info->niv_allowed_priorities =
+ (uint8_t) ((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
+ FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
+
+ mf_info->niv_default_cos =
+ (uint8_t) ((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
+ FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
+
+ mf_info->afex_vlan_mode =
+ ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
+ FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
+
+ mf_info->niv_mba_enabled =
+ ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
+ FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
+
+ mf_info->mf_protos_supported =
+ bnx2x_get_shmem_ext_proto_support_flags(sc);
+
+ mf_info->vnics_per_port =
+ (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
+
+ return 0;
+}
+
+static int bnx2x_check_valid_mf_cfg(struct bnx2x_softc *sc)
+{
+ struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info;
+ uint32_t mf_cfg1;
+ uint32_t mf_cfg2;
+ uint32_t ovlan1;
+ uint32_t ovlan2;
+ uint8_t i, j;
+
+ /* various MF mode sanity checks... */
+
+ if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
+ PMD_DRV_LOG(NOTICE,
+ "Enumerated function %d is marked as hidden",
+ SC_PORT(sc));
+ return 1;
+ }
+
+ if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
+ PMD_DRV_LOG(NOTICE, "vnics_per_port=%d multi_vnics_mode=%d",
+ mf_info->vnics_per_port, mf_info->multi_vnics_mode);
+ return 1;
+ }
+
+ if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
+/* vnic id > 0 must have valid ovlan in switch-dependent mode */
+ if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
+ PMD_DRV_LOG(NOTICE, "mf_mode=SD vnic_id=%d ovlan=%d",
+ SC_VN(sc), OVLAN(sc));
+ return 1;
+ }
+
+ if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
+ PMD_DRV_LOG(NOTICE,
+ "mf_mode=SD multi_vnics_mode=%d ovlan=%d",
+ mf_info->multi_vnics_mode, OVLAN(sc));
+ return 1;
+ }
+
+/*
+ * Verify all functions are either MF or SF mode. If MF, make sure
+ * that all non-hidden functions have a valid ovlan. If SF,
+ * make sure that all non-hidden functions have an invalid ovlan.
+ */
+ FOREACH_ABS_FUNC_IN_PORT(sc, i) {
+ mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
+ ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
+ if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
+ (((mf_info->multi_vnics_mode)
+ && !VALID_OVLAN(ovlan1))
+ || ((!mf_info->multi_vnics_mode)
+ && VALID_OVLAN(ovlan1)))) {
+ PMD_DRV_LOG(NOTICE,
+ "mf_mode=SD function %d MF config "
+ "mismatch, multi_vnics_mode=%d ovlan=%d",
+ i, mf_info->multi_vnics_mode,
+ ovlan1);
+ return 1;
+ }
+ }
+
+/* Verify all funcs on the same port each have a different ovlan.
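+ * Functions that share a port differ by two in the absolute function number.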
*/ + FOREACH_ABS_FUNC_IN_PORT(sc, i) { + mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); + ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); + /* iterate from the next function on the port to the max func */ + for (j = i + 2; j < MAX_FUNC_NUM; j += 2) { + mf_cfg2 = + MFCFG_RD(sc, func_mf_config[j].config); + ovlan2 = + MFCFG_RD(sc, func_mf_config[j].e1hov_tag); + if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) + && VALID_OVLAN(ovlan1) + && !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) + && VALID_OVLAN(ovlan2) + && (ovlan1 == ovlan2)) { + PMD_DRV_LOG(NOTICE, + "mf_mode=SD functions %d and %d " + "have the same ovlan (%d)", + i, j, ovlan1); + return 1; + } + } + } + } + /* MULTI_FUNCTION_SD */ + return 0; +} + +static int bnx2x_get_mf_cfg_info(struct bnx2x_softc *sc) +{ + struct bnx2x_mf_info *mf_info = &sc->devinfo.mf_info; + uint32_t val, mac_upper; + uint8_t i, vnic; + + /* initialize mf_info defaults */ + mf_info->vnics_per_port = 1; + mf_info->multi_vnics_mode = FALSE; + mf_info->path_has_ovlan = FALSE; + mf_info->mf_mode = SINGLE_FUNCTION; + + if (!CHIP_IS_MF_CAP(sc)) { + return 0; + } + + if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) { + PMD_DRV_LOG(NOTICE, "Invalid mf_cfg_base!"); + return 1; + } + + /* get the MF mode (switch dependent / independent / single-function) */ + + val = SHMEM_RD(sc, dev_info.shared_feature_config.config); + + switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) { + case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: + + mac_upper = + MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); + + /* check for legal upper mac bytes */ + if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) { + mf_info->mf_mode = MULTI_FUNCTION_SI; + } else { + PMD_DRV_LOG(NOTICE, + "Invalid config for Switch Independent mode"); + } + + break; + + case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: + case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4: + + /* get outer vlan configuration */ + val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); + + if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) != + FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { + mf_info->mf_mode = MULTI_FUNCTION_SD; + } else { + PMD_DRV_LOG(NOTICE, + "Invalid config for Switch Dependent mode"); + } + + break; + + case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: + + /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */ + return 0; + + case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: + + /* + * Mark MF mode as NIV if MCP version includes NPAR-SD support + * and the MAC address is valid. + */ + mac_upper = + MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); + + if ((SHMEM2_HAS(sc, afex_driver_support)) && + (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) { + mf_info->mf_mode = MULTI_FUNCTION_AFEX; + } else { + PMD_DRV_LOG(NOTICE, "Invalid config for AFEX mode"); + } + + break; + + default: + + PMD_DRV_LOG(NOTICE, "Unknown MF mode (0x%08x)", + (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)); + + return 1; + } + + /* set path mf_mode (which could be different than function mf_mode) */ + if (mf_info->mf_mode == MULTI_FUNCTION_SD) { + mf_info->path_has_ovlan = TRUE; + } else if (mf_info->mf_mode == SINGLE_FUNCTION) { +/* + * Decide on path multi vnics mode. 
If we're not in MF mode and in + * 4-port mode, this is good enough to check vnic-0 of the other port + * on the same path + */ + if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { + uint8_t other_port = !(PORT_ID(sc) & 1); + uint8_t abs_func_other_port = + (SC_PATH(sc) + (2 * other_port)); + + val = + MFCFG_RD(sc, + func_mf_config + [abs_func_other_port].e1hov_tag); + + mf_info->path_has_ovlan = VALID_OVLAN((uint16_t) val); + } + } + + if (mf_info->mf_mode == SINGLE_FUNCTION) { +/* invalid MF config */ + if (SC_VN(sc) >= 1) { + PMD_DRV_LOG(NOTICE, "VNIC ID >= 1 in SF mode"); + return 1; + } + + return 0; + } + + /* get the MF configuration */ + mf_info->mf_config[SC_VN(sc)] = + MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); + + switch (mf_info->mf_mode) { + case MULTI_FUNCTION_SD: + + bnx2x_get_shmem_mf_cfg_info_sd(sc); + break; + + case MULTI_FUNCTION_SI: + + bnx2x_get_shmem_mf_cfg_info_si(sc); + break; + + case MULTI_FUNCTION_AFEX: + + bnx2x_get_shmem_mf_cfg_info_niv(sc); + break; + + default: + + PMD_DRV_LOG(NOTICE, "Get MF config failed (mf_mode=0x%08x)", + mf_info->mf_mode); + return 1; + } + + /* get the congestion management parameters */ + + vnic = 0; + FOREACH_ABS_FUNC_IN_PORT(sc, i) { +/* get min/max bw */ + val = MFCFG_RD(sc, func_mf_config[i].config); + mf_info->min_bw[vnic] = + ((val & FUNC_MF_CFG_MIN_BW_MASK) >> + FUNC_MF_CFG_MIN_BW_SHIFT); + mf_info->max_bw[vnic] = + ((val & FUNC_MF_CFG_MAX_BW_MASK) >> + FUNC_MF_CFG_MAX_BW_SHIFT); + vnic++; + } + + return bnx2x_check_valid_mf_cfg(sc); +} + +static int bnx2x_get_shmem_info(struct bnx2x_softc *sc) +{ + int port; + uint32_t mac_hi, mac_lo, val; + + PMD_INIT_FUNC_TRACE(); + + port = SC_PORT(sc); + mac_hi = mac_lo = 0; + + sc->link_params.sc = sc; + sc->link_params.port = port; + + /* get the hardware config info */ + sc->devinfo.hw_config = SHMEM_RD(sc, dev_info.shared_hw_config.config); + sc->devinfo.hw_config2 = + SHMEM_RD(sc, dev_info.shared_hw_config.config2); + + sc->link_params.hw_led_mode = + ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >> + SHARED_HW_CFG_LED_MODE_SHIFT); + + /* get the port feature config */ + sc->port.config = + SHMEM_RD(sc, dev_info.port_feature_config[port].config); + + /* get the link params */ + sc->link_params.speed_cap_mask[ELINK_INT_PHY] = + SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask) + & PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; + sc->link_params.speed_cap_mask[ELINK_EXT_PHY1] = + SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2) + & PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; + + /* get the lane config */ + sc->link_params.lane_config = + SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config); + + /* get the link config */ + val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config); + sc->port.link_config[ELINK_INT_PHY] = val; + sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK); + sc->port.link_config[ELINK_EXT_PHY1] = + SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2); + + /* get the override preemphasis flag and enable it or turn it off */ + val = SHMEM_RD(sc, dev_info.shared_feature_config.config); + if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) { + sc->link_params.feature_config_flags |= + ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; + } else { + sc->link_params.feature_config_flags &= + ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; + } + + /* get the initial value of the link params */ + sc->link_params.multi_phy_config = + SHMEM_RD(sc, 
dev_info.port_hw_config[port].multi_phy_config); + + /* get external phy info */ + sc->port.ext_phy_config = + SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); + + /* get the multifunction configuration */ + bnx2x_get_mf_cfg_info(sc); + + /* get the mac address */ + if (IS_MF(sc)) { + mac_hi = + MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); + mac_lo = + MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower); + } else { + mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper); + mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower); + } + + if ((mac_lo == 0) && (mac_hi == 0)) { + *sc->mac_addr_str = 0; + PMD_DRV_LOG(NOTICE, "No Ethernet address programmed!"); + } else { + sc->link_params.mac_addr[0] = (uint8_t) (mac_hi >> 8); + sc->link_params.mac_addr[1] = (uint8_t) (mac_hi); + sc->link_params.mac_addr[2] = (uint8_t) (mac_lo >> 24); + sc->link_params.mac_addr[3] = (uint8_t) (mac_lo >> 16); + sc->link_params.mac_addr[4] = (uint8_t) (mac_lo >> 8); + sc->link_params.mac_addr[5] = (uint8_t) (mac_lo); + snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str), + "%02x:%02x:%02x:%02x:%02x:%02x", + sc->link_params.mac_addr[0], + sc->link_params.mac_addr[1], + sc->link_params.mac_addr[2], + sc->link_params.mac_addr[3], + sc->link_params.mac_addr[4], + sc->link_params.mac_addr[5]); + PMD_DRV_LOG(DEBUG, "Ethernet address: %s", sc->mac_addr_str); + } + + return 0; +} + +static void bnx2x_media_detect(struct bnx2x_softc *sc) +{ + uint32_t phy_idx = bnx2x_get_cur_phy_idx(sc); + switch (sc->link_params.phy[phy_idx].media_type) { + case ELINK_ETH_PHY_SFPP_10G_FIBER: + case ELINK_ETH_PHY_SFP_1G_FIBER: + case ELINK_ETH_PHY_XFP_FIBER: + case ELINK_ETH_PHY_KR: + case ELINK_ETH_PHY_CX4: + PMD_DRV_LOG(INFO, "Found 10GBase-CX4 media."); + sc->media = IFM_10G_CX4; + break; + case ELINK_ETH_PHY_DA_TWINAX: + PMD_DRV_LOG(INFO, "Found 10Gb Twinax media."); + sc->media = IFM_10G_TWINAX; + break; + case ELINK_ETH_PHY_BASE_T: + PMD_DRV_LOG(INFO, "Found 10GBase-T media."); + sc->media = IFM_10G_T; + break; + case ELINK_ETH_PHY_NOT_PRESENT: + PMD_DRV_LOG(INFO, "Media not present."); + sc->media = 0; + break; + case ELINK_ETH_PHY_UNSPECIFIED: + default: + PMD_DRV_LOG(INFO, "Unknown media!"); + sc->media = 0; + break; + } +} + +#define GET_FIELD(value, fname) \ +(((value) & (fname##_MASK)) >> (fname##_SHIFT)) +#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) +#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) + +static int bnx2x_get_igu_cam_info(struct bnx2x_softc *sc) +{ + int pfid = SC_FUNC(sc); + int igu_sb_id; + uint32_t val; + uint8_t fid, igu_sb_cnt = 0; + + sc->igu_base_sb = 0xff; + + if (CHIP_INT_MODE_IS_BC(sc)) { + int vn = SC_VN(sc); + igu_sb_cnt = sc->igu_sb_cnt; + sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) * + FP_SB_MAX_E1x); + sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x + + (CHIP_IS_MODE_4_PORT(sc) ? 
pfid : vn)); + return 0; + } + + /* IGU in normal mode - read CAM */ + for (igu_sb_id = 0; + igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; igu_sb_id++) { + val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); + if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) { + continue; + } + fid = IGU_FID(val); + if ((fid & IGU_FID_ENCODE_IS_PF)) { + if ((fid & IGU_FID_PF_NUM_MASK) != pfid) { + continue; + } + if (IGU_VEC(val) == 0) { + /* default status block */ + sc->igu_dsb_id = igu_sb_id; + } else { + if (sc->igu_base_sb == 0xff) { + sc->igu_base_sb = igu_sb_id; + } + igu_sb_cnt++; + } + } + } + + /* + * Due to new PF resource allocation by MFW T7.4 and above, it's optional + * that number of CAM entries will not be equal to the value advertised in + * PCI. Driver should use the minimal value of both as the actual status + * block count + */ + sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt); + + if (igu_sb_cnt == 0) { + PMD_DRV_LOG(ERR, "CAM configuration error"); + return -1; + } + + return 0; +} + +/* +* Gather various information from the device config space, the device itself, +* shmem, and the user input. +*/ +static int bnx2x_get_device_info(struct bnx2x_softc *sc) +{ + uint32_t val; + int rc; + + /* get the chip revision (chip metal comes from pci config space) */ + sc->devinfo.chip_id = sc->link_params.chip_id = + (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) | + ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) | + (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) | + ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0)); + + /* force 57811 according to MISC register */ + if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { + if (CHIP_IS_57810(sc)) { + sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) | + (sc-> + devinfo.chip_id & 0x0000ffff)); + } else if (CHIP_IS_57810_MF(sc)) { + sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) | + (sc-> + devinfo.chip_id & 0x0000ffff)); + } + sc->devinfo.chip_id |= 0x1; + } + + PMD_DRV_LOG(DEBUG, + "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)", + sc->devinfo.chip_id, + ((sc->devinfo.chip_id >> 16) & 0xffff), + ((sc->devinfo.chip_id >> 12) & 0xf), + ((sc->devinfo.chip_id >> 4) & 0xff), + ((sc->devinfo.chip_id >> 0) & 0xf)); + + val = (REG_RD(sc, 0x2874) & 0x55); + if ((sc->devinfo.chip_id & 0x1) || (CHIP_IS_E1H(sc) && (val == 0x55))) { + sc->flags |= BNX2X_ONE_PORT_FLAG; + PMD_DRV_LOG(DEBUG, "single port device"); + } + + /* set the doorbell size */ + sc->doorbell_size = (1 << BNX2X_DB_SHIFT); + + /* determine whether the device is in 2 port or 4 port mode */ + sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1h */ + if (CHIP_IS_E2E3(sc)) { +/* + * Read port4mode_en_ovwr[0]: + * If 1, four port mode is in port4mode_en_ovwr[1]. + * If 0, four port mode is in port4mode_en[0]. + */ + val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR); + if (val & 1) { + val = ((val >> 1) & 1); + } else { + val = REG_RD(sc, MISC_REG_PORT4MODE_EN); + } + + sc->devinfo.chip_port_mode = + (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE; + + PMD_DRV_LOG(DEBUG, "Port mode = %s", (val) ? "4" : "2"); + } + + /* get the function and path info for the device */ + bnx2x_get_function_num(sc); + + /* get the shared memory base address */ + sc->devinfo.shmem_base = + sc->link_params.shmem_base = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); + sc->devinfo.shmem2_base = + REG_RD(sc, (SC_PATH(sc) ? 
MISC_REG_GENERIC_CR_1 : + MISC_REG_GENERIC_CR_0)); + + if (!sc->devinfo.shmem_base) { +/* this should ONLY prevent upcoming shmem reads */ + PMD_DRV_LOG(INFO, "MCP not active"); + sc->flags |= BNX2X_NO_MCP_FLAG; + return 0; + } + + /* make sure the shared memory contents are valid */ + val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); + if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) != + (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) { + PMD_DRV_LOG(NOTICE, "Invalid SHMEM validity signature: 0x%08x", + val); + return 0; + } + + /* get the bootcode version */ + sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev); + snprintf(sc->devinfo.bc_ver_str, + sizeof(sc->devinfo.bc_ver_str), + "%d.%d.%d", + ((sc->devinfo.bc_ver >> 24) & 0xff), + ((sc->devinfo.bc_ver >> 16) & 0xff), + ((sc->devinfo.bc_ver >> 8) & 0xff)); + PMD_DRV_LOG(INFO, "Bootcode version: %s", sc->devinfo.bc_ver_str); + + /* get the bootcode shmem address */ + sc->devinfo.mf_cfg_base = bnx2x_get_shmem_mf_cfg_base(sc); + + /* clean indirect addresses as they're not used */ + pci_write_long(sc, PCICFG_GRC_ADDRESS, 0); + if (IS_PF(sc)) { + REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0); + REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0); + REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0); + REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0); + if (CHIP_IS_E1x(sc)) { + REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0); + REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0); + REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0); + REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0); + } + +/* + * Enable internal target-read (in case we are probed after PF + * FLR). Must be done prior to any BAR read access. Only for + * 57712 and up + */ + if (!CHIP_IS_E1x(sc)) { + REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, + 1); + } + } + + /* get the nvram size */ + val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4); + sc->devinfo.flash_size = + (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE)); + + bnx2x_set_power_state(sc, PCI_PM_D0); + /* get various configuration parameters from shmem */ + bnx2x_get_shmem_info(sc); + + /* initialize IGU parameters */ + if (CHIP_IS_E1x(sc)) { + sc->devinfo.int_block = INT_BLOCK_HC; + sc->igu_dsb_id = DEF_SB_IGU_ID; + sc->igu_base_sb = 0; + } else { + sc->devinfo.int_block = INT_BLOCK_IGU; + +/* do not allow device reset during IGU info preocessing */ + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); + + val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); + + if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { + int tout = 5000; + + val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); + REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val); + REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f); + + while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) { + tout--; + DELAY(1000); + } + + if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) { + PMD_DRV_LOG(NOTICE, + "FORCING IGU Normal Mode failed!!!"); + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); + return -1; + } + } + + if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { + PMD_DRV_LOG(DEBUG, "IGU Backward Compatible Mode"); + sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP; + } else { + PMD_DRV_LOG(DEBUG, "IGU Normal Mode"); + } + + rc = bnx2x_get_igu_cam_info(sc); + + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); + + if (rc) { + return rc; + } + } + + /* + * Get base FW non-default (fast path) status block ID. This value is + * used to initialize the fw_sb_id saved on the fp/queue structure to + * determine the id used by the FW. 
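+ * (E1x derives it from the port number; later chips reuse the IGU base SB.)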
+ */ + if (CHIP_IS_E1x(sc)) { + sc->base_fw_ndsb = + ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc)); + } else { +/* + * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of + * the same queue are indicated on the same IGU SB). So we prefer + * FW and IGU SBs to be the same value. + */ + sc->base_fw_ndsb = sc->igu_base_sb; + } + + elink_phy_probe(&sc->link_params); + + return 0; +} + +static void +bnx2x_link_settings_supported(struct bnx2x_softc *sc, uint32_t switch_cfg) +{ + uint32_t cfg_size = 0; + uint32_t idx; + uint8_t port = SC_PORT(sc); + + /* aggregation of supported attributes of all external phys */ + sc->port.supported[0] = 0; + sc->port.supported[1] = 0; + + switch (sc->link_params.num_phys) { + case 1: + sc->port.supported[0] = + sc->link_params.phy[ELINK_INT_PHY].supported; + cfg_size = 1; + break; + case 2: + sc->port.supported[0] = + sc->link_params.phy[ELINK_EXT_PHY1].supported; + cfg_size = 1; + break; + case 3: + if (sc->link_params.multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED) { + sc->port.supported[1] = + sc->link_params.phy[ELINK_EXT_PHY1].supported; + sc->port.supported[0] = + sc->link_params.phy[ELINK_EXT_PHY2].supported; + } else { + sc->port.supported[0] = + sc->link_params.phy[ELINK_EXT_PHY1].supported; + sc->port.supported[1] = + sc->link_params.phy[ELINK_EXT_PHY2].supported; + } + cfg_size = 2; + break; + } + + if (!(sc->port.supported[0] || sc->port.supported[1])) { + PMD_DRV_LOG(ERR, + "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)", + SHMEM_RD(sc, + dev_info.port_hw_config + [port].external_phy_config), + SHMEM_RD(sc, + dev_info.port_hw_config + [port].external_phy_config2)); + return; + } + + if (CHIP_IS_E3(sc)) + sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR); + else { + switch (switch_cfg) { + case ELINK_SWITCH_CFG_1G: + sc->port.phy_addr = + REG_RD(sc, + NIG_REG_SERDES0_CTRL_PHY_ADDR + port * 0x10); + break; + case ELINK_SWITCH_CFG_10G: + sc->port.phy_addr = + REG_RD(sc, + NIG_REG_XGXS0_CTRL_PHY_ADDR + port * 0x18); + break; + default: + PMD_DRV_LOG(ERR, + "Invalid switch config in" + "link_config=0x%08x", + sc->port.link_config[0]); + return; + } + } + + PMD_DRV_LOG(INFO, "PHY addr 0x%08x", sc->port.phy_addr); + + /* mask what we support according to speed_cap_mask per configuration */ + for (idx = 0; idx < cfg_size; idx++) { + if (!(sc->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) { + sc->port.supported[idx] &= + ~ELINK_SUPPORTED_10baseT_Half; + } + + if (!(sc->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) { + sc->port.supported[idx] &= + ~ELINK_SUPPORTED_10baseT_Full; + } + + if (!(sc->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) { + sc->port.supported[idx] &= + ~ELINK_SUPPORTED_100baseT_Half; + } + + if (!(sc->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) { + sc->port.supported[idx] &= + ~ELINK_SUPPORTED_100baseT_Full; + } + + if (!(sc->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) { + sc->port.supported[idx] &= + ~ELINK_SUPPORTED_1000baseT_Full; + } + + if (!(sc->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) { + sc->port.supported[idx] &= + ~ELINK_SUPPORTED_2500baseX_Full; + } + + if (!(sc->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { + sc->port.supported[idx] &= + ~ELINK_SUPPORTED_10000baseT_Full; + } + + if (!(sc->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) { + 
sc->port.supported[idx] &= + ~ELINK_SUPPORTED_20000baseKR2_Full; + } + } + + PMD_DRV_LOG(INFO, "PHY supported 0=0x%08x 1=0x%08x", + sc->port.supported[0], sc->port.supported[1]); +} + +static void bnx2x_link_settings_requested(struct bnx2x_softc *sc) +{ + uint32_t link_config; + uint32_t idx; + uint32_t cfg_size = 0; + + sc->port.advertising[0] = 0; + sc->port.advertising[1] = 0; + + switch (sc->link_params.num_phys) { + case 1: + case 2: + cfg_size = 1; + break; + case 3: + cfg_size = 2; + break; + } + + for (idx = 0; idx < cfg_size; idx++) { + sc->link_params.req_duplex[idx] = DUPLEX_FULL; + link_config = sc->port.link_config[idx]; + + switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { + case PORT_FEATURE_LINK_SPEED_AUTO: + if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) { + sc->link_params.req_line_speed[idx] = + ELINK_SPEED_AUTO_NEG; + sc->port.advertising[idx] |= + sc->port.supported[idx]; + if (sc->link_params.phy[ELINK_EXT_PHY1].type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) + sc->port.advertising[idx] |= + (ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full); + } else { + /* force 10G, no AN */ + sc->link_params.req_line_speed[idx] = + ELINK_SPEED_10000; + sc->port.advertising[idx] |= + (ADVERTISED_10000baseT_Full | + ADVERTISED_FIBRE); + continue; + } + break; + + case PORT_FEATURE_LINK_SPEED_10M_FULL: + if (sc-> + port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) + { + sc->link_params.req_line_speed[idx] = + ELINK_SPEED_10; + sc->port.advertising[idx] |= + (ADVERTISED_10baseT_Full | ADVERTISED_TP); + } else { + PMD_DRV_LOG(ERR, + "Invalid NVRAM config link_config=0x%08x " + "speed_cap_mask=0x%08x", + link_config, + sc-> + link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_10M_HALF: + if (sc-> + port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) + { + sc->link_params.req_line_speed[idx] = + ELINK_SPEED_10; + sc->link_params.req_duplex[idx] = DUPLEX_HALF; + sc->port.advertising[idx] |= + (ADVERTISED_10baseT_Half | ADVERTISED_TP); + } else { + PMD_DRV_LOG(ERR, + "Invalid NVRAM config link_config=0x%08x " + "speed_cap_mask=0x%08x", + link_config, + sc-> + link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_100M_FULL: + if (sc-> + port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) + { + sc->link_params.req_line_speed[idx] = + ELINK_SPEED_100; + sc->port.advertising[idx] |= + (ADVERTISED_100baseT_Full | ADVERTISED_TP); + } else { + PMD_DRV_LOG(ERR, + "Invalid NVRAM config link_config=0x%08x " + "speed_cap_mask=0x%08x", + link_config, + sc-> + link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_100M_HALF: + if (sc-> + port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) + { + sc->link_params.req_line_speed[idx] = + ELINK_SPEED_100; + sc->link_params.req_duplex[idx] = DUPLEX_HALF; + sc->port.advertising[idx] |= + (ADVERTISED_100baseT_Half | ADVERTISED_TP); + } else { + PMD_DRV_LOG(ERR, + "Invalid NVRAM config link_config=0x%08x " + "speed_cap_mask=0x%08x", + link_config, + sc-> + link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_1G: + if (sc->port.supported[idx] & + ELINK_SUPPORTED_1000baseT_Full) { + sc->link_params.req_line_speed[idx] = + ELINK_SPEED_1000; + sc->port.advertising[idx] |= + (ADVERTISED_1000baseT_Full | ADVERTISED_TP); + } else { + PMD_DRV_LOG(ERR, + "Invalid NVRAM config link_config=0x%08x " + "speed_cap_mask=0x%08x", + link_config, + sc-> + link_params.speed_cap_mask[idx]); + return; + 
} + break; + + case PORT_FEATURE_LINK_SPEED_2_5G: + if (sc->port.supported[idx] & + ELINK_SUPPORTED_2500baseX_Full) { + sc->link_params.req_line_speed[idx] = + ELINK_SPEED_2500; + sc->port.advertising[idx] |= + (ADVERTISED_2500baseX_Full | ADVERTISED_TP); + } else { + PMD_DRV_LOG(ERR, + "Invalid NVRAM config link_config=0x%08x " + "speed_cap_mask=0x%08x", + link_config, + sc-> + link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_10G_CX4: + if (sc->port.supported[idx] & + ELINK_SUPPORTED_10000baseT_Full) { + sc->link_params.req_line_speed[idx] = + ELINK_SPEED_10000; + sc->port.advertising[idx] |= + (ADVERTISED_10000baseT_Full | + ADVERTISED_FIBRE); + } else { + PMD_DRV_LOG(ERR, + "Invalid NVRAM config link_config=0x%08x " + "speed_cap_mask=0x%08x", + link_config, + sc-> + link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_20G: + sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000; + break; + + default: + PMD_DRV_LOG(ERR, + "Invalid NVRAM config link_config=0x%08x " + "speed_cap_mask=0x%08x", link_config, + sc->link_params.speed_cap_mask[idx]); + sc->link_params.req_line_speed[idx] = + ELINK_SPEED_AUTO_NEG; + sc->port.advertising[idx] = sc->port.supported[idx]; + break; + } + + sc->link_params.req_flow_ctrl[idx] = + (link_config & PORT_FEATURE_FLOW_CONTROL_MASK); + + if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) { + if (! + (sc-> + port.supported[idx] & ELINK_SUPPORTED_Autoneg)) { + sc->link_params.req_flow_ctrl[idx] = + ELINK_FLOW_CTRL_NONE; + } else { + bnx2x_set_requested_fc(sc); + } + } + } +} + +static void bnx2x_get_phy_info(struct bnx2x_softc *sc) +{ + uint8_t port = SC_PORT(sc); + uint32_t eee_mode; + + PMD_INIT_FUNC_TRACE(); + + /* shmem data already read in bnx2x_get_shmem_info() */ + + bnx2x_link_settings_supported(sc, sc->link_params.switch_cfg); + bnx2x_link_settings_requested(sc); + + /* configure link feature according to nvram value */ + eee_mode = + (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) + & PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> + PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); + if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) { + sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI | + ELINK_EEE_MODE_ENABLE_LPI | + ELINK_EEE_MODE_OUTPUT_TIME); + } else { + sc->link_params.eee_mode = 0; + } + + /* get the media type */ + bnx2x_media_detect(sc); +} + +static void bnx2x_set_modes_bitmap(struct bnx2x_softc *sc) +{ + uint32_t flags = MODE_ASIC | MODE_PORT2; + + if (CHIP_IS_E2(sc)) { + flags |= MODE_E2; + } else if (CHIP_IS_E3(sc)) { + flags |= MODE_E3; + if (CHIP_REV(sc) == CHIP_REV_Ax) { + flags |= MODE_E3_A0; + } else { /*if (CHIP_REV(sc) == CHIP_REV_Bx) */ + + flags |= MODE_E3_B0 | MODE_COS3; + } + } + + if (IS_MF(sc)) { + flags |= MODE_MF; + switch (sc->devinfo.mf_info.mf_mode) { + case MULTI_FUNCTION_SD: + flags |= MODE_MF_SD; + break; + case MULTI_FUNCTION_SI: + flags |= MODE_MF_SI; + break; + case MULTI_FUNCTION_AFEX: + flags |= MODE_MF_AFEX; + break; + } + } else { + flags |= MODE_SF; + } + +#if defined(__LITTLE_ENDIAN) + flags |= MODE_LITTLE_ENDIAN; +#else /* __BIG_ENDIAN */ + flags |= MODE_BIG_ENDIAN; +#endif + + INIT_MODE_FLAGS(sc) = flags; +} + +int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc) +{ + struct bnx2x_fastpath *fp; + char buf[32]; + uint32_t i; + + if (IS_PF(sc)) { +/************************/ +/* DEFAULT STATUS BLOCK */ +/************************/ + + if (bnx2x_dma_alloc(sc, sizeof(struct host_sp_status_block), + &sc->def_sb_dma, 
"def_sb", + RTE_CACHE_LINE_SIZE) != 0) { + return -1; + } + + sc->def_sb = + (struct host_sp_status_block *)sc->def_sb_dma.vaddr; +/***************/ +/* EVENT QUEUE */ +/***************/ + + if (bnx2x_dma_alloc(sc, BNX2X_PAGE_SIZE, + &sc->eq_dma, "ev_queue", + RTE_CACHE_LINE_SIZE) != 0) { + sc->def_sb = NULL; + return -1; + } + + sc->eq = (union event_ring_elem *)sc->eq_dma.vaddr; + +/*************/ +/* SLOW PATH */ +/*************/ + + if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_slowpath), + &sc->sp_dma, "sp", + RTE_CACHE_LINE_SIZE) != 0) { + sc->eq = NULL; + sc->def_sb = NULL; + return -1; + } + + sc->sp = (struct bnx2x_slowpath *)sc->sp_dma.vaddr; + +/*******************/ +/* SLOW PATH QUEUE */ +/*******************/ + + if (bnx2x_dma_alloc(sc, BNX2X_PAGE_SIZE, + &sc->spq_dma, "sp_queue", + RTE_CACHE_LINE_SIZE) != 0) { + sc->sp = NULL; + sc->eq = NULL; + sc->def_sb = NULL; + return -1; + } + + sc->spq = (struct eth_spe *)sc->spq_dma.vaddr; + +/***************************/ +/* FW DECOMPRESSION BUFFER */ +/***************************/ + + if (bnx2x_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma, + "fw_dec_buf", RTE_CACHE_LINE_SIZE) != 0) { + sc->spq = NULL; + sc->sp = NULL; + sc->eq = NULL; + sc->def_sb = NULL; + return -1; + } + + sc->gz_buf = (void *)sc->gz_buf_dma.vaddr; + } + + /*************/ + /* FASTPATHS */ + /*************/ + + /* allocate DMA memory for each fastpath structure */ + for (i = 0; i < sc->num_queues; i++) { + fp = &sc->fp[i]; + fp->sc = sc; + fp->index = i; + +/*******************/ +/* FP STATUS BLOCK */ +/*******************/ + + snprintf(buf, sizeof(buf), "fp_%d_sb", i); + if (bnx2x_dma_alloc(sc, sizeof(union bnx2x_host_hc_status_block), + &fp->sb_dma, buf, RTE_CACHE_LINE_SIZE) != 0) { + PMD_DRV_LOG(NOTICE, "Failed to alloc %s", buf); + return -1; + } else { + if (CHIP_IS_E2E3(sc)) { + fp->status_block.e2_sb = + (struct host_hc_status_block_e2 *) + fp->sb_dma.vaddr; + } else { + fp->status_block.e1x_sb = + (struct host_hc_status_block_e1x *) + fp->sb_dma.vaddr; + } + } + } + + return 0; +} + +void bnx2x_free_hsi_mem(struct bnx2x_softc *sc) +{ + struct bnx2x_fastpath *fp; + int i; + + for (i = 0; i < sc->num_queues; i++) { + fp = &sc->fp[i]; + +/*******************/ +/* FP STATUS BLOCK */ +/*******************/ + + memset(&fp->status_block, 0, sizeof(fp->status_block)); + } + + /***************************/ + /* FW DECOMPRESSION BUFFER */ + /***************************/ + + sc->gz_buf = NULL; + + /*******************/ + /* SLOW PATH QUEUE */ + /*******************/ + + sc->spq = NULL; + + /*************/ + /* SLOW PATH */ + /*************/ + + sc->sp = NULL; + + /***************/ + /* EVENT QUEUE */ + /***************/ + + sc->eq = NULL; + + /************************/ + /* DEFAULT STATUS BLOCK */ + /************************/ + + sc->def_sb = NULL; + +} + +/* +* Previous driver DMAE transaction may have occurred when pre-boot stage +* ended and boot began. This would invalidate the addresses of the +* transaction, resulting in was-error bit set in the PCI causing all +* hw-to-host PCIe transactions to timeout. 
If this happened we want to clear +* the interrupt which detected this from the pglueb and the was-done bit +*/ +static void bnx2x_prev_interrupted_dmae(struct bnx2x_softc *sc) +{ + uint32_t val; + + if (!CHIP_IS_E1x(sc)) { + val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { + REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, + 1 << SC_FUNC(sc)); + } + } +} + +static int bnx2x_prev_mcp_done(struct bnx2x_softc *sc) +{ + uint32_t rc = bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, + DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); + if (!rc) { + PMD_DRV_LOG(NOTICE, "MCP response failure, aborting"); + return -1; + } + + return 0; +} + +static struct bnx2x_prev_list_node *bnx2x_prev_path_get_entry(struct bnx2x_softc *sc) +{ + struct bnx2x_prev_list_node *tmp; + + LIST_FOREACH(tmp, &bnx2x_prev_list, node) { + if ((sc->pcie_bus == tmp->bus) && + (sc->pcie_device == tmp->slot) && + (SC_PATH(sc) == tmp->path)) { + return tmp; + } + } + + return NULL; +} + +static uint8_t bnx2x_prev_is_path_marked(struct bnx2x_softc *sc) +{ + struct bnx2x_prev_list_node *tmp; + int rc = FALSE; + + rte_spinlock_lock(&bnx2x_prev_mtx); + + tmp = bnx2x_prev_path_get_entry(sc); + if (tmp) { + if (tmp->aer) { + PMD_DRV_LOG(DEBUG, + "Path %d/%d/%d was marked by AER", + sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); + } else { + rc = TRUE; + PMD_DRV_LOG(DEBUG, + "Path %d/%d/%d was already cleaned from previous drivers", + sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); + } + } + + rte_spinlock_unlock(&bnx2x_prev_mtx); + + return rc; +} + +static int bnx2x_prev_mark_path(struct bnx2x_softc *sc, uint8_t after_undi) +{ + struct bnx2x_prev_list_node *tmp; + + rte_spinlock_lock(&bnx2x_prev_mtx); + + /* Check whether the entry for this path already exists */ + tmp = bnx2x_prev_path_get_entry(sc); + if (tmp) { + if (!tmp->aer) { + PMD_DRV_LOG(DEBUG, + "Re-marking AER in path %d/%d/%d", + sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); + } else { + PMD_DRV_LOG(DEBUG, + "Removing AER indication from path %d/%d/%d", + sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); + tmp->aer = 0; + } + + rte_spinlock_unlock(&bnx2x_prev_mtx); + return 0; + } + + rte_spinlock_unlock(&bnx2x_prev_mtx); + + /* Create an entry for this path and add it */ + tmp = rte_malloc("", sizeof(struct bnx2x_prev_list_node), + RTE_CACHE_LINE_SIZE); + if (!tmp) { + PMD_DRV_LOG(NOTICE, "Failed to allocate 'bnx2x_prev_list_node'"); + return -1; + } + + tmp->bus = sc->pcie_bus; + tmp->slot = sc->pcie_device; + tmp->path = SC_PATH(sc); + tmp->aer = 0; + tmp->undi = after_undi ? 
(1 << SC_PORT(sc)) : 0; + + rte_spinlock_lock(&bnx2x_prev_mtx); + + LIST_INSERT_HEAD(&bnx2x_prev_list, tmp, node); + + rte_spinlock_unlock(&bnx2x_prev_mtx); + + return 0; +} + +static int bnx2x_do_flr(struct bnx2x_softc *sc) +{ + int i; + + /* only E2 and onwards support FLR */ + if (CHIP_IS_E1x(sc)) { + PMD_DRV_LOG(WARNING, "FLR not supported in E1H"); + return -1; + } + + /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ + if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { + PMD_DRV_LOG(WARNING, + "FLR not supported by BC_VER: 0x%08x", + sc->devinfo.bc_ver); + return -1; + } + + /* Wait for Transaction Pending bit clean */ + for (i = 0; i < 4; i++) { + if (i) { + DELAY(((1 << (i - 1)) * 100) * 1000); + } + + if (!bnx2x_is_pcie_pending(sc)) { + goto clear; + } + } + + PMD_DRV_LOG(NOTICE, "PCIE transaction is not cleared, " + "proceeding with reset anyway"); + +clear: + bnx2x_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0); + + return 0; +} + +struct bnx2x_mac_vals { + uint32_t xmac_addr; + uint32_t xmac_val; + uint32_t emac_addr; + uint32_t emac_val; + uint32_t umac_addr; + uint32_t umac_val; + uint32_t bmac_addr; + uint32_t bmac_val[2]; +}; + +static void +bnx2x_prev_unload_close_mac(struct bnx2x_softc *sc, struct bnx2x_mac_vals *vals) +{ + uint32_t val, base_addr, offset, mask, reset_reg; + uint8_t mac_stopped = FALSE; + uint8_t port = SC_PORT(sc); + uint32_t wb_data[2]; + + /* reset addresses as they also mark which values were changed */ + vals->bmac_addr = 0; + vals->umac_addr = 0; + vals->xmac_addr = 0; + vals->emac_addr = 0; + + reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2); + + if (!CHIP_IS_E3(sc)) { + val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); + mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; + if ((mask & reset_reg) && val) { + base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM + : NIG_REG_INGRESS_BMAC0_MEM; + offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL + : BIGMAC_REGISTER_BMAC_CONTROL; + + /* + * use rd/wr since we cannot use dmae. This is safe + * since MCP won't access the bus due to the request + * to unload, and no function on the path can be + * loaded at this time. + */ + wb_data[0] = REG_RD(sc, base_addr + offset); + wb_data[1] = REG_RD(sc, base_addr + offset + 0x4); + vals->bmac_addr = base_addr + offset; + vals->bmac_val[0] = wb_data[0]; + vals->bmac_val[1] = wb_data[1]; + wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE; + REG_WR(sc, vals->bmac_addr, wb_data[0]); + REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]); + } + + vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc) * 4; + vals->emac_val = REG_RD(sc, vals->emac_addr); + REG_WR(sc, vals->emac_addr, 0); + mac_stopped = TRUE; + } else { + if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { + base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; + val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI); + REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, + val & ~(1 << 1)); + REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, + val | (1 << 1)); + vals->xmac_addr = base_addr + XMAC_REG_CTRL; + vals->xmac_val = REG_RD(sc, vals->xmac_addr); + REG_WR(sc, vals->xmac_addr, 0); + mac_stopped = TRUE; + } + + mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; + if (mask & reset_reg) { + base_addr = SC_PORT(sc) ? 
GRCBASE_UMAC1 : GRCBASE_UMAC0; + vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; + vals->umac_val = REG_RD(sc, vals->umac_addr); + REG_WR(sc, vals->umac_addr, 0); + mac_stopped = TRUE; + } + } + + if (mac_stopped) { + DELAY(20000); + } +} + +#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) +#define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff) +#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) +#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) + +static void +bnx2x_prev_unload_undi_inc(struct bnx2x_softc *sc, uint8_t port, uint8_t inc) +{ + uint16_t rcq, bd; + uint32_t tmp_reg = REG_RD(sc, BNX2X_PREV_UNDI_PROD_ADDR(port)); + + rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc; + bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc; + + tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd); + REG_WR(sc, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg); +} + +static int bnx2x_prev_unload_common(struct bnx2x_softc *sc) +{ + uint32_t reset_reg, tmp_reg = 0, rc; + uint8_t prev_undi = FALSE; + struct bnx2x_mac_vals mac_vals; + uint32_t timer_count = 1000; + uint32_t prev_brb; + + /* + * It is possible a previous function received 'common' answer, + * but hasn't loaded yet, therefore creating a scenario of + * multiple functions receiving 'common' on the same path. + */ + memset(&mac_vals, 0, sizeof(mac_vals)); + + if (bnx2x_prev_is_path_marked(sc)) { + return bnx2x_prev_mcp_done(sc); + } + + reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1); + + /* Reset should be performed after BRB is emptied */ + if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { + /* Close the MAC Rx to prevent BRB from filling up */ + bnx2x_prev_unload_close_mac(sc, &mac_vals); + + /* close LLH filters towards the BRB */ + elink_set_rx_filter(&sc->link_params, 0); + + /* + * Check if the UNDI driver was previously loaded. 
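+ * (e.g. a PXE/UNDI boot agent that did not shut down cleanly).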
+ * UNDI driver initializes CID offset for normal bell to 0x7 + */ + if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { + tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST); + if (tmp_reg == 0x7) { + PMD_DRV_LOG(DEBUG, "UNDI previously loaded"); + prev_undi = TRUE; + /* clear the UNDI indication */ + REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0); + /* clear possible idle check errors */ + REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0); + } + } + + /* wait until BRB is empty */ + tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); + while (timer_count) { + prev_brb = tmp_reg; + + tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); + if (!tmp_reg) { + break; + } + + PMD_DRV_LOG(DEBUG, "BRB still has 0x%08x", tmp_reg); + + /* reset timer as long as BRB actually gets emptied */ + if (prev_brb > tmp_reg) { + timer_count = 1000; + } else { + timer_count--; + } + + /* If UNDI resides in memory, manually increment it */ + if (prev_undi) { + bnx2x_prev_unload_undi_inc(sc, SC_PORT(sc), 1); + } + + DELAY(10); + } + + if (!timer_count) { + PMD_DRV_LOG(NOTICE, "Failed to empty BRB"); + } + } + + /* No packets are in the pipeline, path is ready for reset */ + bnx2x_reset_common(sc); + + if (mac_vals.xmac_addr) { + REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val); + } + if (mac_vals.umac_addr) { + REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val); + } + if (mac_vals.emac_addr) { + REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val); + } + if (mac_vals.bmac_addr) { + REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]); + REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); + } + + rc = bnx2x_prev_mark_path(sc, prev_undi); + if (rc) { + bnx2x_prev_mcp_done(sc); + return rc; + } + + return bnx2x_prev_mcp_done(sc); +} + +static int bnx2x_prev_unload_uncommon(struct bnx2x_softc *sc) +{ + int rc; + + /* Test if previous unload process was already finished for this path */ + if (bnx2x_prev_is_path_marked(sc)) { + return bnx2x_prev_mcp_done(sc); + } + + /* + * If function has FLR capabilities, and existing FW version matches + * the one required, then FLR will be sufficient to clean any residue + * left by previous driver + */ + rc = bnx2x_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION); + if (!rc) { + /* fw version is good */ + rc = bnx2x_do_flr(sc); + } + + if (!rc) { + /* FLR was performed */ + return 0; + } + + PMD_DRV_LOG(INFO, "Could not FLR"); + + /* Close the MCP request, return failure */ + rc = bnx2x_prev_mcp_done(sc); + if (!rc) { + rc = BNX2X_PREV_WAIT_NEEDED; + } + + return rc; +} + +static int bnx2x_prev_unload(struct bnx2x_softc *sc) +{ + int time_counter = 10; + uint32_t fw, hw_lock_reg, hw_lock_val; + uint32_t rc = 0; + + /* + * Clear HW from errors which may have resulted from an interrupted + * DMAE transaction. 
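+ * (bnx2x_prev_interrupted_dmae() below clears the PGLUE_B was-error status.)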
+ */ + bnx2x_prev_interrupted_dmae(sc); + + /* Release previously held locks */ + if (SC_FUNC(sc) <= 5) + hw_lock_reg = (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8); + else + hw_lock_reg = + (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8); + + hw_lock_val = (REG_RD(sc, hw_lock_reg)); + if (hw_lock_val) { + if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { + REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, + (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc))); + } + REG_WR(sc, hw_lock_reg, 0xffffffff); + } + + if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) { + REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0); + } + + do { + /* Lock MCP using an unload request */ + fw = bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); + if (!fw) { + PMD_DRV_LOG(NOTICE, "MCP response failure, aborting"); + rc = -1; + break; + } + + if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { + rc = bnx2x_prev_unload_common(sc); + break; + } + + /* non-common reply from MCP might require looping */ + rc = bnx2x_prev_unload_uncommon(sc); + if (rc != BNX2X_PREV_WAIT_NEEDED) { + break; + } + + DELAY(20000); + } while (--time_counter); + + if (!time_counter || rc) { + PMD_DRV_LOG(NOTICE, "Failed to unload previous driver!"); + rc = -1; + } + + return rc; +} + +static void +bnx2x_dcbx_set_state(struct bnx2x_softc *sc, uint8_t dcb_on, uint32_t dcbx_enabled) +{ + if (!CHIP_IS_E1x(sc)) { + sc->dcb_state = dcb_on; + sc->dcbx_enabled = dcbx_enabled; + } else { + sc->dcb_state = FALSE; + sc->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID; + } + PMD_DRV_LOG(DEBUG, + "DCB state [%s:%s]", + dcb_on ? "ON" : "OFF", + (dcbx_enabled == BNX2X_DCBX_ENABLED_OFF) ? "user-mode" : + (dcbx_enabled == + BNX2X_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" + : (dcbx_enabled == + BNX2X_DCBX_ENABLED_ON_NEG_ON) ? + "on-chip with negotiation" : "invalid"); +} + +static int bnx2x_set_qm_cid_count(struct bnx2x_softc *sc) +{ + int cid_count = BNX2X_L2_MAX_CID(sc); + + if (CNIC_SUPPORT(sc)) { + cid_count += CNIC_CID_MAX; + } + + return roundup(cid_count, QM_CID_ROUND); +} + +static void bnx2x_init_multi_cos(struct bnx2x_softc *sc) +{ + int pri, cos; + + uint32_t pri_map = 0; + + for (pri = 0; pri < BNX2X_MAX_PRIORITY; pri++) { + cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4)); + if (cos < sc->max_cos) { + sc->prio_to_cos[pri] = cos; + } else { + PMD_DRV_LOG(WARNING, + "Invalid COS %d for priority %d " + "(max COS is %d), setting to 0", cos, pri, + (sc->max_cos - 1)); + sc->prio_to_cos[pri] = 0; + } + } +} + +static int bnx2x_pci_get_caps(struct bnx2x_softc *sc) +{ + struct { + uint8_t id; + uint8_t next; + } pci_cap; + uint16_t status; + struct bnx2x_pci_cap *cap; + + cap = sc->pci_caps = rte_zmalloc("caps", sizeof(struct bnx2x_pci_cap), + RTE_CACHE_LINE_SIZE); + if (!cap) { + PMD_DRV_LOG(NOTICE, "Failed to allocate memory"); + return -ENOMEM; + } + +#ifndef __FreeBSD__ + pci_read(sc, PCI_STATUS, &status, 2); + if (!(status & PCI_STATUS_CAP_LIST)) { +#else + pci_read(sc, PCIR_STATUS, &status, 2); + if (!(status & PCIM_STATUS_CAPPRESENT)) { +#endif + PMD_DRV_LOG(NOTICE, "PCIe capability reading failed"); + return -1; + } + +#ifndef __FreeBSD__ + pci_read(sc, PCI_CAPABILITY_LIST, &pci_cap.next, 1); +#else + pci_read(sc, PCIR_CAP_PTR, &pci_cap.next, 1); +#endif + while (pci_cap.next) { + cap->addr = pci_cap.next & ~3; + pci_read(sc, pci_cap.next & ~3, &pci_cap, 2); + if (pci_cap.id == 0xff) + break; + cap->id = pci_cap.id; + cap->type = BNX2X_PCI_CAP; + cap->next = rte_zmalloc("pci_cap", + sizeof(struct bnx2x_pci_cap), + RTE_CACHE_LINE_SIZE); + if (!cap->next) { + 
PMD_DRV_LOG(NOTICE, "Failed to allocate memory"); + return -ENOMEM; + } + cap = cap->next; + } + + return 0; +} + +static void bnx2x_init_rte(struct bnx2x_softc *sc) +{ + if (IS_VF(sc)) { + sc->max_tx_queues = BNX2X_VF_MAX_QUEUES_PER_VF; + sc->max_rx_queues = BNX2X_VF_MAX_QUEUES_PER_VF; + } else { + sc->max_tx_queues = 128; + sc->max_rx_queues = 128; + } +} + +#define FW_HEADER_LEN 104 +#define FW_NAME_57711 "/lib/firmware/bnx2x/bnx2x-e1h-7.2.51.0.fw" +#define FW_NAME_57810 "/lib/firmware/bnx2x/bnx2x-e2-7.2.51.0.fw" + +void bnx2x_load_firmware(struct bnx2x_softc *sc) +{ + const char *fwname; + int f; + struct stat st; + + fwname = sc->devinfo.device_id == BNX2X_DEV_ID_57711 + ? FW_NAME_57711 : FW_NAME_57810; + f = open(fwname, O_RDONLY); + if (f < 0) { + PMD_DRV_LOG(NOTICE, "Can't open firmware file"); + return; + } + + if (fstat(f, &st) < 0) { + PMD_DRV_LOG(NOTICE, "Can't stat firmware file"); + close(f); + return; + } + + sc->firmware = rte_zmalloc("bnx2x_fw", st.st_size, RTE_CACHE_LINE_SIZE); + if (!sc->firmware) { + PMD_DRV_LOG(NOTICE, "Can't allocate memory for firmware"); + close(f); + return; + } + + if (read(f, sc->firmware, st.st_size) != st.st_size) { + PMD_DRV_LOG(NOTICE, "Can't read firmware data"); + close(f); + return; + } + close(f); + + sc->fw_len = st.st_size; + if (sc->fw_len < FW_HEADER_LEN) { + PMD_DRV_LOG(NOTICE, "Invalid fw size: %" PRIu64, sc->fw_len); + return; + } + PMD_DRV_LOG(DEBUG, "fw_len = %" PRIu64, sc->fw_len); +} + +static void +bnx2x_data_to_init_ops(uint8_t * data, struct raw_op *dst, uint32_t len) +{ + uint32_t *src = (uint32_t *) data; + uint32_t i, j, tmp; + + for (i = 0, j = 0; i < len / 8; ++i, j += 2) { + tmp = rte_be_to_cpu_32(src[j]); + dst[i].op = (tmp >> 24) & 0xFF; + dst[i].offset = tmp & 0xFFFFFF; + dst[i].raw_data = rte_be_to_cpu_32(src[j + 1]); + } +} + +static void +bnx2x_data_to_init_offsets(uint8_t * data, uint16_t * dst, uint32_t len) +{ + uint16_t *src = (uint16_t *) data; + uint32_t i; + + for (i = 0; i < len / 2; ++i) + dst[i] = rte_be_to_cpu_16(src[i]); +} + +static void bnx2x_data_to_init_data(uint8_t * data, uint32_t * dst, uint32_t len) +{ + uint32_t *src = (uint32_t *) data; + uint32_t i; + + for (i = 0; i < len / 4; ++i) + dst[i] = rte_be_to_cpu_32(src[i]); +} + +static void bnx2x_data_to_iro_array(uint8_t * data, struct iro *dst, uint32_t len) +{ + uint32_t *src = (uint32_t *) data; + uint32_t i, j, tmp; + + for (i = 0, j = 0; i < len / sizeof(struct iro); ++i, ++j) { + dst[i].base = rte_be_to_cpu_32(src[j++]); + tmp = rte_be_to_cpu_32(src[j]); + dst[i].m1 = (tmp >> 16) & 0xFFFF; + dst[i].m2 = tmp & 0xFFFF; + ++j; + tmp = rte_be_to_cpu_32(src[j]); + dst[i].m3 = (tmp >> 16) & 0xFFFF; + dst[i].size = tmp & 0xFFFF; + } +} + +/* +* Device attach function. +* +* Allocates device resources, performs secondary chip identification, and +* initializes driver instance variables. This function is called from driver +* load after a successful probe. +* +* Returns: +* 0 = Success, >0 = Failure +*/ +int bnx2x_attach(struct bnx2x_softc *sc) +{ + int rc; + + PMD_DRV_LOG(DEBUG, "Starting attach..."); + + rc = bnx2x_pci_get_caps(sc); + if (rc) { + PMD_DRV_LOG(NOTICE, "PCIe caps reading was failed"); + return rc; + } + + sc->state = BNX2X_STATE_CLOSED; + + /* Init RTE stuff */ + bnx2x_init_rte(sc); + + pci_write_long(sc, PCICFG_GRC_ADDRESS, PCICFG_VENDOR_ID_OFFSET); + + sc->igu_base_addr = IS_VF(sc) ? 
PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM; + + /* get PCI capabilites */ + bnx2x_probe_pci_caps(sc); + + if (sc->devinfo.pcie_msix_cap_reg != 0) { + uint32_t val; + pci_read(sc, + (sc->devinfo.pcie_msix_cap_reg + PCIR_MSIX_CTRL), &val, + 2); + sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE); + } else { + sc->igu_sb_cnt = 1; + } + + if (IS_PF(sc)) { +/* get device info and set params */ + if (bnx2x_get_device_info(sc) != 0) { + PMD_DRV_LOG(NOTICE, "getting device info"); + return -ENXIO; + } + +/* get phy settings from shmem and 'and' against admin settings */ + bnx2x_get_phy_info(sc); + } else { +/* Left mac of VF unfilled, PF should set it for VF */ + memset(sc->link_params.mac_addr, 0, ETHER_ADDR_LEN); + } + + sc->wol = 0; + + /* set the default MTU (changed via ifconfig) */ + sc->mtu = ETHER_MTU; + + bnx2x_set_modes_bitmap(sc); + + /* need to reset chip if UNDI was active */ + if (IS_PF(sc) && !BNX2X_NOMCP(sc)) { +/* init fw_seq */ + sc->fw_seq = + (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + bnx2x_prev_unload(sc); + } + + bnx2x_dcbx_set_state(sc, FALSE, BNX2X_DCBX_ENABLED_OFF); + + /* calculate qm_cid_count */ + sc->qm_cid_count = bnx2x_set_qm_cid_count(sc); + + sc->max_cos = 1; + bnx2x_init_multi_cos(sc); + + return 0; +} + +static void +bnx2x_igu_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id, uint8_t segment, + uint16_t index, uint8_t op, uint8_t update) +{ + uint32_t igu_addr = sc->igu_base_addr; + igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8; + bnx2x_igu_ack_sb_gen(sc, segment, index, op, update, igu_addr); +} + +static void +bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id, uint8_t storm, + uint16_t index, uint8_t op, uint8_t update) +{ + if (unlikely(sc->devinfo.int_block == INT_BLOCK_HC)) + bnx2x_hc_ack_sb(sc, igu_sb_id, storm, index, op, update); + else { + uint8_t segment; + if (CHIP_INT_MODE_IS_BC(sc)) { + segment = storm; + } else if (igu_sb_id != sc->igu_dsb_id) { + segment = IGU_SEG_ACCESS_DEF; + } else if (storm == ATTENTION_ID) { + segment = IGU_SEG_ACCESS_ATTN; + } else { + segment = IGU_SEG_ACCESS_DEF; + } + bnx2x_igu_ack_sb(sc, igu_sb_id, segment, index, op, update); + } +} + +static void +bnx2x_igu_clear_sb_gen(struct bnx2x_softc *sc, uint8_t func, uint8_t idu_sb_id, + uint8_t is_pf) +{ + uint32_t data, ctl, cnt = 100; + uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; + uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; + uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + + (idu_sb_id / 32) * 4; + uint32_t sb_bit = 1 << (idu_sb_id % 32); + uint32_t func_encode = func | + (is_pf ? 
1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; + uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; + + /* Not supported in BC mode */ + if (CHIP_INT_MODE_IS_BC(sc)) { + return; + } + + data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup << + IGU_REGULAR_CLEANUP_TYPE_SHIFT) | + IGU_REGULAR_CLEANUP_SET | IGU_REGULAR_BCLEANUP); + + ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) | + (func_encode << IGU_CTRL_REG_FID_SHIFT) | + (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT)); + + REG_WR(sc, igu_addr_data, data); + + mb(); + + PMD_DRV_LOG(DEBUG, "write 0x%08x to IGU(via GRC) addr 0x%x", + ctl, igu_addr_ctl); + REG_WR(sc, igu_addr_ctl, ctl); + + mb(); + + /* wait for clean up to finish */ + while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) { + DELAY(20000); + } + + if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) { + PMD_DRV_LOG(DEBUG, + "Unable to finish IGU cleanup: " + "idu_sb_id %d offset %d bit %d (cnt %d)", + idu_sb_id, idu_sb_id / 32, idu_sb_id % 32, cnt); + } +} + +static void bnx2x_igu_clear_sb(struct bnx2x_softc *sc, uint8_t idu_sb_id) +{ + bnx2x_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/); +} + +/*******************/ +/* ECORE CALLBACKS */ +/*******************/ + +static void bnx2x_reset_common(struct bnx2x_softc *sc) +{ + uint32_t val = 0x1400; + + PMD_INIT_FUNC_TRACE(); + + /* reset_common */ + REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), + 0xd3ffff7f); + + if (CHIP_IS_E3(sc)) { + val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; + val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; + } + + REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val); +} + +static void bnx2x_common_init_phy(struct bnx2x_softc *sc) +{ + uint32_t shmem_base[2]; + uint32_t shmem2_base[2]; + + /* Avoid common init in case MFW supports LFA */ + if (SHMEM2_RD(sc, size) > + (uint32_t) offsetof(struct shmem2_region, + lfa_host_addr[SC_PORT(sc)])) { + return; + } + + shmem_base[0] = sc->devinfo.shmem_base; + shmem2_base[0] = sc->devinfo.shmem2_base; + + if (!CHIP_IS_E1x(sc)) { + shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr); + shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr); + } + + elink_common_init_phy(sc, shmem_base, shmem2_base, + sc->devinfo.chip_id, 0); +} + +static void bnx2x_pf_disable(struct bnx2x_softc *sc) +{ + uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); + + val &= ~IGU_PF_CONF_FUNC_EN; + + REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); + REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); + REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0); +} + +static void bnx2x_init_pxp(struct bnx2x_softc *sc) +{ + uint16_t devctl; + int r_order, w_order; + + devctl = bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL); + + w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5); + r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12); + + ecore_init_pxp_arb(sc, r_order, w_order); +} + +static uint32_t bnx2x_get_pretend_reg(struct bnx2x_softc *sc) +{ + uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0; + uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base); + return base + (SC_ABS_FUNC(sc)) * stride; +} + +/* + * Called only on E1H or E2. + * When pretending to be PF, the pretend value is the function number 0..7. + * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID + * combination. 
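+ * The caller is expected to restore its own function number afterwards
+ * (bnx2x_pretend_func(sc, SC_ABS_FUNC(sc))); see the callers in
+ * bnx2x_init_hw_common().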
+ */ +static int bnx2x_pretend_func(struct bnx2x_softc *sc, uint16_t pretend_func_val) +{ + uint32_t pretend_reg; + + if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) + return -1; + + /* get my own pretend register */ + pretend_reg = bnx2x_get_pretend_reg(sc); + REG_WR(sc, pretend_reg, pretend_func_val); + REG_RD(sc, pretend_reg); + return 0; +} + +static void bnx2x_setup_fan_failure_detection(struct bnx2x_softc *sc) +{ + int is_required; + uint32_t val; + int port; + + is_required = 0; + val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) & + SHARED_HW_CFG_FAN_FAILURE_MASK); + + if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) { + is_required = 1; + } + /* + * The fan failure mechanism is usually related to the PHY type since + * the power consumption of the board is affected by the PHY. Currently, + * fan is required for most designs with SFX7101, BNX2X8727 and BNX2X8481. + */ + else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) { + for (port = PORT_0; port < PORT_MAX; port++) { + is_required |= elink_fan_failure_det_req(sc, + sc-> + devinfo.shmem_base, + sc-> + devinfo.shmem2_base, + port); + } + } + + if (is_required == 0) { + return; + } + + /* Fan failure is indicated by SPIO 5 */ + bnx2x_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); + + /* set to active low mode */ + val = REG_RD(sc, MISC_REG_SPIO_INT); + val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); + REG_WR(sc, MISC_REG_SPIO_INT, val); + + /* enable interrupt to signal the IGU */ + val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); + val |= MISC_SPIO_SPIO5; + REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val); +} + +static void bnx2x_enable_blocks_attention(struct bnx2x_softc *sc) +{ + uint32_t val; + + REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); + if (!CHIP_IS_E1x(sc)) { + REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40); + } else { + REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0); + } + REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); + REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); + /* + * mask read length error interrupts in brb for parser + * (parsing unit and 'checksum and crc' unit) + * these errors are legal (PU reads fixed length and CAC can cause + * read length error on truncated packets) + */ + REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00); + REG_WR(sc, QM_REG_QM_INT_MASK, 0); + REG_WR(sc, TM_REG_TM_INT_MASK, 0); + REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0); + REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0); + REG_WR(sc, XCM_REG_XCM_INT_MASK, 0); + /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */ + /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */ + REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0); + REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0); + REG_WR(sc, UCM_REG_UCM_INT_MASK, 0); + /* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */ + /* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */ + REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); + REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0); + REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0); + REG_WR(sc, CCM_REG_CCM_INT_MASK, 0); + /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */ + /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */ + + val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | + PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | + PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN); + if (!CHIP_IS_E1x(sc)) { + val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | + PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED); + } + REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val); + + REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0); + REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0); + REG_WR(sc, TCM_REG_TCM_INT_MASK, 0); + /* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */ + + if (!CHIP_IS_E1x(sc)) { +/* enable VFC attentions: bits 11 
and 12, bits 31:13 reserved */ + REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); + } + + REG_WR(sc, CDU_REG_CDU_INT_MASK, 0); + REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0); + /* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */ + REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ +} + +/** + * bnx2x_init_hw_common - initialize the HW at the COMMON phase. + * + * @sc: driver handle + */ +static int bnx2x_init_hw_common(struct bnx2x_softc *sc) +{ + uint8_t abs_func_id; + uint32_t val; + + PMD_DRV_LOG(DEBUG, "starting common init for func %d", SC_ABS_FUNC(sc)); + + /* + * take the RESET lock to protect undi_unload flow from accessing + * registers while we are resetting the chip + */ + bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); + + bnx2x_reset_common(sc); + + REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff); + + val = 0xfffc; + if (CHIP_IS_E3(sc)) { + val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; + val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; + } + + REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val); + + bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); + + ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON); + + if (!CHIP_IS_E1x(sc)) { +/* + * 4-port mode or 2-port mode we need to turn off master-enable for + * everyone. After that we turn it back on for self. So, we disregard + * multi-function, and always disable all functions on the given path, + * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1 + */ + for (abs_func_id = SC_PATH(sc); + abs_func_id < (E2_FUNC_MAX * 2); abs_func_id += 2) { + if (abs_func_id == SC_ABS_FUNC(sc)) { + REG_WR(sc, + PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, + 1); + continue; + } + + bnx2x_pretend_func(sc, abs_func_id); + + /* clear pf enable */ + bnx2x_pf_disable(sc); + + bnx2x_pretend_func(sc, SC_ABS_FUNC(sc)); + } + } + + ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON); + + ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON); + bnx2x_init_pxp(sc); + +#ifdef __BIG_ENDIAN + REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1); + REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1); + REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1); + REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1); + REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1); + /* make sure this value is 0 */ + REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0); + + //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1); + REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1); + REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1); + REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1); + REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1); +#endif + + ecore_ilt_init_page_size(sc, INITOP_SET); + + if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) { + REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1); + } + + /* let the HW do it's magic... */ + DELAY(100000); + + /* finish PXP init */ + + val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE); + if (val != 1) { + PMD_DRV_LOG(NOTICE, "PXP2 CFG failed"); + return -1; + } + val = REG_RD(sc, PXP2_REG_RD_INIT_DONE); + if (val != 1) { + PMD_DRV_LOG(NOTICE, "PXP2 RD_INIT failed"); + return -1; + } + + /* + * Timer bug workaround for E2 only. We need to set the entire ILT to have + * entries with value "0" and valid bit on. This needs to be done by the + * first PF that is loaded in a path (i.e. common phase) + */ + if (!CHIP_IS_E1x(sc)) { +/* + * In E2 there is a bug in the timers block that can cause function 6 / 7 + * (i.e. vnic3) to start even if it is marked as "scan-off". + * This occurs when a different function (func2,3) is being marked + * as "scan-off". Real-life scenario for example: if a driver is being + * load-unloaded while func6,7 are down. 
This will cause the timer to access + * the ilt, translate to a logical address and send a request to read/write. + * Since the ilt for the function that is down is not valid, this will cause + * a translation error which is unrecoverable. + * The Workaround is intended to make sure that when this happens nothing + * fatal will occur. The workaround: + * 1. First PF driver which loads on a path will: + * a. After taking the chip out of reset, by using pretend, + * it will write "0" to the following registers of + * the other vnics. + * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); + * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); + * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); + * And for itself it will write '1' to + * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable + * dmae-operations (writing to pram for example.) + * note: can be done for only function 6,7 but cleaner this + * way. + * b. Write zero+valid to the entire ILT. + * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of + * VNIC3 (of that port). The range allocated will be the + * entire ILT. This is needed to prevent ILT range error. + * 2. Any PF driver load flow: + * a. ILT update with the physical addresses of the allocated + * logical pages. + * b. Wait 20msec. - note that this timeout is needed to make + * sure there are no requests in one of the PXP internal + * queues with "old" ILT addresses. + * c. PF enable in the PGLC. + * d. Clear the was_error of the PF in the PGLC. (could have + * occurred while driver was down) + * e. PF enable in the CFC (WEAK + STRONG) + * f. Timers scan enable + * 3. PF driver unload flow: + * a. Clear the Timers scan_en. + * b. Polling for scan_on=0 for that PF. + * c. Clear the PF enable bit in the PXP. + * d. Clear the PF enable in the CFC (WEAK + STRONG) + * e. Write zero+valid to all ILT entries (The valid bit must + * stay set) + * f. If this is VNIC 3 of a port then also init + * first_timers_ilt_entry to zero and last_timers_ilt_entry + * to the last enrty in the ILT. + * + * Notes: + * Currently the PF error in the PGLC is non recoverable. + * In the future the there will be a recovery routine for this error. + * Currently attention is masked. + * Having an MCP lock on the load/unload process does not guarantee that + * there is no Timer disable during Func6/7 enable. This is because the + * Timers scan is currently being cleared by the MCP on FLR. + * Step 2.d can be done only for PF6/7 and the driver can also check if + * there is error before clearing it. But the flow above is simpler and + * more general. + * All ILT entries are written by zero+valid and not just PF6/7 + * ILT entries since in the future the ILT entries allocation for + * PF-s might be dynamic. 
+ */ + struct ilt_client_info ilt_cli; + struct ecore_ilt ilt; + + memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); + memset(&ilt, 0, sizeof(struct ecore_ilt)); + +/* initialize dummy TM client */ + ilt_cli.start = 0; + ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; + ilt_cli.client_num = ILT_CLIENT_TM; + +/* + * Step 1: set zeroes to all ilt page entries with valid bit on + * Step 2: set the timers first/last ilt entry to point + * to the entire range to prevent ILT range error for 3rd/4th + * vnic (this code assumes existence of the vnic) + * + * both steps performed by call to ecore_ilt_client_init_op() + * with dummy TM client + * + * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT + * and his brother are split registers + */ + + bnx2x_pretend_func(sc, (SC_PATH(sc) + 6)); + ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR); + bnx2x_pretend_func(sc, SC_ABS_FUNC(sc)); + + REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN); + REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN); + REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); + } + + REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0); + REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0); + + if (!CHIP_IS_E1x(sc)) { + int factor = 0; + + ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON); + ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON); + +/* let the HW do it's magic... */ + do { + DELAY(200000); + val = REG_RD(sc, ATC_REG_ATC_INIT_DONE); + } while (factor-- && (val != 1)); + + if (val != 1) { + PMD_DRV_LOG(NOTICE, "ATC_INIT failed"); + return -1; + } + } + + ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON); + + /* clean the DMAE memory */ + sc->dmae_ready = 1; + ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8); + + ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON); + + ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON); + + ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON); + + ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON); + + bnx2x_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3); + bnx2x_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3); + bnx2x_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3); + bnx2x_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3); + + ecore_init_block(sc, BLOCK_QM, PHASE_COMMON); + + /* QM queues pointers table */ + ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET); + + /* soft reset pulse */ + REG_WR(sc, QM_REG_SOFT_RESET, 1); + REG_WR(sc, QM_REG_SOFT_RESET, 0); + + if (CNIC_SUPPORT(sc)) + ecore_init_block(sc, BLOCK_TM, PHASE_COMMON); + + ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON); + REG_WR(sc, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT); + + if (!CHIP_REV_IS_SLOW(sc)) { +/* enable hw interrupt from doorbell Q */ + REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); + } + + ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); + + ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); + REG_WR(sc, PRS_REG_A_PRSU_20, 0xf); + REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan); + + if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) { + if (IS_MF_AFEX(sc)) { + /* + * configure that AFEX and VLAN headers must be + * received in AFEX mode + */ + REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE); + REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA); + REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6); + REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926); + REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4); + } else { + /* + * Bit-map indicating which L2 hdrs may appear + * after the basic Ethernet header + */ + REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, + sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6); + } + } + + ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON); + ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON); + ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON); + ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON); + + if (!CHIP_IS_E1x(sc)) { +/* reset VFC memories */ + REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, + VFC_MEMORIES_RST_REG_CAM_RST | + VFC_MEMORIES_RST_REG_RAM_RST); + REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, + VFC_MEMORIES_RST_REG_CAM_RST | + VFC_MEMORIES_RST_REG_RAM_RST); + + DELAY(20000); + } + + ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON); + ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON); + ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON); + ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON); + + /* sync semi rtc */ + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x80000000); + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x80000000); + + ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON); + ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON); + ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON); + + if (!CHIP_IS_E1x(sc)) { + if (IS_MF_AFEX(sc)) { + /* + * configure that AFEX and VLAN headers must be + * sent in AFEX mode + */ + REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE); + REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA); + REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6); + REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926); + REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4); + } else { + REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, + sc->devinfo.mf_info.path_has_ovlan ? 7 : 6); + } + } + + REG_WR(sc, SRC_REG_SOFT_RST, 1); + + ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON); + + if (CNIC_SUPPORT(sc)) { + REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672); + REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); + REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b); + REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a); + REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116); + REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b); + REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf); + REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); + REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f); + REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7); + } + REG_WR(sc, SRC_REG_SOFT_RST, 0); + + if (sizeof(union cdu_context) != 1024) { +/* we currently assume that a context is 1024 bytes */ + PMD_DRV_LOG(NOTICE, + "please adjust the size of cdu_context(%ld)", + (long)sizeof(union cdu_context)); + } + + ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON); + val = (4 << 24) + (0 << 12) + 1024; + REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val); + + ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON); + + REG_WR(sc, CFC_REG_INIT_REG, 0x7FF); + /* enable context validation interrupt from CFC */ + REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); + + /* set the thresholds to prevent CFC/CDU race */ + REG_WR(sc, CFC_REG_DEBUG0, 0x20020000); + ecore_init_block(sc, BLOCK_HC, PHASE_COMMON); + + if (!CHIP_IS_E1x(sc) && BNX2X_NOMCP(sc)) { + REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36); + } + + ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON); + ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON); + + /* Reset PCIE errors for debug */ + REG_WR(sc, 0x2814, 0xffffffff); + REG_WR(sc, 0x3820, 0xffffffff); + + if (!CHIP_IS_E1x(sc)) { + REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, + (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | + PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); + REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, + (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | + PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | + PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); + REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, + (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | + 
PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | + PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); + } + + ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON); + + /* in E3 this done in per-port section */ + if (!CHIP_IS_E3(sc)) + REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc)); + + if (CHIP_IS_E1H(sc)) { +/* not applicable for E2 (and above ...) */ + REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc)); + } + + if (CHIP_REV_IS_SLOW(sc)) { + DELAY(200000); + } + + /* finish CFC init */ + val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10); + if (val != 1) { + PMD_DRV_LOG(NOTICE, "CFC LL_INIT failed"); + return -1; + } + val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10); + if (val != 1) { + PMD_DRV_LOG(NOTICE, "CFC AC_INIT failed"); + return -1; + } + val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10); + if (val != 1) { + PMD_DRV_LOG(NOTICE, "CFC CAM_INIT failed"); + return -1; + } + REG_WR(sc, CFC_REG_DEBUG0, 0); + + bnx2x_setup_fan_failure_detection(sc); + + /* clear PXP2 attentions */ + REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); + + bnx2x_enable_blocks_attention(sc); + + if (!CHIP_REV_IS_SLOW(sc)) { + ecore_enable_blocks_parity(sc); + } + + if (!BNX2X_NOMCP(sc)) { + if (CHIP_IS_E1x(sc)) { + bnx2x_common_init_phy(sc); + } + } + + return 0; +} + +/** + * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase. + * + * @sc: driver handle + */ +static int bnx2x_init_hw_common_chip(struct bnx2x_softc *sc) +{ + int rc = bnx2x_init_hw_common(sc); + + if (rc) { + return rc; + } + + /* In E2 2-PORT mode, same ext phy is used for the two paths */ + if (!BNX2X_NOMCP(sc)) { + bnx2x_common_init_phy(sc); + } + + return 0; +} + +static int bnx2x_init_hw_port(struct bnx2x_softc *sc) +{ + int port = SC_PORT(sc); + int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; + uint32_t low, high; + uint32_t val; + + PMD_DRV_LOG(DEBUG, "starting port init for port %d", port); + + REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, 0); + + ecore_init_block(sc, BLOCK_MISC, init_phase); + ecore_init_block(sc, BLOCK_PXP, init_phase); + ecore_init_block(sc, BLOCK_PXP2, init_phase); + + /* + * Timers bug workaround: disables the pf_master bit in pglue at + * common phase, we need to enable it here before any dmae access are + * attempted. Therefore we manually added the enable-master to the + * port phase (it also happens in the function phase) + */ + if (!CHIP_IS_E1x(sc)) { + REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); + } + + ecore_init_block(sc, BLOCK_ATC, init_phase); + ecore_init_block(sc, BLOCK_DMAE, init_phase); + ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); + ecore_init_block(sc, BLOCK_QM, init_phase); + + ecore_init_block(sc, BLOCK_TCM, init_phase); + ecore_init_block(sc, BLOCK_UCM, init_phase); + ecore_init_block(sc, BLOCK_CCM, init_phase); + ecore_init_block(sc, BLOCK_XCM, init_phase); + + /* QM cid (connection) count */ + ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET); + + if (CNIC_SUPPORT(sc)) { + ecore_init_block(sc, BLOCK_TM, init_phase); + REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port * 4, 20); + REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port * 4, 31); + } + + ecore_init_block(sc, BLOCK_DORQ, init_phase); + + ecore_init_block(sc, BLOCK_BRB1, init_phase); + + if (CHIP_IS_E1H(sc)) { + if (IS_MF(sc)) { + low = (BNX2X_ONE_PORT(sc) ? 160 : 246); + } else if (sc->mtu > 4096) { + if (BNX2X_ONE_PORT(sc)) { + low = 160; + } else { + val = sc->mtu; + /* (24*1024 + val*4)/256 */ + low = (96 + (val / 64) + ((val % 64) ? 1 : 0)); + } + } else { + low = (BNX2X_ONE_PORT(sc) ? 
80 : 160); + } + high = (low + 56); /* 14*1024/256 */ + REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port * 4, low); + REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port * 4, high); + } + + if (CHIP_IS_MODE_4_PORT(sc)) { + REG_WR(sc, SC_PORT(sc) ? + BRB1_REG_MAC_GUARANTIED_1 : + BRB1_REG_MAC_GUARANTIED_0, 40); + } + + ecore_init_block(sc, BLOCK_PRS, init_phase); + if (CHIP_IS_E3B0(sc)) { + if (IS_MF_AFEX(sc)) { + /* configure headers for AFEX mode */ + if (SC_PORT(sc)) { + REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC_PORT_1, + 0xE); + REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0_PORT_1, + 0x6); + REG_WR(sc, PRS_REG_MUST_HAVE_HDRS_PORT_1, 0xA); + } else { + REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC_PORT_0, + 0xE); + REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0_PORT_0, + 0x6); + REG_WR(sc, PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); + } + } else { + /* Ovlan exists only if we are in multi-function + + * switch-dependent mode, in switch-independent there + * is no ovlan headers + */ + REG_WR(sc, SC_PORT(sc) ? + PRS_REG_HDRS_AFTER_BASIC_PORT_1 : + PRS_REG_HDRS_AFTER_BASIC_PORT_0, + (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6)); + } + } + + ecore_init_block(sc, BLOCK_TSDM, init_phase); + ecore_init_block(sc, BLOCK_CSDM, init_phase); + ecore_init_block(sc, BLOCK_USDM, init_phase); + ecore_init_block(sc, BLOCK_XSDM, init_phase); + + ecore_init_block(sc, BLOCK_TSEM, init_phase); + ecore_init_block(sc, BLOCK_USEM, init_phase); + ecore_init_block(sc, BLOCK_CSEM, init_phase); + ecore_init_block(sc, BLOCK_XSEM, init_phase); + + ecore_init_block(sc, BLOCK_UPB, init_phase); + ecore_init_block(sc, BLOCK_XPB, init_phase); + + ecore_init_block(sc, BLOCK_PBF, init_phase); + + if (CHIP_IS_E1x(sc)) { +/* configure PBF to work without PAUSE mtu 9000 */ + REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port * 4, 0); + +/* update threshold */ + REG_WR(sc, PBF_REG_P0_ARB_THRSH + port * 4, (9040 / 16)); +/* update init credit */ + REG_WR(sc, PBF_REG_P0_INIT_CRD + port * 4, + (9040 / 16) + 553 - 22); + +/* probe changes */ + REG_WR(sc, PBF_REG_INIT_P0 + port * 4, 1); + DELAY(50); + REG_WR(sc, PBF_REG_INIT_P0 + port * 4, 0); + } + + if (CNIC_SUPPORT(sc)) { + ecore_init_block(sc, BLOCK_SRC, init_phase); + } + + ecore_init_block(sc, BLOCK_CDU, init_phase); + ecore_init_block(sc, BLOCK_CFC, init_phase); + ecore_init_block(sc, BLOCK_HC, init_phase); + ecore_init_block(sc, BLOCK_IGU, init_phase); + ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); + /* init aeu_mask_attn_func_0/1: + * - SF mode: bits 3-7 are masked. only bits 0-2 are in use + * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF + * bits 4-7 are used for "per vn group attention" */ + val = IS_MF(sc) ? 0xF7 : 0x7; + val |= 0x10; + REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port * 4, val); + + ecore_init_block(sc, BLOCK_NIG, init_phase); + + if (!CHIP_IS_E1x(sc)) { +/* Bit-map indicating which L2 hdrs may appear after the + * basic Ethernet header + */ + if (IS_MF_AFEX(sc)) { + REG_WR(sc, SC_PORT(sc) ? + NIG_REG_P1_HDRS_AFTER_BASIC : + NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); + } else { + REG_WR(sc, SC_PORT(sc) ? + NIG_REG_P1_HDRS_AFTER_BASIC : + NIG_REG_P0_HDRS_AFTER_BASIC, + IS_MF_SD(sc) ? 7 : 6); + } + + if (CHIP_IS_E3(sc)) { + REG_WR(sc, SC_PORT(sc) ? + NIG_REG_LLH1_MF_MODE : + NIG_REG_LLH_MF_MODE, IS_MF(sc)); + } + } + if (!CHIP_IS_E3(sc)) { + REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 1); + } + + /* 0x2 disable mf_ov, 0x1 enable */ + REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port * 4, + (IS_MF_SD(sc) ? 
0x1 : 0x2)); + + if (!CHIP_IS_E1x(sc)) { + val = 0; + switch (sc->devinfo.mf_info.mf_mode) { + case MULTI_FUNCTION_SD: + val = 1; + break; + case MULTI_FUNCTION_SI: + case MULTI_FUNCTION_AFEX: + val = 2; + break; + } + + REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE : + NIG_REG_LLH0_CLS_TYPE), val); + } + REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port * 4, 0); + REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port * 4, 0); + REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port * 4, 1); + + /* If SPIO5 is set to generate interrupts, enable it for this port */ + val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); + if (val & MISC_SPIO_SPIO5) { + uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); + val = REG_RD(sc, reg_addr); + val |= AEU_INPUTS_ATTN_BITS_SPIO5; + REG_WR(sc, reg_addr, val); + } + + return 0; +} + +static uint32_t +bnx2x_flr_clnup_reg_poll(struct bnx2x_softc *sc, uint32_t reg, + uint32_t expected, uint32_t poll_count) +{ + uint32_t cur_cnt = poll_count; + uint32_t val; + + while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) { + DELAY(FLR_WAIT_INTERVAL); + } + + return val; +} + +static int +bnx2x_flr_clnup_poll_hw_counter(struct bnx2x_softc *sc, uint32_t reg, + __rte_unused const char *msg, uint32_t poll_cnt) +{ + uint32_t val = bnx2x_flr_clnup_reg_poll(sc, reg, 0, poll_cnt); + + if (val != 0) { + PMD_DRV_LOG(NOTICE, "%s usage count=%d", msg, val); + return -1; + } + + return 0; +} + +/* Common routines with VF FLR cleanup */ +static uint32_t bnx2x_flr_clnup_poll_count(struct bnx2x_softc *sc) +{ + /* adjust polling timeout */ + if (CHIP_REV_IS_EMUL(sc)) { + return FLR_POLL_CNT * 2000; + } + + if (CHIP_REV_IS_FPGA(sc)) { + return FLR_POLL_CNT * 120; + } + + return FLR_POLL_CNT; +} + +static int bnx2x_poll_hw_usage_counters(struct bnx2x_softc *sc, uint32_t poll_cnt) +{ + /* wait for CFC PF usage-counter to zero (includes all the VFs) */ + if (bnx2x_flr_clnup_poll_hw_counter(sc, + CFC_REG_NUM_LCIDS_INSIDE_PF, + "CFC PF usage counter timed out", + poll_cnt)) { + return -1; + } + + /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ + if (bnx2x_flr_clnup_poll_hw_counter(sc, + DORQ_REG_PF_USAGE_CNT, + "DQ PF usage counter timed out", + poll_cnt)) { + return -1; + } + + /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ + if (bnx2x_flr_clnup_poll_hw_counter(sc, + QM_REG_PF_USG_CNT_0 + 4 * SC_FUNC(sc), + "QM PF usage counter timed out", + poll_cnt)) { + return -1; + } + + /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ + if (bnx2x_flr_clnup_poll_hw_counter(sc, + TM_REG_LIN0_VNIC_UC + 4 * SC_PORT(sc), + "Timers VNIC usage counter timed out", + poll_cnt)) { + return -1; + } + + if (bnx2x_flr_clnup_poll_hw_counter(sc, + TM_REG_LIN0_NUM_SCANS + + 4 * SC_PORT(sc), + "Timers NUM_SCANS usage counter timed out", + poll_cnt)) { + return -1; + } + + /* Wait DMAE PF usage counter to zero */ + if (bnx2x_flr_clnup_poll_hw_counter(sc, + dmae_reg_go_c[INIT_DMAE_C(sc)], + "DMAE dommand register timed out", + poll_cnt)) { + return -1; + } + + return 0; +} + +#define OP_GEN_PARAM(param) \ + (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) +#define OP_GEN_TYPE(type) \ + (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) +#define OP_GEN_AGG_VECT(index) \ + (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) + +static int +bnx2x_send_final_clnup(struct bnx2x_softc *sc, uint8_t clnup_func, + uint32_t poll_cnt) +{ + uint32_t op_gen_command = 0; + uint32_t comp_addr = (BAR_CSTRORM_INTMEM + + 
CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func)); + int ret = 0; + + if (REG_RD(sc, comp_addr)) { + PMD_DRV_LOG(NOTICE, + "Cleanup complete was not 0 before sending"); + return -1; + } + + op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); + op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); + op_gen_command |= OP_GEN_AGG_VECT(clnup_func); + op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; + + REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command); + + if (bnx2x_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) { + PMD_DRV_LOG(NOTICE, "FW final cleanup did not succeed"); + PMD_DRV_LOG(DEBUG, "At timeout completion address contained %x", + (REG_RD(sc, comp_addr))); + rte_panic("FLR cleanup failed"); + return -1; + } + + /* Zero completion for nxt FLR */ + REG_WR(sc, comp_addr, 0); + + return ret; +} + +static void +bnx2x_pbf_pN_buf_flushed(struct bnx2x_softc *sc, struct pbf_pN_buf_regs *regs, + uint32_t poll_count) +{ + uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start; + uint32_t cur_cnt = poll_count; + + crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed); + crd = crd_start = REG_RD(sc, regs->crd); + init_crd = REG_RD(sc, regs->init_crd); + + while ((crd != init_crd) && + ((uint32_t) ((int32_t) crd_freed - (int32_t) crd_freed_start) < + (init_crd - crd_start))) { + if (cur_cnt--) { + DELAY(FLR_WAIT_INTERVAL); + crd = REG_RD(sc, regs->crd); + crd_freed = REG_RD(sc, regs->crd_freed); + } else { + break; + } + } +} + +static void +bnx2x_pbf_pN_cmd_flushed(struct bnx2x_softc *sc, struct pbf_pN_cmd_regs *regs, + uint32_t poll_count) +{ + uint32_t occup, to_free, freed, freed_start; + uint32_t cur_cnt = poll_count; + + occup = to_free = REG_RD(sc, regs->lines_occup); + freed = freed_start = REG_RD(sc, regs->lines_freed); + + while (occup && + ((uint32_t) ((int32_t) freed - (int32_t) freed_start) < + to_free)) { + if (cur_cnt--) { + DELAY(FLR_WAIT_INTERVAL); + occup = REG_RD(sc, regs->lines_occup); + freed = REG_RD(sc, regs->lines_freed); + } else { + break; + } + } +} + +static void bnx2x_tx_hw_flushed(struct bnx2x_softc *sc, uint32_t poll_count) +{ + struct pbf_pN_cmd_regs cmd_regs[] = { + {0, (CHIP_IS_E3B0(sc)) ? + PBF_REG_TQ_OCCUPANCY_Q0 : PBF_REG_P0_TQ_OCCUPANCY, + (CHIP_IS_E3B0(sc)) ? + PBF_REG_TQ_LINES_FREED_CNT_Q0 : PBF_REG_P0_TQ_LINES_FREED_CNT}, + {1, (CHIP_IS_E3B0(sc)) ? + PBF_REG_TQ_OCCUPANCY_Q1 : PBF_REG_P1_TQ_OCCUPANCY, + (CHIP_IS_E3B0(sc)) ? + PBF_REG_TQ_LINES_FREED_CNT_Q1 : PBF_REG_P1_TQ_LINES_FREED_CNT}, + {4, (CHIP_IS_E3B0(sc)) ? + PBF_REG_TQ_OCCUPANCY_LB_Q : PBF_REG_P4_TQ_OCCUPANCY, + (CHIP_IS_E3B0(sc)) ? + PBF_REG_TQ_LINES_FREED_CNT_LB_Q : + PBF_REG_P4_TQ_LINES_FREED_CNT} + }; + + struct pbf_pN_buf_regs buf_regs[] = { + {0, (CHIP_IS_E3B0(sc)) ? + PBF_REG_INIT_CRD_Q0 : PBF_REG_P0_INIT_CRD, + (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_Q0 : PBF_REG_P0_CREDIT, + (CHIP_IS_E3B0(sc)) ? + PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : + PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, + {1, (CHIP_IS_E3B0(sc)) ? + PBF_REG_INIT_CRD_Q1 : PBF_REG_P1_INIT_CRD, + (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_Q1 : PBF_REG_P1_CREDIT, + (CHIP_IS_E3B0(sc)) ? + PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : + PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, + {4, (CHIP_IS_E3B0(sc)) ? + PBF_REG_INIT_CRD_LB_Q : PBF_REG_P4_INIT_CRD, + (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_LB_Q : PBF_REG_P4_CREDIT, + (CHIP_IS_E3B0(sc)) ? 
+ PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
+ PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
+ };
+
+ uint32_t i;
+
+ /* Verify the command queues are flushed P0, P1, P4 */
+ for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
+ bnx2x_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
+ }
+
+ /* Verify the transmission buffers are flushed P0, P1, P4 */
+ for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
+ bnx2x_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
+ }
+}
+
+static void bnx2x_hw_enable_status(struct bnx2x_softc *sc)
+{
+ __rte_unused uint32_t val;
+
+ val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
+ PMD_DRV_LOG(DEBUG, "CFC_REG_WEAK_ENABLE_PF is 0x%x", val);
+
+ val = REG_RD(sc, PBF_REG_DISABLE_PF);
+ PMD_DRV_LOG(DEBUG, "PBF_REG_DISABLE_PF is 0x%x", val);
+
+ val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
+ PMD_DRV_LOG(DEBUG, "IGU_REG_PCI_PF_MSI_EN is 0x%x", val);
+
+ val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
+ PMD_DRV_LOG(DEBUG, "IGU_REG_PCI_PF_MSIX_EN is 0x%x", val);
+
+ val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
+ PMD_DRV_LOG(DEBUG, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x", val);
+
+ val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
+ PMD_DRV_LOG(DEBUG, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x", val);
+
+ val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
+ PMD_DRV_LOG(DEBUG, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x", val);
+
+ val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+ PMD_DRV_LOG(DEBUG, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x",
+ val);
+}
+
+/**
+ * bnx2x_pf_flr_clnup
+ * a. re-enable target read on the PF
+ * b. poll cfc per function usage counter
+ * c. poll the qm per function usage counter
+ * d. poll the tm per function usage counter
+ * e. poll the tm per function scan-done indication
+ * f. clear the dmae channel associated with the PF
+ * g. zero the igu 'trailing edge' and 'leading edge' regs (attentions)
+ * h.
call the common flr cleanup code with -1 (pf indication) + */ +static int bnx2x_pf_flr_clnup(struct bnx2x_softc *sc) +{ + uint32_t poll_cnt = bnx2x_flr_clnup_poll_count(sc); + + /* Re-enable PF target read access */ + REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); + + /* Poll HW usage counters */ + if (bnx2x_poll_hw_usage_counters(sc, poll_cnt)) { + return -1; + } + + /* Zero the igu 'trailing edge' and 'leading edge' */ + + /* Send the FW cleanup command */ + if (bnx2x_send_final_clnup(sc, (uint8_t) SC_FUNC(sc), poll_cnt)) { + return -1; + } + + /* ATC cleanup */ + + /* Verify TX hw is flushed */ + bnx2x_tx_hw_flushed(sc, poll_cnt); + + /* Wait 100ms (not adjusted according to platform) */ + DELAY(100000); + + /* Verify no pending pci transactions */ + if (bnx2x_is_pcie_pending(sc)) { + PMD_DRV_LOG(NOTICE, "PCIE Transactions still pending"); + } + + /* Debug */ + bnx2x_hw_enable_status(sc); + + /* + * Master enable - Due to WB DMAE writes performed before this + * register is re-initialized as part of the regular function init + */ + REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); + + return 0; +} + +static int bnx2x_init_hw_func(struct bnx2x_softc *sc) +{ + int port = SC_PORT(sc); + int func = SC_FUNC(sc); + int init_phase = PHASE_PF0 + func; + struct ecore_ilt *ilt = sc->ilt; + uint16_t cdu_ilt_start; + uint32_t addr, val; + uint32_t main_mem_base, main_mem_size, main_mem_prty_clr; + int main_mem_width, rc; + uint32_t i; + + PMD_DRV_LOG(DEBUG, "starting func init for func %d", func); + + /* FLR cleanup */ + if (!CHIP_IS_E1x(sc)) { + rc = bnx2x_pf_flr_clnup(sc); + if (rc) { + PMD_DRV_LOG(NOTICE, "FLR cleanup failed!"); + return rc; + } + } + + /* set MSI reconfigure capability */ + if (sc->devinfo.int_block == INT_BLOCK_HC) { + addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); + val = REG_RD(sc, addr); + val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; + REG_WR(sc, addr, val); + } + + ecore_init_block(sc, BLOCK_PXP, init_phase); + ecore_init_block(sc, BLOCK_PXP2, init_phase); + + ilt = sc->ilt; + cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; + + for (i = 0; i < L2_ILT_LINES(sc); i++) { + ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt; + ilt->lines[cdu_ilt_start + i].page_mapping = + (phys_addr_t)sc->context[i].vcxt_dma.paddr; + ilt->lines[cdu_ilt_start + i].size = sc->context[i].size; + } + ecore_ilt_init_op(sc, INITOP_SET); + + REG_WR(sc, PRS_REG_NIC_MODE, 1); + + if (!CHIP_IS_E1x(sc)) { + uint32_t pf_conf = IGU_PF_CONF_FUNC_EN; + +/* Turn on a single ISR mode in IGU if driver is going to use + * INT#x or MSI + */ + if ((sc->interrupt_mode != INTR_MODE_MSIX) + || (sc->interrupt_mode != INTR_MODE_SINGLE_MSIX)) { + pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; + } + +/* + * Timers workaround bug: function init part. 
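+ * (This is the per-function load-flow part, i.e. step 2 of the E2
+ * timers-block workaround described in detail in bnx2x_init_hw_common()
+ * above.)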
+ * Need to wait 20msec after initializing ILT, + * needed to make sure there are no requests in + * one of the PXP internal queues with "old" ILT addresses + */ + DELAY(20000); + +/* + * Master enable - Due to WB DMAE writes performed before this + * register is re-initialized as part of the regular function + * init + */ + REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); +/* Enable the function in IGU */ + REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf); + } + + sc->dmae_ready = 1; + + ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); + + if (!CHIP_IS_E1x(sc)) + REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); + + ecore_init_block(sc, BLOCK_ATC, init_phase); + ecore_init_block(sc, BLOCK_DMAE, init_phase); + ecore_init_block(sc, BLOCK_NIG, init_phase); + ecore_init_block(sc, BLOCK_SRC, init_phase); + ecore_init_block(sc, BLOCK_MISC, init_phase); + ecore_init_block(sc, BLOCK_TCM, init_phase); + ecore_init_block(sc, BLOCK_UCM, init_phase); + ecore_init_block(sc, BLOCK_CCM, init_phase); + ecore_init_block(sc, BLOCK_XCM, init_phase); + ecore_init_block(sc, BLOCK_TSEM, init_phase); + ecore_init_block(sc, BLOCK_USEM, init_phase); + ecore_init_block(sc, BLOCK_CSEM, init_phase); + ecore_init_block(sc, BLOCK_XSEM, init_phase); + + if (!CHIP_IS_E1x(sc)) + REG_WR(sc, QM_REG_PF_EN, 1); + + if (!CHIP_IS_E1x(sc)) { + REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + } + ecore_init_block(sc, BLOCK_QM, init_phase); + + ecore_init_block(sc, BLOCK_TM, init_phase); + ecore_init_block(sc, BLOCK_DORQ, init_phase); + + ecore_init_block(sc, BLOCK_BRB1, init_phase); + ecore_init_block(sc, BLOCK_PRS, init_phase); + ecore_init_block(sc, BLOCK_TSDM, init_phase); + ecore_init_block(sc, BLOCK_CSDM, init_phase); + ecore_init_block(sc, BLOCK_USDM, init_phase); + ecore_init_block(sc, BLOCK_XSDM, init_phase); + ecore_init_block(sc, BLOCK_UPB, init_phase); + ecore_init_block(sc, BLOCK_XPB, init_phase); + ecore_init_block(sc, BLOCK_PBF, init_phase); + if (!CHIP_IS_E1x(sc)) + REG_WR(sc, PBF_REG_DISABLE_PF, 0); + + ecore_init_block(sc, BLOCK_CDU, init_phase); + + ecore_init_block(sc, BLOCK_CFC, init_phase); + + if (!CHIP_IS_E1x(sc)) + REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1); + + if (IS_MF(sc)) { + REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port * 8, 1); + REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8, OVLAN(sc)); + } + + ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); + + /* HC init per function */ + if (sc->devinfo.int_block == INT_BLOCK_HC) { + if (CHIP_IS_E1H(sc)) { + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0); + + REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, 0); + REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, 0); + } + ecore_init_block(sc, BLOCK_HC, init_phase); + + } else { + uint32_t num_segs, sb_idx, prod_offset; + + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func * 4, 0); + + if (!CHIP_IS_E1x(sc)) { + REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); + REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); + } + + ecore_init_block(sc, BLOCK_IGU, init_phase); + + if (!CHIP_IS_E1x(sc)) { + int dsb_idx = 0; + /** + * Producer memory: + * E2 mode: address 0-135 match to the mapping memory; + * 136 - PF0 default prod; 137 - PF1 default prod; + * 138 - PF2 default prod; 139 - PF3 default prod; + * 140 - PF0 attn prod; 141 - PF1 attn prod; + * 142 - PF2 attn prod; 143 - PF3 attn prod; + * 144-147 reserved. 
+ * + * E1.5 mode - In backward compatible mode; + * for non default SB; each even line in the memory + * holds the U producer and each odd line hold + * the C producer. The first 128 producers are for + * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 + * producers are for the DSB for each PF. + * Each PF has five segments: (the order inside each + * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; + * 132-135 C prods; 136-139 X prods; 140-143 T prods; + * 144-147 attn prods; + */ + /* non-default-status-blocks */ + num_segs = CHIP_INT_MODE_IS_BC(sc) ? + IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; + for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) { + prod_offset = (sc->igu_base_sb + sb_idx) * + num_segs; + + for (i = 0; i < num_segs; i++) { + addr = IGU_REG_PROD_CONS_MEMORY + + (prod_offset + i) * 4; + REG_WR(sc, addr, 0); + } + /* send consumer update with value 0 */ + bnx2x_ack_sb(sc, sc->igu_base_sb + sb_idx, + USTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_igu_clear_sb(sc, sc->igu_base_sb + sb_idx); + } + + /* default-status-blocks */ + num_segs = CHIP_INT_MODE_IS_BC(sc) ? + IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; + + if (CHIP_IS_MODE_4_PORT(sc)) + dsb_idx = SC_FUNC(sc); + else + dsb_idx = SC_VN(sc); + + prod_offset = (CHIP_INT_MODE_IS_BC(sc) ? + IGU_BC_BASE_DSB_PROD + dsb_idx : + IGU_NORM_BASE_DSB_PROD + dsb_idx); + + /* + * igu prods come in chunks of E1HVN_MAX (4) - + * does not matters what is the current chip mode + */ + for (i = 0; i < (num_segs * E1HVN_MAX); i += E1HVN_MAX) { + addr = IGU_REG_PROD_CONS_MEMORY + + (prod_offset + i) * 4; + REG_WR(sc, addr, 0); + } + /* send consumer update with 0 */ + if (CHIP_INT_MODE_IS_BC(sc)) { + bnx2x_ack_sb(sc, sc->igu_dsb_id, + USTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(sc, sc->igu_dsb_id, + CSTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(sc, sc->igu_dsb_id, + XSTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(sc, sc->igu_dsb_id, + TSTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(sc, sc->igu_dsb_id, + ATTENTION_ID, 0, IGU_INT_NOP, 1); + } else { + bnx2x_ack_sb(sc, sc->igu_dsb_id, + USTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(sc, sc->igu_dsb_id, + ATTENTION_ID, 0, IGU_INT_NOP, 1); + } + bnx2x_igu_clear_sb(sc, sc->igu_dsb_id); + + /* !!! 
these should become driver const once + rf-tool supports split-68 const */ + REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); + REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); + REG_WR(sc, IGU_REG_SB_MASK_LSB, 0); + REG_WR(sc, IGU_REG_SB_MASK_MSB, 0); + REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0); + REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0); + } + } + + /* Reset PCIE errors for debug */ + REG_WR(sc, 0x2114, 0xffffffff); + REG_WR(sc, 0x2120, 0xffffffff); + + if (CHIP_IS_E1x(sc)) { + main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords */ + main_mem_base = HC_REG_MAIN_MEMORY + + SC_PORT(sc) * (main_mem_size * 4); + main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; + main_mem_width = 8; + + val = REG_RD(sc, main_mem_prty_clr); + if (val) { + PMD_DRV_LOG(DEBUG, + "Parity errors in HC block during function init (0x%x)!", + val); + } + +/* Clear "false" parity errors in MSI-X table */ + for (i = main_mem_base; + i < main_mem_base + main_mem_size * 4; + i += main_mem_width) { + bnx2x_read_dmae(sc, i, main_mem_width / 4); + bnx2x_write_dmae(sc, BNX2X_SP_MAPPING(sc, wb_data), + i, main_mem_width / 4); + } +/* Clear HC parity attention */ + REG_RD(sc, main_mem_prty_clr); + } + + /* Enable STORMs SP logging */ + REG_WR8(sc, BAR_USTRORM_INTMEM + + USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); + REG_WR8(sc, BAR_TSTRORM_INTMEM + + TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); + REG_WR8(sc, BAR_CSTRORM_INTMEM + + CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); + REG_WR8(sc, BAR_XSTRORM_INTMEM + + XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); + + elink_phy_probe(&sc->link_params); + + return 0; +} + +static void bnx2x_link_reset(struct bnx2x_softc *sc) +{ + if (!BNX2X_NOMCP(sc)) { + elink_lfa_reset(&sc->link_params, &sc->link_vars); + } else { + if (!CHIP_REV_IS_SLOW(sc)) { + PMD_DRV_LOG(WARNING, + "Bootcode is missing - cannot reset link"); + } + } +} + +static void bnx2x_reset_port(struct bnx2x_softc *sc) +{ + int port = SC_PORT(sc); + uint32_t val; + + /* reset physical Link */ + bnx2x_link_reset(sc); + + REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, 0); + + /* Do not rcv packets to BRB */ + REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port * 4, 0x0); + /* Do not direct rcv packets that are not for MCP to the BRB */ + REG_WR(sc, (port ? 
NIG_REG_LLH1_BRB1_NOT_MCP : + NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); + + /* Configure AEU */ + REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port * 4, 0); + + DELAY(100000); + + /* Check for BRB port occupancy */ + val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port * 4); + if (val) { + PMD_DRV_LOG(DEBUG, + "BRB1 is not empty, %d blocks are occupied", val); + } +} + +static void bnx2x_ilt_wr(struct bnx2x_softc *sc, uint32_t index, phys_addr_t addr) +{ + int reg; + uint32_t wb_write[2]; + + reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index * 8; + + wb_write[0] = ONCHIP_ADDR1(addr); + wb_write[1] = ONCHIP_ADDR2(addr); + REG_WR_DMAE(sc, reg, wb_write, 2); +} + +static void bnx2x_clear_func_ilt(struct bnx2x_softc *sc, uint32_t func) +{ + uint32_t i, base = FUNC_ILT_BASE(func); + for (i = base; i < base + ILT_PER_FUNC; i++) { + bnx2x_ilt_wr(sc, i, 0); + } +} + +static void bnx2x_reset_func(struct bnx2x_softc *sc) +{ + struct bnx2x_fastpath *fp; + int port = SC_PORT(sc); + int func = SC_FUNC(sc); + int i; + + /* Disable the function in the FW */ + REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); + REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); + REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); + REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); + + /* FP SBs */ + FOR_EACH_ETH_QUEUE(sc, i) { + fp = &sc->fp[i]; + REG_WR8(sc, BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), + SB_DISABLED); + } + + /* SP SB */ + REG_WR8(sc, BAR_CSTRORM_INTMEM + + CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), SB_DISABLED); + + for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) { + REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), + 0); + } + + /* Configure IGU */ + if (sc->devinfo.int_block == INT_BLOCK_HC) { + REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, 0); + REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, 0); + } else { + REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); + REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); + } + + if (CNIC_LOADED(sc)) { +/* Disable Timer scan */ + REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port * 4, 0); +/* + * Wait for at least 10ms and up to 2 second for the timers + * scan to complete + */ + for (i = 0; i < 200; i++) { + DELAY(10000); + if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port * 4)) + break; + } + } + + /* Clear ILT */ + bnx2x_clear_func_ilt(sc, func); + + /* + * Timers workaround bug for E2: if this is vnic-3, + * we need to set the entire ilt range for this timers. 
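+ * This is the unload-flow counterpart (step 3.f of the workaround notes
+ * in bnx2x_init_hw_common()): the dummy TM client below re-points the
+ * timers first/last ILT entries at the entire ILT range.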
+ */ + if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) { + struct ilt_client_info ilt_cli; +/* use dummy TM client */ + memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); + ilt_cli.start = 0; + ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; + ilt_cli.client_num = ILT_CLIENT_TM; + + ecore_ilt_boundry_init_op(sc, &ilt_cli, 0); + } + + /* this assumes that reset_port() called before reset_func() */ + if (!CHIP_IS_E1x(sc)) { + bnx2x_pf_disable(sc); + } + + sc->dmae_ready = 0; +} + +static void bnx2x_release_firmware(struct bnx2x_softc *sc) +{ + rte_free(sc->init_ops); + rte_free(sc->init_ops_offsets); + rte_free(sc->init_data); + rte_free(sc->iro_array); +} + +static int bnx2x_init_firmware(struct bnx2x_softc *sc) +{ + uint32_t len, i; + uint8_t *p = sc->firmware; + uint32_t off[24]; + + for (i = 0; i < 24; ++i) + off[i] = rte_be_to_cpu_32(*((uint32_t *) sc->firmware + i)); + + len = off[0]; + sc->init_ops = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE); + if (!sc->init_ops) + goto alloc_failed; + bnx2x_data_to_init_ops(p + off[1], sc->init_ops, len); + + len = off[2]; + sc->init_ops_offsets = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE); + if (!sc->init_ops_offsets) + goto alloc_failed; + bnx2x_data_to_init_offsets(p + off[3], sc->init_ops_offsets, len); + + len = off[4]; + sc->init_data = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE); + if (!sc->init_data) + goto alloc_failed; + bnx2x_data_to_init_data(p + off[5], sc->init_data, len); + + sc->tsem_int_table_data = p + off[7]; + sc->tsem_pram_data = p + off[9]; + sc->usem_int_table_data = p + off[11]; + sc->usem_pram_data = p + off[13]; + sc->csem_int_table_data = p + off[15]; + sc->csem_pram_data = p + off[17]; + sc->xsem_int_table_data = p + off[19]; + sc->xsem_pram_data = p + off[21]; + + len = off[22]; + sc->iro_array = rte_zmalloc("", len, RTE_CACHE_LINE_SIZE); + if (!sc->iro_array) + goto alloc_failed; + bnx2x_data_to_iro_array(p + off[23], sc->iro_array, len); + + return 0; + +alloc_failed: + bnx2x_release_firmware(sc); + return -1; +} + +static int cut_gzip_prefix(const uint8_t * zbuf, int len) +{ +#define MIN_PREFIX_SIZE (10) + + int n = MIN_PREFIX_SIZE; + uint16_t xlen; + + if (!(zbuf[0] == 0x1f && zbuf[1] == 0x8b && zbuf[2] == Z_DEFLATED) || + len <= MIN_PREFIX_SIZE) { + return -1; + } + + /* optional extra fields are present */ + if (zbuf[3] & 0x4) { + xlen = zbuf[13]; + xlen <<= 8; + xlen += zbuf[12]; + + n += xlen; + } + /* file name is present */ + if (zbuf[3] & 0x8) { + while ((zbuf[n++] != 0) && (n < len)) ; + } + + return n; +} + +static int ecore_gunzip(struct bnx2x_softc *sc, const uint8_t * zbuf, int len) +{ + int ret; + int data_begin = cut_gzip_prefix(zbuf, len); + + PMD_DRV_LOG(DEBUG, "ecore_gunzip %d", len); + + if (data_begin <= 0) { + PMD_DRV_LOG(NOTICE, "bad gzip prefix"); + return -1; + } + + memset(&zlib_stream, 0, sizeof(zlib_stream)); + zlib_stream.next_in = zbuf + data_begin; + zlib_stream.avail_in = len - data_begin; + zlib_stream.next_out = sc->gz_buf; + zlib_stream.avail_out = FW_BUF_SIZE; + + ret = inflateInit2(&zlib_stream, -MAX_WBITS); + if (ret != Z_OK) { + PMD_DRV_LOG(NOTICE, "zlib inflateInit2 error"); + return ret; + } + + ret = inflate(&zlib_stream, Z_FINISH); + if ((ret != Z_STREAM_END) && (ret != Z_OK)) { + PMD_DRV_LOG(NOTICE, "zlib inflate error: %d %s", ret, + zlib_stream.msg); + } + + sc->gz_outlen = zlib_stream.total_out; + if (sc->gz_outlen & 0x3) { + PMD_DRV_LOG(NOTICE, "firmware is not aligned. 
gz_outlen == %d", + sc->gz_outlen); + } + sc->gz_outlen >>= 2; + + inflateEnd(&zlib_stream); + + if (ret == Z_STREAM_END) + return 0; + + return ret; +} + +static void +ecore_write_dmae_phys_len(struct bnx2x_softc *sc, phys_addr_t phys_addr, + uint32_t addr, uint32_t len) +{ + bnx2x_write_dmae_phys_len(sc, phys_addr, addr, len); +} + +void +ecore_storm_memset_struct(struct bnx2x_softc *sc, uint32_t addr, size_t size, + uint32_t * data) +{ + uint8_t i; + for (i = 0; i < size / 4; i++) { + REG_WR(sc, addr + (i * 4), data[i]); + } +} + +static const char *get_ext_phy_type(uint32_t ext_phy_type) +{ + uint32_t phy_type_idx = ext_phy_type >> 8; + static const char *types[] = + { "DIRECT", "BNX2X-8071", "BNX2X-8072", "BNX2X-8073", + "BNX2X-8705", "BNX2X-8706", "BNX2X-8726", "BNX2X-8481", "SFX-7101", + "BNX2X-8727", + "BNX2X-8727-NOC", "BNX2X-84823", "NOT_CONN", "FAILURE" + }; + + if (phy_type_idx < 12) + return types[phy_type_idx]; + else if (PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN == ext_phy_type) + return types[12]; + else + return types[13]; +} + +static const char *get_state(uint32_t state) +{ + uint32_t state_idx = state >> 12; + static const char *states[] = { "CLOSED", "OPENING_WAIT4_LOAD", + "OPENING_WAIT4_PORT", "OPEN", "CLOSING_WAIT4_HALT", + "CLOSING_WAIT4_DELETE", "CLOSING_WAIT4_UNLOAD", + "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", + "UNKNOWN", "DISABLED", "DIAG", "ERROR", "UNDEFINED" + }; + + if (state_idx <= 0xF) + return states[state_idx]; + else + return states[0x10]; +} + +static const char *get_recovery_state(uint32_t state) +{ + static const char *states[] = { "NONE", "DONE", "INIT", + "WAIT", "FAILED", "NIC_LOADING" + }; + return states[state]; +} + +static const char *get_rx_mode(uint32_t mode) +{ + static const char *modes[] = { "NONE", "NORMAL", "ALLMULTI", + "PROMISC", "MAX_MULTICAST", "ERROR" + }; + + if (mode < 0x4) + return modes[mode]; + else if (BNX2X_MAX_MULTICAST == mode) + return modes[4]; + else + return modes[5]; +} + +#define BNX2X_INFO_STR_MAX 256 +static const char *get_bnx2x_flags(uint32_t flags) +{ + int i; + static const char *flag[] = { "ONE_PORT ", "NO_ISCSI ", + "NO_FCOE ", "NO_WOL ", "USING_DAC ", "USING_MSIX ", + "USING_MSI ", "DISABLE_MSI ", "UNKNOWN ", "NO_MCP ", + "SAFC_TX_FLAG ", "MF_FUNC_DIS ", "TX_SWITCHING " + }; + static char flag_str[BNX2X_INFO_STR_MAX]; + memset(flag_str, 0, BNX2X_INFO_STR_MAX); + + for (i = 0; i < 5; i++) + if (flags & (1 << i)) { + strcat(flag_str, flag[i]); + flags ^= (1 << i); + } + if (flags) { + static char unknown[BNX2X_INFO_STR_MAX]; + snprintf(unknown, 32, "Unknown flag mask %x", flags); + strcat(flag_str, unknown); + } + return flag_str; +} + +/* + * Prints useful adapter info. + */ +void bnx2x_print_adapter_info(struct bnx2x_softc *sc) +{ + int i = 0; + __rte_unused uint32_t ext_phy_type; + + PMD_INIT_FUNC_TRACE(); + if (sc->link_vars.phy_flags & PHY_XGXS_FLAG) + ext_phy_type = ELINK_XGXS_EXT_PHY_TYPE(REG_RD(sc, + sc-> + devinfo.shmem_base + + offsetof(struct + shmem_region, + dev_info.port_hw_config + [0].external_phy_config))); + else + ext_phy_type = ELINK_SERDES_EXT_PHY_TYPE(REG_RD(sc, + sc-> + devinfo.shmem_base + + + offsetof(struct + shmem_region, + dev_info.port_hw_config + [0].external_phy_config))); + + PMD_INIT_LOG(DEBUG, "\n\n===================================\n"); + /* Hardware chip info. */ + PMD_INIT_LOG(DEBUG, "%12s : %#08x", "ASIC", sc->devinfo.chip_id); + PMD_INIT_LOG(DEBUG, "%12s : %c%d", "Rev", (CHIP_REV(sc) >> 12) + 'A', + (CHIP_METAL(sc) >> 4)); + + /* Bus info. 
*/ + PMD_INIT_LOG(DEBUG, "%12s : %d, ", "Bus PCIe", sc->devinfo.pcie_link_width); + switch (sc->devinfo.pcie_link_speed) { + case 1: + PMD_INIT_LOG(DEBUG, "%23s", "2.5 Gbps"); + break; + case 2: + PMD_INIT_LOG(DEBUG, "%21s", "5 Gbps"); + break; + case 4: + PMD_INIT_LOG(DEBUG, "%21s", "8 Gbps"); + break; + default: + PMD_INIT_LOG(DEBUG, "%33s", "Unknown link speed"); + } + + /* Device features. */ + PMD_INIT_LOG(DEBUG, "%12s : ", "Flags"); + + /* Miscellaneous flags. */ + if (sc->devinfo.pcie_cap_flags & BNX2X_MSI_CAPABLE_FLAG) { + PMD_INIT_LOG(DEBUG, "%18s", "MSI"); + i++; + } + + if (sc->devinfo.pcie_cap_flags & BNX2X_MSIX_CAPABLE_FLAG) { + if (i > 0) + PMD_INIT_LOG(DEBUG, "|"); + PMD_INIT_LOG(DEBUG, "%20s", "MSI-X"); + i++; + } + + if (IS_PF(sc)) { + PMD_INIT_LOG(DEBUG, "%12s : ", "Queues"); + switch (sc->sp->rss_rdata.rss_mode) { + case ETH_RSS_MODE_DISABLED: + PMD_INIT_LOG(DEBUG, "%19s", "None"); + break; + case ETH_RSS_MODE_REGULAR: + PMD_INIT_LOG(DEBUG, "%18s : %d", "RSS", sc->num_queues); + break; + default: + PMD_INIT_LOG(DEBUG, "%22s", "Unknown"); + break; + } + } + + /* RTE and Driver versions */ + PMD_INIT_LOG(DEBUG, "%12s : %s", "DPDK", + rte_version()); + PMD_INIT_LOG(DEBUG, "%12s : %s", "Driver", + bnx2x_pmd_version()); + + /* Firmware versions and device features. */ + PMD_INIT_LOG(DEBUG, "%12s : %d.%d.%d", + "Firmware", + BNX2X_5710_FW_MAJOR_VERSION, + BNX2X_5710_FW_MINOR_VERSION, + BNX2X_5710_FW_REVISION_VERSION); + PMD_INIT_LOG(DEBUG, "%12s : %s", + "Bootcode", sc->devinfo.bc_ver_str); + + PMD_INIT_LOG(DEBUG, "\n\n===================================\n"); + PMD_INIT_LOG(DEBUG, "%12s : %u", "Bnx2x Func", sc->pcie_func); + PMD_INIT_LOG(DEBUG, "%12s : %s", "Bnx2x Flags", get_bnx2x_flags(sc->flags)); + PMD_INIT_LOG(DEBUG, "%12s : %s", "DMAE Is", + (sc->dmae_ready ? "Ready" : "Not Ready")); + PMD_INIT_LOG(DEBUG, "%12s : %s", "OVLAN", (OVLAN(sc) ? "YES" : "NO")); + PMD_INIT_LOG(DEBUG, "%12s : %s", "MF", (IS_MF(sc) ? "YES" : "NO")); + PMD_INIT_LOG(DEBUG, "%12s : %u", "MTU", sc->mtu); + PMD_INIT_LOG(DEBUG, "%12s : %s", "PHY Type", get_ext_phy_type(ext_phy_type)); + PMD_INIT_LOG(DEBUG, "%12s : %x:%x:%x:%x:%x:%x", "MAC Addr", + sc->link_params.mac_addr[0], + sc->link_params.mac_addr[1], + sc->link_params.mac_addr[2], + sc->link_params.mac_addr[3], + sc->link_params.mac_addr[4], + sc->link_params.mac_addr[5]); + PMD_INIT_LOG(DEBUG, "%12s : %s", "RX Mode", get_rx_mode(sc->rx_mode)); + PMD_INIT_LOG(DEBUG, "%12s : %s", "State", get_state(sc->state)); + if (sc->recovery_state) + PMD_INIT_LOG(DEBUG, "%12s : %s", "Recovery", + get_recovery_state(sc->recovery_state)); + PMD_INIT_LOG(DEBUG, "%12s : CQ = %lx, EQ = %lx", "SPQ Left", + sc->cq_spq_left, sc->eq_spq_left); + PMD_INIT_LOG(DEBUG, "%12s : %x", "Switch", sc->link_params.switch_cfg); + PMD_INIT_LOG(DEBUG, "\n\n===================================\n"); +} diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h new file mode 100644 index 00000000..135a6eb1 --- /dev/null +++ b/drivers/net/bnx2x/bnx2x.h @@ -0,0 +1,1966 @@ +/*- + * Copyright (c) 2007-2013 Broadcom Corporation. + * + * Eric Davis <edavis@broadcom.com> + * David Christensen <davidch@broadcom.com> + * Gary Zambrano <zambrano@broadcom.com> + * + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.bnx2x_pmd for copyright and licensing details. 
+ */ + +#ifndef __BNX2X_H__ +#define __BNX2X_H__ + +#include <rte_byteorder.h> + +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN +#ifndef __LITTLE_ENDIAN +#define __LITTLE_ENDIAN RTE_LITTLE_ENDIAN +#endif +#undef __BIG_ENDIAN +#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN +#ifndef __BIG_ENDIAN +#define __BIG_ENDIAN RTE_BIG_ENDIAN +#endif +#undef __LITTLE_ENDIAN +#endif + +#include "bnx2x_ethdev.h" +#include "ecore_mfw_req.h" +#include "ecore_fw_defs.h" +#include "ecore_hsi.h" +#include "ecore_reg.h" +#include "bnx2x_stats.h" +#include "bnx2x_vfpf.h" + +#include "elink.h" + +#ifndef __FreeBSD__ +#include <linux/pci_regs.h> + +#define PCIY_PMG PCI_CAP_ID_PM +#define PCIY_MSI PCI_CAP_ID_MSI +#define PCIY_EXPRESS PCI_CAP_ID_EXP +#define PCIY_MSIX PCI_CAP_ID_MSIX +#define PCIR_EXPRESS_DEVICE_STA PCI_EXP_TYPE_RC_EC +#define PCIM_EXP_STA_TRANSACTION_PND PCI_EXP_DEVSTA_TRPND +#define PCIR_EXPRESS_LINK_STA PCI_EXP_LNKSTA +#define PCIM_LINK_STA_WIDTH PCI_EXP_LNKSTA_NLW +#define PCIM_LINK_STA_SPEED PCI_EXP_LNKSTA_CLS +#define PCIR_EXPRESS_DEVICE_CTL PCI_EXP_DEVCTL +#define PCIM_EXP_CTL_MAX_PAYLOAD PCI_EXP_DEVCTL_PAYLOAD +#define PCIM_EXP_CTL_MAX_READ_REQUEST PCI_EXP_DEVCTL_READRQ +#define PCIR_POWER_STATUS PCI_PM_CTRL +#define PCIM_PSTAT_DMASK PCI_PM_CTRL_STATE_MASK +#define PCIM_PSTAT_PME PCI_PM_CTRL_PME_STATUS +#define PCIM_PSTAT_D3 0x3 +#define PCIM_PSTAT_PMEENABLE PCI_PM_CTRL_PME_ENABLE +#define PCIR_MSIX_CTRL PCI_MSIX_FLAGS +#define PCIM_MSIXCTRL_TABLE_SIZE PCI_MSIX_FLAGS_QSIZE +#else +#include <dev/pci/pcireg.h> +#endif + +#define IFM_10G_CX4 20 /* 10GBase CX4 copper */ +#define IFM_10G_TWINAX 22 /* 10GBase Twinax copper */ +#define IFM_10G_T 26 /* 10GBase-T - RJ45 */ + +#ifndef __FreeBSD__ +#define PCIR_EXPRESS_DEVICE_STA PCI_EXP_TYPE_RC_EC +#define PCIM_EXP_STA_TRANSACTION_PND PCI_EXP_DEVSTA_TRPND +#define PCIR_EXPRESS_LINK_STA PCI_EXP_LNKSTA +#define PCIM_LINK_STA_WIDTH PCI_EXP_LNKSTA_NLW +#define PCIM_LINK_STA_SPEED PCI_EXP_LNKSTA_CLS +#define PCIR_EXPRESS_DEVICE_CTL PCI_EXP_DEVCTL +#define PCIM_EXP_CTL_MAX_PAYLOAD PCI_EXP_DEVCTL_PAYLOAD +#define PCIM_EXP_CTL_MAX_READ_REQUEST PCI_EXP_DEVCTL_READRQ +#else +#define PCIR_EXPRESS_DEVICE_STA PCIER_DEVICE_STA +#define PCIM_EXP_STA_TRANSACTION_PND PCIEM_STA_TRANSACTION_PND +#define PCIR_EXPRESS_LINK_STA PCIER_LINK_STA +#define PCIM_LINK_STA_WIDTH PCIEM_LINK_STA_WIDTH +#define PCIM_LINK_STA_SPEED PCIEM_LINK_STA_SPEED +#define PCIR_EXPRESS_DEVICE_CTL PCIER_DEVICE_CTL +#define PCIM_EXP_CTL_MAX_PAYLOAD PCIEM_CTL_MAX_PAYLOAD +#define PCIM_EXP_CTL_MAX_READ_REQUEST PCIEM_CTL_MAX_READ_REQUEST +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) +#endif +#ifndef ARRSIZE +#define ARRSIZE(arr) (sizeof(arr) / sizeof((arr)[0])) +#endif +#ifndef DIV_ROUND_UP +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) +#endif +#ifndef roundup +#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) +#endif +#ifndef ilog2 +static inline +int bnx2x_ilog2(int x) +{ + int log = 0; + x >>= 1; + + while(x) { + log++; + x >>= 1; + } + return log; +} +#define ilog2(x) bnx2x_ilog2(x) +#endif + +#include "ecore_sp.h" + +struct bnx2x_device_type { + uint16_t bnx2x_vid; + uint16_t bnx2x_did; + uint16_t bnx2x_svid; + uint16_t bnx2x_sdid; + char *bnx2x_name; +}; + +#define BNX2X_PAGE_SHIFT 12 +#define BNX2X_PAGE_SIZE (1 << BNX2X_PAGE_SHIFT) +#define BNX2X_PAGE_MASK (~(BNX2X_PAGE_SIZE - 1)) +#define BNX2X_PAGE_ALIGN(addr) ((addr + BNX2X_PAGE_SIZE - 1) & BNX2X_PAGE_MASK) + +#if BNX2X_PAGE_SIZE != 4096 +#error Page sizes other than 4KB are unsupported! 
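/*
 * Worked example (illustrative only, not part of the patch): with the 4KB
 * page definitions above, BNX2X_PAGE_ALIGN(0x1234) rounds up to 0x2000, and
 * the bnx2x_ilog2() fallback returns 12 for an input of 4096.
 */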
+#endif + +#define U64_LO(addr) ((uint32_t)(((uint64_t)(addr)) & 0xFFFFFFFF)) +#define U64_HI(addr) ((uint32_t)(((uint64_t)(addr)) >> 32)) +#define HILO_U64(hi, lo) ((((uint64_t)(hi)) << 32) + (lo)) + +/* dropless fc FW/HW related params */ +#define BRB_SIZE(sc) (CHIP_IS_E3(sc) ? 1024 : 512) +#define MAX_AGG_QS(sc) ETH_MAX_AGGREGATION_QUEUES_E1H_E2 +#define FW_DROP_LEVEL(sc) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(sc)) +#define FW_PREFETCH_CNT 16U +#define DROPLESS_FC_HEADROOM 100 + +/* + * Transmit Buffer Descriptor (tx_bd) definitions* + */ +/* NUM_TX_PAGES must be a power of 2. */ +#define TOTAL_TX_BD_PER_PAGE (BNX2X_PAGE_SIZE / sizeof(union eth_tx_bd_types)) /* 256 */ +#define USABLE_TX_BD_PER_PAGE (TOTAL_TX_BD_PER_PAGE - 1) /* 255 */ + +#define TOTAL_TX_BD(q) (TOTAL_TX_BD_PER_PAGE * q->nb_tx_pages) /* 512 */ +#define USABLE_TX_BD(q) (USABLE_TX_BD_PER_PAGE * q->nb_tx_pages) /* 510 */ +#define MAX_TX_BD(q) (TOTAL_TX_BD(q) - 1) /* 511 */ + +#define NEXT_TX_BD(x) \ + ((((x) & USABLE_TX_BD_PER_PAGE) == \ + (USABLE_TX_BD_PER_PAGE - 1)) ? (x) + 2 : (x) + 1) + +#define TX_BD(x, q) ((x) & MAX_TX_BD(q)) +#define TX_PAGE(x) (((x) & ~USABLE_TX_BD_PER_PAGE) >> 8) +#define TX_IDX(x) ((x) & USABLE_TX_BD_PER_PAGE) + +/* + * Trigger pending transmits when the number of available BDs is greater + * than 1/8 of the total number of usable BDs. + */ +#define BNX2X_TX_CLEANUP_THRESHOLD(q) (USABLE_TX_BD(q) / 8) +#define BNX2X_TX_TIMEOUT 5 + +/* + * Receive Buffer Descriptor (rx_bd) definitions* + */ +//#define NUM_RX_PAGES 1 +#define TOTAL_RX_BD_PER_PAGE (BNX2X_PAGE_SIZE / sizeof(struct eth_rx_bd)) /* 512 */ +#define USABLE_RX_BD_PER_PAGE (TOTAL_RX_BD_PER_PAGE - 2) /* 510 */ +#define RX_BD_PER_PAGE_MASK (TOTAL_RX_BD_PER_PAGE - 1) /* 511 */ +#define TOTAL_RX_BD(q) (TOTAL_RX_BD_PER_PAGE * q->nb_rx_pages) /* 512 */ +#define USABLE_RX_BD(q) (USABLE_RX_BD_PER_PAGE * q->nb_rx_pages) /* 510 */ +#define MAX_RX_BD(q) (TOTAL_RX_BD(q) - 1) /* 511 */ +#define RX_BD_NEXT_PAGE_DESC_CNT 2 + +#define NEXT_RX_BD(x) \ + ((((x) & RX_BD_PER_PAGE_MASK) == \ + (USABLE_RX_BD_PER_PAGE - 1)) ? (x) + 3 : (x) + 1) + +/* x & 0x3ff */ +#define RX_BD(x, q) ((x) & MAX_RX_BD(q)) +#define RX_PAGE(x) (((x) & ~RX_BD_PER_PAGE_MASK) >> 9) +#define RX_IDX(x) ((x) & RX_BD_PER_PAGE_MASK) + +/* + * Receive Completion Queue definitions* + */ +//#define NUM_RCQ_PAGES (NUM_RX_PAGES * 4) +#define TOTAL_RCQ_ENTRIES_PER_PAGE (BNX2X_PAGE_SIZE / sizeof(union eth_rx_cqe)) /* 128 */ +#define USABLE_RCQ_ENTRIES_PER_PAGE (TOTAL_RCQ_ENTRIES_PER_PAGE - 1) /* 127 */ +#define TOTAL_RCQ_ENTRIES(q) (TOTAL_RCQ_ENTRIES_PER_PAGE * q->nb_cq_pages) /* 512 */ +#define USABLE_RCQ_ENTRIES(q) (USABLE_RCQ_ENTRIES_PER_PAGE * q->nb_cq_pages) /* 508 */ +#define MAX_RCQ_ENTRIES(q) (TOTAL_RCQ_ENTRIES(q) - 1) /* 511 */ +#define RCQ_NEXT_PAGE_DESC_CNT 1 + +#define NEXT_RCQ_IDX(x) \ + ((((x) & USABLE_RCQ_ENTRIES_PER_PAGE) == \ + (USABLE_RCQ_ENTRIES_PER_PAGE - 1)) ? 
(x) + 2 : (x) + 1) + +#define CQE_BD_REL \ + (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) + +#define RCQ_BD_PAGES(q) \ + (q->nb_rx_pages * CQE_BD_REL) + +#define RCQ_ENTRY(x, q) ((x) & MAX_RCQ_ENTRIES(q)) +#define RCQ_PAGE(x) (((x) & ~USABLE_RCQ_ENTRIES_PER_PAGE) >> 7) +#define RCQ_IDX(x) ((x) & USABLE_RCQ_ENTRIES_PER_PAGE) + +/* + * dropless fc calculations for BDs + * Number of BDs should be as number of buffers in BRB: + * Low threshold takes into account RX_BD_NEXT_PAGE_DESC_CNT + * "next" elements on each page + */ +#define NUM_BD_REQ(sc) \ + BRB_SIZE(sc) +#define NUM_BD_PG_REQ(sc) \ + ((NUM_BD_REQ(sc) + USABLE_RX_BD_PER_PAGE - 1) / USABLE_RX_BD_PER_PAGE) +#define BD_TH_LO(sc) \ + (NUM_BD_REQ(sc) + \ + NUM_BD_PG_REQ(sc) * RX_BD_NEXT_PAGE_DESC_CNT + \ + FW_DROP_LEVEL(sc)) +#define BD_TH_HI(sc) \ + (BD_TH_LO(sc) + DROPLESS_FC_HEADROOM) +#define MIN_RX_AVAIL(sc) \ + ((sc)->dropless_fc ? BD_TH_HI(sc) + 128 : 128) + +/* + * dropless fc calculations for RCQs + * Number of RCQs should be as number of buffers in BRB: + * Low threshold takes into account RCQ_NEXT_PAGE_DESC_CNT + * "next" elements on each page + */ +#define NUM_RCQ_REQ(sc) \ + BRB_SIZE(sc) +#define NUM_RCQ_PG_REQ(sc) \ + ((NUM_RCQ_REQ(sc) + USABLE_RCQ_ENTRIES_PER_PAGE - 1) / USABLE_RCQ_ENTRIES_PER_PAGE) +#define RCQ_TH_LO(sc) \ + (NUM_RCQ_REQ(sc) + \ + NUM_RCQ_PG_REQ(sc) * RCQ_NEXT_PAGE_DESC_CNT + \ + FW_DROP_LEVEL(sc)) +#define RCQ_TH_HI(sc) \ + (RCQ_TH_LO(sc) + DROPLESS_FC_HEADROOM) + +/* Load / Unload modes */ +#define LOAD_NORMAL 0 +#define LOAD_OPEN 1 +#define LOAD_DIAG 2 +#define LOAD_LOOPBACK_EXT 3 +#define UNLOAD_NORMAL 0 +#define UNLOAD_CLOSE 1 +#define UNLOAD_RECOVERY 2 + +/* Some constants... */ +//#define MAX_PATH_NUM 2 +//#define E2_MAX_NUM_OF_VFS 64 +//#define E1H_FUNC_MAX 8 +//#define E2_FUNC_MAX 4 /* per path */ +#define MAX_VNIC_NUM 4 +#define MAX_FUNC_NUM 8 /* common to all chips */ +//#define MAX_NDSB HC_SB_MAX_SB_E2 /* max non-default status block */ +#define MAX_RSS_CHAINS 16 /* a constant for HW limit */ +#define MAX_MSI_VECTOR 8 /* a constant for HW limit */ + +#define ILT_NUM_PAGE_ENTRIES 3072 +/* + * 57711 we use whole table since we have 8 functions. + * 57712 we have only 4 functions, but use same size per func, so only half + * of the table is used. + */ +#define ILT_PER_FUNC (ILT_NUM_PAGE_ENTRIES / 8) +#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC) +/* + * the phys address is shifted right 12 bits and has an added + * 1=valid bit added to the 53rd bit + * then since this is a wide register(TM) + * we split it into two 32 bit writes + */ +#define ONCHIP_ADDR1(x) ((uint32_t)(((uint64_t)x >> 12) & 0xFFFFFFFF)) +#define ONCHIP_ADDR2(x) ((uint32_t)((1 << 20) | ((uint64_t)x >> 44))) + +/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ +#define ETH_HLEN 14 +#define ETH_OVERHEAD (ETH_HLEN + 8 + 8) +#define ETH_MIN_PACKET_SIZE 60 +#define ETH_MAX_PACKET_SIZE ETHERMTU /* 1500 */ +#define ETH_MAX_JUMBO_PACKET_SIZE 9600 +/* TCP with Timestamp Option (32) + IPv6 (40) */ + +/* max supported alignment is 256 (8 shift) */ +#define BNX2X_RX_ALIGN_SHIFT 8 +/* FW uses 2 cache lines alignment for start packet and size */ +#define BNX2X_FW_RX_ALIGN_START (1 << BNX2X_RX_ALIGN_SHIFT) +#define BNX2X_FW_RX_ALIGN_END (1 << BNX2X_RX_ALIGN_SHIFT) + +#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5) + +struct bnx2x_bar { + void *base_addr; +}; + +/* Used to manage DMA allocations. 
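 *
 * The vaddr/paddr pair keeps the CPU mapping and the bus address of the same
 * region together, so the bus address can be programmed into the chip while
 * the driver touches the memory through vaddr. A typical call of the
 * bnx2x_dma_alloc() helper declared later in this header might look as
 * follows (the size, target field and name string are illustrative choices,
 * not taken from the patch):
 *
 *   bnx2x_dma_alloc(sc, BNX2X_PAGE_SIZE, &sc->gz_buf_dma, "gz_buf",
 *                   RTE_CACHE_LINE_SIZE);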
*/ +struct bnx2x_dma { + struct bnx2x_softc *sc; + phys_addr_t paddr; + void *vaddr; + int nseg; + char msg[RTE_MEMZONE_NAMESIZE - 6]; +}; + +/* attn group wiring */ +#define MAX_DYNAMIC_ATTN_GRPS 8 + +struct attn_route { + uint32_t sig[5]; +}; + +struct iro { + uint32_t base; + uint16_t m1; + uint16_t m2; + uint16_t m3; + uint16_t size; +}; + +union bnx2x_host_hc_status_block { + /* pointer to fp status block e2 */ + struct host_hc_status_block_e2 *e2_sb; + /* pointer to fp status block e1x */ + struct host_hc_status_block_e1x *e1x_sb; +}; + +union bnx2x_db_prod { + struct doorbell_set_prod data; + uint32_t raw; +}; + +struct bnx2x_sw_tx_bd { + struct mbuf *m; + uint16_t first_bd; + uint8_t flags; +/* set on the first BD descriptor when there is a split BD */ +#define BNX2X_TSO_SPLIT_BD (1 << 0) +}; + +/* + * This is the HSI fastpath data structure. There can be up to MAX_RSS_CHAIN + * instances of the fastpath structure when using multiple queues. + */ +struct bnx2x_fastpath { + /* pointer back to parent structure */ + struct bnx2x_softc *sc; + + /* status block */ + struct bnx2x_dma sb_dma; + union bnx2x_host_hc_status_block status_block; + + phys_addr_t tx_desc_mapping; + + phys_addr_t rx_desc_mapping; + phys_addr_t rx_comp_mapping; + + uint16_t *sb_index_values; + uint16_t *sb_running_index; + uint32_t ustorm_rx_prods_offset; + + uint8_t igu_sb_id; /* status block number in HW */ + uint8_t fw_sb_id; /* status block number in FW */ + + uint32_t rx_buf_size; + + int state; +#define BNX2X_FP_STATE_CLOSED 0x01 +#define BNX2X_FP_STATE_IRQ 0x02 +#define BNX2X_FP_STATE_OPENING 0x04 +#define BNX2X_FP_STATE_OPEN 0x08 +#define BNX2X_FP_STATE_HALTING 0x10 +#define BNX2X_FP_STATE_HALTED 0x20 + + /* reference back to this fastpath queue number */ + uint8_t index; /* this is also the 'cid' */ +#define FP_IDX(fp) (fp->index) + + /* ethernet client ID (each fastpath set of RX/TX/CQE is a client) */ + uint8_t cl_id; +#define FP_CL_ID(fp) (fp->cl_id) + uint8_t cl_qzone_id; + + uint16_t fp_hc_idx; + + union bnx2x_db_prod tx_db; + + struct tstorm_per_queue_stats old_tclient; + struct ustorm_per_queue_stats old_uclient; + struct xstorm_per_queue_stats old_xclient; + struct bnx2x_eth_q_stats eth_q_stats; + struct bnx2x_eth_q_stats_old eth_q_stats_old; + + /* Pointer to the receive consumer in the status block */ + uint16_t *rx_cq_cons_sb; + + /* Pointer to the transmit consumer in the status block */ + uint16_t *tx_cons_sb; + + /* transmit timeout until chip reset */ + int watchdog_timer; + +}; /* struct bnx2x_fastpath */ + +#define BNX2X_MAX_NUM_OF_VFS 64 +#define BNX2X_VF_ID_INVALID 0xFF + +/* maximum number of fast-path interrupt contexts */ +#define FP_SB_MAX_E1x 16 +#define FP_SB_MAX_E2 HC_SB_MAX_SB_E2 + +union cdu_context { + struct eth_context eth; + char pad[1024]; +}; + +/* CDU host DB constants */ +#define CDU_ILT_PAGE_SZ_HW 2 +#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */ +#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context)) + +#define CNIC_ISCSI_CID_MAX 256 +#define CNIC_FCOE_CID_MAX 2048 +#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX) +#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS) + +#define QM_ILT_PAGE_SZ_HW 0 +#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */ +#define QM_CID_ROUND 1024 + +/* TM (timers) host DB constants */ +#define TM_ILT_PAGE_SZ_HW 0 +#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */ +/*#define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */ +#define TM_CONN_NUM 1024 +#define 
TM_ILT_SZ (8 * TM_CONN_NUM) +#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ) + +/* SRC (Searcher) host DB constants */ +#define SRC_ILT_PAGE_SZ_HW 0 +#define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */ +#define SRC_HASH_BITS 10 +#define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */ +#define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM) +#define SRC_T2_SZ SRC_ILT_SZ +#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ) + +struct hw_context { + struct bnx2x_dma vcxt_dma; + union cdu_context *vcxt; + //phys_addr_t cxt_mapping; + size_t size; +}; + +#define SM_RX_ID 0 +#define SM_TX_ID 1 + +/* defines for multiple tx priority indices */ +#define FIRST_TX_ONLY_COS_INDEX 1 +#define FIRST_TX_COS_INDEX 0 + +#define CID_TO_FP(cid, sc) ((cid) % BNX2X_NUM_NON_CNIC_QUEUES(sc)) + +#define HC_INDEX_ETH_RX_CQ_CONS 1 +#define HC_INDEX_OOO_TX_CQ_CONS 4 +#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 +#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 +#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 +#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0 + +/* congestion management fairness mode */ +#define CMNG_FNS_NONE 0 +#define CMNG_FNS_MINMAX 1 + +/* CMNG constants, as derived from system spec calculations */ +/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */ +#define DEF_MIN_RATE 100 +/* resolution of the rate shaping timer - 400 usec */ +#define RS_PERIODIC_TIMEOUT_USEC 400 +/* number of bytes in single QM arbitration cycle - + * coefficient for calculating the fairness timer */ +#define QM_ARB_BYTES 160000 +/* resolution of Min algorithm 1:100 */ +#define MIN_RES 100 +/* how many bytes above threshold for the minimal credit of Min algorithm*/ +#define MIN_ABOVE_THRESH 32768 +/* fairness algorithm integration time coefficient - + * for calculating the actual Tfair */ +#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES) +/* memory of fairness algorithm - 2 cycles */ +#define FAIR_MEM 2 + +#define HC_SEG_ACCESS_DEF 0 /* Driver decision 0-3 */ +#define HC_SEG_ACCESS_ATTN 4 +#define HC_SEG_ACCESS_NORM 0 /* Driver decision 0-1 */ + +/* + * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is + * control by the number of fast-path status blocks supported by the + * device (HW/FW). Each fast-path status block (FP-SB) aka non-default + * status block represents an independent interrupts context that can + * serve a regular L2 networking queue. However special L2 queues such + * as the FCoE queue do not require a FP-SB and other components like + * the CNIC may consume FP-SB reducing the number of possible L2 queues + * + * If the maximum number of FP-SB available is X then: + * a. If CNIC is supported it consumes 1 FP-SB thus the max number of + * regular L2 queues is Y=X-1 + * b. in MF mode the actual number of L2 queues is Y= (X-1/MF_factor) + * c. If the FCoE L2 queue is supported the actual number of L2 queues + * is Y+1 + * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for + * slow-path interrupts) or Y+2 if CNIC is supported (one additional + * FP interrupt context for the CNIC). + * e. The number of HW context (CID count) is always X or X+1 if FCoE + * L2 queue is supported. the cid for the FCoE L2 queue is always X. + * + * So this is quite simple for now as no ULPs are supported yet. 
:-) + */ +#define BNX2X_NUM_QUEUES(sc) ((sc)->num_queues) +#define BNX2X_NUM_ETH_QUEUES(sc) BNX2X_NUM_QUEUES(sc) +#define BNX2X_NUM_NON_CNIC_QUEUES(sc) BNX2X_NUM_QUEUES(sc) +#define BNX2X_NUM_RX_QUEUES(sc) BNX2X_NUM_QUEUES(sc) + +#define FOR_EACH_QUEUE(sc, var) \ + for ((var) = 0; (var) < BNX2X_NUM_QUEUES(sc); (var)++) + +#define FOR_EACH_NONDEFAULT_QUEUE(sc, var) \ + for ((var) = 1; (var) < BNX2X_NUM_QUEUES(sc); (var)++) + +#define FOR_EACH_ETH_QUEUE(sc, var) \ + for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(sc); (var)++) + +#define FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, var) \ + for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(sc); (var)++) + +#define FOR_EACH_COS_IN_TX_QUEUE(sc, var) \ + for ((var) = 0; (var) < (sc)->max_cos; (var)++) + +#define FOR_EACH_CNIC_QUEUE(sc, var) \ + for ((var) = BNX2X_NUM_ETH_QUEUES(sc); \ + (var) < BNX2X_NUM_QUEUES(sc); \ + (var)++) + +enum { + OOO_IDX_OFFSET, + FCOE_IDX_OFFSET, + FWD_IDX_OFFSET, +}; + +#define FCOE_IDX(sc) (BNX2X_NUM_NON_CNIC_QUEUES(sc) + FCOE_IDX_OFFSET) +#define bnx2x_fcoe_fp(sc) (&sc->fp[FCOE_IDX(sc)]) +#define bnx2x_fcoe(sc, var) (bnx2x_fcoe_fp(sc)->var) +#define bnx2x_fcoe_inner_sp_obj(sc) (&sc->sp_objs[FCOE_IDX(sc)]) +#define bnx2x_fcoe_sp_obj(sc, var) (bnx2x_fcoe_inner_sp_obj(sc)->var) +#define bnx2x_fcoe_tx(sc, var) (bnx2x_fcoe_fp(sc)->txdata_ptr[FIRST_TX_COS_INDEX]->var) + +#define OOO_IDX(sc) (BNX2X_NUM_NON_CNIC_QUEUES(sc) + OOO_IDX_OFFSET) +#define bnx2x_ooo_fp(sc) (&sc->fp[OOO_IDX(sc)]) +#define bnx2x_ooo(sc, var) (bnx2x_ooo_fp(sc)->var) +#define bnx2x_ooo_inner_sp_obj(sc) (&sc->sp_objs[OOO_IDX(sc)]) +#define bnx2x_ooo_sp_obj(sc, var) (bnx2x_ooo_inner_sp_obj(sc)->var) + +#define FWD_IDX(sc) (BNX2X_NUM_NON_CNIC_QUEUES(sc) + FWD_IDX_OFFSET) +#define bnx2x_fwd_fp(sc) (&sc->fp[FWD_IDX(sc)]) +#define bnx2x_fwd(sc, var) (bnx2x_fwd_fp(sc)->var) +#define bnx2x_fwd_inner_sp_obj(sc) (&sc->sp_objs[FWD_IDX(sc)]) +#define bnx2x_fwd_sp_obj(sc, var) (bnx2x_fwd_inner_sp_obj(sc)->var) +#define bnx2x_fwd_txdata(fp) (fp->txdata_ptr[FIRST_TX_COS_INDEX]) + +#define IS_ETH_FP(fp) ((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->sc)) +#define IS_FCOE_FP(fp) ((fp)->index == FCOE_IDX((fp)->sc)) +#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(sc)) +#define IS_FWD_FP(fp) ((fp)->index == FWD_IDX((fp)->sc)) +#define IS_FWD_IDX(idx) ((idx) == FWD_IDX(sc)) +#define IS_OOO_FP(fp) ((fp)->index == OOO_IDX((fp)->sc)) +#define IS_OOO_IDX(idx) ((idx) == OOO_IDX(sc)) + +enum { + BNX2X_PORT_QUERY_IDX, + BNX2X_PF_QUERY_IDX, + BNX2X_FCOE_QUERY_IDX, + BNX2X_FIRST_QUEUE_QUERY_IDX, +}; + +struct bnx2x_fw_stats_req { + struct stats_query_header hdr; + struct stats_query_entry query[FP_SB_MAX_E1x + + BNX2X_FIRST_QUEUE_QUERY_IDX]; +}; + +struct bnx2x_fw_stats_data { + struct stats_counter storm_counters; + struct per_port_stats port; + struct per_pf_stats pf; + struct per_queue_stats queue_stats[1]; +}; + +/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */ +#define BNX2X_IGU_STAS_MSG_VF_CNT 64 +#define BNX2X_IGU_STAS_MSG_PF_CNT 4 + +#define MAX_DMAE_C 8 + +/* + * This is the slowpath data structure. It is mapped into non-paged memory + * so that the hardware can access it's contents directly and must be page + * aligned. 
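 *
 * (Cross-reference: the wb_comp/wb_data words below serve as the DMAE
 * completion value and bounce buffer for the REG_RD_DMAE()/REG_WR_DMAE()
 * helpers defined near the end of this header; those helpers locate the
 * words inside this DMA region via BNX2X_SP_MAPPING(), i.e.
 * sc->sp_dma.paddr + offsetof(struct bnx2x_slowpath, wb_data).)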
+ */ +struct bnx2x_slowpath { + + /* used by the DMAE command executer */ + struct dmae_command dmae[MAX_DMAE_C]; + + /* statistics completion */ + uint32_t stats_comp; + + /* firmware defined statistics blocks */ + union mac_stats mac_stats; + struct nig_stats nig_stats; + struct host_port_stats port_stats; + struct host_func_stats func_stats; + + /* DMAE completion value and data source/sink */ + uint32_t wb_comp; + uint32_t wb_data[4]; + + union { + struct mac_configuration_cmd e1x; + struct eth_classify_rules_ramrod_data e2; + } mac_rdata; + + union { + struct tstorm_eth_mac_filter_config e1x; + struct eth_filter_rules_ramrod_data e2; + } rx_mode_rdata; + + struct eth_rss_update_ramrod_data rss_rdata; + + union { + struct mac_configuration_cmd e1; + struct eth_multicast_rules_ramrod_data e2; + } mcast_rdata; + + union { + struct function_start_data func_start; + struct flow_control_configuration pfc_config; /* for DCBX ramrod */ + } func_rdata; + + /* Queue State related ramrods */ + union { + struct client_init_ramrod_data init_data; + struct client_update_ramrod_data update_data; + } q_rdata; + + /* + * AFEX ramrod can not be a part of func_rdata union because these + * events might arrive in parallel to other events from func_rdata. + * If they were defined in the same union the data can get corrupted. + */ + struct afex_vif_list_ramrod_data func_afex_rdata; + + union drv_info_to_mcp drv_info_to_mcp; +}; /* struct bnx2x_slowpath */ + +/* + * Port specifc data structure. + */ +struct bnx2x_port { + /* + * Port Management Function (for 57711E only). + * When this field is set the driver instance is + * responsible for managing port specifc + * configurations such as handling link attentions. + */ + uint32_t pmf; + + /* Ethernet maximum transmission unit. */ + uint16_t ether_mtu; + + uint32_t link_config[ELINK_LINK_CONFIG_SIZE]; + + uint32_t ext_phy_config; + + /* Port feature config.*/ + uint32_t config; + + /* Defines the features supported by the PHY. */ + uint32_t supported[ELINK_LINK_CONFIG_SIZE]; + + /* Defines the features advertised by the PHY. */ + uint32_t advertising[ELINK_LINK_CONFIG_SIZE]; +#define ADVERTISED_10baseT_Half (1 << 1) +#define ADVERTISED_10baseT_Full (1 << 2) +#define ADVERTISED_100baseT_Half (1 << 3) +#define ADVERTISED_100baseT_Full (1 << 4) +#define ADVERTISED_1000baseT_Half (1 << 5) +#define ADVERTISED_1000baseT_Full (1 << 6) +#define ADVERTISED_TP (1 << 7) +#define ADVERTISED_FIBRE (1 << 8) +#define ADVERTISED_Autoneg (1 << 9) +#define ADVERTISED_Asym_Pause (1 << 10) +#define ADVERTISED_Pause (1 << 11) +#define ADVERTISED_2500baseX_Full (1 << 15) +#define ADVERTISED_10000baseT_Full (1 << 16) + + uint32_t phy_addr; + + /* + * MCP scratchpad address for port specific statistics. + * The device is responsible for writing statistcss + * back to the MCP for use with management firmware such + * as UMP/NC-SI. + */ + uint32_t port_stx; + + struct nig_stats old_nig_stats; +}; /* struct bnx2x_port */ + +struct bnx2x_mf_info { + uint32_t mf_config[E1HVN_MAX]; + + uint32_t vnics_per_port; /* 1, 2 or 4 */ + uint32_t multi_vnics_mode; /* can be set even if vnics_per_port = 1 */ + uint32_t path_has_ovlan; /* MF mode in the path (can be different than the MF mode of the function */ + +#define IS_MULTI_VNIC(sc) ((sc)->devinfo.mf_info.multi_vnics_mode) +#define VNICS_PER_PORT(sc) ((sc)->devinfo.mf_info.vnics_per_port) +#define VNICS_PER_PATH(sc) \ + ((sc)->devinfo.mf_info.vnics_per_port * \ + ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 
2 : 1 )) + + uint8_t min_bw[MAX_VNIC_NUM]; + uint8_t max_bw[MAX_VNIC_NUM]; + + uint16_t ext_id; /* vnic outer vlan or VIF ID */ +#define VALID_OVLAN(ovlan) ((ovlan) <= 4096) +#define INVALID_VIF_ID 0xFFFF +#define OVLAN(sc) ((sc)->devinfo.mf_info.ext_id) +#define VIF_ID(sc) ((sc)->devinfo.mf_info.ext_id) + + uint16_t default_vlan; +#define NIV_DEFAULT_VLAN(sc) ((sc)->devinfo.mf_info.default_vlan) + + uint8_t niv_allowed_priorities; +#define NIV_ALLOWED_PRIORITIES(sc) ((sc)->devinfo.mf_info.niv_allowed_priorities) + + uint8_t niv_default_cos; +#define NIV_DEFAULT_COS(sc) ((sc)->devinfo.mf_info.niv_default_cos) + + uint8_t niv_mba_enabled; + + enum mf_cfg_afex_vlan_mode afex_vlan_mode; +#define AFEX_VLAN_MODE(sc) ((sc)->devinfo.mf_info.afex_vlan_mode) + int afex_def_vlan_tag; + uint32_t pending_max; + + uint16_t flags; +#define MF_INFO_VALID_MAC 0x0001 + + uint16_t mf_ov; + uint8_t mf_mode; /* Switch-Dependent or Switch-Independent */ +#define IS_MF(sc) \ + (IS_MULTI_VNIC(sc) && \ + ((sc)->devinfo.mf_info.mf_mode != 0)) +#define IS_MF_SD(sc) \ + (IS_MULTI_VNIC(sc) && \ + ((sc)->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD)) +#define IS_MF_SI(sc) \ + (IS_MULTI_VNIC(sc) && \ + ((sc)->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI)) +#define IS_MF_AFEX(sc) \ + (IS_MULTI_VNIC(sc) && \ + ((sc)->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX)) +#define IS_MF_SD_MODE(sc) IS_MF_SD(sc) +#define IS_MF_SI_MODE(sc) IS_MF_SI(sc) +#define IS_MF_AFEX_MODE(sc) IS_MF_AFEX(sc) + + uint32_t mf_protos_supported; + #define MF_PROTO_SUPPORT_ETHERNET 0x1 + #define MF_PROTO_SUPPORT_ISCSI 0x2 + #define MF_PROTO_SUPPORT_FCOE 0x4 +}; /* struct bnx2x_mf_info */ + +/* Device information data structure. */ +struct bnx2x_devinfo { + /* PCIe info */ + uint16_t vendor_id; + uint16_t device_id; + uint16_t subvendor_id; + uint16_t subdevice_id; + + /* + * chip_id = 0b'CCCCCCCCCCCCCCCCRRRRMMMMMMMMBBBB' + * C = Chip Number (bits 16-31) + * R = Chip Revision (bits 12-15) + * M = Chip Metal (bits 4-11) + * B = Chip Bond ID (bits 0-3) + */ + uint32_t chip_id; +#define CHIP_ID(sc) ((sc)->devinfo.chip_id & 0xffff0000) +#define CHIP_NUM(sc) ((sc)->devinfo.chip_id >> 16) +/* device ids */ +#define CHIP_NUM_57711 0x164f +#define CHIP_NUM_57711E 0x1650 +#define CHIP_NUM_57712 0x1662 +#define CHIP_NUM_57712_MF 0x1663 +#define CHIP_NUM_57712_VF 0x166f +#define CHIP_NUM_57800 0x168a +#define CHIP_NUM_57800_MF 0x16a5 +#define CHIP_NUM_57800_VF 0x16a9 +#define CHIP_NUM_57810 0x168e +#define CHIP_NUM_57810_MF 0x16ae +#define CHIP_NUM_57810_VF 0x16af +#define CHIP_NUM_57811 0x163d +#define CHIP_NUM_57811_MF 0x163e +#define CHIP_NUM_57811_VF 0x163f +#define CHIP_NUM_57840_OBS 0x168d +#define CHIP_NUM_57840_OBS_MF 0x16ab +#define CHIP_NUM_57840_4_10 0x16a1 +#define CHIP_NUM_57840_2_20 0x16a2 +#define CHIP_NUM_57840_MF 0x16a4 +#define CHIP_NUM_57840_VF 0x16ad + +#define CHIP_REV_SHIFT 12 +#define CHIP_REV_MASK (0xF << CHIP_REV_SHIFT) +#define CHIP_REV(sc) ((sc)->devinfo.chip_id & CHIP_REV_MASK) + +#define CHIP_REV_Ax (0x0 << CHIP_REV_SHIFT) +#define CHIP_REV_Bx (0x1 << CHIP_REV_SHIFT) +#define CHIP_REV_Cx (0x2 << CHIP_REV_SHIFT) + +#define CHIP_REV_IS_SLOW(sc) \ + (CHIP_REV(sc) > 0x00005000) +#define CHIP_REV_IS_FPGA(sc) \ + (CHIP_REV_IS_SLOW(sc) && (CHIP_REV(sc) & 0x00001000)) +#define CHIP_REV_IS_EMUL(sc) \ + (CHIP_REV_IS_SLOW(sc) && !(CHIP_REV(sc) & 0x00001000)) +#define CHIP_REV_IS_ASIC(sc) \ + (!CHIP_REV_IS_SLOW(sc)) + +#define CHIP_METAL(sc) ((sc->devinfo.chip_id) & 0x00000ff0) +#define CHIP_BOND_ID(sc) ((sc->devinfo.chip_id) & 0x0000000f) + 
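As a worked illustration of the chip_id layout documented above, the following
self-contained sketch (illustrative only, not part of the patch; the sample
value 0x168e1000 is made up) decodes the four fields with the same masks and
shifts used by the CHIP_NUM/CHIP_REV/CHIP_METAL/CHIP_BOND_ID macros:

#include <stdint.h>
#include <stdio.h>

/* Illustrative decoder mirroring the masks above -- not part of the patch. */
static void example_decode_chip_id(uint32_t chip_id)
{
	uint32_t num   = chip_id >> 16;                /* device number, e.g. 0x168e (57810) */
	uint32_t rev   = (chip_id & 0x0000f000) >> 12; /* stepping, printed as 'A' + rev */
	uint32_t metal = (chip_id & 0x00000ff0) >> 4;
	uint32_t bond  = chip_id & 0x0000000f;

	printf("chip %#x rev %c metal %u bond %u\n",
	       num, 'A' + rev, metal, bond);
}

/* example_decode_chip_id(0x168e1000) prints: chip 0x168e rev B metal 0 bond 0 */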
+#define CHIP_IS_57711(sc) (CHIP_NUM(sc) == CHIP_NUM_57711) +#define CHIP_IS_57711E(sc) (CHIP_NUM(sc) == CHIP_NUM_57711E) +#define CHIP_IS_E1H(sc) ((CHIP_IS_57711(sc)) || \ + (CHIP_IS_57711E(sc))) +#define CHIP_IS_E1x(sc) CHIP_IS_E1H(sc) + +#define CHIP_IS_57712(sc) (CHIP_NUM(sc) == CHIP_NUM_57712) +#define CHIP_IS_57712_MF(sc) (CHIP_NUM(sc) == CHIP_NUM_57712_MF) +#define CHIP_IS_57712_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57712_VF) +#define CHIP_IS_E2(sc) (CHIP_IS_57712(sc) || \ + CHIP_IS_57712_MF(sc)) + +#define CHIP_IS_57800(sc) (CHIP_NUM(sc) == CHIP_NUM_57800) +#define CHIP_IS_57800_MF(sc) (CHIP_NUM(sc) == CHIP_NUM_57800_MF) +#define CHIP_IS_57800_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57800_VF) +#define CHIP_IS_57810(sc) (CHIP_NUM(sc) == CHIP_NUM_57810) +#define CHIP_IS_57810_MF(sc) (CHIP_NUM(sc) == CHIP_NUM_57810_MF) +#define CHIP_IS_57810_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57810_VF) +#define CHIP_IS_57811(sc) (CHIP_NUM(sc) == CHIP_NUM_57811) +#define CHIP_IS_57811_MF(sc) (CHIP_NUM(sc) == CHIP_NUM_57811_MF) +#define CHIP_IS_57811_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57811_VF) +#define CHIP_IS_57840(sc) ((CHIP_NUM(sc) == CHIP_NUM_57840_OBS) || \ + (CHIP_NUM(sc) == CHIP_NUM_57840_4_10) || \ + (CHIP_NUM(sc) == CHIP_NUM_57840_2_20)) +#define CHIP_IS_57840_MF(sc) ((CHIP_NUM(sc) == CHIP_NUM_57840_OBS_MF) || \ + (CHIP_NUM(sc) == CHIP_NUM_57840_MF)) +#define CHIP_IS_57840_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57840_VF) + +#define CHIP_IS_E3(sc) (CHIP_IS_57800(sc) || \ + CHIP_IS_57800_MF(sc) || \ + CHIP_IS_57800_VF(sc) || \ + CHIP_IS_57810(sc) || \ + CHIP_IS_57810_MF(sc) || \ + CHIP_IS_57810_VF(sc) || \ + CHIP_IS_57811(sc) || \ + CHIP_IS_57811_MF(sc) || \ + CHIP_IS_57811_VF(sc) || \ + CHIP_IS_57840(sc) || \ + CHIP_IS_57840_MF(sc) || \ + CHIP_IS_57840_VF(sc)) +#define CHIP_IS_E3A0(sc) (CHIP_IS_E3(sc) && \ + (CHIP_REV(sc) == CHIP_REV_Ax)) +#define CHIP_IS_E3B0(sc) (CHIP_IS_E3(sc) && \ + (CHIP_REV(sc) == CHIP_REV_Bx)) + +#define USES_WARPCORE(sc) (CHIP_IS_E3(sc)) +#define CHIP_IS_E2E3(sc) (CHIP_IS_E2(sc) || \ + CHIP_IS_E3(sc)) + +#define CHIP_IS_MF_CAP(sc) (CHIP_IS_57711E(sc) || \ + CHIP_IS_57712_MF(sc) || \ + CHIP_IS_E3(sc)) + +#define IS_VF(sc) ((sc)->flags & BNX2X_IS_VF_FLAG) +#define IS_PF(sc) (!IS_VF(sc)) + +/* + * This define is used in two main places: + * 1. In the early stages of nic_load, to know if to configure Parser/Searcher + * to nic-only mode or to offload mode. Offload mode is configured if either + * the chip is E1x (where NIC_MODE register is not applicable), or if cnic + * already registered for this port (which means that the user wants storage + * services). + * 2. During cnic-related load, to know if offload mode is already configured + * in the HW or needs to be configrued. Since the transition from nic-mode to + * offload-mode in HW causes traffic coruption, nic-mode is configured only + * in ports on which storage services where never requested. 
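 *
 * (Note: this PMD hard-wires CNIC_SUPPORT()/CNIC_ENABLED() to 0 further down
 * in this header, so the macro below effectively reduces to !CHIP_IS_E1x(sc)
 * here.)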
+ */ +#define CONFIGURE_NIC_MODE(sc) (!CHIP_IS_E1x(sc) && !CNIC_ENABLED(sc)) + + uint8_t chip_port_mode; +#define CHIP_4_PORT_MODE 0x0 +#define CHIP_2_PORT_MODE 0x1 +#define CHIP_PORT_MODE_NONE 0x2 +#define CHIP_PORT_MODE(sc) ((sc)->devinfo.chip_port_mode) +#define CHIP_IS_MODE_4_PORT(sc) (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) + + uint8_t int_block; +#define INT_BLOCK_HC 0 +#define INT_BLOCK_IGU 1 +#define INT_BLOCK_MODE_NORMAL 0 +#define INT_BLOCK_MODE_BW_COMP 2 +#define CHIP_INT_MODE_IS_NBC(sc) \ + (!CHIP_IS_E1x(sc) && \ + !((sc)->devinfo.int_block & INT_BLOCK_MODE_BW_COMP)) +#define CHIP_INT_MODE_IS_BC(sc) (!CHIP_INT_MODE_IS_NBC(sc)) + + uint32_t shmem_base; + uint32_t shmem2_base; + uint32_t bc_ver; + char bc_ver_str[32]; + uint32_t mf_cfg_base; /* bootcode shmem address in BAR memory */ + struct bnx2x_mf_info mf_info; + + uint32_t flash_size; +#define NVRAM_1MB_SIZE 0x20000 +#define NVRAM_TIMEOUT_COUNT 30000 +#define NVRAM_PAGE_SIZE 256 + + /* PCIe capability information */ + uint32_t pcie_cap_flags; +#define BNX2X_PM_CAPABLE_FLAG 0x00000001 +#define BNX2X_PCIE_CAPABLE_FLAG 0x00000002 +#define BNX2X_MSI_CAPABLE_FLAG 0x00000004 +#define BNX2X_MSIX_CAPABLE_FLAG 0x00000008 + uint16_t pcie_pm_cap_reg; + uint16_t pcie_link_width; + uint16_t pcie_link_speed; + uint16_t pcie_msi_cap_reg; + uint16_t pcie_msix_cap_reg; + + /* device configuration read from bootcode shared memory */ + uint32_t hw_config; + uint32_t hw_config2; +}; /* struct bnx2x_devinfo */ + +struct bnx2x_sp_objs { + struct ecore_vlan_mac_obj mac_obj; /* MACs object */ + struct ecore_queue_sp_obj q_obj; /* Queue State object */ +}; /* struct bnx2x_sp_objs */ + +/* + * Data that will be used to create a link report message. We will keep the + * data used for the last link report in order to prevent reporting the same + * link parameters twice. + */ +struct bnx2x_link_report_data { + uint16_t line_speed; /* Effective line speed */ + unsigned long link_report_flags; /* BNX2X_LINK_REPORT_XXX flags */ +}; + +enum { + BNX2X_LINK_REPORT_FULL_DUPLEX, + BNX2X_LINK_REPORT_LINK_DOWN, + BNX2X_LINK_REPORT_RX_FC_ON, + BNX2X_LINK_REPORT_TX_FC_ON +}; + +#define BNX2X_RX_CHAIN_PAGE_SZ BNX2X_PAGE_SIZE + +struct bnx2x_pci_cap { + struct bnx2x_pci_cap *next; + uint16_t id; + uint16_t type; + uint16_t addr; +}; + +struct bnx2x_vfdb; + +/* Top level device private data structure. 
*/ +struct bnx2x_softc { + + void **rx_queues; + void **tx_queues; + uint32_t max_tx_queues; + uint32_t max_rx_queues; + const struct rte_pci_device *pci_dev; + uint32_t pci_val; + struct bnx2x_pci_cap *pci_caps; +#define BNX2X_INTRS_POLL_PERIOD 1 + + void *firmware; + uint64_t fw_len; + + /* MAC address operations */ + struct bnx2x_mac_ops mac_ops; + + /* structures for VF mbox/response/bulletin */ + struct bnx2x_vf_mbx_msg *vf2pf_mbox; + struct bnx2x_dma vf2pf_mbox_mapping; + struct vf_acquire_resp_tlv acquire_resp; + struct bnx2x_vf_bulletin *pf2vf_bulletin; + struct bnx2x_dma pf2vf_bulletin_mapping; + struct bnx2x_vf_bulletin old_bulletin; + + int media; + + int state; /* device state */ +#define BNX2X_STATE_CLOSED 0x0000 +#define BNX2X_STATE_OPENING_WAITING_LOAD 0x1000 +#define BNX2X_STATE_OPENING_WAITING_PORT 0x2000 +#define BNX2X_STATE_OPEN 0x3000 +#define BNX2X_STATE_CLOSING_WAITING_HALT 0x4000 +#define BNX2X_STATE_CLOSING_WAITING_DELETE 0x5000 +#define BNX2X_STATE_CLOSING_WAITING_UNLOAD 0x6000 +#define BNX2X_STATE_DISABLED 0xD000 +#define BNX2X_STATE_DIAG 0xE000 +#define BNX2X_STATE_ERROR 0xF000 + + int flags; +#define BNX2X_ONE_PORT_FLAG 0x1 +#define BNX2X_NO_FCOE_FLAG 0x2 +#define BNX2X_NO_WOL_FLAG 0x4 +#define BNX2X_NO_MCP_FLAG 0x8 +#define BNX2X_NO_ISCSI_OOO_FLAG 0x10 +#define BNX2X_NO_ISCSI_FLAG 0x20 +#define BNX2X_MF_FUNC_DIS 0x40 +#define BNX2X_TX_SWITCHING 0x80 +#define BNX2X_IS_VF_FLAG 0x100 + +#define BNX2X_ONE_PORT(sc) (sc->flags & BNX2X_ONE_PORT_FLAG) +#define BNX2X_NOFCOE(sc) (sc->flags & BNX2X_NO_FCOE_FLAG) +#define BNX2X_NOMCP(sc) (sc->flags & BNX2X_NO_MCP_FLAG) + +#define MAX_BARS 5 + struct bnx2x_bar bar[MAX_BARS]; /* map BARs 0, 2, 4 */ + + uint16_t doorbell_size; + + /* periodic timer callout */ +#define PERIODIC_STOP 0 +#define PERIODIC_GO 1 + volatile unsigned long periodic_flags; + + struct bnx2x_fastpath fp[MAX_RSS_CHAINS]; + struct bnx2x_sp_objs sp_objs[MAX_RSS_CHAINS]; + + uint8_t unit; /* driver instance number */ + + int pcie_bus; /* PCIe bus number */ + int pcie_device; /* PCIe device/slot number */ + int pcie_func; /* PCIe function number */ + + uint8_t pfunc_rel; /* function relative */ + uint8_t pfunc_abs; /* function absolute */ + uint8_t path_id; /* function absolute */ +#define SC_PATH(sc) (sc->path_id) +#define SC_PORT(sc) (sc->pfunc_rel & 1) +#define SC_FUNC(sc) (sc->pfunc_rel) +#define SC_ABS_FUNC(sc) (sc->pfunc_abs) +#define SC_VN(sc) (sc->pfunc_rel >> 1) +#define SC_L_ID(sc) (SC_VN(sc) << 2) +#define PORT_ID(sc) SC_PORT(sc) +#define PATH_ID(sc) SC_PATH(sc) +#define VNIC_ID(sc) SC_VN(sc) +#define FUNC_ID(sc) SC_FUNC(sc) +#define ABS_FUNC_ID(sc) SC_ABS_FUNC(sc) +#define SC_FW_MB_IDX_VN(sc, vn) \ + (SC_PORT(sc) + (vn) * \ + ((CHIP_IS_E1x(sc) || (CHIP_IS_MODE_4_PORT(sc))) ? 
2 : 1)) +#define SC_FW_MB_IDX(sc) SC_FW_MB_IDX_VN(sc, SC_VN(sc)) + + int if_capen; /* enabled interface capabilities */ + + struct bnx2x_devinfo devinfo; + char fw_ver_str[32]; + char mf_mode_str[32]; + char pci_link_str[32]; + + struct iro *iro_array; + + int dmae_ready; +#define DMAE_READY(sc) (sc->dmae_ready) + + struct ecore_credit_pool_obj vlans_pool; + struct ecore_credit_pool_obj macs_pool; + struct ecore_rx_mode_obj rx_mode_obj; + struct ecore_mcast_obj mcast_obj; + struct ecore_rss_config_obj rss_conf_obj; + struct ecore_func_sp_obj func_obj; + + uint16_t fw_seq; + uint16_t fw_drv_pulse_wr_seq; + uint32_t func_stx; + + struct elink_params link_params; + struct elink_vars link_vars; + uint32_t link_cnt; + struct bnx2x_link_report_data last_reported_link; + char mac_addr_str[32]; + + uint32_t tx_ring_size; + uint32_t rx_ring_size; + int wol; + + int is_leader; + int recovery_state; +#define BNX2X_RECOVERY_DONE 1 +#define BNX2X_RECOVERY_INIT 2 +#define BNX2X_RECOVERY_WAIT 3 +#define BNX2X_RECOVERY_FAILED 4 +#define BNX2X_RECOVERY_NIC_LOADING 5 + + uint32_t rx_mode; +#define BNX2X_RX_MODE_NONE 0 +#define BNX2X_RX_MODE_NORMAL 1 +#define BNX2X_RX_MODE_ALLMULTI 2 +#define BNX2X_RX_MODE_PROMISC 3 +#define BNX2X_MAX_MULTICAST 64 + + struct bnx2x_port port; + + struct cmng_init cmng; + + /* user configs */ + uint8_t num_queues; + int hc_rx_ticks; + int hc_tx_ticks; + uint32_t rx_budget; + int interrupt_mode; +#define INTR_MODE_INTX 0 +#define INTR_MODE_MSI 1 +#define INTR_MODE_MSIX 2 +#define INTR_MODE_SINGLE_MSIX 3 + int udp_rss; + + uint8_t igu_dsb_id; + uint8_t igu_base_sb; + uint8_t igu_sb_cnt; + uint32_t igu_base_addr; + uint8_t base_fw_ndsb; +#define DEF_SB_IGU_ID 16 +#define DEF_SB_ID HC_SP_SB_ID + + /* default status block */ + struct bnx2x_dma def_sb_dma; + struct host_sp_status_block *def_sb; + uint16_t def_idx; + uint16_t def_att_idx; + uint32_t attn_state; + struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS]; + + /* general SP events - stats query, cfc delete, etc */ +#define HC_SP_INDEX_ETH_DEF_CONS 3 + /* EQ completions */ +#define HC_SP_INDEX_EQ_CONS 7 + /* FCoE L2 connection completions */ +#define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS 6 +#define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS 4 + /* iSCSI L2 */ +#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5 +#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1 + + /* event queue */ + struct bnx2x_dma eq_dma; + union event_ring_elem *eq; + uint16_t eq_prod; + uint16_t eq_cons; + uint16_t *eq_cons_sb; +#define NUM_EQ_PAGES 1 /* must be a power of 2 */ +#define EQ_DESC_CNT_PAGE (BNX2X_PAGE_SIZE / sizeof(union event_ring_elem)) +#define EQ_DESC_MAX_PAGE (EQ_DESC_CNT_PAGE - 1) +#define NUM_EQ_DESC (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES) +#define EQ_DESC_MASK (NUM_EQ_DESC - 1) +#define MAX_EQ_AVAIL (EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2) + /* depends on EQ_DESC_CNT_PAGE being a power of 2 */ +#define NEXT_EQ_IDX(x) \ + ((((x) & EQ_DESC_MAX_PAGE) == (EQ_DESC_MAX_PAGE - 1)) ? 
\ + ((x) + 2) : ((x) + 1)) + /* depends on the above and on NUM_EQ_PAGES being a power of 2 */ +#define EQ_DESC(x) ((x) & EQ_DESC_MASK) + + /* slow path */ + struct bnx2x_dma sp_dma; + struct bnx2x_slowpath *sp; + unsigned long sp_state; + + /* slow path queue */ + struct bnx2x_dma spq_dma; + struct eth_spe *spq; +#define SP_DESC_CNT (BNX2X_PAGE_SIZE / sizeof(struct eth_spe)) +#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) +#define MAX_SPQ_PENDING 8 + + uint16_t spq_prod_idx; + struct eth_spe *spq_prod_bd; + struct eth_spe *spq_last_bd; + uint16_t *dsb_sp_prod; + + volatile unsigned long eq_spq_left; /* COMMON_xxx ramrod credit */ + volatile unsigned long cq_spq_left; /* ETH_xxx ramrod credit */ + + /* fw decompression buffer */ + struct bnx2x_dma gz_buf_dma; + void *gz_buf; + uint32_t gz_outlen; +#define GUNZIP_BUF(sc) (sc->gz_buf) +#define GUNZIP_OUTLEN(sc) (sc->gz_outlen) +#define GUNZIP_PHYS(sc) (phys_addr_t)(sc->gz_buf_dma.paddr) +#define FW_BUF_SIZE 0x40000 + + struct raw_op *init_ops; + uint16_t *init_ops_offsets; /* init block offsets inside init_ops */ + uint32_t *init_data; /* data blob, 32 bit granularity */ + uint32_t init_mode_flags; +#define INIT_MODE_FLAGS(sc) (sc->init_mode_flags) + /* PRAM blobs - raw data */ + const uint8_t *tsem_int_table_data; + const uint8_t *tsem_pram_data; + const uint8_t *usem_int_table_data; + const uint8_t *usem_pram_data; + const uint8_t *xsem_int_table_data; + const uint8_t *xsem_pram_data; + const uint8_t *csem_int_table_data; + const uint8_t *csem_pram_data; +#define INIT_OPS(sc) (sc->init_ops) +#define INIT_OPS_OFFSETS(sc) (sc->init_ops_offsets) +#define INIT_DATA(sc) (sc->init_data) +#define INIT_TSEM_INT_TABLE_DATA(sc) (sc->tsem_int_table_data) +#define INIT_TSEM_PRAM_DATA(sc) (sc->tsem_pram_data) +#define INIT_USEM_INT_TABLE_DATA(sc) (sc->usem_int_table_data) +#define INIT_USEM_PRAM_DATA(sc) (sc->usem_pram_data) +#define INIT_XSEM_INT_TABLE_DATA(sc) (sc->xsem_int_table_data) +#define INIT_XSEM_PRAM_DATA(sc) (sc->xsem_pram_data) +#define INIT_CSEM_INT_TABLE_DATA(sc) (sc->csem_int_table_data) +#define INIT_CSEM_PRAM_DATA(sc) (sc->csem_pram_data) + +#define PHY_FW_VER_LEN 20 + char fw_ver[32]; + + /* ILT + * For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB + * context size we need 8 ILT entries. + */ +#define ILT_MAX_L2_LINES 8 + struct hw_context context[ILT_MAX_L2_LINES]; + struct ecore_ilt *ilt; +#define ILT_MAX_LINES 256 + + /* max supported number of RSS queues: IGU SBs minus one for CNIC */ +#define BNX2X_MAX_RSS_COUNT(sc) ((sc)->igu_sb_cnt - CNIC_SUPPORT(sc)) + /* max CID count: Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI */ +#define BNX2X_L2_MAX_CID(sc) \ + (BNX2X_MAX_RSS_COUNT(sc) * ECORE_MULTI_TX_COS + 2 * CNIC_SUPPORT(sc)) +#define BNX2X_L2_CID_COUNT(sc) \ + (BNX2X_NUM_ETH_QUEUES(sc) * ECORE_MULTI_TX_COS + 2 * CNIC_SUPPORT(sc)) +#define L2_ILT_LINES(sc) \ + (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(sc), ILT_PAGE_CIDS)) + + int qm_cid_count; + + uint8_t dropless_fc; + + /* total number of FW statistics requests */ + uint8_t fw_stats_num; + /* + * This is a memory buffer that will contain both statistics ramrod + * request and data. + */ + struct bnx2x_dma fw_stats_dma; + /* + * FW statistics request shortcut (points at the beginning of fw_stats + * buffer). + */ + int fw_stats_req_size; + struct bnx2x_fw_stats_req *fw_stats_req; + phys_addr_t fw_stats_req_mapping; + /* + * FW statistics data shortcut (points at the beginning of fw_stats + * buffer + fw_stats_req_size). 
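 *
 * Illustrative derivation (not copied from the patch) of how both shortcuts
 * come out of the single fw_stats_dma buffer:
 *
 *   fw_stats_req          = (struct bnx2x_fw_stats_req *)fw_stats_dma.vaddr;
 *   fw_stats_req_mapping  = fw_stats_dma.paddr;
 *   fw_stats_data         = (struct bnx2x_fw_stats_data *)
 *                           ((uint8_t *)fw_stats_dma.vaddr + fw_stats_req_size);
 *   fw_stats_data_mapping = fw_stats_dma.paddr + fw_stats_req_size;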
+ */ + int fw_stats_data_size; + struct bnx2x_fw_stats_data *fw_stats_data; + phys_addr_t fw_stats_data_mapping; + + /* tracking a pending STAT_QUERY ramrod */ + uint16_t stats_pending; + /* number of completed statistics ramrods */ + uint16_t stats_comp; + uint16_t stats_counter; + uint8_t stats_init; + int stats_state; + + struct bnx2x_eth_stats eth_stats; + struct host_func_stats func_stats; + struct bnx2x_eth_stats_old eth_stats_old; + struct bnx2x_net_stats_old net_stats_old; + struct bnx2x_fw_port_stats_old fw_stats_old; + + struct dmae_command stats_dmae; /* used by dmae command loader */ + int executer_idx; + + int mtu; + + /* DCB support on/off */ + int dcb_state; +#define BNX2X_DCB_STATE_OFF 0 +#define BNX2X_DCB_STATE_ON 1 + /* DCBX engine mode */ + int dcbx_enabled; +#define BNX2X_DCBX_ENABLED_OFF 0 +#define BNX2X_DCBX_ENABLED_ON_NEG_OFF 1 +#define BNX2X_DCBX_ENABLED_ON_NEG_ON 2 +#define BNX2X_DCBX_ENABLED_INVALID -1 + + uint8_t cnic_support; + uint8_t cnic_enabled; + uint8_t cnic_loaded; +#define CNIC_SUPPORT(sc) 0 /* ((sc)->cnic_support) */ +#define CNIC_ENABLED(sc) 0 /* ((sc)->cnic_enabled) */ +#define CNIC_LOADED(sc) 0 /* ((sc)->cnic_loaded) */ + + /* multiple tx classes of service */ + uint8_t max_cos; +#define BNX2X_MAX_PRIORITY 8 + /* priority to cos mapping */ + uint8_t prio_to_cos[BNX2X_MAX_PRIORITY]; + + int panic; +}; /* struct bnx2x_softc */ + +/* IOCTL sub-commands for edebug and firmware upgrade */ +#define BNX2X_IOC_RD_NVRAM 1 +#define BNX2X_IOC_WR_NVRAM 2 +#define BNX2X_IOC_STATS_SHOW_NUM 3 +#define BNX2X_IOC_STATS_SHOW_STR 4 +#define BNX2X_IOC_STATS_SHOW_CNT 5 + +struct bnx2x_nvram_data { + uint32_t op; /* ioctl sub-command */ + uint32_t offset; + uint32_t len; + uint32_t value[1]; /* variable */ +}; + +union bnx2x_stats_show_data { + uint32_t op; /* ioctl sub-command */ + + struct { + uint32_t num; /* return number of stats */ + uint32_t len; /* length of each string item */ + } desc; + + /* variable length... */ + char str[1]; /* holds names of desc.num stats, each desc.len in length */ + + /* variable length... 
*/ + uint64_t stats[1]; /* holds all stats */ +}; + +/* function init flags */ +#define FUNC_FLG_RSS 0x0001 +#define FUNC_FLG_STATS 0x0002 +/* FUNC_FLG_UNMATCHED 0x0004 */ +#define FUNC_FLG_SPQ 0x0010 +#define FUNC_FLG_LEADING 0x0020 /* PF only */ + +struct bnx2x_func_init_params { + phys_addr_t fw_stat_map; /* (dma) valid if FUNC_FLG_STATS */ + phys_addr_t spq_map; /* (dma) valid if FUNC_FLG_SPQ */ + uint16_t func_flgs; + uint16_t func_id; /* abs function id */ + uint16_t pf_id; + uint16_t spq_prod; /* valid if FUNC_FLG_SPQ */ +}; + +/* memory resources reside at BARs 0, 2, 4 */ +/* Run `pciconf -lb` to see mappings */ +#define BAR0 0 +#define BAR1 2 +#define BAR2 4 + +#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC +uint8_t bnx2x_reg_read8(struct bnx2x_softc *sc, size_t offset); +uint16_t bnx2x_reg_read16(struct bnx2x_softc *sc, size_t offset); +uint32_t bnx2x_reg_read32(struct bnx2x_softc *sc, size_t offset); + +void bnx2x_reg_write8(struct bnx2x_softc *sc, size_t offset, uint8_t val); +void bnx2x_reg_write16(struct bnx2x_softc *sc, size_t offset, uint16_t val); +void bnx2x_reg_write32(struct bnx2x_softc *sc, size_t offset, uint32_t val); +#else +#define bnx2x_reg_write8(sc, offset, val)\ + *((volatile uint8_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val + +#define bnx2x_reg_write16(sc, offset, val)\ + *((volatile uint16_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val + +#define bnx2x_reg_write32(sc, offset, val)\ + *((volatile uint32_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val + +#define bnx2x_reg_read8(sc, offset)\ + (*((volatile uint8_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset))) + +#define bnx2x_reg_read16(sc, offset)\ + (*((volatile uint16_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset))) + +#define bnx2x_reg_read32(sc, offset)\ + (*((volatile uint32_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset))) +#endif + +#define REG_ADDR(sc, offset) (((uint64_t)sc->bar[BAR0].base_addr) + (offset)) + +#define REG_RD8(sc, offset) bnx2x_reg_read8(sc, (offset)) +#define REG_RD16(sc, offset) bnx2x_reg_read16(sc, (offset)) +#define REG_RD32(sc, offset) bnx2x_reg_read32(sc, (offset)) + +#define REG_WR8(sc, offset, val) bnx2x_reg_write8(sc, (offset), val) +#define REG_WR16(sc, offset, val) bnx2x_reg_write16(sc, (offset), val) +#define REG_WR32(sc, offset, val) bnx2x_reg_write32(sc, (offset), val) + +#define REG_RD(sc, offset) REG_RD32(sc, offset) +#define REG_WR(sc, offset, val) REG_WR32(sc, offset, val) + +#define BNX2X_SP(sc, var) (&(sc)->sp->var) +#define BNX2X_SP_MAPPING(sc, var) \ + (sc->sp_dma.paddr + offsetof(struct bnx2x_slowpath, var)) + +#define BNX2X_FP(sc, nr, var) ((sc)->fp[(nr)].var) +#define BNX2X_SP_OBJ(sc, fp) ((sc)->sp_objs[(fp)->index]) + +#define bnx2x_fp(sc, nr, var) ((sc)->fp[nr].var) + +#define REG_RD_DMAE(sc, offset, valp, len32) \ + do { \ + (void)bnx2x_read_dmae(sc, offset, len32); \ + (void)rte_memcpy(valp, BNX2X_SP(sc, wb_data[0]), (len32) * 4); \ + } while (0) + +#define REG_WR_DMAE(sc, offset, valp, len32) \ + do { \ + (void)rte_memcpy(BNX2X_SP(sc, wb_data[0]), valp, (len32) * 4); \ + (void)bnx2x_write_dmae(sc, BNX2X_SP_MAPPING(sc, wb_data), offset, len32); \ + } while (0) + +#define REG_WR_DMAE_LEN(sc, offset, valp, len32) \ + REG_WR_DMAE(sc, offset, valp, len32) + +#define REG_RD_DMAE_LEN(sc, offset, valp, len32) \ + REG_RD_DMAE(sc, offset, valp, len32) + +#define VIRT_WR_DMAE_LEN(sc, data, addr, len32, le32_swap) \ + do { \ + /* if (le32_swap) { */ \ + /* PMD_PWARN_LOG(sc, "VIRT_WR_DMAE_LEN with le32_swap=1"); */ \ + /* } */ \ + 
rte_memcpy(GUNZIP_BUF(sc), data, len32 * 4); \ + ecore_write_big_buf_wb(sc, addr, len32); \ + } while (0) + +#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */ +#define BNX2X_DB_SHIFT 7 /* 128 bytes */ +#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT) +#error "Minimum DB doorbell stride is 8" +#endif +#define DPM_TRIGGER_TYPE 0x40 + +/* Doorbell macro */ +#define BNX2X_DB_WRITE(db_bar, val) \ + *((volatile uint32_t *)(db_bar)) = (val) + +#define BNX2X_DB_READ(db_bar) \ + *((volatile uint32_t *)(db_bar)) + +#define DOORBELL_ADDR(sc, offset) \ + (volatile uint32_t *)(((char *)(sc)->bar[BAR1].base_addr + (offset))) + +#define DOORBELL(sc, cid, val) \ + if (IS_PF(sc)) \ + BNX2X_DB_WRITE((DOORBELL_ADDR(sc, sc->doorbell_size * (cid) + DPM_TRIGGER_TYPE)), (val)); \ + else \ + BNX2X_DB_WRITE((DOORBELL_ADDR(sc, sc->doorbell_size * (cid))), (val)) \ + +#define SHMEM_ADDR(sc, field) \ + (sc->devinfo.shmem_base + offsetof(struct shmem_region, field)) +#define SHMEM_RD(sc, field) REG_RD(sc, SHMEM_ADDR(sc, field)) +#define SHMEM_RD16(sc, field) REG_RD16(sc, SHMEM_ADDR(sc, field)) +#define SHMEM_WR(sc, field, val) REG_WR(sc, SHMEM_ADDR(sc, field), val) + +#define SHMEM2_ADDR(sc, field) \ + (sc->devinfo.shmem2_base + offsetof(struct shmem2_region, field)) +#define SHMEM2_HAS(sc, field) \ + (sc->devinfo.shmem2_base && (REG_RD(sc, SHMEM2_ADDR(sc, size)) > \ + offsetof(struct shmem2_region, field))) +#define SHMEM2_RD(sc, field) REG_RD(sc, SHMEM2_ADDR(sc, field)) +#define SHMEM2_WR(sc, field, val) REG_WR(sc, SHMEM2_ADDR(sc, field), val) + +#define MFCFG_ADDR(sc, field) \ + (sc->devinfo.mf_cfg_base + offsetof(struct mf_cfg, field)) +#define MFCFG_RD(sc, field) REG_RD(sc, MFCFG_ADDR(sc, field)) +#define MFCFG_RD16(sc, field) REG_RD16(sc, MFCFG_ADDR(sc, field)) +#define MFCFG_WR(sc, field, val) REG_WR(sc, MFCFG_ADDR(sc, field), val) + +/* DMAE command defines */ + +#define DMAE_TIMEOUT -1 +#define DMAE_PCI_ERROR -2 /* E2 and onward */ +#define DMAE_NOT_RDY -3 +#define DMAE_PCI_ERR_FLAG 0x80000000 + +#define DMAE_SRC_PCI 0 +#define DMAE_SRC_GRC 1 + +#define DMAE_DST_NONE 0 +#define DMAE_DST_PCI 1 +#define DMAE_DST_GRC 2 + +#define DMAE_COMP_PCI 0 +#define DMAE_COMP_GRC 1 + +#define DMAE_COMP_REGULAR 0 +#define DMAE_COM_SET_ERR 1 + +#define DMAE_CMD_SRC_PCI (DMAE_SRC_PCI << DMAE_COMMAND_SRC_SHIFT) +#define DMAE_CMD_SRC_GRC (DMAE_SRC_GRC << DMAE_COMMAND_SRC_SHIFT) +#define DMAE_CMD_DST_PCI (DMAE_DST_PCI << DMAE_COMMAND_DST_SHIFT) +#define DMAE_CMD_DST_GRC (DMAE_DST_GRC << DMAE_COMMAND_DST_SHIFT) + +#define DMAE_CMD_C_DST_PCI (DMAE_COMP_PCI << DMAE_COMMAND_C_DST_SHIFT) +#define DMAE_CMD_C_DST_GRC (DMAE_COMP_GRC << DMAE_COMMAND_C_DST_SHIFT) + +#define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_COMMAND_ENDIANITY_SHIFT) +#define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_COMMAND_ENDIANITY_SHIFT) +#define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_COMMAND_ENDIANITY_SHIFT) +#define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_COMMAND_ENDIANITY_SHIFT) + +#define DMAE_CMD_PORT_0 0 +#define DMAE_CMD_PORT_1 DMAE_COMMAND_PORT + +#define DMAE_SRC_PF 0 +#define DMAE_SRC_VF 1 + +#define DMAE_DST_PF 0 +#define DMAE_DST_VF 1 + +#define DMAE_C_SRC 0 +#define DMAE_C_DST 1 + +#define DMAE_LEN32_RD_MAX 0x80 +#define DMAE_LEN32_WR_MAX(sc) 0x2000 + +#define DMAE_COMP_VAL 0x60d0d0ae /* E2 and beyond, upper bit indicates error */ + +#define MAX_DMAE_C_PER_PORT 8 +#define INIT_DMAE_C(sc) ((SC_PORT(sc) * MAX_DMAE_C_PER_PORT) + SC_VN(sc)) +#define PMF_DMAE_C(sc) ((SC_PORT(sc) * MAX_DMAE_C_PER_PORT) + E1HVN_MAX) + +static const uint32_t dmae_reg_go_c[] = { + DMAE_REG_GO_C0, 
DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, + DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, + DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11, + DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15 +}; + +#define ATTN_NIG_FOR_FUNC (1L << 8) +#define ATTN_SW_TIMER_4_FUNC (1L << 9) +#define GPIO_2_FUNC (1L << 10) +#define GPIO_3_FUNC (1L << 11) +#define GPIO_4_FUNC (1L << 12) +#define ATTN_GENERAL_ATTN_1 (1L << 13) +#define ATTN_GENERAL_ATTN_2 (1L << 14) +#define ATTN_GENERAL_ATTN_3 (1L << 15) +#define ATTN_GENERAL_ATTN_4 (1L << 13) +#define ATTN_GENERAL_ATTN_5 (1L << 14) +#define ATTN_GENERAL_ATTN_6 (1L << 15) +#define ATTN_HARD_WIRED_MASK 0xff00 +#define ATTENTION_ID 4 + +#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \ + AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR + +#define MAX_IGU_ATTN_ACK_TO 100 + +#define STORM_ASSERT_ARRAY_SIZE 50 + +#define BNX2X_PMF_LINK_ASSERT(sc) \ + GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + SC_FUNC(sc)) + +#define BNX2X_MC_ASSERT_BITS \ + (GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \ + GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \ + GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \ + GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT)) + +#define BNX2X_MCP_ASSERT \ + GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT) + +#define BNX2X_GRC_TIMEOUT GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC) +#define BNX2X_GRC_RSV (GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC)) + +#define MULTI_MASK 0x7f + +#define PFS_PER_PORT(sc) \ + ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4) +#define SC_MAX_VN_NUM(sc) PFS_PER_PORT(sc) + +#define FIRST_ABS_FUNC_IN_PORT(sc) \ + ((CHIP_PORT_MODE(sc) == CHIP_PORT_MODE_NONE) ? 
\ + PORT_ID(sc) : (PATH_ID(sc) + (2 * PORT_ID(sc)))) + +#define FOREACH_ABS_FUNC_IN_PORT(sc, i) \ + for ((i) = FIRST_ABS_FUNC_IN_PORT(sc); \ + (i) < MAX_FUNC_NUM; \ + (i) += (MAX_FUNC_NUM / PFS_PER_PORT(sc))) + +#define BNX2X_SWCID_SHIFT 17 +#define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1) + +#define SW_CID(x) (le32toh(x) & BNX2X_SWCID_MASK) +#define CQE_CMD(x) (le32toh(x) >> COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT) + +#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE) +#define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG) +#define CQE_TYPE_STOP(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG) +#define CQE_TYPE_SLOW(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD) +#define CQE_TYPE_FAST(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH) + +/* must be used on a CID before placing it on a HW ring */ +#define HW_CID(sc, x) \ + ((SC_PORT(sc) << 23) | (SC_VN(sc) << BNX2X_SWCID_SHIFT) | (x)) + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define SPEED_10000 10000 + +#define PCI_PM_D0 1 +#define PCI_PM_D3hot 2 + +int bnx2x_test_bit(int nr, volatile unsigned long * addr); +void bnx2x_set_bit(unsigned int nr, volatile unsigned long * addr); +void bnx2x_clear_bit(int nr, volatile unsigned long * addr); +int bnx2x_test_and_clear_bit(int nr, volatile unsigned long * addr); +int bnx2x_cmpxchg(volatile int *addr, int old, int new); + +int bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, + struct bnx2x_dma *dma, const char *msg, uint32_t align); + +uint32_t bnx2x_dmae_opcode_add_comp(uint32_t opcode, uint8_t comp_type); +uint32_t bnx2x_dmae_opcode_clr_src_reset(uint32_t opcode); +uint32_t bnx2x_dmae_opcode(struct bnx2x_softc *sc, uint8_t src_type, + uint8_t dst_type, uint8_t with_comp, + uint8_t comp_type); +void bnx2x_post_dmae(struct bnx2x_softc *sc, struct dmae_command *dmae, int idx); +void bnx2x_read_dmae(struct bnx2x_softc *sc, uint32_t src_addr, uint32_t len32); +void bnx2x_write_dmae(struct bnx2x_softc *sc, phys_addr_t dma_addr, + uint32_t dst_addr, uint32_t len32); +void bnx2x_set_ctx_validation(struct bnx2x_softc *sc, struct eth_context *cxt, + uint32_t cid); +void bnx2x_update_coalesce_sb_index(struct bnx2x_softc *sc, uint8_t fw_sb_id, + uint8_t sb_index, uint8_t disable, + uint16_t usec); + +int bnx2x_sp_post(struct bnx2x_softc *sc, int command, int cid, + uint32_t data_hi, uint32_t data_lo, int cmd_type); + +void ecore_init_e1h_firmware(struct bnx2x_softc *sc); +void ecore_init_e2_firmware(struct bnx2x_softc *sc); + +void ecore_storm_memset_struct(struct bnx2x_softc *sc, uint32_t addr, + size_t size, uint32_t *data); + +#define CATC_TRIGGER(sc, data) REG_WR((sc), 0x2000, (data)); +#define CATC_TRIGGER_START(sc) CATC_TRIGGER((sc), 0xcafecafe) + +#define BNX2X_MAC_FMT "%pM" +#define BNX2X_MAC_PRN_LIST(mac) (mac) + +/***********/ +/* INLINES */ +/***********/ + +static inline uint32_t +reg_poll(struct bnx2x_softc *sc, uint32_t reg, uint32_t expected, int ms, int wait) +{ + uint32_t val; + do { + val = REG_RD(sc, reg); + if (val == expected) { + break; + } + ms -= wait; + DELAY(wait * 1000); + } while (ms > 0); + + return val; +} + +static inline void +bnx2x_update_fp_sb_idx(struct bnx2x_fastpath *fp) +{ + mb(); /* status block is written to by the chip */ + fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID]; +} + +static inline void +bnx2x_igu_ack_sb_gen(struct bnx2x_softc *sc, uint8_t segment, + uint16_t index, uint8_t op, uint8_t update, uint32_t igu_addr) +{ + struct 
igu_regular cmd_data = {0}; + + cmd_data.sb_id_and_flags = + ((index << IGU_REGULAR_SB_INDEX_SHIFT) | + (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | + (update << IGU_REGULAR_BUPDATE_SHIFT) | + (op << IGU_REGULAR_ENABLE_INT_SHIFT)); + + REG_WR(sc, igu_addr, cmd_data.sb_id_and_flags); + + /* Make sure that ACK is written */ + mb(); +} + +static inline void +bnx2x_hc_ack_sb(struct bnx2x_softc *sc, uint8_t sb_id, uint8_t storm, + uint16_t index, uint8_t op, uint8_t update) +{ + uint32_t hc_addr = (HC_REG_COMMAND_REG + SC_PORT(sc) * 32 + + COMMAND_REG_INT_ACK); + union igu_ack_register igu_ack; + + igu_ack.sb.status_block_index = index; + igu_ack.sb.sb_id_and_flags = + ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | + (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | + (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | + (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); + + REG_WR(sc, hc_addr, igu_ack.raw_data); + + /* Make sure that ACK is written */ + mb(); +} + +static inline uint32_t +bnx2x_hc_ack_int(struct bnx2x_softc *sc) +{ + uint32_t hc_addr = (HC_REG_COMMAND_REG + SC_PORT(sc) * 32 + + COMMAND_REG_SIMD_MASK); + uint32_t result = REG_RD(sc, hc_addr); + + mb(); + return result; +} + +static inline uint32_t +bnx2x_igu_ack_int(struct bnx2x_softc *sc) +{ + uint32_t igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER * 8); + uint32_t result = REG_RD(sc, igu_addr); + + /* PMD_PDEBUG_LOG(sc, DBG_INTR, "read 0x%08x from IGU addr 0x%x", + result, igu_addr); */ + + mb(); + return result; +} + +static inline uint32_t +bnx2x_ack_int(struct bnx2x_softc *sc) +{ + mb(); + if (sc->devinfo.int_block == INT_BLOCK_HC) { + return bnx2x_hc_ack_int(sc); + } else { + return bnx2x_igu_ack_int(sc); + } +} + +static inline int +func_by_vn(struct bnx2x_softc *sc, int vn) +{ + return 2 * vn + SC_PORT(sc); +} + +/* + * send notification to other functions. + */ +static inline void +bnx2x_link_sync_notify(struct bnx2x_softc *sc) +{ + int func, vn; + + /* Set the attention towards other drivers on the same port */ + for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { + if (vn == SC_VN(sc)) + continue; + + func = func_by_vn(sc, vn); + REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_0 + + (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func) * 4, 1); + } +} + +/* + * Statistics ID are global per chip/path, while Client IDs for E1x + * are per port. 
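+ * For E1x the per-port client ID is therefore offset by the port index times FP_SB_MAX_E1x to form a chip-wide statistics ID; later chips can use the client ID directly.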
+ */ +static inline uint8_t +bnx2x_stats_id(struct bnx2x_fastpath *fp) +{ + struct bnx2x_softc *sc = fp->sc; + + if (!CHIP_IS_E1x(sc)) { + return fp->cl_id; + } + + return fp->cl_id + SC_PORT(sc) * FP_SB_MAX_E1x; +} + +int bnx2x_init(struct bnx2x_softc *sc); +void bnx2x_load_firmware(struct bnx2x_softc *sc); +int bnx2x_attach(struct bnx2x_softc *sc); +int bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link); +int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc); +int bnx2x_alloc_ilt_mem(struct bnx2x_softc *sc); +void bnx2x_free_ilt_mem(struct bnx2x_softc *sc); +void bnx2x_dump_tx_chain(struct bnx2x_fastpath * fp, int bd_prod, int count); +int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf **m_head, int m_pkts); +uint8_t bnx2x_txeof(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp); +void bnx2x_print_adapter_info(struct bnx2x_softc *sc); +int bnx2x_intr_legacy(struct bnx2x_softc *sc, int scan_fp); +void bnx2x_link_status_update(struct bnx2x_softc *sc); +int bnx2x_complete_sp(struct bnx2x_softc *sc); +int bnx2x_set_storm_rx_mode(struct bnx2x_softc *sc); +void bnx2x_periodic_callout(struct bnx2x_softc *sc); + +int bnx2x_vf_get_resources(struct bnx2x_softc *sc, uint8_t tx_count, uint8_t rx_count); +void bnx2x_vf_close(struct bnx2x_softc *sc); +int bnx2x_vf_init(struct bnx2x_softc *sc); +void bnx2x_vf_unload(struct bnx2x_softc *sc); +int bnx2x_vf_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, + int leading); +void bnx2x_free_hsi_mem(struct bnx2x_softc *sc); +int bnx2x_vf_set_rx_mode(struct bnx2x_softc *sc); +int bnx2x_fill_accept_flags(struct bnx2x_softc *sc, uint32_t rx_mode, + unsigned long *rx_accept_flags, unsigned long *tx_accept_flags); +int bnx2x_check_bull(struct bnx2x_softc *sc); + +//#define BNX2X_PULSE + +#define BNX2X_PCI_CAP 1 +#define BNX2X_PCI_ECAP 2 + +static inline struct bnx2x_pci_cap* +pci_find_cap(struct bnx2x_softc *sc, uint8_t id, uint8_t type) +{ + struct bnx2x_pci_cap *cap = sc->pci_caps; + + while (cap) { + if (cap->id == id && cap->type == type) + return cap; + cap = cap->next; + } + + return NULL; +} + +static inline int is_valid_ether_addr(uint8_t *addr) +{ + if (!(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5])) + return 0; + else + return 1; +} + +static inline void +bnx2x_set_rx_mode(struct bnx2x_softc *sc) +{ + if (sc->state == BNX2X_STATE_OPEN) { + if (IS_PF(sc)) { + bnx2x_set_storm_rx_mode(sc); + } else { + sc->rx_mode = BNX2X_RX_MODE_PROMISC; + bnx2x_vf_set_rx_mode(sc); + } + } else { + PMD_DRV_LOG(NOTICE, "Card is not ready to change mode"); + } +} + +static inline int pci_read(struct bnx2x_softc *sc, size_t addr, + void *val, uint8_t size) +{ + if (rte_eal_pci_read_config(sc->pci_dev, val, size, addr) <= 0) { + PMD_DRV_LOG(ERR, "Can't read from PCI config space"); + return ENXIO; + } + + return 0; +} + +static inline int pci_write_word(struct bnx2x_softc *sc, size_t addr, off_t val) +{ + uint16_t val16 = val; + + if (rte_eal_pci_write_config(sc->pci_dev, &val16, + sizeof(val16), addr) <= 0) { + PMD_DRV_LOG(ERR, "Can't write to PCI config space"); + return ENXIO; + } + + return 0; +} + +static inline int pci_write_long(struct bnx2x_softc *sc, size_t addr, off_t val) +{ + uint32_t val32 = val; + if (rte_eal_pci_write_config(sc->pci_dev, &val32, + sizeof(val32), addr) <= 0) { + PMD_DRV_LOG(ERR, "Can't write to PCI config space"); + return ENXIO; + } + + return 0; +} + +#endif /* __BNX2X_H__ */ diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c new file mode 100644 
index 00000000..071b44fe --- /dev/null +++ b/drivers/net/bnx2x/bnx2x_ethdev.c @@ -0,0 +1,554 @@ +/* + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * + * Copyright (c) 2015 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.bnx2x_pmd for copyright and licensing details. + */ + +#include "bnx2x.h" +#include "bnx2x_rxtx.h" + +#include <rte_dev.h> + +/* + * The set of PCI devices this driver supports + */ +static struct rte_pci_id pci_id_bnx2x_map[] = { +#define RTE_PCI_DEV_ID_DECL_BNX2X(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, +#include "rte_pci_dev_ids.h" + { .vendor_id = 0, } +}; + +static struct rte_pci_id pci_id_bnx2xvf_map[] = { +#define RTE_PCI_DEV_ID_DECL_BNX2XVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, +#include "rte_pci_dev_ids.h" + { .vendor_id = 0, } +}; + +static void +bnx2x_link_update(struct rte_eth_dev *dev) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + bnx2x_link_status_update(sc); + mb(); + dev->data->dev_link.link_speed = sc->link_vars.line_speed; + switch (sc->link_vars.duplex) { + case DUPLEX_FULL: + dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; + break; + case DUPLEX_HALF: + dev->data->dev_link.link_duplex = ETH_LINK_HALF_DUPLEX; + break; + } + dev->data->dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_FIXED); + dev->data->dev_link.link_status = sc->link_vars.link_up; +} + +static void +bnx2x_interrupt_action(struct rte_eth_dev *dev) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + uint32_t link_status; + + PMD_DEBUG_PERIODIC_LOG(INFO, "Interrupt handled"); + + if (bnx2x_intr_legacy(sc, 0)) + DELAY_MS(250); + if (sc->periodic_flags & PERIODIC_GO) + bnx2x_periodic_callout(sc); + link_status = REG_RD(sc, sc->link_params.shmem_base + + offsetof(struct shmem_region, + port_mb[sc->link_params.port].link_status)); + if ((link_status & LINK_STATUS_LINK_UP) != dev->data->dev_link.link_status) + bnx2x_link_update(dev); +} + +static __rte_unused void +bnx2x_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + bnx2x_interrupt_action(dev); + rte_intr_enable(&(dev->pci_dev->intr_handle)); +} + +/* + * Devops - helper functions can be called from user application + */ + +static int +bnx2x_dev_configure(struct rte_eth_dev *dev) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF); + + PMD_INIT_FUNC_TRACE(); + + if (dev->data->dev_conf.rxmode.jumbo_frame) + sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len; + + if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) { + PMD_DRV_LOG(ERR, "The number of TX queues is greater than number of RX queues"); + return -EINVAL; + } + + sc->num_queues = MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues); + if (sc->num_queues > mp_ncpus) { + PMD_DRV_LOG(ERR, "The number of queues is more than number of CPUs"); + return -EINVAL; + } + + PMD_DRV_LOG(DEBUG, "num_queues=%d, mtu=%d", + sc->num_queues, sc->mtu); + + /* allocate ilt */ + if (bnx2x_alloc_ilt_mem(sc) != 0) { + PMD_DRV_LOG(ERR, "bnx2x_alloc_ilt_mem was failed"); + return -ENXIO; + } + + /* allocate the host hardware/software hsi structures */ + if (bnx2x_alloc_hsi_mem(sc) != 0) { + PMD_DRV_LOG(ERR, "bnx2x_alloc_hsi_mem was failed"); + bnx2x_free_ilt_mem(sc); + return -ENXIO; + } + + return 0; +} + +static int +bnx2x_dev_start(struct rte_eth_dev *dev) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + int ret = 0; + + 
PMD_INIT_FUNC_TRACE(); + + ret = bnx2x_init(sc); + if (ret) { + PMD_DRV_LOG(DEBUG, "bnx2x_init failed (%d)", ret); + return -1; + } + + if (IS_PF(sc)) { + rte_intr_callback_register(&(dev->pci_dev->intr_handle), + bnx2x_interrupt_handler, (void *)dev); + + if(rte_intr_enable(&(dev->pci_dev->intr_handle))) + PMD_DRV_LOG(ERR, "rte_intr_enable failed"); + } + + ret = bnx2x_dev_rx_init(dev); + if (ret != 0) { + PMD_DRV_LOG(DEBUG, "bnx2x_dev_rx_init returned error code"); + return -3; + } + + /* Print important adapter info for the user. */ + bnx2x_print_adapter_info(sc); + + DELAY_MS(2500); + + return ret; +} + +static void +bnx2x_dev_stop(struct rte_eth_dev *dev) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + int ret = 0; + + PMD_INIT_FUNC_TRACE(); + + if (IS_PF(sc)) { + rte_intr_disable(&(dev->pci_dev->intr_handle)); + rte_intr_callback_unregister(&(dev->pci_dev->intr_handle), + bnx2x_interrupt_handler, (void *)dev); + } + + ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE); + if (ret) { + PMD_DRV_LOG(DEBUG, "bnx2x_nic_unload failed (%d)", ret); + return; + } + + return; +} + +static void +bnx2x_dev_close(struct rte_eth_dev *dev) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + if (IS_VF(sc)) + bnx2x_vf_close(sc); + + bnx2x_dev_clear_queues(dev); + memset(&(dev->data->dev_link), 0 , sizeof(struct rte_eth_link)); + + /* free the host hardware/software hsi structures */ + bnx2x_free_hsi_mem(sc); + + /* free ilt */ + bnx2x_free_ilt_mem(sc); +} + +static void +bnx2x_promisc_enable(struct rte_eth_dev *dev) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + sc->rx_mode = BNX2X_RX_MODE_PROMISC; + bnx2x_set_rx_mode(sc); +} + +static void +bnx2x_promisc_disable(struct rte_eth_dev *dev) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + sc->rx_mode = BNX2X_RX_MODE_NORMAL; + bnx2x_set_rx_mode(sc); +} + +static void +bnx2x_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + sc->rx_mode = BNX2X_RX_MODE_ALLMULTI; + bnx2x_set_rx_mode(sc); +} + +static void +bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + sc->rx_mode = BNX2X_RX_MODE_NORMAL; + bnx2x_set_rx_mode(sc); +} + +static int +bnx2x_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) +{ + PMD_INIT_FUNC_TRACE(); + + int old_link_status = dev->data->dev_link.link_status; + + bnx2x_link_update(dev); + + return old_link_status == dev->data->dev_link.link_status ? -1 : 0; +} + +static int +bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) +{ + int old_link_status = dev->data->dev_link.link_status; + struct bnx2x_softc *sc = dev->data->dev_private; + + bnx2x_link_update(dev); + + bnx2x_check_bull(sc); + if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) { + PMD_DRV_LOG(ERR, "PF indicated channel is down." + "VF device is no longer operational"); + dev->data->dev_link.link_status = ETH_LINK_DOWN; + } + + return old_link_status == dev->data->dev_link.link_status ? 
-1 : 0; +} + +static void +bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + bnx2x_stats_handle(sc, STATS_EVENT_UPDATE); + + memset(stats, 0, sizeof (struct rte_eth_stats)); + + stats->ipackets = + HILO_U64(sc->eth_stats.total_unicast_packets_received_hi, + sc->eth_stats.total_unicast_packets_received_lo) + + HILO_U64(sc->eth_stats.total_multicast_packets_received_hi, + sc->eth_stats.total_multicast_packets_received_lo) + + HILO_U64(sc->eth_stats.total_broadcast_packets_received_hi, + sc->eth_stats.total_broadcast_packets_received_lo); + + stats->opackets = + HILO_U64(sc->eth_stats.total_unicast_packets_transmitted_hi, + sc->eth_stats.total_unicast_packets_transmitted_lo) + + HILO_U64(sc->eth_stats.total_multicast_packets_transmitted_hi, + sc->eth_stats.total_multicast_packets_transmitted_lo) + + HILO_U64(sc->eth_stats.total_broadcast_packets_transmitted_hi, + sc->eth_stats.total_broadcast_packets_transmitted_lo); + + stats->ibytes = + HILO_U64(sc->eth_stats.total_bytes_received_hi, + sc->eth_stats.total_bytes_received_lo); + + stats->obytes = + HILO_U64(sc->eth_stats.total_bytes_transmitted_hi, + sc->eth_stats.total_bytes_transmitted_lo); + + stats->ierrors = + HILO_U64(sc->eth_stats.error_bytes_received_hi, + sc->eth_stats.error_bytes_received_lo); + + stats->oerrors = 0; + + stats->rx_nombuf = + HILO_U64(sc->eth_stats.no_buff_discard_hi, + sc->eth_stats.no_buff_discard_lo); +} + +static void +bnx2x_dev_infos_get(struct rte_eth_dev *dev, __rte_unused struct rte_eth_dev_info *dev_info) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + dev_info->max_rx_queues = sc->max_rx_queues; + dev_info->max_tx_queues = sc->max_tx_queues; + dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE; + dev_info->max_rx_pktlen = BNX2X_MAX_RX_PKT_LEN; + dev_info->max_mac_addrs = BNX2X_MAX_MAC_ADDRS; + dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G; +} + +static void +bnx2x_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + uint32_t index, uint32_t pool) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + + if (sc->mac_ops.mac_addr_add) + sc->mac_ops.mac_addr_add(dev, mac_addr, index, pool); +} + +static void +bnx2x_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + + if (sc->mac_ops.mac_addr_remove) + sc->mac_ops.mac_addr_remove(dev, index); +} + +static const struct eth_dev_ops bnx2x_eth_dev_ops = { + .dev_configure = bnx2x_dev_configure, + .dev_start = bnx2x_dev_start, + .dev_stop = bnx2x_dev_stop, + .dev_close = bnx2x_dev_close, + .promiscuous_enable = bnx2x_promisc_enable, + .promiscuous_disable = bnx2x_promisc_disable, + .allmulticast_enable = bnx2x_dev_allmulticast_enable, + .allmulticast_disable = bnx2x_dev_allmulticast_disable, + .link_update = bnx2x_dev_link_update, + .stats_get = bnx2x_dev_stats_get, + .dev_infos_get = bnx2x_dev_infos_get, + .rx_queue_setup = bnx2x_dev_rx_queue_setup, + .rx_queue_release = bnx2x_dev_rx_queue_release, + .tx_queue_setup = bnx2x_dev_tx_queue_setup, + .tx_queue_release = bnx2x_dev_tx_queue_release, + .mac_addr_add = bnx2x_mac_addr_add, + .mac_addr_remove = bnx2x_mac_addr_remove, +}; + +/* + * dev_ops for virtual function + */ +static const struct eth_dev_ops bnx2xvf_eth_dev_ops = { + .dev_configure = bnx2x_dev_configure, + .dev_start = bnx2x_dev_start, + .dev_stop = bnx2x_dev_stop, + .dev_close = bnx2x_dev_close, + .promiscuous_enable = bnx2x_promisc_enable, + 
.promiscuous_disable = bnx2x_promisc_disable, + .allmulticast_enable = bnx2x_dev_allmulticast_enable, + .allmulticast_disable = bnx2x_dev_allmulticast_disable, + .link_update = bnx2xvf_dev_link_update, + .stats_get = bnx2x_dev_stats_get, + .dev_infos_get = bnx2x_dev_infos_get, + .rx_queue_setup = bnx2x_dev_rx_queue_setup, + .rx_queue_release = bnx2x_dev_rx_queue_release, + .tx_queue_setup = bnx2x_dev_tx_queue_setup, + .tx_queue_release = bnx2x_dev_tx_queue_release, + .mac_addr_add = bnx2x_mac_addr_add, + .mac_addr_remove = bnx2x_mac_addr_remove, +}; + + +static int +bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf) +{ + int ret = 0; + struct rte_pci_device *pci_dev; + struct bnx2x_softc *sc; + + PMD_INIT_FUNC_TRACE(); + + eth_dev->dev_ops = is_vf ? &bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops; + pci_dev = eth_dev->pci_dev; + + rte_eth_copy_pci_info(eth_dev, pci_dev); + + sc = eth_dev->data->dev_private; + sc->pcie_bus = pci_dev->addr.bus; + sc->pcie_device = pci_dev->addr.devid; + + if (is_vf) + sc->flags = BNX2X_IS_VF_FLAG; + + sc->devinfo.vendor_id = pci_dev->id.vendor_id; + sc->devinfo.device_id = pci_dev->id.device_id; + sc->devinfo.subvendor_id = pci_dev->id.subsystem_vendor_id; + sc->devinfo.subdevice_id = pci_dev->id.subsystem_device_id; + + sc->pcie_func = pci_dev->addr.function; + sc->bar[BAR0].base_addr = (void *)pci_dev->mem_resource[0].addr; + if (is_vf) + sc->bar[BAR1].base_addr = (void *) + ((uintptr_t)pci_dev->mem_resource[0].addr + PXP_VF_ADDR_DB_START); + else + sc->bar[BAR1].base_addr = pci_dev->mem_resource[2].addr; + + assert(sc->bar[BAR0].base_addr); + assert(sc->bar[BAR1].base_addr); + + bnx2x_load_firmware(sc); + assert(sc->firmware); + + if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) + sc->udp_rss = 1; + + sc->rx_budget = BNX2X_RX_BUDGET; + sc->hc_rx_ticks = BNX2X_RX_TICKS; + sc->hc_tx_ticks = BNX2X_TX_TICKS; + + sc->interrupt_mode = INTR_MODE_SINGLE_MSIX; + sc->rx_mode = BNX2X_RX_MODE_NORMAL; + + sc->pci_dev = pci_dev; + ret = bnx2x_attach(sc); + if (ret) { + PMD_DRV_LOG(ERR, "bnx2x_attach failed (%d)", ret); + return ret; + } + + eth_dev->data->mac_addrs = (struct ether_addr *)sc->link_params.mac_addr; + + PMD_DRV_LOG(INFO, "pcie_bus=%d, pcie_device=%d", + sc->pcie_bus, sc->pcie_device); + PMD_DRV_LOG(INFO, "bar0.addr=%p, bar1.addr=%p", + sc->bar[BAR0].base_addr, sc->bar[BAR1].base_addr); + PMD_DRV_LOG(INFO, "port=%d, path=%d, vnic=%d, func=%d", + PORT_ID(sc), PATH_ID(sc), VNIC_ID(sc), FUNC_ID(sc)); + PMD_DRV_LOG(INFO, "portID=%d vendorID=0x%x deviceID=0x%x", + eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id); + + if (IS_VF(sc)) { + if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg), + &sc->vf2pf_mbox_mapping, "vf2pf_mbox", + RTE_CACHE_LINE_SIZE) != 0) + return -ENOMEM; + + sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *) + sc->vf2pf_mbox_mapping.vaddr; + + if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin), + &sc->pf2vf_bulletin_mapping, "vf2pf_bull", + RTE_CACHE_LINE_SIZE) != 0) + return -ENOMEM; + + sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *) + sc->pf2vf_bulletin_mapping.vaddr; + + ret = bnx2x_vf_get_resources(sc, sc->max_tx_queues, + sc->max_rx_queues); + if (ret) + return ret; + } + + return 0; +} + +static int +eth_bnx2x_dev_init(struct rte_eth_dev *eth_dev) +{ + PMD_INIT_FUNC_TRACE(); + return bnx2x_common_dev_init(eth_dev, 0); +} + +static int +eth_bnx2xvf_dev_init(struct rte_eth_dev *eth_dev) +{ + PMD_INIT_FUNC_TRACE(); + return bnx2x_common_dev_init(eth_dev, 1); +} + +static 
struct eth_driver rte_bnx2x_pmd = { + .pci_drv = { + .name = "rte_bnx2x_pmd", + .id_table = pci_id_bnx2x_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + }, + .eth_dev_init = eth_bnx2x_dev_init, + .dev_private_size = sizeof(struct bnx2x_softc), +}; + +/* + * virtual function driver struct + */ +static struct eth_driver rte_bnx2xvf_pmd = { + .pci_drv = { + .name = "rte_bnx2xvf_pmd", + .id_table = pci_id_bnx2xvf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + }, + .eth_dev_init = eth_bnx2xvf_dev_init, + .dev_private_size = sizeof(struct bnx2x_softc), +}; + +static int rte_bnx2x_pmd_init(const char *name __rte_unused, const char *params __rte_unused) +{ + PMD_INIT_FUNC_TRACE(); + rte_eth_driver_register(&rte_bnx2x_pmd); + + return 0; +} + +static int rte_bnx2xvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused) +{ + PMD_INIT_FUNC_TRACE(); + rte_eth_driver_register(&rte_bnx2xvf_pmd); + + return 0; +} + +static struct rte_driver rte_bnx2x_driver = { + .type = PMD_PDEV, + .init = rte_bnx2x_pmd_init, +}; + +static struct rte_driver rte_bnx2xvf_driver = { + .type = PMD_PDEV, + .init = rte_bnx2xvf_pmd_init, +}; + +PMD_REGISTER_DRIVER(rte_bnx2x_driver); +PMD_REGISTER_DRIVER(rte_bnx2xvf_driver); diff --git a/drivers/net/bnx2x/bnx2x_ethdev.h b/drivers/net/bnx2x/bnx2x_ethdev.h new file mode 100644 index 00000000..a9da9de8 --- /dev/null +++ b/drivers/net/bnx2x/bnx2x_ethdev.h @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * + * Copyright (c) 2015 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.bnx2x_pmd for copyright and licensing details. + */ + +#ifndef PMD_BNX2X_ETHDEV_H +#define PMD_BNX2X_ETHDEV_H + +#include <sys/queue.h> +#include <sys/param.h> +#include <sys/user.h> +#include <sys/stat.h> + +#include <stdio.h> +#include <stdlib.h> +#include <errno.h> +#include <stdint.h> +#include <string.h> +#include <unistd.h> +#include <stdarg.h> +#include <inttypes.h> +#include <assert.h> + +#include <rte_byteorder.h> +#include <rte_common.h> +#include <rte_cycles.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_pci.h> +#include <rte_malloc.h> +#include <rte_ethdev.h> +#include <rte_spinlock.h> +#include <rte_memzone.h> +#include <rte_eal.h> +#include <rte_mempool.h> +#include <rte_mbuf.h> + +#include "bnx2x_rxtx.h" +#include "bnx2x_logs.h" + +#define DELAY(x) rte_delay_us(x) +#define DELAY_MS(x) rte_delay_ms(x) +#define usec_delay(x) DELAY(x) +#define msec_delay(x) DELAY(1000*(x)) + +#define FALSE 0 +#define TRUE 1 + +#define false 0 +#define true 1 +#define min(a,b) RTE_MIN(a,b) + +#define mb() rte_mb() +#define wmb() rte_wmb() +#define rmb() rte_rmb() + + +#define MAX_QUEUES sysconf(_SC_NPROCESSORS_CONF) + +#define BNX2X_MIN_RX_BUF_SIZE 1024 +#define BNX2X_MAX_RX_PKT_LEN 15872 +#define BNX2X_MAX_MAC_ADDRS 1 + +/* Hardware RX tick timer (usecs) */ +#define BNX2X_RX_TICKS 25 +/* Hardware TX tick timer (usecs) */ +#define BNX2X_TX_TICKS 50 +/* Maximum number of Rx packets to process at a time */ +#define BNX2X_RX_BUDGET 0xffffffff + +#endif + +/* MAC address operations */ +struct bnx2x_mac_ops { + void (*mac_addr_add)(struct rte_eth_dev *dev, struct ether_addr *addr, + uint16_t index, uint32_t pool); /* not implemented yet */ + void (*mac_addr_remove)(struct rte_eth_dev *dev, uint16_t index); /* not implemented yet */ +}; diff --git a/drivers/net/bnx2x/bnx2x_logs.h b/drivers/net/bnx2x/bnx2x_logs.h new file mode 100644 index 00000000..dff014d7 --- /dev/null +++ 
b/drivers/net/bnx2x/bnx2x_logs.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * + * Copyright (c) 2015 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.bnx2x_pmd for copyright and licensing details. + */ + +#ifndef _PMD_LOGS_H_ +#define _PMD_LOGS_H_ + +#define PMD_INIT_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args) + +#ifdef RTE_LIBRTE_BNX2X_DEBUG_INIT +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") +#else +#define PMD_INIT_FUNC_TRACE() do { } while(0) +#endif + +#ifdef RTE_LIBRTE_BNX2X_DEBUG_RX +#define PMD_RX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_BNX2X_DEBUG_TX +#define PMD_TX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_BNX2X_DEBUG_TX_FREE +#define PMD_TX_FREE_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_BNX2X_DEBUG +#define PMD_DRV_LOG_RAW(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args) +#else +#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0) +#endif + +#define PMD_DRV_LOG(level, fmt, args...) \ + PMD_DRV_LOG_RAW(level, fmt "\n", ## args) + +#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC +#define PMD_DEBUG_PERIODIC_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_DEBUG_PERIODIC_LOG(level, fmt, args...) do { } while(0) +#endif + + +#endif /* _PMD_LOGS_H_ */ diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c new file mode 100644 index 00000000..752a5e81 --- /dev/null +++ b/drivers/net/bnx2x/bnx2x_rxtx.c @@ -0,0 +1,497 @@ +/* + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * + * Copyright (c) 2015 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.bnx2x_pmd for copyright and licensing details. 
+ */ + +#include "bnx2x.h" +#include "bnx2x_rxtx.h" + +static inline struct rte_mbuf * +bnx2x_rxmbuf_alloc(struct rte_mempool *mp) +{ + struct rte_mbuf *m; + + m = __rte_mbuf_raw_alloc(mp); + __rte_mbuf_sanity_check(m, 0); + + return m; +} + +static const struct rte_memzone * +ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name, + uint16_t queue_id, uint32_t ring_size, int socket_id) +{ + char z_name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *mz; + + snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d", + dev->driver->pci_drv.name, ring_name, dev->data->port_id, queue_id); + + mz = rte_memzone_lookup(z_name); + if (mz) + return mz; + + return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, 0, BNX2X_PAGE_SIZE); +} + +static void +bnx2x_rx_queue_release(struct bnx2x_rx_queue *rx_queue) +{ + uint16_t i; + struct rte_mbuf **sw_ring; + + if (NULL != rx_queue) { + + sw_ring = rx_queue->sw_ring; + if (NULL != sw_ring) { + for (i = 0; i < rx_queue->nb_rx_desc; i++) { + if (NULL != sw_ring[i]) + rte_pktmbuf_free(sw_ring[i]); + } + rte_free(sw_ring); + } + rte_free(rx_queue); + } +} + +void +bnx2x_dev_rx_queue_release(void *rxq) +{ + bnx2x_rx_queue_release(rxq); +} + +int +bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + uint16_t j, idx; + const struct rte_memzone *dma; + struct bnx2x_rx_queue *rxq; + uint32_t dma_size; + struct rte_mbuf *mbuf; + struct bnx2x_softc *sc = dev->data->dev_private; + struct bnx2x_fastpath *fp = &sc->fp[queue_idx]; + struct eth_rx_cqe_next_page *nextpg; + phys_addr_t *rx_bd; + phys_addr_t busaddr; + + /* First allocate the rx queue data structure */ + rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct bnx2x_rx_queue), + RTE_CACHE_LINE_SIZE, socket_id); + if (NULL == rxq) { + PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!"); + return -ENOMEM; + } + rxq->sc = sc; + rxq->mb_pool = mp; + rxq->queue_id = queue_idx; + rxq->port_id = dev->data->port_id; + rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 : ETHER_CRC_LEN); + + rxq->nb_rx_pages = 1; + while (USABLE_RX_BD(rxq) < nb_desc) + rxq->nb_rx_pages <<= 1; + + rxq->nb_rx_desc = TOTAL_RX_BD(rxq); + sc->rx_ring_size = USABLE_RX_BD(rxq); + rxq->nb_cq_pages = RCQ_BD_PAGES(rxq); + + rxq->rx_free_thresh = rx_conf->rx_free_thresh ? + rx_conf->rx_free_thresh : DEFAULT_RX_FREE_THRESH; + + PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, " + "total_bd=%lu, rx_pages=%u, cq_pages=%u", + queue_idx, nb_desc, rxq->rx_free_thresh, + (unsigned long)USABLE_RX_BD(rxq), + (unsigned long)TOTAL_RX_BD(rxq), rxq->nb_rx_pages, + rxq->nb_cq_pages); + + /* Allocate RX ring hardware descriptors */ + dma_size = rxq->nb_rx_desc * sizeof(struct eth_rx_bd); + dma = ring_dma_zone_reserve(dev, "hw_ring", queue_idx, dma_size, socket_id); + if (NULL == dma) { + PMD_RX_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed!"); + bnx2x_rx_queue_release(rxq); + return -ENOMEM; + } + fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->phys_addr; + rxq->rx_ring = (uint64_t*)dma->addr; + memset((void *)rxq->rx_ring, 0, dma_size); + + /* Link the RX chain pages. 
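Each page's tail BD slot is written with the DMA address of the next page, and the final page wraps back to the first so the chain forms a closed ring.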
*/ + for (j = 1; j <= rxq->nb_rx_pages; j++) { + rx_bd = &rxq->rx_ring[TOTAL_RX_BD_PER_PAGE * j - 2]; + busaddr = rxq->rx_ring_phys_addr + BNX2X_PAGE_SIZE * (j % rxq->nb_rx_pages); + *rx_bd = busaddr; + } + + /* Allocate software ring */ + dma_size = rxq->nb_rx_desc * sizeof(struct bnx2x_rx_entry); + rxq->sw_ring = rte_zmalloc_socket("sw_ring", dma_size, + RTE_CACHE_LINE_SIZE, + socket_id); + if (NULL == rxq->sw_ring) { + PMD_RX_LOG(ERR, "rte_zmalloc for sw_ring failed!"); + bnx2x_rx_queue_release(rxq); + return -ENOMEM; + } + + /* Initialize software ring entries */ + rxq->rx_mbuf_alloc = 0; + for (idx = 0; idx < rxq->nb_rx_desc; idx = NEXT_RX_BD(idx)) { + mbuf = bnx2x_rxmbuf_alloc(mp); + if (NULL == mbuf) { + PMD_RX_LOG(ERR, "RX mbuf alloc failed queue_id=%u, idx=%d", + (unsigned)rxq->queue_id, idx); + bnx2x_rx_queue_release(rxq); + return -ENOMEM; + } + rxq->sw_ring[idx] = mbuf; + rxq->rx_ring[idx] = mbuf->buf_physaddr; + rxq->rx_mbuf_alloc++; + } + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; + rxq->rx_bd_head = 0; + rxq->rx_bd_tail = rxq->nb_rx_desc; + + /* Allocate CQ chain. */ + dma_size = BNX2X_RX_CHAIN_PAGE_SZ * rxq->nb_cq_pages; + dma = ring_dma_zone_reserve(dev, "bnx2x_rcq", queue_idx, dma_size, socket_id); + if (NULL == dma) { + PMD_RX_LOG(ERR, "RCQ alloc failed"); + return -ENOMEM; + } + fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->phys_addr; + rxq->cq_ring = (union eth_rx_cqe*)dma->addr; + + /* Link the CQ chain pages. */ + for (j = 1; j <= rxq->nb_cq_pages; j++) { + nextpg = &rxq->cq_ring[TOTAL_RCQ_ENTRIES_PER_PAGE * j - 1].next_page_cqe; + busaddr = rxq->cq_ring_phys_addr + BNX2X_PAGE_SIZE * (j % rxq->nb_cq_pages); + nextpg->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr)); + nextpg->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr)); + } + rxq->rx_cq_head = 0; + rxq->rx_cq_tail = TOTAL_RCQ_ENTRIES(rxq); + + dev->data->rx_queues[queue_idx] = rxq; + if (!sc->rx_queues) sc->rx_queues = dev->data->rx_queues; + + return 0; +} + +static void +bnx2x_tx_queue_release(struct bnx2x_tx_queue *tx_queue) +{ + uint16_t i; + struct rte_mbuf **sw_ring; + + if (NULL != tx_queue) { + + sw_ring = tx_queue->sw_ring; + if (NULL != sw_ring) { + for (i = 0; i < tx_queue->nb_tx_desc; i++) { + if (NULL != sw_ring[i]) + rte_pktmbuf_free(sw_ring[i]); + } + rte_free(sw_ring); + } + rte_free(tx_queue); + } +} + +void +bnx2x_dev_tx_queue_release(void *txq) +{ + bnx2x_tx_queue_release(txq); +} + +static uint16_t +bnx2x_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct bnx2x_tx_queue *txq; + struct bnx2x_softc *sc; + struct bnx2x_fastpath *fp; + uint32_t burst, nb_tx; + struct rte_mbuf **m = tx_pkts; + int ret; + + txq = p_txq; + sc = txq->sc; + fp = &sc->fp[txq->queue_id]; + + nb_tx = nb_pkts; + + do { + burst = RTE_MIN(nb_pkts, RTE_PMD_BNX2X_TX_MAX_BURST); + + ret = bnx2x_tx_encap(txq, m, burst); + if (unlikely(ret)) { + PMD_TX_LOG(ERR, "tx_encap failed!"); + } + + bnx2x_update_fp_sb_idx(fp); + + if ((txq->nb_tx_desc - txq->nb_tx_avail) > txq->tx_free_thresh) { + bnx2x_txeof(sc, fp); + } + + if (unlikely(ret == -ENOMEM)) { + break; + } + + m += burst; + nb_pkts -= burst; + + } while (nb_pkts); + + return nb_tx - nb_pkts; +} + +int +bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + uint16_t i; + unsigned int tsize; + const struct rte_memzone *tz; + struct bnx2x_tx_queue *txq; + struct eth_tx_next_bd *tx_n_bd; + uint64_t busaddr; + struct 
bnx2x_softc *sc = dev->data->dev_private; + struct bnx2x_fastpath *fp = &sc->fp[queue_idx]; + + /* First allocate the tx queue data structure */ + txq = rte_zmalloc("ethdev TX queue", sizeof(struct bnx2x_tx_queue), + RTE_CACHE_LINE_SIZE); + if (txq == NULL) + return -ENOMEM; + txq->sc = sc; + + txq->nb_tx_pages = 1; + while (USABLE_TX_BD(txq) < nb_desc) + txq->nb_tx_pages <<= 1; + + txq->nb_tx_desc = TOTAL_TX_BD(txq); + sc->tx_ring_size = TOTAL_TX_BD(txq); + + txq->tx_free_thresh = tx_conf->tx_free_thresh ? + tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH; + + PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, " + "total_bd=%lu, tx_pages=%u", + queue_idx, nb_desc, txq->tx_free_thresh, + (unsigned long)USABLE_TX_BD(txq), + (unsigned long)TOTAL_TX_BD(txq), txq->nb_tx_pages); + + /* Allocate TX ring hardware descriptors */ + tsize = txq->nb_tx_desc * sizeof(union eth_tx_bd_types); + tz = ring_dma_zone_reserve(dev, "tx_hw_ring", queue_idx, tsize, socket_id); + if (tz == NULL) { + bnx2x_tx_queue_release(txq); + return -ENOMEM; + } + fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr; + txq->tx_ring = (union eth_tx_bd_types *) tz->addr; + memset(txq->tx_ring, 0, tsize); + + /* Allocate software ring */ + tsize = txq->nb_tx_desc * sizeof(struct rte_mbuf *); + txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize, + RTE_CACHE_LINE_SIZE); + if (txq->sw_ring == NULL) { + bnx2x_tx_queue_release(txq); + return -ENOMEM; + } + + /* PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64, + txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); */ + + /* Link TX pages */ + for (i = 1; i <= txq->nb_tx_pages; i++) { + tx_n_bd = &txq->tx_ring[TOTAL_TX_BD_PER_PAGE * i - 1].next_bd; + busaddr = txq->tx_ring_phys_addr + BNX2X_PAGE_SIZE * (i % txq->nb_tx_pages); + tx_n_bd->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr)); + tx_n_bd->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr)); + /* PMD_DRV_LOG(DEBUG, "link tx page %lu", (TOTAL_TX_BD_PER_PAGE * i - 1)); */ + } + + txq->queue_id = queue_idx; + txq->port_id = dev->data->port_id; + txq->tx_pkt_tail = 0; + txq->tx_pkt_head = 0; + txq->tx_bd_tail = 0; + txq->tx_bd_head = 0; + txq->nb_tx_avail = txq->nb_tx_desc; + dev->tx_pkt_burst = bnx2x_xmit_pkts; + dev->data->tx_queues[queue_idx] = txq; + if (!sc->tx_queues) sc->tx_queues = dev->data->tx_queues; + + return 0; +} + +static inline void +bnx2x_upd_rx_prod_fast(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, + uint16_t rx_bd_prod, uint16_t rx_cq_prod) +{ + union ustorm_eth_rx_producers rx_prods; + + rx_prods.prod.bd_prod = rx_bd_prod; + rx_prods.prod.cqe_prod = rx_cq_prod; + + REG_WR(sc, fp->ustorm_rx_prods_offset, rx_prods.raw_data[0]); +} + +static uint16_t +bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct bnx2x_rx_queue *rxq = p_rxq; + struct bnx2x_softc *sc = rxq->sc; + struct bnx2x_fastpath *fp = &sc->fp[rxq->queue_id]; + uint32_t nb_rx = 0; + uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod; + uint16_t bd_cons, bd_prod; + struct rte_mbuf *new_mb; + uint16_t rx_pref; + struct eth_fast_path_rx_cqe *cqe_fp; + uint16_t len, pad; + struct rte_mbuf *rx_mb = NULL; + + hw_cq_cons = le16toh(*fp->rx_cq_cons_sb); + if ((hw_cq_cons & USABLE_RCQ_ENTRIES_PER_PAGE) == + USABLE_RCQ_ENTRIES_PER_PAGE) { + ++hw_cq_cons; + } + + bd_cons = rxq->rx_bd_head; + bd_prod = rxq->rx_bd_tail; + sw_cq_cons = rxq->rx_cq_head; + sw_cq_prod = rxq->rx_cq_tail; + + if (sw_cq_cons == hw_cq_cons) + return 0; + + while (nb_rx < nb_pkts && sw_cq_cons != hw_cq_cons) { + + bd_prod &= 
MAX_RX_BD(rxq); + bd_cons &= MAX_RX_BD(rxq); + + cqe_fp = &rxq->cq_ring[sw_cq_cons & MAX_RX_BD(rxq)].fast_path_cqe; + + if (unlikely(CQE_TYPE_SLOW(cqe_fp->type_error_flags & ETH_FAST_PATH_RX_CQE_TYPE))) { + PMD_RX_LOG(ERR, "slowpath event during traffic processing"); + break; + } + + if (unlikely(cqe_fp->type_error_flags & ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) { + PMD_RX_LOG(ERR, "flags 0x%x rx packet %u", + cqe_fp->type_error_flags, sw_cq_cons); + goto next_rx; + } + + len = cqe_fp->pkt_len_or_gro_seg_len; + pad = cqe_fp->placement_offset; + + new_mb = bnx2x_rxmbuf_alloc(rxq->mb_pool); + if (unlikely(!new_mb)) { + PMD_RX_LOG(ERR, "mbuf alloc fail fp[%02d]", fp->index); + goto next_rx; + } + + rx_mb = rxq->sw_ring[bd_cons]; + rxq->sw_ring[bd_cons] = new_mb; + rxq->rx_ring[bd_prod] = new_mb->buf_physaddr; + + rx_pref = NEXT_RX_BD(bd_cons) & MAX_RX_BD(rxq); + rte_prefetch0(rxq->sw_ring[rx_pref]); + if ((rx_pref & 0x3) == 0) { + rte_prefetch0(&rxq->rx_ring[rx_pref]); + rte_prefetch0(&rxq->sw_ring[rx_pref]); + } + + rx_mb->data_off = pad; + rx_mb->nb_segs = 1; + rx_mb->next = NULL; + rx_mb->pkt_len = rx_mb->data_len = len; + rx_mb->port = rxq->port_id; + rx_mb->buf_len = len + pad; + rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *)); + + /* + * If we received a packet with a vlan tag, + * attach that information to the packet. + */ + if (cqe_fp->pars_flags.flags & PARSING_FLAGS_VLAN) { + rx_mb->vlan_tci = cqe_fp->vlan_tag; + rx_mb->ol_flags |= PKT_RX_VLAN_PKT; + } + + rx_pkts[nb_rx] = rx_mb; + nb_rx++; + + /* limit spinning on the queue */ + if (unlikely(nb_rx == sc->rx_budget)) { + PMD_RX_LOG(ERR, "Limit spinning on the queue"); + break; + } + +next_rx: + bd_cons = NEXT_RX_BD(bd_cons); + bd_prod = NEXT_RX_BD(bd_prod); + sw_cq_prod = NEXT_RCQ_IDX(sw_cq_prod); + sw_cq_cons = NEXT_RCQ_IDX(sw_cq_cons); + } + rxq->rx_bd_head = bd_cons; + rxq->rx_bd_tail = bd_prod; + rxq->rx_cq_head = sw_cq_cons; + rxq->rx_cq_tail = sw_cq_prod; + + bnx2x_upd_rx_prod_fast(sc, fp, bd_prod, sw_cq_prod); + + return nb_rx; +} + +int +bnx2x_dev_rx_init(struct rte_eth_dev *dev) +{ + dev->rx_pkt_burst = bnx2x_recv_pkts; + + return 0; +} + +void +bnx2x_dev_clear_queues(struct rte_eth_dev *dev) +{ + uint8_t i; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct bnx2x_tx_queue *txq = dev->data->tx_queues[i]; + if (txq != NULL) { + bnx2x_tx_queue_release(txq); + dev->data->tx_queues[i] = NULL; + } + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct bnx2x_rx_queue *rxq = dev->data->rx_queues[i]; + if (rxq != NULL) { + bnx2x_rx_queue_release(rxq); + dev->data->rx_queues[i] = NULL; + } + } +} diff --git a/drivers/net/bnx2x/bnx2x_rxtx.h b/drivers/net/bnx2x/bnx2x_rxtx.h new file mode 100644 index 00000000..ccb22fc1 --- /dev/null +++ b/drivers/net/bnx2x/bnx2x_rxtx.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * + * Copyright (c) 2015 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.bnx2x_pmd for copyright and licensing details. + */ + +#ifndef _BNX2X_RXTX_H_ +#define _BNX2X_RXTX_H_ + + +#define DEFAULT_RX_FREE_THRESH 0 +#define DEFAULT_TX_FREE_THRESH 512 +#define RTE_PMD_BNX2X_TX_MAX_BURST 1 + +/** + * Structure associated with each descriptor of the RX ring of a RX queue. + */ +struct bnx2x_rx_entry { + struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */ +}; + +/** + * Structure associated with each RX queue. 
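+ * It groups the hardware BD ring, the RCQ completion ring and the software mbuf ring that back one receive queue.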
+ */ +struct bnx2x_rx_queue { + struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */ + union eth_rx_cqe *cq_ring; /**< RCQ ring virtual address. */ + uint64_t cq_ring_phys_addr; /**< RCQ ring DMA address. */ + uint64_t *rx_ring; /**< RX ring virtual address. */ + uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */ + struct rte_mbuf **sw_ring; /**< address of RX software ring. */ + struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */ + struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */ + uint16_t nb_cq_pages; /**< number of RCQ pages. */ + uint16_t nb_rx_desc; /**< number of RX descriptors. */ + uint16_t nb_rx_pages; /**< number of RX pages. */ + uint16_t rx_bd_head; /**< Index of current rx bd. */ + uint16_t rx_bd_tail; /**< Index of last rx bd. */ + uint16_t rx_cq_head; /**< Index of current rcq bd. */ + uint16_t rx_cq_tail; /**< Index of last rcq bd. */ + uint16_t nb_rx_hold; /**< number of held free RX desc. */ + uint16_t rx_free_thresh; /**< max free RX desc to hold. */ + uint16_t queue_id; /**< RX queue index. */ + uint8_t port_id; /**< Device port identifier. */ + uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */ + struct bnx2x_softc *sc; /**< Ptr to dev_private data. */ + uint64_t rx_mbuf_alloc; /**< Number of allocated mbufs. */ +}; + +/** + * Structure associated with each TX queue. + */ +struct bnx2x_tx_queue { + /** TX ring virtual address. */ + union eth_tx_bd_types *tx_ring; /**< TX ring virtual address. */ + uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */ + struct rte_mbuf **sw_ring; /**< virtual address of SW ring. */ + uint16_t tx_pkt_tail; /**< Index of current tx pkt. */ + uint16_t tx_pkt_head; /**< Index of last pkt counted by txeof. */ + uint16_t tx_bd_tail; /**< Index of current tx bd. */ + uint16_t tx_bd_head; /**< Index of last bd counted by txeof. */ + uint16_t nb_tx_desc; /**< number of TX descriptors. */ + uint16_t tx_free_thresh; /**< minimum TX before freeing. */ + uint16_t nb_tx_avail; /**< Number of TX descriptors available. */ + uint16_t nb_tx_pages; /**< number of TX pages */ + uint16_t queue_id; /**< TX queue index. */ + uint8_t port_id; /**< Device port identifier. */ + struct bnx2x_softc *sc; /**< Ptr to dev_private data */ +}; + +int bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool); + +int bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); + +void bnx2x_dev_rx_queue_release(void *rxq); +void bnx2x_dev_tx_queue_release(void *txq); +int bnx2x_dev_rx_init(struct rte_eth_dev *dev); +void bnx2x_dev_clear_queues(struct rte_eth_dev *dev); + +#endif /* _BNX2X_RXTX_H_ */ diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c new file mode 100644 index 00000000..c489cbee --- /dev/null +++ b/drivers/net/bnx2x/bnx2x_stats.c @@ -0,0 +1,1586 @@ +/*- + * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * + * Eric Davis <edavis@broadcom.com> + * David Christensen <davidch@broadcom.com> + * Gary Zambrano <zambrano@broadcom.com> + * + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.bnx2x_pmd for copyright and licensing details. 
+ */ + +#include "bnx2x.h" +#include "bnx2x_stats.h" + +#ifdef __i386__ +#define BITS_PER_LONG 32 +#else +#define BITS_PER_LONG 64 +#endif + +static inline uint16_t +bnx2x_get_port_stats_dma_len(struct bnx2x_softc *sc) +{ + uint16_t res = 0; + uint32_t size; + + /* 'newest' convention - shmem2 contains the size of the port stats */ + if (SHMEM2_HAS(sc, sizeof_port_stats)) { + size = SHMEM2_RD(sc, sizeof_port_stats); + if (size) { + res = size; + } + + /* prevent newer BC from causing buffer overflow */ + if (res > sizeof(struct host_port_stats)) { + res = sizeof(struct host_port_stats); + } + } + + /* + * Older convention - all BCs support the port stats fields up until + * the 'not_used' field + */ + if (!res) { + res = (offsetof(struct host_port_stats, not_used) + 4); + + /* if PFC stats are supported by the MFW, DMA them as well */ + if (sc->devinfo.bc_ver >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) { + res += (offsetof(struct host_port_stats, pfc_frames_rx_lo) - + offsetof(struct host_port_stats, pfc_frames_tx_hi) + 4); + } + } + + res >>= 2; + + return res; +} + +/* + * Init service functions + */ + +/* + * Post the next statistics ramrod. Protect it with the lock in + * order to ensure the strict order between statistics ramrods + * (each ramrod has a sequence number passed in a + * sc->fw_stats_req->hdr.drv_stats_counter and ramrods must be + * sent in order). + */ +static void +bnx2x_storm_stats_post(struct bnx2x_softc *sc) +{ + int rc; + + if (!sc->stats_pending) { + if (sc->stats_pending) { + return; + } + + sc->fw_stats_req->hdr.drv_stats_counter = + htole16(sc->stats_counter++); + + PMD_DEBUG_PERIODIC_LOG(DEBUG, + "sending statistics ramrod %d", + le16toh(sc->fw_stats_req->hdr.drv_stats_counter)); + + /* adjust the ramrod to include VF queues statistics */ + + /* send FW stats ramrod */ + rc = bnx2x_sp_post(sc, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, + U64_HI(sc->fw_stats_req_mapping), + U64_LO(sc->fw_stats_req_mapping), + NONE_CONNECTION_TYPE); + if (rc == 0) { + sc->stats_pending = 1; + } + } +} + +static void +bnx2x_hw_stats_post(struct bnx2x_softc *sc) +{ + struct dmae_command *dmae = &sc->stats_dmae; + uint32_t *stats_comp = BNX2X_SP(sc, stats_comp); + int loader_idx; + uint32_t opcode; + + *stats_comp = DMAE_COMP_VAL; + if (CHIP_REV_IS_SLOW(sc)) { + return; + } + + /* Update MCP's statistics if possible */ + if (sc->func_stx) { + rte_memcpy(BNX2X_SP(sc, func_stats), &sc->func_stats, + sizeof(sc->func_stats)); + } + + /* loader */ + if (sc->executer_idx) { + loader_idx = PMF_DMAE_C(sc); + opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, + TRUE, DMAE_COMP_GRC); + opcode = bnx2x_dmae_opcode_clr_src_reset(opcode); + + memset(dmae, 0, sizeof(struct dmae_command)); + dmae->opcode = opcode; + dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, dmae[0])); + dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, dmae[0])); + dmae->dst_addr_lo = ((DMAE_REG_CMD_MEM + + sizeof(struct dmae_command) * + (loader_idx + 1)) >> 2); + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct dmae_command) >> 2; + dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx + 1] >> 2); + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + *stats_comp = 0; + bnx2x_post_dmae(sc, dmae, loader_idx); + } else if (sc->func_stx) { + *stats_comp = 0; + bnx2x_post_dmae(sc, dmae, INIT_DMAE_C(sc)); + } +} + +static int +bnx2x_stats_comp(struct bnx2x_softc *sc) +{ + uint32_t *stats_comp = BNX2X_SP(sc, stats_comp); + int cnt = 10; + + while (*stats_comp != DMAE_COMP_VAL) { + if (!cnt) { + PMD_DRV_LOG(ERR, "Timeout waiting for stats 
finished"); + break; + } + + cnt--; + DELAY(1000); + } + + return 1; +} + +/* + * Statistics service functions + */ + +static void +bnx2x_stats_pmf_update(struct bnx2x_softc *sc) +{ + struct dmae_command *dmae; + uint32_t opcode; + int loader_idx = PMF_DMAE_C(sc); + uint32_t *stats_comp = BNX2X_SP(sc, stats_comp); + + if (sc->devinfo.bc_ver <= 0x06001400) { + /* + * Bootcode v6.0.21 fixed a GRC timeout that occurs when accessing + * BRB registers while the BRB block is in reset. The DMA transfer + * below triggers this issue resulting in the DMAE to stop + * functioning. Skip this initial stats transfer for old bootcode + * versions <= 6.0.20. + */ + return; + } + /* sanity */ + if (!sc->port.pmf || !sc->port.port_stx) { + PMD_DRV_LOG(ERR, "BUG!"); + return; + } + + sc->executer_idx = 0; + + opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI, FALSE, 0); + + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC); + dmae->src_addr_lo = (sc->port.port_stx >> 2); + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats)); + dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats)); + dmae->len = DMAE_LEN32_RD_MAX; + dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI); + dmae->src_addr_lo = ((sc->port.port_stx >> 2) + DMAE_LEN32_RD_MAX); + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats) + + DMAE_LEN32_RD_MAX * 4); + dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats) + + DMAE_LEN32_RD_MAX * 4); + dmae->len = (bnx2x_get_port_stats_dma_len(sc) - DMAE_LEN32_RD_MAX); + + dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp)); + dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + bnx2x_hw_stats_post(sc); + bnx2x_stats_comp(sc); +} + +static void +bnx2x_port_stats_init(struct bnx2x_softc *sc) +{ + struct dmae_command *dmae; + int port = SC_PORT(sc); + uint32_t opcode; + int loader_idx = PMF_DMAE_C(sc); + uint32_t mac_addr; + uint32_t *stats_comp = BNX2X_SP(sc, stats_comp); + + /* sanity */ + if (!sc->link_vars.link_up || !sc->port.pmf) { + PMD_DRV_LOG(ERR, "BUG!"); + return; + } + + sc->executer_idx = 0; + + /* MCP */ + opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, + TRUE, DMAE_COMP_GRC); + + if (sc->port.port_stx) { + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats)); + dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats)); + dmae->dst_addr_lo = sc->port.port_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = bnx2x_get_port_stats_dma_len(sc); + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } + + if (sc->func_stx) { + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats)); + dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats)); + dmae->dst_addr_lo = (sc->func_stx >> 2); + dmae->dst_addr_hi = 0; + dmae->len = (sizeof(struct host_func_stats) >> 2); + dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } + + /* MAC */ + opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI, + TRUE, DMAE_COMP_GRC); + + /* EMAC is special */ + 
if (sc->link_vars.mac_type == ELINK_MAC_TYPE_EMAC) { + mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0); + + /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/ + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_RX_STAT_AC) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats)); + dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats)); + dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT; + dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + /* EMAC_REG_EMAC_RX_STAT_AC_28 */ + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_RX_STAT_AC_28) >> 2); + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) + + offsetof(struct emac_stats, + rx_stat_falsecarriererrors)); + dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) + + offsetof(struct emac_stats, + rx_stat_falsecarriererrors)); + dmae->len = 1; + dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/ + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_TX_STAT_AC) >> 2); + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) + + offsetof(struct emac_stats, + tx_stat_ifhcoutoctets)); + dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) + + offsetof(struct emac_stats, + tx_stat_ifhcoutoctets)); + dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT; + dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } else { + uint32_t tx_src_addr_lo, rx_src_addr_lo; + uint16_t rx_len, tx_len; + + /* configure the params according to MAC type */ + switch (sc->link_vars.mac_type) { + case ELINK_MAC_TYPE_BMAC: + mac_addr = (port) ? NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + + /* BIGMAC_REGISTER_TX_STAT_GTPKT .. + BIGMAC_REGISTER_TX_STAT_GTBYT */ + if (CHIP_IS_E1x(sc)) { + tx_src_addr_lo = + ((mac_addr + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2); + tx_len = ((8 + BIGMAC_REGISTER_TX_STAT_GTBYT - + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2); + rx_src_addr_lo = + ((mac_addr + BIGMAC_REGISTER_RX_STAT_GR64) >> 2); + rx_len = ((8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - + BIGMAC_REGISTER_RX_STAT_GR64) >> 2); + } else { + tx_src_addr_lo = + ((mac_addr + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2); + tx_len = ((8 + BIGMAC2_REGISTER_TX_STAT_GTBYT - + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2); + rx_src_addr_lo = + ((mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2); + rx_len = ((8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ - + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2); + } + + break; + + case ELINK_MAC_TYPE_UMAC: /* handled by MSTAT */ + case ELINK_MAC_TYPE_XMAC: /* handled by MSTAT */ + default: + mac_addr = (port) ? 
GRCBASE_MSTAT1 : GRCBASE_MSTAT0; + tx_src_addr_lo = ((mac_addr + MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2); + rx_src_addr_lo = ((mac_addr + MSTAT_REG_RX_STAT_GR64_LO) >> 2); + tx_len = + (sizeof(sc->sp->mac_stats.mstat_stats.stats_tx) >> 2); + rx_len = + (sizeof(sc->sp->mac_stats.mstat_stats.stats_rx) >> 2); + break; + } + + /* TX stats */ + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = tx_src_addr_lo; + dmae->src_addr_hi = 0; + dmae->len = tx_len; + dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats)); + dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats)); + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + /* RX stats */ + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_hi = 0; + dmae->src_addr_lo = rx_src_addr_lo; + dmae->dst_addr_lo = + U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) + (tx_len << 2)); + dmae->dst_addr_hi = + U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) + (tx_len << 2)); + dmae->len = rx_len; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } + + /* NIG */ + if (!CHIP_IS_E3(sc)) { + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = + (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 : + NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats) + + offsetof(struct nig_stats, + egress_mac_pkt0_lo)); + dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats) + + offsetof(struct nig_stats, + egress_mac_pkt0_lo)); + dmae->len = ((2 * sizeof(uint32_t)) >> 2); + dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = + (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 : + NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats) + + offsetof(struct nig_stats, + egress_mac_pkt1_lo)); + dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats) + + offsetof(struct nig_stats, + egress_mac_pkt1_lo)); + dmae->len = ((2 * sizeof(uint32_t)) >> 2); + dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } + + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI, + TRUE, DMAE_COMP_PCI); + dmae->src_addr_lo = + (port ? 
NIG_REG_STAT1_BRB_DISCARD : + NIG_REG_STAT0_BRB_DISCARD) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats)); + dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats)); + dmae->len = (sizeof(struct nig_stats) - 4*sizeof(uint32_t)) >> 2; + + dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp)); + dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; +} + +static void +bnx2x_func_stats_init(struct bnx2x_softc *sc) +{ + struct dmae_command *dmae = &sc->stats_dmae; + uint32_t *stats_comp = BNX2X_SP(sc, stats_comp); + + /* sanity */ + if (!sc->func_stx) { + PMD_DRV_LOG(ERR, "BUG!"); + return; + } + + sc->executer_idx = 0; + memset(dmae, 0, sizeof(struct dmae_command)); + + dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, + TRUE, DMAE_COMP_PCI); + dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats)); + dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats)); + dmae->dst_addr_lo = (sc->func_stx >> 2); + dmae->dst_addr_hi = 0; + dmae->len = (sizeof(struct host_func_stats) >> 2); + dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp)); + dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; +} + +static void +bnx2x_stats_start(struct bnx2x_softc *sc) +{ + /* + * VFs travel through here as part of the statistics FSM, but no action + * is required + */ + if (IS_VF(sc)) { + return; + } + + if (sc->port.pmf) { + bnx2x_port_stats_init(sc); + } + + else if (sc->func_stx) { + bnx2x_func_stats_init(sc); + } + + bnx2x_hw_stats_post(sc); + bnx2x_storm_stats_post(sc); +} + +static void +bnx2x_stats_pmf_start(struct bnx2x_softc *sc) +{ + bnx2x_stats_comp(sc); + bnx2x_stats_pmf_update(sc); + bnx2x_stats_start(sc); +} + +static void +bnx2x_stats_restart(struct bnx2x_softc *sc) +{ + /* + * VFs travel through here as part of the statistics FSM, but no action + * is required + */ + if (IS_VF(sc)) { + return; + } + + bnx2x_stats_comp(sc); + bnx2x_stats_start(sc); +} + +static void +bnx2x_bmac_stats_update(struct bnx2x_softc *sc) +{ + struct host_port_stats *pstats = BNX2X_SP(sc, port_stats); + struct bnx2x_eth_stats *estats = &sc->eth_stats; + struct { + uint32_t lo; + uint32_t hi; + } diff; + + if (CHIP_IS_E1x(sc)) { + struct bmac1_stats *new = BNX2X_SP(sc, mac_stats.bmac1_stats); + + /* the macros below will use "bmac1_stats" type */ + UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); + UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); + UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); + UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); + UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); + UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); + UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); + UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); + UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); + + UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); + UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); + UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); + UPDATE_STAT64(tx_stat_gt127, + tx_stat_etherstatspkts65octetsto127octets); + UPDATE_STAT64(tx_stat_gt255, + tx_stat_etherstatspkts128octetsto255octets); + UPDATE_STAT64(tx_stat_gt511, + tx_stat_etherstatspkts256octetsto511octets); + UPDATE_STAT64(tx_stat_gt1023, + tx_stat_etherstatspkts512octetsto1023octets); + UPDATE_STAT64(tx_stat_gt1518, + 
tx_stat_etherstatspkts1024octetsto1522octets); + UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); + UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); + UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); + UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); + UPDATE_STAT64(tx_stat_gterr, + tx_stat_dot3statsinternalmactransmiterrors); + UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); + } else { + struct bmac2_stats *new = BNX2X_SP(sc, mac_stats.bmac2_stats); + struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old; + + /* the macros below will use "bmac2_stats" type */ + UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); + UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); + UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); + UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); + UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); + UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); + UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); + UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); + UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); + UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); + UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); + UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); + UPDATE_STAT64(tx_stat_gt127, + tx_stat_etherstatspkts65octetsto127octets); + UPDATE_STAT64(tx_stat_gt255, + tx_stat_etherstatspkts128octetsto255octets); + UPDATE_STAT64(tx_stat_gt511, + tx_stat_etherstatspkts256octetsto511octets); + UPDATE_STAT64(tx_stat_gt1023, + tx_stat_etherstatspkts512octetsto1023octets); + UPDATE_STAT64(tx_stat_gt1518, + tx_stat_etherstatspkts1024octetsto1522octets); + UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); + UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); + UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); + UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); + UPDATE_STAT64(tx_stat_gterr, + tx_stat_dot3statsinternalmactransmiterrors); + UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); + + /* collect PFC stats */ + pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi; + pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo; + ADD_64(pstats->pfc_frames_tx_hi, fwstats->pfc_frames_tx_hi, + pstats->pfc_frames_tx_lo, fwstats->pfc_frames_tx_lo); + + pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi; + pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo; + ADD_64(pstats->pfc_frames_rx_hi, fwstats->pfc_frames_rx_hi, + pstats->pfc_frames_rx_lo, fwstats->pfc_frames_rx_lo); + } + + estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi; + estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo; + + estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi; + estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo; + + estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi; + estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo; + estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi; + estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo; +} + +static void +bnx2x_mstat_stats_update(struct bnx2x_softc *sc) +{ + struct host_port_stats *pstats = BNX2X_SP(sc, port_stats); + struct bnx2x_eth_stats *estats = &sc->eth_stats; + struct mstat_stats *new = BNX2X_SP(sc, mac_stats.mstat_stats); + + ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets); + ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors); + ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts); + ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong); + 
ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments); + ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived); + ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered); + ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf); + ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent); + ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone); + + /* collect pfc stats */ + ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi, + pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo); + ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi, + pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo); + + ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets); + ADD_STAT64(stats_tx.tx_gt127, tx_stat_etherstatspkts65octetsto127octets); + ADD_STAT64(stats_tx.tx_gt255, tx_stat_etherstatspkts128octetsto255octets); + ADD_STAT64(stats_tx.tx_gt511, tx_stat_etherstatspkts256octetsto511octets); + ADD_STAT64(stats_tx.tx_gt1023, + tx_stat_etherstatspkts512octetsto1023octets); + ADD_STAT64(stats_tx.tx_gt1518, + tx_stat_etherstatspkts1024octetsto1522octets); + ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047); + + ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095); + ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216); + ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383); + + ADD_STAT64(stats_tx.tx_gterr, tx_stat_dot3statsinternalmactransmiterrors); + ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl); + + estats->etherstatspkts1024octetsto1522octets_hi = + pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi; + estats->etherstatspkts1024octetsto1522octets_lo = + pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo; + + estats->etherstatspktsover1522octets_hi = + pstats->mac_stx[1].tx_stat_mac_2047_hi; + estats->etherstatspktsover1522octets_lo = + pstats->mac_stx[1].tx_stat_mac_2047_lo; + + ADD_64(estats->etherstatspktsover1522octets_hi, + pstats->mac_stx[1].tx_stat_mac_4095_hi, + estats->etherstatspktsover1522octets_lo, + pstats->mac_stx[1].tx_stat_mac_4095_lo); + + ADD_64(estats->etherstatspktsover1522octets_hi, + pstats->mac_stx[1].tx_stat_mac_9216_hi, + estats->etherstatspktsover1522octets_lo, + pstats->mac_stx[1].tx_stat_mac_9216_lo); + + ADD_64(estats->etherstatspktsover1522octets_hi, + pstats->mac_stx[1].tx_stat_mac_16383_hi, + estats->etherstatspktsover1522octets_lo, + pstats->mac_stx[1].tx_stat_mac_16383_lo); + + estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi; + estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo; + + estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi; + estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo; + + estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi; + estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo; + estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi; + estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo; +} + +static void +bnx2x_emac_stats_update(struct bnx2x_softc *sc) +{ + struct emac_stats *new = BNX2X_SP(sc, mac_stats.emac_stats); + struct host_port_stats *pstats = BNX2X_SP(sc, port_stats); + struct bnx2x_eth_stats *estats = &sc->eth_stats; + + UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets); + UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets); + UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors); + UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors); + UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors); + UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors); + UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts); + 
UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong); + UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments); + UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers); + UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived); + UPDATE_EXTEND_STAT(rx_stat_xoffstateentered); + UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived); + UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived); + UPDATE_EXTEND_STAT(tx_stat_outxonsent); + UPDATE_EXTEND_STAT(tx_stat_outxoffsent); + UPDATE_EXTEND_STAT(tx_stat_flowcontroldone); + UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions); + UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes); + UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes); + UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions); + UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions); + UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets); + UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors); + + estats->pause_frames_received_hi = + pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi; + estats->pause_frames_received_lo = + pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo; + ADD_64(estats->pause_frames_received_hi, + pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi, + estats->pause_frames_received_lo, + pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo); + + estats->pause_frames_sent_hi = + pstats->mac_stx[1].tx_stat_outxonsent_hi; + estats->pause_frames_sent_lo = + pstats->mac_stx[1].tx_stat_outxonsent_lo; + ADD_64(estats->pause_frames_sent_hi, + pstats->mac_stx[1].tx_stat_outxoffsent_hi, + estats->pause_frames_sent_lo, + pstats->mac_stx[1].tx_stat_outxoffsent_lo); +} + +static int +bnx2x_hw_stats_update(struct bnx2x_softc *sc) +{ + struct nig_stats *new = BNX2X_SP(sc, nig_stats); + struct nig_stats *old = &(sc->port.old_nig_stats); + struct host_port_stats *pstats = BNX2X_SP(sc, port_stats); + struct bnx2x_eth_stats *estats = &sc->eth_stats; + uint32_t lpi_reg, nig_timer_max; + struct { + uint32_t lo; + uint32_t hi; + } diff; + + switch (sc->link_vars.mac_type) { + case ELINK_MAC_TYPE_BMAC: + bnx2x_bmac_stats_update(sc); + break; + + case ELINK_MAC_TYPE_EMAC: + bnx2x_emac_stats_update(sc); + break; + + case ELINK_MAC_TYPE_UMAC: + case ELINK_MAC_TYPE_XMAC: + bnx2x_mstat_stats_update(sc); + break; + + case ELINK_MAC_TYPE_NONE: /* unreached */ + PMD_DRV_LOG(DEBUG, + "stats updated by DMAE but no MAC active"); + return -1; + + default: /* unreached */ + PMD_DRV_LOG(ERR, "stats update failed, unknown MAC type"); + } + + ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, + new->brb_discard - old->brb_discard); + ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo, + new->brb_truncate - old->brb_truncate); + + if (!CHIP_IS_E3(sc)) { + UPDATE_STAT64_NIG(egress_mac_pkt0, + etherstatspkts1024octetsto1522octets); + UPDATE_STAT64_NIG(egress_mac_pkt1, + etherstatspktsover1522octets); + } + + rte_memcpy(old, new, sizeof(struct nig_stats)); + + rte_memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]), + sizeof(struct mac_stx)); + estats->brb_drop_hi = pstats->brb_drop_hi; + estats->brb_drop_lo = 
pstats->brb_drop_lo; + + pstats->host_port_stats_counter++; + + if (CHIP_IS_E3(sc)) { + lpi_reg = (SC_PORT(sc)) ? + MISC_REG_CPMU_LP_SM_ENT_CNT_P1 : + MISC_REG_CPMU_LP_SM_ENT_CNT_P0; + estats->eee_tx_lpi += REG_RD(sc, lpi_reg); + } + + if (!BNX2X_NOMCP(sc)) { + nig_timer_max = SHMEM_RD(sc, port_mb[SC_PORT(sc)].stat_nig_timer); + if (nig_timer_max != estats->nig_timer_max) { + estats->nig_timer_max = nig_timer_max; + PMD_DRV_LOG(ERR, "invalid NIG timer max (%u)", + estats->nig_timer_max); + } + } + + return 0; +} + +static int +bnx2x_storm_stats_validate_counters(struct bnx2x_softc *sc) +{ + struct stats_counter *counters = &sc->fw_stats_data->storm_counters; + uint16_t cur_stats_counter; + + /* + * Make sure we use the value of the counter + * used for sending the last stats ramrod. + */ + cur_stats_counter = (sc->stats_counter - 1); + + /* are storm stats valid? */ + if (le16toh(counters->xstats_counter) != cur_stats_counter) { + PMD_DRV_LOG(DEBUG, + "stats not updated by xstorm, " + "counter 0x%x != stats_counter 0x%x", + le16toh(counters->xstats_counter), sc->stats_counter); + return -EAGAIN; + } + + if (le16toh(counters->ustats_counter) != cur_stats_counter) { + PMD_DRV_LOG(DEBUG, + "stats not updated by ustorm, " + "counter 0x%x != stats_counter 0x%x", + le16toh(counters->ustats_counter), sc->stats_counter); + return -EAGAIN; + } + + if (le16toh(counters->cstats_counter) != cur_stats_counter) { + PMD_DRV_LOG(DEBUG, + "stats not updated by cstorm, " + "counter 0x%x != stats_counter 0x%x", + le16toh(counters->cstats_counter), sc->stats_counter); + return -EAGAIN; + } + + if (le16toh(counters->tstats_counter) != cur_stats_counter) { + PMD_DRV_LOG(DEBUG, + "stats not updated by tstorm, " + "counter 0x%x != stats_counter 0x%x", + le16toh(counters->tstats_counter), sc->stats_counter); + return -EAGAIN; + } + + return 0; +} + +static int +bnx2x_storm_stats_update(struct bnx2x_softc *sc) +{ + struct tstorm_per_port_stats *tport = + &sc->fw_stats_data->port.tstorm_port_statistics; + struct tstorm_per_pf_stats *tfunc = + &sc->fw_stats_data->pf.tstorm_pf_statistics; + struct host_func_stats *fstats = &sc->func_stats; + struct bnx2x_eth_stats *estats = &sc->eth_stats; + struct bnx2x_eth_stats_old *estats_old = &sc->eth_stats_old; + int i; + + /* vfs stat counter is managed by pf */ + if (IS_PF(sc) && bnx2x_storm_stats_validate_counters(sc)) { + return -EAGAIN; + } + + estats->error_bytes_received_hi = 0; + estats->error_bytes_received_lo = 0; + + for (i = 0; i < sc->num_queues; i++) { + struct bnx2x_fastpath *fp = &sc->fp[i]; + struct tstorm_per_queue_stats *tclient = + &sc->fw_stats_data->queue_stats[i].tstorm_queue_statistics; + struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient; + struct ustorm_per_queue_stats *uclient = + &sc->fw_stats_data->queue_stats[i].ustorm_queue_statistics; + struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient; + struct xstorm_per_queue_stats *xclient = + &sc->fw_stats_data->queue_stats[i].xstorm_queue_statistics; + struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient; + struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; + struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old; + + uint32_t diff; + + /* PMD_DRV_LOG(DEBUG, + "queue[%d]: ucast_sent 0x%x bcast_sent 0x%x mcast_sent 0x%x", + i, xclient->ucast_pkts_sent, xclient->bcast_pkts_sent, + xclient->mcast_pkts_sent); + + PMD_DRV_LOG(DEBUG, "---------------"); */ + + UPDATE_QSTAT(tclient->rcv_bcast_bytes, + total_broadcast_bytes_received); + 
UPDATE_QSTAT(tclient->rcv_mcast_bytes, + total_multicast_bytes_received); + UPDATE_QSTAT(tclient->rcv_ucast_bytes, + total_unicast_bytes_received); + + /* + * sum to total_bytes_received all + * unicast/multicast/broadcast + */ + qstats->total_bytes_received_hi = + qstats->total_broadcast_bytes_received_hi; + qstats->total_bytes_received_lo = + qstats->total_broadcast_bytes_received_lo; + + ADD_64(qstats->total_bytes_received_hi, + qstats->total_multicast_bytes_received_hi, + qstats->total_bytes_received_lo, + qstats->total_multicast_bytes_received_lo); + + ADD_64(qstats->total_bytes_received_hi, + qstats->total_unicast_bytes_received_hi, + qstats->total_bytes_received_lo, + qstats->total_unicast_bytes_received_lo); + + qstats->valid_bytes_received_hi = qstats->total_bytes_received_hi; + qstats->valid_bytes_received_lo = qstats->total_bytes_received_lo; + + UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, total_unicast_packets_received); + UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, total_multicast_packets_received); + UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, total_broadcast_packets_received); + UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard, + etherstatsoverrsizepkts, 32); + UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16); + + SUB_EXTEND_USTAT(ucast_no_buff_pkts, total_unicast_packets_received); + SUB_EXTEND_USTAT(mcast_no_buff_pkts, + total_multicast_packets_received); + SUB_EXTEND_USTAT(bcast_no_buff_pkts, + total_broadcast_packets_received); + UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard); + UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard); + UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard); + + UPDATE_QSTAT(xclient->bcast_bytes_sent, + total_broadcast_bytes_transmitted); + UPDATE_QSTAT(xclient->mcast_bytes_sent, + total_multicast_bytes_transmitted); + UPDATE_QSTAT(xclient->ucast_bytes_sent, + total_unicast_bytes_transmitted); + + /* + * sum to total_bytes_transmitted all + * unicast/multicast/broadcast + */ + qstats->total_bytes_transmitted_hi = + qstats->total_unicast_bytes_transmitted_hi; + qstats->total_bytes_transmitted_lo = + qstats->total_unicast_bytes_transmitted_lo; + + ADD_64(qstats->total_bytes_transmitted_hi, + qstats->total_broadcast_bytes_transmitted_hi, + qstats->total_bytes_transmitted_lo, + qstats->total_broadcast_bytes_transmitted_lo); + + ADD_64(qstats->total_bytes_transmitted_hi, + qstats->total_multicast_bytes_transmitted_hi, + qstats->total_bytes_transmitted_lo, + qstats->total_multicast_bytes_transmitted_lo); + + UPDATE_EXTEND_XSTAT(ucast_pkts_sent, + total_unicast_packets_transmitted); + UPDATE_EXTEND_XSTAT(mcast_pkts_sent, + total_multicast_packets_transmitted); + UPDATE_EXTEND_XSTAT(bcast_pkts_sent, + total_broadcast_packets_transmitted); + + UPDATE_EXTEND_TSTAT(checksum_discard, + total_packets_received_checksum_discarded); + UPDATE_EXTEND_TSTAT(ttl0_discard, + total_packets_received_ttl0_discarded); + + UPDATE_EXTEND_XSTAT(error_drop_pkts, + total_transmitted_dropped_packets_error); + + UPDATE_FSTAT_QSTAT(total_bytes_received); + UPDATE_FSTAT_QSTAT(total_bytes_transmitted); + UPDATE_FSTAT_QSTAT(total_unicast_packets_received); + UPDATE_FSTAT_QSTAT(total_multicast_packets_received); + UPDATE_FSTAT_QSTAT(total_broadcast_packets_received); + UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted); + UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted); + UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted); + UPDATE_FSTAT_QSTAT(valid_bytes_received); + } + + ADD_64(estats->total_bytes_received_hi, + estats->rx_stat_ifhcinbadoctets_hi, + 
estats->total_bytes_received_lo, + estats->rx_stat_ifhcinbadoctets_lo); + + ADD_64_LE(estats->total_bytes_received_hi, + tfunc->rcv_error_bytes.hi, + estats->total_bytes_received_lo, + tfunc->rcv_error_bytes.lo); + + ADD_64_LE(estats->error_bytes_received_hi, + tfunc->rcv_error_bytes.hi, + estats->error_bytes_received_lo, + tfunc->rcv_error_bytes.lo); + + UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong); + + ADD_64(estats->error_bytes_received_hi, + estats->rx_stat_ifhcinbadoctets_hi, + estats->error_bytes_received_lo, + estats->rx_stat_ifhcinbadoctets_lo); + + if (sc->port.pmf) { + struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old; + UPDATE_FW_STAT(mac_filter_discard); + UPDATE_FW_STAT(mf_tag_discard); + UPDATE_FW_STAT(brb_truncate_discard); + UPDATE_FW_STAT(mac_discard); + } + + fstats->host_func_stats_start = ++fstats->host_func_stats_end; + + sc->stats_pending = 0; + + return 0; +} + +static void +bnx2x_drv_stats_update(struct bnx2x_softc *sc) +{ + struct bnx2x_eth_stats *estats = &sc->eth_stats; + int i; + + for (i = 0; i < sc->num_queues; i++) { + struct bnx2x_eth_q_stats *qstats = &sc->fp[i].eth_q_stats; + struct bnx2x_eth_q_stats_old *qstats_old = &sc->fp[i].eth_q_stats_old; + + UPDATE_ESTAT_QSTAT(rx_calls); + UPDATE_ESTAT_QSTAT(rx_pkts); + UPDATE_ESTAT_QSTAT(rx_soft_errors); + UPDATE_ESTAT_QSTAT(rx_hw_csum_errors); + UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_ip); + UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_tcp_udp); + UPDATE_ESTAT_QSTAT(rx_budget_reached); + UPDATE_ESTAT_QSTAT(tx_pkts); + UPDATE_ESTAT_QSTAT(tx_soft_errors); + UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_ip); + UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_tcp); + UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_udp); + UPDATE_ESTAT_QSTAT(tx_encap_failures); + UPDATE_ESTAT_QSTAT(tx_hw_queue_full); + UPDATE_ESTAT_QSTAT(tx_hw_max_queue_depth); + UPDATE_ESTAT_QSTAT(tx_dma_mapping_failure); + UPDATE_ESTAT_QSTAT(tx_max_drbr_queue_depth); + UPDATE_ESTAT_QSTAT(tx_window_violation_std); + UPDATE_ESTAT_QSTAT(tx_chain_lost_mbuf); + UPDATE_ESTAT_QSTAT(tx_frames_deferred); + UPDATE_ESTAT_QSTAT(tx_queue_xoff); + + /* mbuf driver statistics */ + UPDATE_ESTAT_QSTAT(mbuf_defrag_attempts); + UPDATE_ESTAT_QSTAT(mbuf_defrag_failures); + UPDATE_ESTAT_QSTAT(mbuf_rx_bd_alloc_failed); + UPDATE_ESTAT_QSTAT(mbuf_rx_bd_mapping_failed); + + /* track the number of allocated mbufs */ + UPDATE_ESTAT_QSTAT(mbuf_alloc_tx); + UPDATE_ESTAT_QSTAT(mbuf_alloc_rx); + } +} + +static uint8_t +bnx2x_edebug_stats_stopped(struct bnx2x_softc *sc) +{ + uint32_t val; + + if (SHMEM2_HAS(sc, edebug_driver_if[1])) { + val = SHMEM2_RD(sc, edebug_driver_if[1]); + + if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT) { + return TRUE; + } + } + + return FALSE; +} + +static void +bnx2x_stats_update(struct bnx2x_softc *sc) +{ + uint32_t *stats_comp = BNX2X_SP(sc, stats_comp); + + if (bnx2x_edebug_stats_stopped(sc)) { + return; + } + + if (IS_PF(sc)) { + + bnx2x_storm_stats_update(sc); + bnx2x_hw_stats_post(sc); + bnx2x_storm_stats_post(sc); + DELAY_MS(5); + + if (*stats_comp != DMAE_COMP_VAL) { + return; + } + + if (sc->port.pmf) { + bnx2x_hw_stats_update(sc); + } + + if (bnx2x_storm_stats_update(sc)) { + if (sc->stats_pending++ == 3) { + rte_panic("storm stats not updated for 3 times"); + } + return; + } + } else { + /* + * VF doesn't collect HW statistics, and doesn't get completions, + * performs only update. 
+ */ + bnx2x_storm_stats_update(sc); + } + + bnx2x_drv_stats_update(sc); +} + +static void +bnx2x_port_stats_stop(struct bnx2x_softc *sc) +{ + struct dmae_command *dmae; + uint32_t opcode; + int loader_idx = PMF_DMAE_C(sc); + uint32_t *stats_comp = BNX2X_SP(sc, stats_comp); + + sc->executer_idx = 0; + + opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, FALSE, 0); + + if (sc->port.port_stx) { + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + + if (sc->func_stx) { + dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC); + } else { + dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI); + } + + dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats)); + dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats)); + dmae->dst_addr_lo = sc->port.port_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = bnx2x_get_port_stats_dma_len(sc); + if (sc->func_stx) { + dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } else { + dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp)); + dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + } + } + + if (sc->func_stx) { + dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]); + dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI); + dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats)); + dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats)); + dmae->dst_addr_lo = (sc->func_stx >> 2); + dmae->dst_addr_hi = 0; + dmae->len = (sizeof(struct host_func_stats) >> 2); + dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp)); + dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + } +} + +static void +bnx2x_stats_stop(struct bnx2x_softc *sc) +{ + uint8_t update = FALSE; + + bnx2x_stats_comp(sc); + + if (sc->port.pmf) { + update = bnx2x_hw_stats_update(sc) == 0; + } + + update |= bnx2x_storm_stats_update(sc) == 0; + + if (update) { + + if (sc->port.pmf) { + bnx2x_port_stats_stop(sc); + } + + bnx2x_hw_stats_post(sc); + bnx2x_stats_comp(sc); + } +} + +static void +bnx2x_stats_do_nothing(__rte_unused struct bnx2x_softc *sc) +{ + return; +} + +static const struct { + void (*action)(struct bnx2x_softc *sc); + enum bnx2x_stats_state next_state; +} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = { + { + /* DISABLED PMF */ { bnx2x_stats_pmf_update, STATS_STATE_DISABLED }, + /* LINK_UP */ { bnx2x_stats_start, STATS_STATE_ENABLED }, + /* UPDATE */ { bnx2x_stats_do_nothing, STATS_STATE_DISABLED }, + /* STOP */ { bnx2x_stats_do_nothing, STATS_STATE_DISABLED } + }, + { + /* ENABLED PMF */ { bnx2x_stats_pmf_start, STATS_STATE_ENABLED }, + /* LINK_UP */ { bnx2x_stats_restart, STATS_STATE_ENABLED }, + /* UPDATE */ { bnx2x_stats_update, STATS_STATE_ENABLED }, + /* STOP */ { bnx2x_stats_stop, STATS_STATE_DISABLED } + } +}; + +void bnx2x_stats_handle(struct bnx2x_softc *sc, enum bnx2x_stats_event event) +{ + enum bnx2x_stats_state state; + + if (unlikely(sc->panic)) { + return; + } + + state = sc->stats_state; + sc->stats_state = bnx2x_stats_stm[state][event].next_state; + + bnx2x_stats_stm[state][event].action(sc); + + if (event != STATS_EVENT_UPDATE) { + PMD_DRV_LOG(DEBUG, + "state %d -> event %d -> state %d", + state, event, sc->stats_state); + } +} + +static void +bnx2x_port_stats_base_init(struct bnx2x_softc *sc) +{ + struct dmae_command *dmae; + uint32_t *stats_comp = BNX2X_SP(sc, stats_comp); + + /* sanity */ + if 
(!sc->port.pmf || !sc->port.port_stx) {
+ PMD_DRV_LOG(ERR, "BUG!");
+ return;
+ }
+
+ sc->executer_idx = 0;
+
+ dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
+ dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
+ TRUE, DMAE_COMP_PCI);
+ dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
+ dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
+ dmae->dst_addr_lo = (sc->port.port_stx >> 2);
+ dmae->dst_addr_hi = 0;
+ dmae->len = bnx2x_get_port_stats_dma_len(sc);
+ dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
+ dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+ bnx2x_hw_stats_post(sc);
+ bnx2x_stats_comp(sc);
+}
+
+/*
+ * This function prepares the statistics ramrod data so that we only
+ * have to increment the statistics counter and send the ramrod each
+ * time we have to.
+ */
+static void
+bnx2x_prep_fw_stats_req(struct bnx2x_softc *sc)
+{
+ int i;
+ int first_queue_query_index;
+ struct stats_query_header *stats_hdr = &sc->fw_stats_req->hdr;
+ phys_addr_t cur_data_offset;
+ struct stats_query_entry *cur_query_entry;
+
+ stats_hdr->cmd_num = sc->fw_stats_num;
+ stats_hdr->drv_stats_counter = 0;
+
+ /*
+ * The storm_counters struct contains the counters of completed
+ * statistics requests per storm which are incremented by FW
+ * each time it completes handling a statistics ramrod. We will
+ * check these counters in the timer handler and discard a
+ * (statistics) ramrod completion.
+ */
+ cur_data_offset = (sc->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, storm_counters));
+
+ stats_hdr->stats_counters_addrs.hi = htole32(U64_HI(cur_data_offset));
+ stats_hdr->stats_counters_addrs.lo = htole32(U64_LO(cur_data_offset));
+
+ /*
+ * Prepare the first stats ramrod (will be completed with
+ * the counters equal to zero) - init counters to something different.
+ */ + memset(&sc->fw_stats_data->storm_counters, 0xff, + sizeof(struct stats_counter)); + + /**** Port FW statistics data ****/ + cur_data_offset = (sc->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, port)); + + cur_query_entry = &sc->fw_stats_req->query[BNX2X_PORT_QUERY_IDX]; + + cur_query_entry->kind = STATS_TYPE_PORT; + /* For port query index is a DONT CARE */ + cur_query_entry->index = SC_PORT(sc); + /* For port query funcID is a DONT CARE */ + cur_query_entry->funcID = htole16(SC_FUNC(sc)); + cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset)); + cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset)); + + /**** PF FW statistics data ****/ + cur_data_offset = (sc->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, pf)); + + cur_query_entry = &sc->fw_stats_req->query[BNX2X_PF_QUERY_IDX]; + + cur_query_entry->kind = STATS_TYPE_PF; + /* For PF query index is a DONT CARE */ + cur_query_entry->index = SC_PORT(sc); + cur_query_entry->funcID = htole16(SC_FUNC(sc)); + cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset)); + cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset)); + + /**** Clients' queries ****/ + cur_data_offset = (sc->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, queue_stats)); + + /* + * First queue query index depends whether FCoE offloaded request will + * be included in the ramrod + */ + first_queue_query_index = (BNX2X_FIRST_QUEUE_QUERY_IDX - 1); + + for (i = 0; i < sc->num_queues; i++) { + cur_query_entry = + &sc->fw_stats_req->query[first_queue_query_index + i]; + + cur_query_entry->kind = STATS_TYPE_QUEUE; + cur_query_entry->index = bnx2x_stats_id(&sc->fp[i]); + cur_query_entry->funcID = htole16(SC_FUNC(sc)); + cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset)); + cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset)); + + cur_data_offset += sizeof(struct per_queue_stats); + } +} + +void bnx2x_memset_stats(struct bnx2x_softc *sc) +{ + int i; + + /* function stats */ + for (i = 0; i < sc->num_queues; i++) { + struct bnx2x_fastpath *fp = &sc->fp[i]; + + memset(&fp->old_tclient, 0, + sizeof(fp->old_tclient)); + memset(&fp->old_uclient, 0, + sizeof(fp->old_uclient)); + memset(&fp->old_xclient, 0, + sizeof(fp->old_xclient)); + if (sc->stats_init) { + memset(&fp->eth_q_stats, 0, + sizeof(fp->eth_q_stats)); + memset(&fp->eth_q_stats_old, 0, + sizeof(fp->eth_q_stats_old)); + } + } + + if (sc->stats_init) { + memset(&sc->net_stats_old, 0, sizeof(sc->net_stats_old)); + memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old)); + memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old)); + memset(&sc->eth_stats, 0, sizeof(sc->eth_stats)); + memset(&sc->func_stats, 0, sizeof(sc->func_stats)); + } + + sc->stats_state = STATS_STATE_DISABLED; + + if (sc->port.pmf && sc->port.port_stx) + bnx2x_port_stats_base_init(sc); + + /* mark the end of statistics initializiation */ + sc->stats_init = false; +} + +void +bnx2x_stats_init(struct bnx2x_softc *sc) +{ + int /*abs*/port = SC_PORT(sc); + int mb_idx = SC_FW_MB_IDX(sc); + int i; + + sc->stats_pending = 0; + sc->executer_idx = 0; + sc->stats_counter = 0; + + sc->stats_init = TRUE; + + /* port and func stats for management */ + if (!BNX2X_NOMCP(sc)) { + sc->port.port_stx = SHMEM_RD(sc, port_mb[port].port_stx); + sc->func_stx = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_param); + } else { + sc->port.port_stx = 0; + sc->func_stx = 0; + } + + PMD_DRV_LOG(DEBUG, "port_stx 0x%x func_stx 0x%x", + sc->port.port_stx, sc->func_stx); + + /* pmf should retrieve 
port statistics from SP on a non-init*/ + if (!sc->stats_init && sc->port.pmf && sc->port.port_stx) { + bnx2x_stats_handle(sc, STATS_EVENT_PMF); + } + + port = SC_PORT(sc); + /* port stats */ + memset(&(sc->port.old_nig_stats), 0, sizeof(struct nig_stats)); + sc->port.old_nig_stats.brb_discard = + REG_RD(sc, NIG_REG_STAT0_BRB_DISCARD + port*0x38); + sc->port.old_nig_stats.brb_truncate = + REG_RD(sc, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); + if (!CHIP_IS_E3(sc)) { + REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, + &(sc->port.old_nig_stats.egress_mac_pkt0_lo), 2); + REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, + &(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2); + } + + /* function stats */ + for (i = 0; i < sc->num_queues; i++) { + memset(&sc->fp[i].old_tclient, 0, sizeof(sc->fp[i].old_tclient)); + memset(&sc->fp[i].old_uclient, 0, sizeof(sc->fp[i].old_uclient)); + memset(&sc->fp[i].old_xclient, 0, sizeof(sc->fp[i].old_xclient)); + if (sc->stats_init) { + memset(&sc->fp[i].eth_q_stats, 0, + sizeof(sc->fp[i].eth_q_stats)); + memset(&sc->fp[i].eth_q_stats_old, 0, + sizeof(sc->fp[i].eth_q_stats_old)); + } + } + + /* prepare statistics ramrod data */ + bnx2x_prep_fw_stats_req(sc); + + if (sc->stats_init) { + memset(&sc->net_stats_old, 0, sizeof(sc->net_stats_old)); + memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old)); + memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old)); + memset(&sc->eth_stats, 0, sizeof(sc->eth_stats)); + memset(&sc->func_stats, 0, sizeof(sc->func_stats)); + + /* Clean SP from previous statistics */ + if (sc->func_stx) { + memset(BNX2X_SP(sc, func_stats), 0, sizeof(struct host_func_stats)); + bnx2x_func_stats_init(sc); + bnx2x_hw_stats_post(sc); + bnx2x_stats_comp(sc); + } + } + + sc->stats_state = STATS_STATE_DISABLED; + + if (sc->port.pmf && sc->port.port_stx) { + bnx2x_port_stats_base_init(sc); + } + + /* mark the end of statistics initializiation */ + sc->stats_init = FALSE; +} + +void +bnx2x_save_statistics(struct bnx2x_softc *sc) +{ + int i; + + /* save queue statistics */ + for (i = 0; i < sc->num_queues; i++) { + struct bnx2x_fastpath *fp = &sc->fp[i]; + struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; + struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old; + + UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi); + UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo); + UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi); + UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo); + UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi); + UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo); + UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi); + UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo); + UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi); + UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo); + UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi); + UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo); + } + + /* store port firmware statistics */ + if (sc->port.pmf) { + struct bnx2x_eth_stats *estats = &sc->eth_stats; + struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old; + struct host_port_stats *pstats = BNX2X_SP(sc, port_stats); + + fwstats->pfc_frames_rx_hi = pstats->pfc_frames_rx_hi; + fwstats->pfc_frames_rx_lo = pstats->pfc_frames_rx_lo; + fwstats->pfc_frames_tx_hi = pstats->pfc_frames_tx_hi; + fwstats->pfc_frames_tx_lo = pstats->pfc_frames_tx_lo; + + if (IS_MF(sc)) { + UPDATE_FW_STAT_OLD(mac_filter_discard); + UPDATE_FW_STAT_OLD(mf_tag_discard); + UPDATE_FW_STAT_OLD(brb_truncate_discard); 
+ UPDATE_FW_STAT_OLD(mac_discard); + } + } +} diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h new file mode 100644 index 00000000..3396de31 --- /dev/null +++ b/drivers/net/bnx2x/bnx2x_stats.h @@ -0,0 +1,611 @@ +/*- + * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * + * Eric Davis <edavis@broadcom.com> + * David Christensen <davidch@broadcom.com> + * Gary Zambrano <zambrano@broadcom.com> + * + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.bnx2x_pmd for copyright and licensing details. + */ + +#ifndef BNX2X_STATS_H +#define BNX2X_STATS_H + +#include <sys/types.h> + +struct nig_stats { + uint32_t brb_discard; + uint32_t brb_packet; + uint32_t brb_truncate; + uint32_t flow_ctrl_discard; + uint32_t flow_ctrl_octets; + uint32_t flow_ctrl_packet; + uint32_t mng_discard; + uint32_t mng_octet_inp; + uint32_t mng_octet_out; + uint32_t mng_packet_inp; + uint32_t mng_packet_out; + uint32_t pbf_octets; + uint32_t pbf_packet; + uint32_t safc_inp; + uint32_t egress_mac_pkt0_lo; + uint32_t egress_mac_pkt0_hi; + uint32_t egress_mac_pkt1_lo; + uint32_t egress_mac_pkt1_hi; +}; + + +enum bnx2x_stats_event { + STATS_EVENT_PMF = 0, + STATS_EVENT_LINK_UP, + STATS_EVENT_UPDATE, + STATS_EVENT_STOP, + STATS_EVENT_MAX +}; + +enum bnx2x_stats_state { + STATS_STATE_DISABLED = 0, + STATS_STATE_ENABLED, + STATS_STATE_MAX +}; + +struct bnx2x_eth_stats { + uint32_t total_bytes_received_hi; + uint32_t total_bytes_received_lo; + uint32_t total_bytes_transmitted_hi; + uint32_t total_bytes_transmitted_lo; + uint32_t total_unicast_packets_received_hi; + uint32_t total_unicast_packets_received_lo; + uint32_t total_multicast_packets_received_hi; + uint32_t total_multicast_packets_received_lo; + uint32_t total_broadcast_packets_received_hi; + uint32_t total_broadcast_packets_received_lo; + uint32_t total_unicast_packets_transmitted_hi; + uint32_t total_unicast_packets_transmitted_lo; + uint32_t total_multicast_packets_transmitted_hi; + uint32_t total_multicast_packets_transmitted_lo; + uint32_t total_broadcast_packets_transmitted_hi; + uint32_t total_broadcast_packets_transmitted_lo; + uint32_t valid_bytes_received_hi; + uint32_t valid_bytes_received_lo; + + uint32_t error_bytes_received_hi; + uint32_t error_bytes_received_lo; + uint32_t etherstatsoverrsizepkts_hi; + uint32_t etherstatsoverrsizepkts_lo; + uint32_t no_buff_discard_hi; + uint32_t no_buff_discard_lo; + + uint32_t rx_stat_ifhcinbadoctets_hi; + uint32_t rx_stat_ifhcinbadoctets_lo; + uint32_t tx_stat_ifhcoutbadoctets_hi; + uint32_t tx_stat_ifhcoutbadoctets_lo; + uint32_t rx_stat_dot3statsfcserrors_hi; + uint32_t rx_stat_dot3statsfcserrors_lo; + uint32_t rx_stat_dot3statsalignmenterrors_hi; + uint32_t rx_stat_dot3statsalignmenterrors_lo; + uint32_t rx_stat_dot3statscarriersenseerrors_hi; + uint32_t rx_stat_dot3statscarriersenseerrors_lo; + uint32_t rx_stat_falsecarriererrors_hi; + uint32_t rx_stat_falsecarriererrors_lo; + uint32_t rx_stat_etherstatsundersizepkts_hi; + uint32_t rx_stat_etherstatsundersizepkts_lo; + uint32_t rx_stat_dot3statsframestoolong_hi; + uint32_t rx_stat_dot3statsframestoolong_lo; + uint32_t rx_stat_etherstatsfragments_hi; + uint32_t rx_stat_etherstatsfragments_lo; + uint32_t rx_stat_etherstatsjabbers_hi; + uint32_t rx_stat_etherstatsjabbers_lo; + uint32_t rx_stat_maccontrolframesreceived_hi; + uint32_t rx_stat_maccontrolframesreceived_lo; + uint32_t rx_stat_bmac_xpf_hi; + 
uint32_t rx_stat_bmac_xpf_lo; + uint32_t rx_stat_bmac_xcf_hi; + uint32_t rx_stat_bmac_xcf_lo; + uint32_t rx_stat_xoffstateentered_hi; + uint32_t rx_stat_xoffstateentered_lo; + uint32_t rx_stat_xonpauseframesreceived_hi; + uint32_t rx_stat_xonpauseframesreceived_lo; + uint32_t rx_stat_xoffpauseframesreceived_hi; + uint32_t rx_stat_xoffpauseframesreceived_lo; + uint32_t tx_stat_outxonsent_hi; + uint32_t tx_stat_outxonsent_lo; + uint32_t tx_stat_outxoffsent_hi; + uint32_t tx_stat_outxoffsent_lo; + uint32_t tx_stat_flowcontroldone_hi; + uint32_t tx_stat_flowcontroldone_lo; + uint32_t tx_stat_etherstatscollisions_hi; + uint32_t tx_stat_etherstatscollisions_lo; + uint32_t tx_stat_dot3statssinglecollisionframes_hi; + uint32_t tx_stat_dot3statssinglecollisionframes_lo; + uint32_t tx_stat_dot3statsmultiplecollisionframes_hi; + uint32_t tx_stat_dot3statsmultiplecollisionframes_lo; + uint32_t tx_stat_dot3statsdeferredtransmissions_hi; + uint32_t tx_stat_dot3statsdeferredtransmissions_lo; + uint32_t tx_stat_dot3statsexcessivecollisions_hi; + uint32_t tx_stat_dot3statsexcessivecollisions_lo; + uint32_t tx_stat_dot3statslatecollisions_hi; + uint32_t tx_stat_dot3statslatecollisions_lo; + uint32_t tx_stat_etherstatspkts64octets_hi; + uint32_t tx_stat_etherstatspkts64octets_lo; + uint32_t tx_stat_etherstatspkts65octetsto127octets_hi; + uint32_t tx_stat_etherstatspkts65octetsto127octets_lo; + uint32_t tx_stat_etherstatspkts128octetsto255octets_hi; + uint32_t tx_stat_etherstatspkts128octetsto255octets_lo; + uint32_t tx_stat_etherstatspkts256octetsto511octets_hi; + uint32_t tx_stat_etherstatspkts256octetsto511octets_lo; + uint32_t tx_stat_etherstatspkts512octetsto1023octets_hi; + uint32_t tx_stat_etherstatspkts512octetsto1023octets_lo; + uint32_t tx_stat_etherstatspkts1024octetsto1522octets_hi; + uint32_t tx_stat_etherstatspkts1024octetsto1522octets_lo; + uint32_t tx_stat_etherstatspktsover1522octets_hi; + uint32_t tx_stat_etherstatspktsover1522octets_lo; + uint32_t tx_stat_bmac_2047_hi; + uint32_t tx_stat_bmac_2047_lo; + uint32_t tx_stat_bmac_4095_hi; + uint32_t tx_stat_bmac_4095_lo; + uint32_t tx_stat_bmac_9216_hi; + uint32_t tx_stat_bmac_9216_lo; + uint32_t tx_stat_bmac_16383_hi; + uint32_t tx_stat_bmac_16383_lo; + uint32_t tx_stat_dot3statsinternalmactransmiterrors_hi; + uint32_t tx_stat_dot3statsinternalmactransmiterrors_lo; + uint32_t tx_stat_bmac_ufl_hi; + uint32_t tx_stat_bmac_ufl_lo; + + uint32_t pause_frames_received_hi; + uint32_t pause_frames_received_lo; + uint32_t pause_frames_sent_hi; + uint32_t pause_frames_sent_lo; + + uint32_t etherstatspkts1024octetsto1522octets_hi; + uint32_t etherstatspkts1024octetsto1522octets_lo; + uint32_t etherstatspktsover1522octets_hi; + uint32_t etherstatspktsover1522octets_lo; + + uint32_t brb_drop_hi; + uint32_t brb_drop_lo; + uint32_t brb_truncate_hi; + uint32_t brb_truncate_lo; + + uint32_t mac_filter_discard; + uint32_t mf_tag_discard; + uint32_t brb_truncate_discard; + uint32_t mac_discard; + + uint32_t nig_timer_max; + + /* PFC */ + uint32_t pfc_frames_received_hi; + uint32_t pfc_frames_received_lo; + uint32_t pfc_frames_sent_hi; + uint32_t pfc_frames_sent_lo; + + /* Recovery */ + uint32_t recoverable_error; + uint32_t unrecoverable_error; + + /* src: Clear-on-Read register; Will not survive PMF Migration */ + uint32_t eee_tx_lpi; + + /* receive path driver statistics */ + uint32_t rx_calls; + uint32_t rx_pkts; + uint32_t rx_soft_errors; + uint32_t rx_hw_csum_errors; + uint32_t rx_ofld_frames_csum_ip; + uint32_t rx_ofld_frames_csum_tcp_udp; + uint32_t 
rx_budget_reached; + + /* tx path driver statistics */ + uint32_t tx_pkts; + uint32_t tx_soft_errors; + uint32_t tx_ofld_frames_csum_ip; + uint32_t tx_ofld_frames_csum_tcp; + uint32_t tx_ofld_frames_csum_udp; + uint32_t tx_encap_failures; + uint32_t tx_hw_queue_full; + uint32_t tx_hw_max_queue_depth; + uint32_t tx_dma_mapping_failure; + uint32_t tx_max_drbr_queue_depth; + uint32_t tx_window_violation_std; + uint32_t tx_chain_lost_mbuf; + uint32_t tx_frames_deferred; + uint32_t tx_queue_xoff; + + /* mbuf driver statistics */ + uint32_t mbuf_defrag_attempts; + uint32_t mbuf_defrag_failures; + uint32_t mbuf_rx_bd_alloc_failed; + uint32_t mbuf_rx_bd_mapping_failed; + + /* track the number of allocated mbufs */ + uint32_t mbuf_alloc_tx; + uint32_t mbuf_alloc_rx; +}; + + +struct bnx2x_eth_q_stats { + uint32_t total_unicast_bytes_received_hi; + uint32_t total_unicast_bytes_received_lo; + uint32_t total_broadcast_bytes_received_hi; + uint32_t total_broadcast_bytes_received_lo; + uint32_t total_multicast_bytes_received_hi; + uint32_t total_multicast_bytes_received_lo; + uint32_t total_bytes_received_hi; + uint32_t total_bytes_received_lo; + uint32_t total_unicast_bytes_transmitted_hi; + uint32_t total_unicast_bytes_transmitted_lo; + uint32_t total_broadcast_bytes_transmitted_hi; + uint32_t total_broadcast_bytes_transmitted_lo; + uint32_t total_multicast_bytes_transmitted_hi; + uint32_t total_multicast_bytes_transmitted_lo; + uint32_t total_bytes_transmitted_hi; + uint32_t total_bytes_transmitted_lo; + uint32_t total_unicast_packets_received_hi; + uint32_t total_unicast_packets_received_lo; + uint32_t total_multicast_packets_received_hi; + uint32_t total_multicast_packets_received_lo; + uint32_t total_broadcast_packets_received_hi; + uint32_t total_broadcast_packets_received_lo; + uint32_t total_unicast_packets_transmitted_hi; + uint32_t total_unicast_packets_transmitted_lo; + uint32_t total_multicast_packets_transmitted_hi; + uint32_t total_multicast_packets_transmitted_lo; + uint32_t total_broadcast_packets_transmitted_hi; + uint32_t total_broadcast_packets_transmitted_lo; + uint32_t valid_bytes_received_hi; + uint32_t valid_bytes_received_lo; + + uint32_t etherstatsoverrsizepkts_hi; + uint32_t etherstatsoverrsizepkts_lo; + uint32_t no_buff_discard_hi; + uint32_t no_buff_discard_lo; + + uint32_t total_packets_received_checksum_discarded_hi; + uint32_t total_packets_received_checksum_discarded_lo; + uint32_t total_packets_received_ttl0_discarded_hi; + uint32_t total_packets_received_ttl0_discarded_lo; + uint32_t total_transmitted_dropped_packets_error_hi; + uint32_t total_transmitted_dropped_packets_error_lo; + + /* receive path driver statistics */ + uint32_t rx_calls; + uint32_t rx_pkts; + uint32_t rx_soft_errors; + uint32_t rx_hw_csum_errors; + uint32_t rx_ofld_frames_csum_ip; + uint32_t rx_ofld_frames_csum_tcp_udp; + uint32_t rx_budget_reached; + + /* tx path driver statistics */ + uint32_t tx_pkts; + uint32_t tx_soft_errors; + uint32_t tx_ofld_frames_csum_ip; + uint32_t tx_ofld_frames_csum_tcp; + uint32_t tx_ofld_frames_csum_udp; + uint32_t tx_encap_failures; + uint32_t tx_hw_queue_full; + uint32_t tx_hw_max_queue_depth; + uint32_t tx_dma_mapping_failure; + uint32_t tx_max_drbr_queue_depth; + uint32_t tx_window_violation_std; + uint32_t tx_chain_lost_mbuf; + uint32_t tx_frames_deferred; + uint32_t tx_queue_xoff; + + /* mbuf driver statistics */ + uint32_t mbuf_defrag_attempts; + uint32_t mbuf_defrag_failures; + uint32_t mbuf_rx_bd_alloc_failed; + uint32_t mbuf_rx_bd_mapping_failed; + + /* 
track the number of allocated mbufs */
+ uint32_t mbuf_alloc_tx;
+ uint32_t mbuf_alloc_rx;
+};
+
+struct bnx2x_eth_stats_old {
+ uint32_t rx_stat_dot3statsframestoolong_hi;
+ uint32_t rx_stat_dot3statsframestoolong_lo;
+};
+
+struct bnx2x_eth_q_stats_old {
+ /* Fields to preserve over fw reset */
+ uint32_t total_unicast_bytes_received_hi;
+ uint32_t total_unicast_bytes_received_lo;
+ uint32_t total_broadcast_bytes_received_hi;
+ uint32_t total_broadcast_bytes_received_lo;
+ uint32_t total_multicast_bytes_received_hi;
+ uint32_t total_multicast_bytes_received_lo;
+ uint32_t total_unicast_bytes_transmitted_hi;
+ uint32_t total_unicast_bytes_transmitted_lo;
+ uint32_t total_broadcast_bytes_transmitted_hi;
+ uint32_t total_broadcast_bytes_transmitted_lo;
+ uint32_t total_multicast_bytes_transmitted_hi;
+ uint32_t total_multicast_bytes_transmitted_lo;
+
+ /* Fields to preserve the last value of */
+ uint32_t total_bytes_received_hi;
+ uint32_t total_bytes_received_lo;
+ uint32_t total_bytes_transmitted_hi;
+ uint32_t total_bytes_transmitted_lo;
+ uint32_t total_unicast_packets_received_hi;
+ uint32_t total_unicast_packets_received_lo;
+ uint32_t total_multicast_packets_received_hi;
+ uint32_t total_multicast_packets_received_lo;
+ uint32_t total_broadcast_packets_received_hi;
+ uint32_t total_broadcast_packets_received_lo;
+ uint32_t total_unicast_packets_transmitted_hi;
+ uint32_t total_unicast_packets_transmitted_lo;
+ uint32_t total_multicast_packets_transmitted_hi;
+ uint32_t total_multicast_packets_transmitted_lo;
+ uint32_t total_broadcast_packets_transmitted_hi;
+ uint32_t total_broadcast_packets_transmitted_lo;
+ uint32_t valid_bytes_received_hi;
+ uint32_t valid_bytes_received_lo;
+
+ /* receive path driver statistics */
+ uint32_t rx_calls_old;
+ uint32_t rx_pkts_old;
+ uint32_t rx_soft_errors_old;
+ uint32_t rx_hw_csum_errors_old;
+ uint32_t rx_ofld_frames_csum_ip_old;
+ uint32_t rx_ofld_frames_csum_tcp_udp_old;
+ uint32_t rx_budget_reached_old;
+
+ /* tx path driver statistics */
+ uint32_t tx_pkts_old;
+ uint32_t tx_soft_errors_old;
+ uint32_t tx_ofld_frames_csum_ip_old;
+ uint32_t tx_ofld_frames_csum_tcp_old;
+ uint32_t tx_ofld_frames_csum_udp_old;
+ uint32_t tx_encap_failures_old;
+ uint32_t tx_hw_queue_full_old;
+ uint32_t tx_hw_max_queue_depth_old;
+ uint32_t tx_dma_mapping_failure_old;
+ uint32_t tx_max_drbr_queue_depth_old;
+ uint32_t tx_window_violation_std_old;
+ uint32_t tx_chain_lost_mbuf_old;
+ uint32_t tx_frames_deferred_old;
+ uint32_t tx_queue_xoff_old;
+
+ /* mbuf driver statistics */
+ uint32_t mbuf_defrag_attempts_old;
+ uint32_t mbuf_defrag_failures_old;
+ uint32_t mbuf_rx_bd_alloc_failed_old;
+ uint32_t mbuf_rx_bd_mapping_failed_old;
+
+ /* track the number of allocated mbufs */
+ int mbuf_alloc_tx_old;
+ int mbuf_alloc_rx_old;
+};
+
+struct bnx2x_net_stats_old {
+ uint32_t rx_dropped;
+};
+
+struct bnx2x_fw_port_stats_old {
+ uint32_t pfc_frames_tx_hi;
+ uint32_t pfc_frames_tx_lo;
+ uint32_t pfc_frames_rx_hi;
+ uint32_t pfc_frames_rx_lo;
+
+ uint32_t mac_filter_discard;
+ uint32_t mf_tag_discard;
+ uint32_t brb_truncate_discard;
+ uint32_t mac_discard;
+};
+
+/* sum[hi:lo] += add[hi:lo] */
+#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
+ do { \
+ s_lo += a_lo; \
+ s_hi += a_hi + ((s_lo < a_lo) ? 
1 : 0); \ + } while (0) + +#define LE32_0 ((uint32_t) 0) +#define LE16_0 ((uint16_t) 0) + +/* The _force is for cases where high value is 0 */ +#define ADD_64_LE(s_hi, a_hi_le, s_lo, a_lo_le) \ + ADD_64(s_hi, le32toh(a_hi_le), \ + s_lo, le32toh(a_lo_le)) + +#define ADD_64_LE16(s_hi, a_hi_le, s_lo, a_lo_le) \ + ADD_64(s_hi, le16toh(a_hi_le), \ + s_lo, le16toh(a_lo_le)) + +/* difference = minuend - subtrahend */ +#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \ + do { \ + if (m_lo < s_lo) { \ + /* underflow */ \ + d_hi = m_hi - s_hi; \ + if (d_hi > 0) { \ + /* we can 'loan' 1 */ \ + d_hi--; \ + d_lo = m_lo + (UINT_MAX - s_lo) + 1; \ + } else { \ + /* m_hi <= s_hi */ \ + d_hi = 0; \ + d_lo = 0; \ + } \ + } else { \ + /* m_lo >= s_lo */ \ + if (m_hi < s_hi) { \ + d_hi = 0; \ + d_lo = 0; \ + } else { \ + /* m_hi >= s_hi */ \ + d_hi = m_hi - s_hi; \ + d_lo = m_lo - s_lo; \ + } \ + } \ + } while (0) + +#define UPDATE_STAT64(s, t) \ + do { \ + DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \ + diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \ + pstats->mac_stx[0].t##_hi = new->s##_hi; \ + pstats->mac_stx[0].t##_lo = new->s##_lo; \ + ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \ + pstats->mac_stx[1].t##_lo, diff.lo); \ + } while (0) + +#define UPDATE_STAT64_NIG(s, t) \ + do { \ + DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \ + diff.lo, new->s##_lo, old->s##_lo); \ + ADD_64(estats->t##_hi, diff.hi, \ + estats->t##_lo, diff.lo); \ + } while (0) + +/* sum[hi:lo] += add */ +#define ADD_EXTEND_64(s_hi, s_lo, a) \ + do { \ + s_lo += a; \ + s_hi += (s_lo < a) ? 1 : 0; \ + } while (0) + +#define ADD_STAT64(diff, t) \ + do { \ + ADD_64(pstats->mac_stx[1].t##_hi, new->diff##_hi, \ + pstats->mac_stx[1].t##_lo, new->diff##_lo); \ + } while (0) + +#define UPDATE_EXTEND_STAT(s) \ + do { \ + ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \ + pstats->mac_stx[1].s##_lo, \ + new->s); \ + } while (0) + +#define UPDATE_EXTEND_TSTAT_X(s, t, size) \ + do { \ + diff = le##size##toh(tclient->s) - \ + le##size##toh(old_tclient->s); \ + old_tclient->s = tclient->s; \ + ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +#define UPDATE_EXTEND_TSTAT(s, t) UPDATE_EXTEND_TSTAT_X(s, t, 32) + +#define UPDATE_EXTEND_E_TSTAT(s, t, size) \ + do { \ + UPDATE_EXTEND_TSTAT_X(s, t, size); \ + ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \ + } while (0) + +#define UPDATE_EXTEND_USTAT(s, t) \ + do { \ + diff = le32toh(uclient->s) - le32toh(old_uclient->s); \ + old_uclient->s = uclient->s; \ + ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +#define UPDATE_EXTEND_E_USTAT(s, t) \ + do { \ + UPDATE_EXTEND_USTAT(s, t); \ + ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \ + } while (0) + +#define UPDATE_EXTEND_XSTAT(s, t) \ + do { \ + diff = le32toh(xclient->s) - le32toh(old_xclient->s); \ + old_xclient->s = xclient->s; \ + ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +#define UPDATE_QSTAT(s, t) \ + do { \ + qstats->t##_hi = qstats_old->t##_hi + le32toh(s.hi); \ + qstats->t##_lo = qstats_old->t##_lo + le32toh(s.lo); \ + } while (0) + +#define UPDATE_QSTAT_OLD(f) \ + do { \ + qstats_old->f = qstats->f; \ + } while (0) + +#define UPDATE_ESTAT_QSTAT_64(s) \ + do { \ + ADD_64(estats->s##_hi, qstats->s##_hi, \ + estats->s##_lo, qstats->s##_lo); \ + SUB_64(estats->s##_hi, qstats_old->s##_hi_old, \ + estats->s##_lo, qstats_old->s##_lo_old); \ + qstats_old->s##_hi_old = qstats->s##_hi; \ + qstats_old->s##_lo_old = qstats->s##_lo; \ + } while (0) + +#define 
UPDATE_ESTAT_QSTAT(s) \ + do { \ + estats->s += qstats->s; \ + estats->s -= qstats_old->s##_old; \ + qstats_old->s##_old = qstats->s; \ + } while (0) + +#define UPDATE_FSTAT_QSTAT(s) \ + do { \ + ADD_64(fstats->s##_hi, qstats->s##_hi, \ + fstats->s##_lo, qstats->s##_lo); \ + SUB_64(fstats->s##_hi, qstats_old->s##_hi, \ + fstats->s##_lo, qstats_old->s##_lo); \ + estats->s##_hi = fstats->s##_hi; \ + estats->s##_lo = fstats->s##_lo; \ + qstats_old->s##_hi = qstats->s##_hi; \ + qstats_old->s##_lo = qstats->s##_lo; \ + } while (0) + +#define UPDATE_FW_STAT(s) \ + do { \ + estats->s = le32toh(tport->s) + fwstats->s; \ + } while (0) + +#define UPDATE_FW_STAT_OLD(f) \ + do { \ + fwstats->f = estats->f; \ + } while (0) + +#define UPDATE_ESTAT(s, t) \ + do { \ + SUB_64(estats->s##_hi, estats_old->t##_hi, \ + estats->s##_lo, estats_old->t##_lo); \ + ADD_64(estats->s##_hi, estats->t##_hi, \ + estats->s##_lo, estats->t##_lo); \ + estats_old->t##_hi = estats->t##_hi; \ + estats_old->t##_lo = estats->t##_lo; \ + } while (0) + +/* minuend -= subtrahend */ +#define SUB_64(m_hi, s_hi, m_lo, s_lo) \ + do { \ + DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \ + } while (0) + +/* minuend[hi:lo] -= subtrahend */ +#define SUB_EXTEND_64(m_hi, m_lo, s) \ + do { \ + uint32_t s_hi = 0; \ + SUB_64(m_hi, s_hi, m_lo, s); \ + } while (0) + +#define SUB_EXTEND_USTAT(s, t) \ + do { \ + diff = le32toh(uclient->s) - le32toh(old_uclient->s); \ + SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +struct bnx2x_softc; +void bnx2x_stats_init(struct bnx2x_softc *sc); +void bnx2x_stats_handle(struct bnx2x_softc *sc, enum bnx2x_stats_event event); +void bnx2x_save_statistics(struct bnx2x_softc *sc); +void bnx2x_memset_stats(struct bnx2x_softc *sc); + +#endif /* BNX2X_STATS_H */ diff --git a/drivers/net/bnx2x/bnx2x_vfpf.c b/drivers/net/bnx2x/bnx2x_vfpf.c new file mode 100644 index 00000000..14b1d10a --- /dev/null +++ b/drivers/net/bnx2x/bnx2x_vfpf.c @@ -0,0 +1,601 @@ +/* + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * + * Copyright (c) 2015 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.bnx2x_pmd for copyright and licensing details. + */ + +#include "bnx2x.h" + +/* calculate the crc in the bulletin board */ +static inline uint32_t +bnx2x_vf_crc(struct bnx2x_vf_bulletin *bull) +{ + uint32_t crc_sz = sizeof(bull->crc), length = bull->length - crc_sz; + + return ECORE_CRC32_LE(0, (uint8_t *)bull + crc_sz, length); +} + +/* Checks are there mac/channel updates for VF + * returns TRUE if something was updated +*/ +int +bnx2x_check_bull(struct bnx2x_softc *sc) +{ + struct bnx2x_vf_bulletin *bull; + uint8_t tries = 0; + uint16_t old_version = sc->old_bulletin.version; + uint64_t valid_bitmap; + + bull = sc->pf2vf_bulletin; + if (old_version == bull->version) { + return FALSE; + } else { + /* Check the crc until we get the correct data */ + while (tries < BNX2X_VF_BULLETIN_TRIES) { + bull = sc->pf2vf_bulletin; + if (bull->crc == bnx2x_vf_crc(bull)) + break; + + PMD_DRV_LOG(ERR, "bad crc on bulletin board. contained %x computed %x", + bull->crc, bnx2x_vf_crc(bull)); + ++tries; + } + if (tries == BNX2X_VF_BULLETIN_TRIES) { + PMD_DRV_LOG(ERR, "pf to vf bulletin board crc was wrong %d consecutive times. 
Aborting",
+ tries);
+ return FALSE;
+ }
+ }
+
+ valid_bitmap = bull->valid_bitmap;
+
+ /* check the mac address and VLAN and allocate memory if valid */
+ if (valid_bitmap & (1 << MAC_ADDR_VALID) && memcmp(bull->mac, sc->old_bulletin.mac, ETH_ALEN))
+ rte_memcpy(&sc->link_params.mac_addr, bull->mac, ETH_ALEN);
+ if (valid_bitmap & (1 << VLAN_VALID))
+ rte_memcpy(&bull->vlan, &sc->old_bulletin.vlan, VLAN_HLEN);
+
+ sc->old_bulletin = *bull;
+
+ return TRUE;
+}
+
+/* add tlv to a buffer */
+#define BNX2X_TLV_APPEND(_tlvs, _offset, _type, _length) \
+ ((struct vf_first_tlv *)((unsigned long)_tlvs + _offset))->type = _type; \
+ ((struct vf_first_tlv *)((unsigned long)_tlvs + _offset))->length = _length
+
+/* Initialize the header of the first tlv and clear the mailbox */
+static void
+bnx2x_init_first_tlv(struct bnx2x_softc *sc, struct vf_first_tlv *tlv,
+ uint16_t type, uint16_t len)
+{
+ struct bnx2x_vf_mbx_msg *mbox = sc->vf2pf_mbox;
+ PMD_DRV_LOG(DEBUG, "Preparing %d tlv for sending", type);
+
+ memset(mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));
+
+ BNX2X_TLV_APPEND(tlv, 0, type, len);
+
+ /* Initialize header of the first tlv */
+ tlv->reply_offset = sizeof(mbox->query);
+}
+
+#define BNX2X_VF_CMD_ADDR_LO PXP_VF_ADDR_CSDM_GLOBAL_START
+#define BNX2X_VF_CMD_ADDR_HI BNX2X_VF_CMD_ADDR_LO + 4
+#define BNX2X_VF_CMD_TRIGGER BNX2X_VF_CMD_ADDR_HI + 4
+#define BNX2X_VF_CHANNEL_DELAY 100
+#define BNX2X_VF_CHANNEL_TRIES 100
+
+static int
+bnx2x_do_req4pf(struct bnx2x_softc *sc, phys_addr_t phys_addr)
+{
+ uint8_t *status = &sc->vf2pf_mbox->resp.common_reply.status;
+ uint8_t i;
+
+ if (!*status) {
+ bnx2x_check_bull(sc);
+ if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
+ PMD_DRV_LOG(ERR, "channel is down. Aborting message sending");
+ *status = BNX2X_VF_STATUS_SUCCESS;
+ return 0;
+ }
+
+ REG_WR(sc, BNX2X_VF_CMD_ADDR_LO, U64_LO(phys_addr));
+ REG_WR(sc, BNX2X_VF_CMD_ADDR_HI, U64_HI(phys_addr));
+
+ /* memory barrier to ensure that FW can read phys_addr */
+ wmb();
+
+ REG_WR8(sc, BNX2X_VF_CMD_TRIGGER, 1);
+
+ /* Do several attempts until PF completes
+ * "." 
is used to show progress + */ + for (i = 0; i < BNX2X_VF_CHANNEL_TRIES; i++) { + DELAY_MS(BNX2X_VF_CHANNEL_DELAY); + if (*status) + break; + } + + if (!*status) { + PMD_DRV_LOG(ERR, "Response from PF timed out"); + return -EAGAIN; + } + } else { + PMD_DRV_LOG(ERR, "status should be zero before message" + "to pf was sent"); + return -EINVAL; + } + + PMD_DRV_LOG(DEBUG, "Response from PF was received"); + return 0; +} + +static inline uint16_t bnx2x_check_me_flags(uint32_t val) +{ + if (((val) & ME_REG_VF_VALID) && (!((val) & ME_REG_VF_ERR))) + return ME_REG_VF_VALID; + else + return 0; +} + +#define BNX2X_ME_ANSWER_DELAY 100 +#define BNX2X_ME_ANSWER_TRIES 10 + +static inline int bnx2x_read_vf_id(struct bnx2x_softc *sc) +{ + uint32_t val; + uint8_t i = 0; + + while (i <= BNX2X_ME_ANSWER_TRIES) { + val = BNX2X_DB_READ(DOORBELL_ADDR(sc, 0)); + if (bnx2x_check_me_flags(val)) + return VF_ID(val); + + DELAY_MS(BNX2X_ME_ANSWER_DELAY); + i++; + } + + return -EINVAL; +} + +#define BNX2X_VF_OBTAIN_MAX_TRIES 3 +#define BNX2X_VF_OBTAIN_MAC_FILTERS 1 +#define BNX2X_VF_OBTAIN_MC_FILTERS 10 + +struct bnx2x_obtain_status { + int success; + int err_code; +}; + +static +struct bnx2x_obtain_status bnx2x_loop_obtain_resources(struct bnx2x_softc *sc) +{ + int tries = 0; + struct vf_acquire_resp_tlv *resp = &sc->vf2pf_mbox->resp.acquire_resp, + *sc_resp = &sc->acquire_resp; + struct vf_resource_query *res_query; + struct vf_resc *resc; + struct bnx2x_obtain_status status; + int res_obtained = false; + + do { + PMD_DRV_LOG(DEBUG, "trying to get resources"); + + if (bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr)) { + /* timeout */ + status.success = 0; + status.err_code = -EAGAIN; + return status; + } + + memcpy(sc_resp, resp, sizeof(sc->acquire_resp)); + + tries++; + + /* check PF to request acceptance */ + if (sc_resp->status == BNX2X_VF_STATUS_SUCCESS) { + PMD_DRV_LOG(DEBUG, "resources obtained successfully"); + res_obtained = true; + } else if (sc_resp->status == BNX2X_VF_STATUS_NO_RESOURCES && + tries < BNX2X_VF_OBTAIN_MAX_TRIES) { + PMD_DRV_LOG(DEBUG, + "PF cannot allocate requested amount of resources"); + + res_query = &sc->vf2pf_mbox->query[0].acquire.res_query; + resc = &sc_resp->resc; + + /* PF refused our request. Try to decrease request params */ + res_query->num_txqs = min(res_query->num_txqs, resc->num_txqs); + res_query->num_rxqs = min(res_query->num_rxqs, resc->num_rxqs); + res_query->num_sbs = min(res_query->num_sbs, resc->num_sbs); + res_query->num_mac_filters = min(res_query->num_mac_filters, resc->num_mac_filters); + res_query->num_vlan_filters = min(res_query->num_vlan_filters, resc->num_vlan_filters); + res_query->num_mc_filters = min(res_query->num_mc_filters, resc->num_mc_filters); + + memset(&sc->vf2pf_mbox->resp, 0, sizeof(union resp_tlvs)); + } else { + PMD_DRV_LOG(ERR, "Resources cannot be obtained. Status of handling: %d. 
Aborting", + sc_resp->status); + status.success = 0; + status.err_code = -EAGAIN; + return status; + } + } while (!res_obtained); + + status.success = 1; + return status; +} + +int bnx2x_vf_get_resources(struct bnx2x_softc *sc, uint8_t tx_count, uint8_t rx_count) +{ + struct vf_acquire_tlv *acq = &sc->vf2pf_mbox->query[0].acquire; + int vf_id; + struct bnx2x_obtain_status obtain_status; + + bnx2x_vf_close(sc); + bnx2x_init_first_tlv(sc, &acq->first_tlv, BNX2X_VF_TLV_ACQUIRE, sizeof(*acq)); + + vf_id = bnx2x_read_vf_id(sc); + if (vf_id < 0) + return -EAGAIN; + + acq->vf_id = vf_id; + + acq->res_query.num_rxqs = rx_count; + acq->res_query.num_txqs = tx_count; + acq->res_query.num_sbs = sc->igu_sb_cnt; + acq->res_query.num_mac_filters = BNX2X_VF_OBTAIN_MAC_FILTERS; + acq->res_query.num_mc_filters = BNX2X_VF_OBTAIN_MC_FILTERS; + + acq->bulletin_addr = sc->pf2vf_bulletin_mapping.paddr; + + /* Request physical port identifier */ + BNX2X_TLV_APPEND(acq, acq->first_tlv.length, + BNX2X_VF_TLV_PHYS_PORT_ID, + sizeof(struct channel_tlv)); + + BNX2X_TLV_APPEND(acq, + (acq->first_tlv.length + sizeof(struct channel_tlv)), + BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* requesting the resources in loop */ + obtain_status = bnx2x_loop_obtain_resources(sc); + if (!obtain_status.success) + return obtain_status.err_code; + + struct vf_acquire_resp_tlv sc_resp = sc->acquire_resp; + + sc->devinfo.chip_id |= (sc_resp.chip_num & 0xFFFF); + sc->devinfo.int_block = INT_BLOCK_IGU; + sc->devinfo.chip_port_mode = CHIP_2_PORT_MODE; + sc->devinfo.mf_info.mf_ov = 0; + sc->devinfo.mf_info.mf_mode = 0; + sc->devinfo.flash_size = 0; + + sc->igu_sb_cnt = sc_resp.resc.num_sbs; + sc->igu_base_sb = sc_resp.resc.hw_sbs[0] & 0xFF; + sc->igu_dsb_id = -1; + sc->max_tx_queues = sc_resp.resc.num_txqs; + sc->max_rx_queues = sc_resp.resc.num_rxqs; + + sc->link_params.chip_id = sc->devinfo.chip_id; + sc->doorbell_size = sc_resp.db_size; + sc->flags |= BNX2X_NO_WOL_FLAG | BNX2X_NO_ISCSI_OOO_FLAG | BNX2X_NO_ISCSI_FLAG | BNX2X_NO_FCOE_FLAG; + + PMD_DRV_LOG(DEBUG, "status block count = %d, base status block = %x", + sc->igu_sb_cnt, sc->igu_base_sb); + strncpy(sc->fw_ver, sc_resp.fw_ver, sizeof(sc->fw_ver)); + + if (is_valid_ether_addr(sc_resp.resc.current_mac_addr)) + (void)rte_memcpy(sc->link_params.mac_addr, + sc_resp.resc.current_mac_addr, + ETH_ALEN); + + return 0; +} + +/* Ask PF to release VF's resources */ +void +bnx2x_vf_close(struct bnx2x_softc *sc) +{ + struct vf_release_tlv *query; + struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply; + int vf_id = bnx2x_read_vf_id(sc); + + if (vf_id >= 0) { + query = &sc->vf2pf_mbox->query[0].release; + bnx2x_init_first_tlv(sc, &query->first_tlv, BNX2X_VF_TLV_RELEASE, + sizeof(*query)); + + query->vf_id = vf_id; + BNX2X_TLV_APPEND(query, query->first_tlv.length, BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); + if (reply->status != BNX2X_VF_STATUS_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to release VF"); + } +} + +/* Let PF know the VF status blocks phys_addrs */ +int +bnx2x_vf_init(struct bnx2x_softc *sc) +{ + struct vf_init_tlv *query; + struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply; + int i; + + query = &sc->vf2pf_mbox->query[0].init; + bnx2x_init_first_tlv(sc, &query->first_tlv, BNX2X_VF_TLV_INIT, + sizeof(*query)); + + FOR_EACH_QUEUE(sc, i) { + query->sb_addr[i] = (unsigned long)(sc->fp[i].sb_dma.paddr); + } + + query->stats_step = sizeof(struct 
per_queue_stats); + query->stats_addr = sc->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, queue_stats); + + BNX2X_TLV_APPEND(query, query->first_tlv.length, BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); + if (reply->status != BNX2X_VF_STATUS_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to init VF"); + return -EINVAL; + } + + PMD_DRV_LOG(DEBUG, "VF was initialized"); + return 0; +} + +void +bnx2x_vf_unload(struct bnx2x_softc *sc) +{ + struct vf_close_tlv *query; + struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply; + struct vf_q_op_tlv *query_op; + int i, vf_id; + + vf_id = bnx2x_read_vf_id(sc); + if (vf_id > 0) { + FOR_EACH_QUEUE(sc, i) { + query_op = &sc->vf2pf_mbox->query[0].q_op; + bnx2x_init_first_tlv(sc, &query_op->first_tlv, + BNX2X_VF_TLV_TEARDOWN_Q, + sizeof(*query_op)); + + query_op->vf_qid = i; + + BNX2X_TLV_APPEND(query_op, query_op->first_tlv.length, + BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); + if (reply->status != BNX2X_VF_STATUS_SUCCESS) + PMD_DRV_LOG(ERR, + "Bad reply for vf_q %d teardown", i); + } + + bnx2x_vf_set_mac(sc, false); + + query = &sc->vf2pf_mbox->query[0].close; + bnx2x_init_first_tlv(sc, &query->first_tlv, BNX2X_VF_TLV_CLOSE, + sizeof(*query)); + + query->vf_id = vf_id; + + BNX2X_TLV_APPEND(query, query->first_tlv.length, + BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); + if (reply->status != BNX2X_VF_STATUS_SUCCESS) + PMD_DRV_LOG(ERR, + "Bad reply from PF for close message"); + } +} + +static inline uint16_t +bnx2x_vf_q_flags(uint8_t leading) +{ + uint16_t flags = leading ? 
BNX2X_VF_Q_FLAG_LEADING_RSS : 0; + + flags |= BNX2X_VF_Q_FLAG_CACHE_ALIGN; + flags |= BNX2X_VF_Q_FLAG_STATS; + flags |= BNX2X_VF_Q_FLAG_VLAN; + + return flags; +} + +static void +bnx2x_vf_rx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, + struct vf_rxq_params *rxq_init, uint16_t flags) +{ + struct bnx2x_rx_queue *rxq; + + rxq = sc->rx_queues[fp->index]; + if (!rxq) { + PMD_DRV_LOG(ERR, "RX queue %d is NULL", fp->index); + return; + } + + rxq_init->rcq_addr = rxq->cq_ring_phys_addr; + rxq_init->rcq_np_addr = rxq->cq_ring_phys_addr + BNX2X_PAGE_SIZE; + rxq_init->rxq_addr = rxq->rx_ring_phys_addr; + rxq_init->vf_sb_id = fp->index; + rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; + rxq_init->mtu = sc->mtu; + rxq_init->buf_sz = fp->rx_buf_size; + rxq_init->flags = flags; + rxq_init->stat_id = -1; + rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; +} + +static void +bnx2x_vf_tx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, + struct vf_txq_params *txq_init, uint16_t flags) +{ + struct bnx2x_tx_queue *txq; + + txq = sc->tx_queues[fp->index]; + if (!txq) { + PMD_DRV_LOG(ERR, "TX queue %d is NULL", fp->index); + return; + } + + txq_init->txq_addr = txq->tx_ring_phys_addr; + txq_init->sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0; + txq_init->flags = flags; + txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; + txq_init->vf_sb_id = fp->index; +} + +int +bnx2x_vf_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, int leading) +{ + struct vf_setup_q_tlv *query; + struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply; + uint16_t flags = bnx2x_vf_q_flags(leading); + + query = &sc->vf2pf_mbox->query[0].setup_q; + bnx2x_init_first_tlv(sc, &query->first_tlv, BNX2X_VF_TLV_SETUP_Q, + sizeof(*query)); + + query->vf_qid = fp->index; + query->param_valid = VF_RXQ_VALID | VF_TXQ_VALID; + + bnx2x_vf_rx_q_prep(sc, fp, &query->rxq, flags); + bnx2x_vf_tx_q_prep(sc, fp, &query->txq, flags); + + BNX2X_TLV_APPEND(query, query->first_tlv.length, BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); + if (reply->status != BNX2X_VF_STATUS_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to setup VF queue[%d]", + fp->index); + return -EINVAL; + } + + return 0; +} + +int +bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set) +{ + struct vf_set_q_filters_tlv *query; + struct vf_common_reply_tlv *reply; + + query = &sc->vf2pf_mbox->query[0].set_q_filters; + bnx2x_init_first_tlv(sc, &query->first_tlv, BNX2X_VF_TLV_SET_Q_FILTERS, + sizeof(*query)); + + query->vf_qid = sc->fp->index; + query->mac_filters_cnt = 1; + query->flags = BNX2X_VF_MAC_VLAN_CHANGED; + + query->filters[0].flags = (set ? 
BNX2X_VF_Q_FILTER_SET_MAC : 0) | + BNX2X_VF_Q_FILTER_DEST_MAC_VALID; + + bnx2x_check_bull(sc); + + rte_memcpy(query->filters[0].mac, sc->link_params.mac_addr, ETH_ALEN); + + BNX2X_TLV_APPEND(query, query->first_tlv.length, BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); + reply = &sc->vf2pf_mbox->resp.common_reply; + + while (BNX2X_VF_STATUS_FAILURE == reply->status && + bnx2x_check_bull(sc)) { + /* A new mac was configured by PF for us */ + rte_memcpy(sc->link_params.mac_addr, sc->pf2vf_bulletin->mac, + ETH_ALEN); + rte_memcpy(query->filters[0].mac, sc->pf2vf_bulletin->mac, + ETH_ALEN); + + bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); + } + + if (BNX2X_VF_STATUS_SUCCESS != reply->status) { + PMD_DRV_LOG(ERR, "Bad reply from PF for SET MAC message: %d", + reply->status); + return -EINVAL; + } + + return 0; +} + +int +bnx2x_vf_config_rss(struct bnx2x_softc *sc, + struct ecore_config_rss_params *params) +{ + struct vf_rss_tlv *query; + struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply; + + query = &sc->vf2pf_mbox->query[0].update_rss; + + bnx2x_init_first_tlv(sc, &query->first_tlv, BNX2X_VF_TLV_UPDATE_RSS, + sizeof(*query)); + + /* add list termination tlv */ + BNX2X_TLV_APPEND(query, query->first_tlv.length, BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rte_memcpy(query->rss_key, params->rss_key, sizeof(params->rss_key)); + query->rss_key_size = T_ETH_RSS_KEY; + + rte_memcpy(query->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); + query->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE; + + query->rss_result_mask = params->rss_result_mask; + query->rss_flags = params->rss_flags; + + bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); + if (reply->status != BNX2X_VF_STATUS_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to configure RSS"); + return -EINVAL; + } + + return 0; +} + +int +bnx2x_vf_set_rx_mode(struct bnx2x_softc *sc) +{ + struct vf_set_q_filters_tlv *query; + struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply; + unsigned long tx_mask; + + query = &sc->vf2pf_mbox->query[0].set_q_filters; + bnx2x_init_first_tlv(sc, &query->first_tlv, BNX2X_VF_TLV_SET_Q_FILTERS, + sizeof(*query)); + + query->vf_qid = 0; + query->flags = BNX2X_VF_RX_MASK_CHANGED; + + if (bnx2x_fill_accept_flags(sc, sc->rx_mode, &query->rx_mask, &tx_mask)) { + return -EINVAL; + } + + BNX2X_TLV_APPEND(query, query->first_tlv.length, BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); + if (reply->status != BNX2X_VF_STATUS_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to set RX mode"); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/net/bnx2x/bnx2x_vfpf.h b/drivers/net/bnx2x/bnx2x_vfpf.h new file mode 100644 index 00000000..966240cc --- /dev/null +++ b/drivers/net/bnx2x/bnx2x_vfpf.h @@ -0,0 +1,334 @@ +/* + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * + * Copyright (c) 2015 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.bnx2x_pmd for copyright and licensing details. 
+ */ + +#ifndef BNX2X_VFPF_H +#define BNX2X_VFPF_H + +#include "ecore_sp.h" + +#define VLAN_HLEN 4 + +struct vf_resource_query { + uint8_t num_rxqs; + uint8_t num_txqs; + uint8_t num_sbs; + uint8_t num_mac_filters; + uint8_t num_vlan_filters; + uint8_t num_mc_filters; +}; + +#define BNX2X_VF_STATUS_SUCCESS 1 +#define BNX2X_VF_STATUS_FAILURE 2 +#define BNX2X_VF_STATUS_NO_RESOURCES 4 +#define BNX2X_VF_BULLETIN_TRIES 5 + +#define BNX2X_VF_Q_FLAG_CACHE_ALIGN 0x0008 +#define BNX2X_VF_Q_FLAG_STATS 0x0010 +#define BNX2X_VF_Q_FLAG_OV 0x0020 +#define BNX2X_VF_Q_FLAG_VLAN 0x0040 +#define BNX2X_VF_Q_FLAG_COS 0x0080 +#define BNX2X_VF_Q_FLAG_HC 0x0100 +#define BNX2X_VF_Q_FLAG_DHC 0x0200 +#define BNX2X_VF_Q_FLAG_LEADING_RSS 0x0400 + +#define TLV_BUFFER_SIZE 1024 + +/* general tlv header (used for both vf->pf request and pf->vf response) */ +struct channel_tlv { + uint16_t type; + uint16_t length; +}; + +struct vf_first_tlv { + uint16_t type; + uint16_t length; + uint32_t reply_offset; +}; + +struct tlv_buffer_size { + uint8_t tlv_buffer[TLV_BUFFER_SIZE]; +}; + +/* tlv struct for all PF replies except acquire */ +struct vf_common_reply_tlv { + uint16_t type; + uint16_t length; + uint8_t status; + uint8_t pad[3]; +}; + +/* used to terminate and pad a tlv list */ +struct channel_list_end_tlv { + uint16_t type; + uint16_t length; + uint32_t pad; +}; + +/* Acquire */ +struct vf_acquire_tlv { + struct vf_first_tlv first_tlv; + + uint8_t vf_id; + uint8_t pad[3]; + + struct vf_resource_query res_query; + + uint64_t bulletin_addr; +}; + +/* simple operation request on queue */ +struct vf_q_op_tlv { + struct vf_first_tlv first_tlv; + uint8_t vf_qid; + uint8_t pad[3]; +}; + +/* receive side scaling tlv */ +struct vf_rss_tlv { + struct vf_first_tlv first_tlv; + uint32_t rss_flags; + uint8_t rss_result_mask; + uint8_t ind_table_size; + uint8_t rss_key_size; + uint8_t pad; + uint8_t ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; + uint32_t rss_key[T_ETH_RSS_KEY]; /* hash values */ +}; + +struct vf_resc { +#define BNX2X_VF_MAX_QUEUES_PER_VF 16 +#define BNX2X_VF_MAX_SBS_PER_VF 16 + uint16_t hw_sbs[BNX2X_VF_MAX_SBS_PER_VF]; + uint8_t hw_qid[BNX2X_VF_MAX_QUEUES_PER_VF]; + uint8_t num_rxqs; + uint8_t num_txqs; + uint8_t num_sbs; + uint8_t num_mac_filters; + uint8_t num_vlan_filters; + uint8_t num_mc_filters; + uint8_t permanent_mac_addr[ETH_ALEN]; + uint8_t current_mac_addr[ETH_ALEN]; + uint16_t pf_link_speed; + uint32_t pf_link_supported; +}; + +/* tlv struct holding reply for acquire */ +struct vf_acquire_resp_tlv { + uint16_t type; + uint16_t length; + uint8_t status; + uint8_t pad1[3]; + uint32_t chip_num; + uint8_t pad2[4]; + char fw_ver[32]; + uint16_t db_size; + uint8_t pad3[2]; + struct vf_resc resc; +}; + +/* Init VF */ +struct vf_init_tlv { + struct vf_first_tlv first_tlv; + uint64_t sb_addr[BNX2X_VF_MAX_SBS_PER_VF]; + uint64_t spq_addr; + uint64_t stats_addr; + uint16_t stats_step; + uint32_t flags; + uint32_t pad[2]; +}; + +struct vf_rxq_params { + /* physical addresses */ + uint64_t rcq_addr; + uint64_t rcq_np_addr; + uint64_t rxq_addr; + uint64_t pad1; + + /* sb + hc info */ + uint8_t vf_sb_id; + uint8_t sb_cq_index; + uint16_t hc_rate; /* desired interrupts per sec. 
*/ + /* rx buffer info */ + uint16_t mtu; + uint16_t buf_sz; + uint16_t flags; /* for BNX2X_VF_Q_FLAG_X flags */ + uint16_t stat_id; /* valid if BNX2X_VF_Q_FLAG_STATS */ + + uint8_t pad2[5]; + + uint8_t drop_flags; + uint8_t cache_line_log; /* BNX2X_VF_Q_FLAG_CACHE_ALIGN */ + uint8_t pad3; +}; + +struct vf_txq_params { + /* physical addresses */ + uint64_t txq_addr; + + /* sb + hc info */ + uint8_t vf_sb_id; /* index in hw_sbs[] */ + uint8_t sb_index; /* Index in the SB */ + uint16_t hc_rate; /* desired interrupts per sec. */ + uint32_t flags; /* for BNX2X_VF_Q_FLAG_X flags */ + uint16_t stat_id; /* valid if BNX2X_VF_Q_FLAG_STATS */ + uint8_t traffic_type; /* see in setup_context() */ + uint8_t pad; +}; + +/* Setup Queue */ +struct vf_setup_q_tlv { + struct vf_first_tlv first_tlv; + + struct vf_rxq_params rxq; + struct vf_txq_params txq; + + uint8_t vf_qid; /* index in hw_qid[] */ + uint8_t param_valid; + #define VF_RXQ_VALID 0x01 + #define VF_TXQ_VALID 0x02 + uint8_t pad[2]; +}; + +/* Set Queue Filters */ +struct vf_q_mac_vlan_filter { + uint32_t flags; + #define BNX2X_VF_Q_FILTER_DEST_MAC_VALID 0x01 + #define BNX2X_VF_Q_FILTER_VLAN_TAG_VALID 0x02 + #define BNX2X_VF_Q_FILTER_SET_MAC 0x100 /* set/clear */ + uint8_t mac[ETH_ALEN]; + uint16_t vlan_tag; +}; + + +#define _UP_ETH_ALEN (6) + +/* configure queue filters */ +struct vf_set_q_filters_tlv { + struct vf_first_tlv first_tlv; + + uint32_t flags; + #define BNX2X_VF_MAC_VLAN_CHANGED 0x01 + #define BNX2X_VF_MULTICAST_CHANGED 0x02 + #define BNX2X_VF_RX_MASK_CHANGED 0x04 + + uint8_t vf_qid; /* index in hw_qid[] */ + uint8_t mac_filters_cnt; + uint8_t multicast_cnt; + uint8_t pad; + + #define VF_MAX_MAC_FILTERS 16 + #define VF_MAX_VLAN_FILTERS 16 + #define VF_MAX_FILTERS (VF_MAX_MAC_FILTERS +\ + VF_MAX_VLAN_FILTERS) + struct vf_q_mac_vlan_filter filters[VF_MAX_FILTERS]; + + #define VF_MAX_MULTICAST_PER_VF 32 + uint8_t multicast[VF_MAX_MULTICAST_PER_VF][_UP_ETH_ALEN]; + unsigned long rx_mask; +}; + + +/* close VF (disable VF) */ +struct vf_close_tlv { + struct vf_first_tlv first_tlv; + uint16_t vf_id; /* for debug */ + uint8_t pad[2]; +}; + +/* rlease the VF's acquired resources */ +struct vf_release_tlv { + struct vf_first_tlv first_tlv; + uint16_t vf_id; /* for debug */ + uint8_t pad[2]; +}; + +union query_tlvs { + struct vf_first_tlv first_tlv; + struct vf_acquire_tlv acquire; + struct vf_init_tlv init; + struct vf_close_tlv close; + struct vf_q_op_tlv q_op; + struct vf_setup_q_tlv setup_q; + struct vf_set_q_filters_tlv set_q_filters; + struct vf_release_tlv release; + struct vf_rss_tlv update_rss; + struct channel_list_end_tlv list_end; + struct tlv_buffer_size tlv_buf_size; +}; + +union resp_tlvs { + struct vf_common_reply_tlv common_reply; + struct vf_acquire_resp_tlv acquire_resp; + struct channel_list_end_tlv list_end; + struct tlv_buffer_size tlv_buf_size; +}; + +/* struct allocated by VF driver, PF sends updates to VF via bulletin */ +struct bnx2x_vf_bulletin { + uint32_t crc; /* crc of structure to ensure is not in + * mid-update + */ + uint16_t version; + uint16_t length; + + uint64_t valid_bitmap; /* bitmap indicating wich fields + * hold valid values + */ + +#define MAC_ADDR_VALID 0 /* alert the vf that a new mac address + * is available for it + */ +#define VLAN_VALID 1 /* when set, the vf should no access the + * vf channel + */ +#define CHANNEL_DOWN 2 /* vf channel is disabled. 
VFs are not + * to attempt to send messages on the + * channel after this bit is set + */ + uint8_t mac[ETH_ALEN]; + uint8_t mac_pad[2]; + + uint16_t vlan; + uint8_t vlan_pad[6]; +}; + +#define MAX_TLVS_IN_LIST 50 +enum channel_tlvs { + BNX2X_VF_TLV_NONE, /* ends tlv sequence */ + BNX2X_VF_TLV_ACQUIRE, + BNX2X_VF_TLV_INIT, + BNX2X_VF_TLV_SETUP_Q, + BNX2X_VF_TLV_SET_Q_FILTERS, + BNX2X_VF_TLV_ACTIVATE_Q, + BNX2X_VF_TLV_DEACTIVATE_Q, + BNX2X_VF_TLV_TEARDOWN_Q, + BNX2X_VF_TLV_CLOSE, + BNX2X_VF_TLV_RELEASE, + BNX2X_VF_TLV_UPDATE_RSS_OLD, + BNX2X_VF_TLV_PF_RELEASE_VF, + BNX2X_VF_TLV_LIST_END, + BNX2X_VF_TLV_FLR, + BNX2X_VF_TLV_PF_SET_MAC, + BNX2X_VF_TLV_PF_SET_VLAN, + BNX2X_VF_TLV_UPDATE_RSS, + BNX2X_VF_TLV_PHYS_PORT_ID, + BNX2X_VF_TLV_MAX +}; + +struct bnx2x_vf_mbx_msg { + union query_tlvs query[BNX2X_VF_MAX_QUEUES_PER_VF]; + union resp_tlvs resp; +}; + +void bnx2x_add_tlv(void *tlvs_list, uint16_t offset, uint16_t type, uint16_t length); +int bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set); +int bnx2x_vf_config_rss(struct bnx2x_softc *sc, struct ecore_config_rss_params *params); + +#endif /* BNX2X_VFPF_H */ diff --git a/drivers/net/bnx2x/debug.c b/drivers/net/bnx2x/debug.c new file mode 100644 index 00000000..cc50845c --- /dev/null +++ b/drivers/net/bnx2x/debug.c @@ -0,0 +1,96 @@ +/*- + * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * + * Eric Davis <edavis@broadcom.com> + * David Christensen <davidch@broadcom.com> + * Gary Zambrano <zambrano@broadcom.com> + * + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.bnx2x_pmd for copyright and licensing details. + */ + +#include "bnx2x.h" + + +/* + * Debug versions of the 8/16/32 bit OS register read/write functions to + * capture/display values read/written from/to the controller. 
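+ * Each accessor logs the offset/value pair via PMD_DEBUG_PERIODIC_LOG; the + * 16- and 32-bit variants also warn about unaligned offsets, and the 32-bit + * read returns 0 in that case.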
+ */ +void +bnx2x_reg_write8(struct bnx2x_softc *sc, size_t offset, uint8_t val) +{ + PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%02x", (unsigned long)offset, val); + *((volatile uint8_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val; +} + +void +bnx2x_reg_write16(struct bnx2x_softc *sc, size_t offset, uint16_t val) +{ + if ((offset % 2) != 0) { + PMD_DRV_LOG(NOTICE, "Unaligned 16-bit write to 0x%08lx", + (unsigned long)offset); + } + + PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%04x", (unsigned long)offset, val); + *((volatile uint16_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val; +} + +void +bnx2x_reg_write32(struct bnx2x_softc *sc, size_t offset, uint32_t val) +{ + if ((offset % 4) != 0) { + PMD_DRV_LOG(NOTICE, "Unaligned 32-bit write to 0x%08lx", + (unsigned long)offset); + } + + PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x", (unsigned long)offset, val); + *((volatile uint32_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val; +} + +uint8_t +bnx2x_reg_read8(struct bnx2x_softc *sc, size_t offset) +{ + uint8_t val; + + val = (uint8_t)(*((volatile uint8_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset))); + PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%02x", (unsigned long)offset, val); + + return val; +} + +uint16_t +bnx2x_reg_read16(struct bnx2x_softc *sc, size_t offset) +{ + uint16_t val; + + if ((offset % 2) != 0) { + PMD_DRV_LOG(NOTICE, "Unaligned 16-bit read from 0x%08lx", + (unsigned long)offset); + } + + val = (uint16_t)(*((volatile uint16_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset))); + PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x", (unsigned long)offset, val); + + return val; +} + +uint32_t +bnx2x_reg_read32(struct bnx2x_softc *sc, size_t offset) +{ + uint32_t val; + + if ((offset % 4) != 0) { + PMD_DRV_LOG(NOTICE, "Unaligned 32-bit read from 0x%08lx", + (unsigned long)offset); + return 0; + } + + val = (uint32_t)(*((volatile uint32_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset))); + PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x", (unsigned long)offset, val); + + return val; +} diff --git a/drivers/net/bnx2x/ecore_fw_defs.h b/drivers/net/bnx2x/ecore_fw_defs.h new file mode 100644 index 00000000..ab490efa --- /dev/null +++ b/drivers/net/bnx2x/ecore_fw_defs.h @@ -0,0 +1,403 @@ +/*- + * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * + * Eric Davis <edavis@broadcom.com> + * David Christensen <davidch@broadcom.com> + * Gary Zambrano <zambrano@broadcom.com> + * + * Copyright (c) 2014-2015 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.bnx2x_pmd for copyright and licensing details. 
+ */ + +#ifndef ECORE_FW_DEFS_H +#define ECORE_FW_DEFS_H + + +#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[148].base) +#define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ + (IRO[147].base + ((assertListEntry) * IRO[147].m1)) +#define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \ + (IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \ + IRO[153].m2)) +#define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \ + (IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \ + IRO[154].m2)) +#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \ + (IRO[155].base + ((vfId) * IRO[155].m1)) +#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \ + (IRO[156].base + ((vfId) * IRO[156].m1)) +#define CSTORM_VF_TO_PF_OFFSET(funcId) \ + (IRO[150].base + ((funcId) * IRO[150].m1)) +#define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \ + (IRO[159].base + ((funcId) * IRO[159].m1)) +#define CSTORM_FUNC_EN_OFFSET(funcId) \ + (IRO[149].base + ((funcId) * IRO[149].m1)) +#define CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hcIndex, sbId) \ + (IRO[139].base + ((hcIndex) * IRO[139].m1) + ((sbId) * IRO[139].m2)) +#define CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hcIndex, sbId) \ + (IRO[138].base + (((hcIndex)>>2) * IRO[138].m1) + (((hcIndex)&3) \ + * IRO[138].m2) + ((sbId) * IRO[138].m3)) +#define CSTORM_IGU_MODE_OFFSET (IRO[157].base) +#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ + (IRO[317].base + ((pfId) * IRO[317].m1)) +#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ + (IRO[318].base + ((pfId) * IRO[318].m1)) +#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \ + (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2)) +#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \ + (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2)) +#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \ + (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2)) +#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \ + (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2)) +#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \ + (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2)) +#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \ + (IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2)) +#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \ + (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2)) +#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ + (IRO[316].base + ((pfId) * IRO[316].m1)) +#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ + (IRO[308].base + ((pfId) * IRO[308].m1)) +#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ + (IRO[307].base + ((pfId) * IRO[307].m1)) +#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ + (IRO[306].base + ((pfId) * IRO[306].m1)) +#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ + (IRO[151].base + ((funcId) * IRO[151].m1)) +#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \ + (IRO[142].base + ((pfId) * IRO[142].m1)) +#define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \ + (IRO[143].base + ((pfId) * IRO[143].m1)) +#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \ + (IRO[141].base + ((pfId) * IRO[141].m1)) +#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size) +#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \ + (IRO[144].base + ((pfId) * IRO[144].m1)) +#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size) +#define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \ + (IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2)) +#define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \ + 
(IRO[133].base + ((sbId) * IRO[133].m1)) +#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \ + (IRO[134].base + ((sbId) * IRO[134].m1)) +#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \ + (IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2)) +#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \ + (IRO[132].base + ((sbId) * IRO[132].m1)) +#define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size) +#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \ + (IRO[137].base + ((sbId) * IRO[137].m1)) +#define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size) +#define CSTORM_VF_TO_PF_OFFSET(funcId) \ + (IRO[150].base + ((funcId) * IRO[150].m1)) +#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base) +#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \ + (IRO[203].base + ((pfId) * IRO[203].m1)) +#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base) +#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ + (IRO[101].base + ((assertListEntry) * IRO[101].m1)) +#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \ + (IRO[201].base + ((pfId) * IRO[201].m1)) +#define TSTORM_FUNC_EN_OFFSET(funcId) \ + (IRO[103].base + ((funcId) * IRO[103].m1)) +#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ + (IRO[272].base + ((pfId) * IRO[272].m1)) +#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ + (IRO[271].base + ((pfId) * IRO[271].m1)) +#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ + (IRO[270].base + ((pfId) * IRO[270].m1)) +#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ + (IRO[269].base + ((pfId) * IRO[269].m1)) +#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ + (IRO[268].base + ((pfId) * IRO[268].m1)) +#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \ + (IRO[278].base + ((pfId) * IRO[278].m1)) +#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ + (IRO[264].base + ((pfId) * IRO[264].m1)) +#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ + (IRO[265].base + ((pfId) * IRO[265].m1)) +#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \ + (IRO[266].base + ((pfId) * IRO[266].m1)) +#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ + (IRO[267].base + ((pfId) * IRO[267].m1)) +#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \ + (IRO[202].base + ((pfId) * IRO[202].m1)) +#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ + (IRO[105].base + ((funcId) * IRO[105].m1)) +#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \ + (IRO[217].base + ((pfId) * IRO[217].m1)) +#define TSTORM_VF_TO_PF_OFFSET(funcId) \ + (IRO[104].base + ((funcId) * IRO[104].m1)) +#define USTORM_AGG_DATA_OFFSET (IRO[206].base) +#define USTORM_AGG_DATA_SIZE (IRO[206].size) +#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base) +#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \ + (IRO[176].base + ((assertListEntry) * IRO[176].m1)) +#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \ + (IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * IRO[205].m2)) +#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \ + (IRO[183].base + ((portId) * IRO[183].m1)) +#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \ + (IRO[319].base + ((pfId) * IRO[319].m1)) +#define USTORM_FUNC_EN_OFFSET(funcId) \ + (IRO[178].base + ((funcId) * IRO[178].m1)) +#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ + (IRO[283].base + ((pfId) * IRO[283].m1)) +#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ + (IRO[284].base + ((pfId) * IRO[284].m1)) +#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ + (IRO[288].base + ((pfId) * IRO[288].m1)) +#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \ + (IRO[285].base + ((pfId) * IRO[285].m1)) +#define 
USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ + (IRO[281].base + ((pfId) * IRO[281].m1)) +#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ + (IRO[280].base + ((pfId) * IRO[280].m1)) +#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ + (IRO[279].base + ((pfId) * IRO[279].m1)) +#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ + (IRO[282].base + ((pfId) * IRO[282].m1)) +#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \ + (IRO[286].base + ((pfId) * IRO[286].m1)) +#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ + (IRO[287].base + ((pfId) * IRO[287].m1)) +#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \ + (IRO[182].base + ((pfId) * IRO[182].m1)) +#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ + (IRO[180].base + ((funcId) * IRO[180].m1)) +#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \ + (IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \ + IRO[209].m2)) +#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \ + (IRO[210].base + ((qzoneId) * IRO[210].m1)) +#define USTORM_TPA_BTR_OFFSET (IRO[207].base) +#define USTORM_TPA_BTR_SIZE (IRO[207].size) +#define USTORM_VF_TO_PF_OFFSET(funcId) \ + (IRO[179].base + ((funcId) * IRO[179].m1)) +#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base) +#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base) +#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base) +#define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ + (IRO[50].base + ((assertListEntry) * IRO[50].m1)) +#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \ + (IRO[43].base + ((portId) * IRO[43].m1)) +#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \ + (IRO[45].base + ((pfId) * IRO[45].m1)) +#define XSTORM_FUNC_EN_OFFSET(funcId) \ + (IRO[47].base + ((funcId) * IRO[47].m1)) +#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ + (IRO[296].base + ((pfId) * IRO[296].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \ + (IRO[299].base + ((pfId) * IRO[299].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \ + (IRO[300].base + ((pfId) * IRO[300].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \ + (IRO[301].base + ((pfId) * IRO[301].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \ + (IRO[302].base + ((pfId) * IRO[302].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \ + (IRO[303].base + ((pfId) * IRO[303].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \ + (IRO[304].base + ((pfId) * IRO[304].m1)) +#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \ + (IRO[305].base + ((pfId) * IRO[305].m1)) +#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ + (IRO[295].base + ((pfId) * IRO[295].m1)) +#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ + (IRO[294].base + ((pfId) * IRO[294].m1)) +#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ + (IRO[293].base + ((pfId) * IRO[293].m1)) +#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ + (IRO[298].base + ((pfId) * IRO[298].m1)) +#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \ + (IRO[297].base + ((pfId) * IRO[297].m1)) +#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \ + (IRO[292].base + ((pfId) * IRO[292].m1)) +#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ + (IRO[291].base + ((pfId) * IRO[291].m1)) +#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \ + (IRO[290].base + ((pfId) * IRO[290].m1)) +#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \ + (IRO[289].base + ((pfId) * IRO[289].m1)) +#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \ + (IRO[44].base + ((pfId) * IRO[44].m1)) +#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ + (IRO[49].base + ((funcId) * IRO[49].m1)) +#define XSTORM_SPQ_DATA_OFFSET(funcId) \ + (IRO[32].base + ((funcId) * 
IRO[32].m1)) +#define XSTORM_SPQ_DATA_SIZE (IRO[32].size) +#define XSTORM_SPQ_PAGE_BASE_OFFSET(funcId) \ + (IRO[30].base + ((funcId) * IRO[30].m1)) +#define XSTORM_SPQ_PROD_OFFSET(funcId) \ + (IRO[31].base + ((funcId) * IRO[31].m1)) +#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \ + (IRO[211].base + ((portId) * IRO[211].m1)) +#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \ + (IRO[212].base + ((portId) * IRO[212].m1)) +#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \ + (IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \ + IRO[214].m2)) +#define XSTORM_VF_TO_PF_OFFSET(funcId) \ + (IRO[48].base + ((funcId) * IRO[48].m1)) +#define COMMON_ASM_INVALID_ASSERT_OPCODE (IRO[7].base) + + +/* Ethernet Ring parameters */ +#define X_ETH_LOCAL_RING_SIZE 13 +#define FIRST_BD_IN_PKT 0 +#define PARSE_BD_INDEX 1 +#define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8)) + +/* Rx ring params */ +#define U_ETH_LOCAL_BD_RING_SIZE 8 +#define U_ETH_SGL_SIZE 8 + /* The fw will padd the buffer with this value, so the IP header \ + will be align to 4 Byte */ +#define IP_HEADER_ALIGNMENT_PADDING 2 + +#define TU_ETH_CQES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8)) +#define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8)) + +#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1) +#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1) + +#define U_ETH_UNDEFINED_Q 0xFF + +#define T_ETH_INDIRECTION_TABLE_SIZE 128 +#define T_ETH_RSS_KEY 10 +#define ETH_NUM_OF_RSS_ENGINES_E2 72 + +#define FILTER_RULES_COUNT 16 +#define MULTICAST_RULES_COUNT 16 +#define CLASSIFY_RULES_COUNT 16 + +/*The CRC32 seed, that is used for the hash(reduction) multicast address */ +#define ETH_CRC32_HASH_SEED 0x00000000 + +#define ETH_CRC32_HASH_BIT_SIZE (8) +#define ETH_CRC32_HASH_MASK EVAL((1<<ETH_CRC32_HASH_BIT_SIZE)-1) + +/* Maximal L2 clients supported */ +#define ETH_MAX_RX_CLIENTS_E1H 28 +#define ETH_MAX_RX_CLIENTS_E2 152 + +/* Maximal statistics client Ids */ +#define MAX_STAT_COUNTER_ID_E1H 56 +#define MAX_STAT_COUNTER_ID_E2 140 + +#define MAX_MAC_CREDIT_E1H 256 /* Per Chip */ +#define MAX_MAC_CREDIT_E2 272 /* Per Path */ +#define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */ +#define MAX_VLAN_CREDIT_E2 272 /* Per Path */ + + +/* Maximal aggregation queues supported */ +#define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64 + + +#define ETH_NUM_OF_MCAST_BINS 256 +#define ETH_NUM_OF_MCAST_ENGINES_E2 72 + +#define ETH_MIN_RX_CQES_WITHOUT_TPA (MAX_RAMRODS_PER_PORT + 3) +#define ETH_MIN_RX_CQES_WITH_TPA_E1H_E2 \ + (ETH_MAX_AGGREGATION_QUEUES_E1H_E2 + ETH_MIN_RX_CQES_WITHOUT_TPA) + +#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0 + + +/* This file defines HSI constants common to all microcode flows */ + +/* offset in bits of protocol in the state context parameter */ +#define PROTOCOL_STATE_BIT_OFFSET 6 + +#define ETH_STATE (ETH_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) +#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) +#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) + +/* microcode fixed page page size 4K (chains and ring segments) */ +#define MC_PAGE_SIZE 4096 + +/* Number of indices per slow-path SB */ +#define HC_SP_SB_MAX_INDICES 16 /* The Maximum of all */ + +/* Number of indices per SB */ +#define HC_SB_MAX_INDICES_E1X 8 /* Multiple of 4 */ +#define HC_SB_MAX_INDICES_E2 8 /* Multiple of 4 */ + +/* Number of SB */ +#define HC_SB_MAX_SB_E1X 32 +#define HC_SB_MAX_SB_E2 136 /* include PF */ + +/* ID of slow path status block */ +#define 
HC_SP_SB_ID 0xde + +/* Num of State machines */ +#define HC_SB_MAX_SM 2 /* Fixed */ + +/* Num of dynamic indices */ +#define HC_SB_MAX_DYNAMIC_INDICES 4 /* 0..3 fixed */ + +/* max number of slow path commands per port */ +#define MAX_RAMRODS_PER_PORT 8 + + +/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ + +/* chip timers frequency constants */ +#define TIMERS_TICK_SIZE_CHIP (1e-3) + +/* used in toe: TsRecentAge, MaxRt, and temporarily RTT */ +#define TSEMI_CLK1_RESUL_CHIP (1e-3) + +/* temporarily used for RTT */ +#define XSEMI_CLK1_RESUL_CHIP (1e-3) + +/* used for Host Coallescing */ +#define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6)) + +/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ + +#define XSTORM_IP_ID_ROLL_HALF 0x8000 +#define XSTORM_IP_ID_ROLL_ALL 0 + +/* assert list: number of entries */ +#define FW_LOG_LIST_SIZE 50 + +#define NUM_OF_SAFC_BITS 16 +#define MAX_COS_NUMBER 4 +#define MAX_TRAFFIC_TYPES 8 +#define MAX_PFC_PRIORITIES 8 + + /* used by array traffic_type_to_priority[] to mark traffic type \ + that is not mapped to priority*/ +#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF + +/* Event Ring definitions */ +#define C_ERES_PER_PAGE \ + (PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem))) +#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1) + +/* number of statistic command */ +#define STATS_QUERY_CMD_COUNT 16 + +/* niv list table size */ +#define AFEX_LIST_TABLE_SIZE 4096 + +/* invalid VNIC Id. used in VNIC classification */ +#define INVALID_VNIC_ID 0xFF + +/* used for indicating an undefined RAM offset in the IRO arrays */ +#define UNDEF_IRO 0x80000000 + +/* used for defining the amount of FCoE tasks supported for PF */ +#define MAX_FCOE_FUNCS_PER_ENGINE 2 +#define MAX_NUM_FCOE_TASKS_PER_ENGINE \ + 4096 /*Each port can have at max 1 function*/ + + +#endif /* ECORE_FW_DEFS_H */ diff --git a/drivers/net/bnx2x/ecore_hsi.h b/drivers/net/bnx2x/ecore_hsi.h new file mode 100644 index 00000000..5808e1ae --- /dev/null +++ b/drivers/net/bnx2x/ecore_hsi.h @@ -0,0 +1,6330 @@ +/*- + * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * + * Eric Davis <edavis@broadcom.com> + * David Christensen <davidch@broadcom.com> + * Gary Zambrano <zambrano@broadcom.com> + * + * Copyright (c) 2014-2015 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.bnx2x_pmd for copyright and licensing details. 
+ */ + +#ifndef ECORE_HSI_H +#define ECORE_HSI_H + +#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e + +struct license_key { + uint32_t reserved[6]; + + uint32_t max_iscsi_conn; +#define LICENSE_MAX_ISCSI_TRGT_CONN_MASK 0xFFFF +#define LICENSE_MAX_ISCSI_TRGT_CONN_SHIFT 0 +#define LICENSE_MAX_ISCSI_INIT_CONN_MASK 0xFFFF0000 +#define LICENSE_MAX_ISCSI_INIT_CONN_SHIFT 16 + + uint32_t reserved_a; + + uint32_t max_fcoe_conn; +#define LICENSE_MAX_FCOE_TRGT_CONN_MASK 0xFFFF +#define LICENSE_MAX_FCOE_TRGT_CONN_SHIFT 0 +#define LICENSE_MAX_FCOE_INIT_CONN_MASK 0xFFFF0000 +#define LICENSE_MAX_FCOE_INIT_CONN_SHIFT 16 + + uint32_t reserved_b[4]; +}; + +typedef struct license_key license_key_t; + + +/**************************************************************************** + * Shared HW configuration * + ****************************************************************************/ +#define PIN_CFG_NA 0x00000000 +#define PIN_CFG_GPIO0_P0 0x00000001 +#define PIN_CFG_GPIO1_P0 0x00000002 +#define PIN_CFG_GPIO2_P0 0x00000003 +#define PIN_CFG_GPIO3_P0 0x00000004 +#define PIN_CFG_GPIO0_P1 0x00000005 +#define PIN_CFG_GPIO1_P1 0x00000006 +#define PIN_CFG_GPIO2_P1 0x00000007 +#define PIN_CFG_GPIO3_P1 0x00000008 +#define PIN_CFG_EPIO0 0x00000009 +#define PIN_CFG_EPIO1 0x0000000a +#define PIN_CFG_EPIO2 0x0000000b +#define PIN_CFG_EPIO3 0x0000000c +#define PIN_CFG_EPIO4 0x0000000d +#define PIN_CFG_EPIO5 0x0000000e +#define PIN_CFG_EPIO6 0x0000000f +#define PIN_CFG_EPIO7 0x00000010 +#define PIN_CFG_EPIO8 0x00000011 +#define PIN_CFG_EPIO9 0x00000012 +#define PIN_CFG_EPIO10 0x00000013 +#define PIN_CFG_EPIO11 0x00000014 +#define PIN_CFG_EPIO12 0x00000015 +#define PIN_CFG_EPIO13 0x00000016 +#define PIN_CFG_EPIO14 0x00000017 +#define PIN_CFG_EPIO15 0x00000018 +#define PIN_CFG_EPIO16 0x00000019 +#define PIN_CFG_EPIO17 0x0000001a +#define PIN_CFG_EPIO18 0x0000001b +#define PIN_CFG_EPIO19 0x0000001c +#define PIN_CFG_EPIO20 0x0000001d +#define PIN_CFG_EPIO21 0x0000001e +#define PIN_CFG_EPIO22 0x0000001f +#define PIN_CFG_EPIO23 0x00000020 +#define PIN_CFG_EPIO24 0x00000021 +#define PIN_CFG_EPIO25 0x00000022 +#define PIN_CFG_EPIO26 0x00000023 +#define PIN_CFG_EPIO27 0x00000024 +#define PIN_CFG_EPIO28 0x00000025 +#define PIN_CFG_EPIO29 0x00000026 +#define PIN_CFG_EPIO30 0x00000027 +#define PIN_CFG_EPIO31 0x00000028 + +/* EPIO definition */ +#define EPIO_CFG_NA 0x00000000 +#define EPIO_CFG_EPIO0 0x00000001 +#define EPIO_CFG_EPIO1 0x00000002 +#define EPIO_CFG_EPIO2 0x00000003 +#define EPIO_CFG_EPIO3 0x00000004 +#define EPIO_CFG_EPIO4 0x00000005 +#define EPIO_CFG_EPIO5 0x00000006 +#define EPIO_CFG_EPIO6 0x00000007 +#define EPIO_CFG_EPIO7 0x00000008 +#define EPIO_CFG_EPIO8 0x00000009 +#define EPIO_CFG_EPIO9 0x0000000a +#define EPIO_CFG_EPIO10 0x0000000b +#define EPIO_CFG_EPIO11 0x0000000c +#define EPIO_CFG_EPIO12 0x0000000d +#define EPIO_CFG_EPIO13 0x0000000e +#define EPIO_CFG_EPIO14 0x0000000f +#define EPIO_CFG_EPIO15 0x00000010 +#define EPIO_CFG_EPIO16 0x00000011 +#define EPIO_CFG_EPIO17 0x00000012 +#define EPIO_CFG_EPIO18 0x00000013 +#define EPIO_CFG_EPIO19 0x00000014 +#define EPIO_CFG_EPIO20 0x00000015 +#define EPIO_CFG_EPIO21 0x00000016 +#define EPIO_CFG_EPIO22 0x00000017 +#define EPIO_CFG_EPIO23 0x00000018 +#define EPIO_CFG_EPIO24 0x00000019 +#define EPIO_CFG_EPIO25 0x0000001a +#define EPIO_CFG_EPIO26 0x0000001b +#define EPIO_CFG_EPIO27 0x0000001c +#define EPIO_CFG_EPIO28 0x0000001d +#define EPIO_CFG_EPIO29 0x0000001e +#define EPIO_CFG_EPIO30 0x0000001f +#define EPIO_CFG_EPIO31 0x00000020 + +struct mac_addr { + uint32_t upper; + 
uint32_t lower; +}; + + +struct shared_hw_cfg { /* NVRAM Offset */ + /* Up to 16 bytes of NULL-terminated string */ + uint8_t part_num[16]; /* 0x104 */ + + uint32_t config; /* 0x114 */ + #define SHARED_HW_CFG_MDIO_VOLTAGE_MASK 0x00000001 + #define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT 0 + #define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V 0x00000000 + #define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V 0x00000001 + + #define SHARED_HW_CFG_PORT_SWAP 0x00000004 + + #define SHARED_HW_CFG_BEACON_WOL_EN 0x00000008 + + #define SHARED_HW_CFG_PCIE_GEN3_DISABLED 0x00000000 + #define SHARED_HW_CFG_PCIE_GEN3_ENABLED 0x00000010 + + #define SHARED_HW_CFG_MFW_SELECT_MASK 0x00000700 + #define SHARED_HW_CFG_MFW_SELECT_SHIFT 8 + /* Whatever MFW found in NVM + (if multiple found, priority order is: NC-SI, UMP, IPMI) */ + #define SHARED_HW_CFG_MFW_SELECT_DEFAULT 0x00000000 + #define SHARED_HW_CFG_MFW_SELECT_NC_SI 0x00000100 + #define SHARED_HW_CFG_MFW_SELECT_UMP 0x00000200 + #define SHARED_HW_CFG_MFW_SELECT_IPMI 0x00000300 + /* Use SPIO4 as an arbiter between: 0-NC_SI, 1-IPMI + (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ + #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI 0x00000400 + /* Use SPIO4 as an arbiter between: 0-UMP, 1-IPMI + (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ + #define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI 0x00000500 + /* Use SPIO4 as an arbiter between: 0-NC-SI, 1-UMP + (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ + #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP 0x00000600 + + /* Adjust the PCIe G2 Tx amplitude driver for all Tx lanes. For + backwards compatibility, value of 0 is disabling this feature. + That means that though 0 is a valid value, it cannot be + configured. */ + #define SHARED_HW_CFG_G2_TX_DRIVE_MASK 0x0000F000 + #define SHARED_HW_CFG_G2_TX_DRIVE_SHIFT 12 + + #define SHARED_HW_CFG_LED_MODE_MASK 0x000F0000 + #define SHARED_HW_CFG_LED_MODE_SHIFT 16 + #define SHARED_HW_CFG_LED_MAC1 0x00000000 + #define SHARED_HW_CFG_LED_PHY1 0x00010000 + #define SHARED_HW_CFG_LED_PHY2 0x00020000 + #define SHARED_HW_CFG_LED_PHY3 0x00030000 + #define SHARED_HW_CFG_LED_MAC2 0x00040000 + #define SHARED_HW_CFG_LED_PHY4 0x00050000 + #define SHARED_HW_CFG_LED_PHY5 0x00060000 + #define SHARED_HW_CFG_LED_PHY6 0x00070000 + #define SHARED_HW_CFG_LED_MAC3 0x00080000 + #define SHARED_HW_CFG_LED_PHY7 0x00090000 + #define SHARED_HW_CFG_LED_PHY9 0x000a0000 + #define SHARED_HW_CFG_LED_PHY11 0x000b0000 + #define SHARED_HW_CFG_LED_MAC4 0x000c0000 + #define SHARED_HW_CFG_LED_PHY8 0x000d0000 + #define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000 + #define SHARED_HW_CFG_LED_EXTPHY2 0x000f0000 + + #define SHARED_HW_CFG_SRIOV_MASK 0x40000000 + #define SHARED_HW_CFG_SRIOV_DISABLED 0x00000000 + #define SHARED_HW_CFG_SRIOV_ENABLED 0x40000000 + + #define SHARED_HW_CFG_ATC_MASK 0x80000000 + #define SHARED_HW_CFG_ATC_DISABLED 0x00000000 + #define SHARED_HW_CFG_ATC_ENABLED 0x80000000 + + uint32_t config2; /* 0x118 */ + + #define SHARED_HW_CFG_PCIE_GEN2_MASK 0x00000100 + #define SHARED_HW_CFG_PCIE_GEN2_SHIFT 8 + #define SHARED_HW_CFG_PCIE_GEN2_DISABLED 0x00000000 + #define SHARED_HW_CFG_PCIE_GEN2_ENABLED 0x00000100 + + #define SHARED_HW_CFG_SMBUS_TIMING_MASK 0x00001000 + #define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000 + #define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000 + + #define SHARED_HW_CFG_HIDE_PORT1 0x00002000 + + + /* Output low when PERST is asserted */ + #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_MASK 0x00008000 + #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_DISABLED 
0x00000000 + #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_ENABLED 0x00008000 + + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_MASK 0x00070000 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_SHIFT 16 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_HW 0x00000000 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_0DB 0x00010000 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_3_5DB 0x00020000 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_6_0DB 0x00030000 + + /* The fan failure mechanism is usually related to the PHY type + since the power consumption of the board is determined by the PHY. + Currently, fan is required for most designs with SFX7101, BNX2X8727 + and BNX2X8481. If a fan is not required for a board which uses one + of those PHYs, this field should be set to "Disabled". If a fan is + required for a different PHY type, this option should be set to + "Enabled". The fan failure indication is expected on SPIO5 */ + #define SHARED_HW_CFG_FAN_FAILURE_MASK 0x00180000 + #define SHARED_HW_CFG_FAN_FAILURE_SHIFT 19 + #define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE 0x00000000 + #define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000 + #define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000 + + /* ASPM Power Management support */ + #define SHARED_HW_CFG_ASPM_SUPPORT_MASK 0x00600000 + #define SHARED_HW_CFG_ASPM_SUPPORT_SHIFT 21 + #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_ENABLED 0x00000000 + #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_DISABLED 0x00200000 + #define SHARED_HW_CFG_ASPM_SUPPORT_L1_DISABLED 0x00400000 + #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_DISABLED 0x00600000 + + /* The value of PM_TL_IGNORE_REQS (bit0) in PCI register + tl_control_0 (register 0x2800) */ + #define SHARED_HW_CFG_PREVENT_L1_ENTRY_MASK 0x00800000 + #define SHARED_HW_CFG_PREVENT_L1_ENTRY_DISABLED 0x00000000 + #define SHARED_HW_CFG_PREVENT_L1_ENTRY_ENABLED 0x00800000 + + + /* Set the MDC/MDIO access for the first external phy */ + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK 0x1C000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT 26 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE 0x00000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0 0x04000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1 0x08000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH 0x0c000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED 0x10000000 + + /* Set the MDC/MDIO access for the second external phy */ + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK 0xE0000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT 29 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE 0x00000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0 0x20000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1 0x40000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000 + + /* Max number of PF MSIX vectors */ + uint32_t config_3; /* 0x11C */ + #define SHARED_HW_CFG_PF_MSIX_MAX_NUM_MASK 0x0000007F + #define SHARED_HW_CFG_PF_MSIX_MAX_NUM_SHIFT 0 + + uint32_t ump_nc_si_config; /* 0x120 */ + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT 0 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC 0x00000000 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY 0x00000001 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII 0x00000000 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII 0x00000002 + + /* Reserved bits: 226-230 */ + + /* The output pin template BSC_SEL which selects the I2C for this + port in the I2C Mux */ + uint32_t board; /* 0x124 */ + #define SHARED_HW_CFG_E3_I2C_MUX0_MASK 0x0000003F + #define 
SHARED_HW_CFG_E3_I2C_MUX0_SHIFT 0 + + #define SHARED_HW_CFG_E3_I2C_MUX1_MASK 0x00000FC0 + #define SHARED_HW_CFG_E3_I2C_MUX1_SHIFT 6 + /* Use the PIN_CFG_XXX defines on top */ + #define SHARED_HW_CFG_BOARD_REV_MASK 0x00FF0000 + #define SHARED_HW_CFG_BOARD_REV_SHIFT 16 + + #define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK 0x0F000000 + #define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT 24 + + #define SHARED_HW_CFG_BOARD_MINOR_VER_MASK 0xF0000000 + #define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT 28 + + uint32_t wc_lane_config; /* 0x128 */ + #define SHARED_HW_CFG_LANE_SWAP_CFG_MASK 0x0000FFFF + #define SHARED_HW_CFG_LANE_SWAP_CFG_SHIFT 0 + #define SHARED_HW_CFG_LANE_SWAP_CFG_32103210 0x00001b1b + #define SHARED_HW_CFG_LANE_SWAP_CFG_32100123 0x00001be4 + #define SHARED_HW_CFG_LANE_SWAP_CFG_31200213 0x000027d8 + #define SHARED_HW_CFG_LANE_SWAP_CFG_02133120 0x0000d827 + #define SHARED_HW_CFG_LANE_SWAP_CFG_01233210 0x0000e41b + #define SHARED_HW_CFG_LANE_SWAP_CFG_01230123 0x0000e4e4 + #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000FF + #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0 + #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000FF00 + #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8 + + /* TX lane Polarity swap */ + #define SHARED_HW_CFG_TX_LANE0_POL_FLIP_ENABLED 0x00010000 + #define SHARED_HW_CFG_TX_LANE1_POL_FLIP_ENABLED 0x00020000 + #define SHARED_HW_CFG_TX_LANE2_POL_FLIP_ENABLED 0x00040000 + #define SHARED_HW_CFG_TX_LANE3_POL_FLIP_ENABLED 0x00080000 + /* TX lane Polarity swap */ + #define SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED 0x00100000 + #define SHARED_HW_CFG_RX_LANE1_POL_FLIP_ENABLED 0x00200000 + #define SHARED_HW_CFG_RX_LANE2_POL_FLIP_ENABLED 0x00400000 + #define SHARED_HW_CFG_RX_LANE3_POL_FLIP_ENABLED 0x00800000 + + /* Selects the port layout of the board */ + #define SHARED_HW_CFG_E3_PORT_LAYOUT_MASK 0x0F000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_SHIFT 24 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_01 0x00000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_10 0x01000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_0123 0x02000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_1032 0x03000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_2301 0x04000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_3210 0x05000000 +}; + + +/**************************************************************************** + * Port HW configuration * + ****************************************************************************/ +struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ + + uint32_t pci_id; + #define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000FFFF + #define PORT_HW_CFG_PCI_DEVICE_ID_SHIFT 0 + + #define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xFFFF0000 + #define PORT_HW_CFG_PCI_VENDOR_ID_SHIFT 16 + + uint32_t pci_sub_id; + #define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK 0x0000FFFF + #define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_SHIFT 0 + + #define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK 0xFFFF0000 + #define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_SHIFT 16 + + uint32_t power_dissipated; + #define PORT_HW_CFG_POWER_DIS_D0_MASK 0x000000FF + #define PORT_HW_CFG_POWER_DIS_D0_SHIFT 0 + #define PORT_HW_CFG_POWER_DIS_D1_MASK 0x0000FF00 + #define PORT_HW_CFG_POWER_DIS_D1_SHIFT 8 + #define PORT_HW_CFG_POWER_DIS_D2_MASK 0x00FF0000 + #define PORT_HW_CFG_POWER_DIS_D2_SHIFT 16 + #define PORT_HW_CFG_POWER_DIS_D3_MASK 0xFF000000 + #define PORT_HW_CFG_POWER_DIS_D3_SHIFT 24 + + uint32_t power_consumed; + #define PORT_HW_CFG_POWER_CONS_D0_MASK 0x000000FF + #define PORT_HW_CFG_POWER_CONS_D0_SHIFT 0 + #define PORT_HW_CFG_POWER_CONS_D1_MASK 0x0000FF00 + #define 
PORT_HW_CFG_POWER_CONS_D1_SHIFT 8 + #define PORT_HW_CFG_POWER_CONS_D2_MASK 0x00FF0000 + #define PORT_HW_CFG_POWER_CONS_D2_SHIFT 16 + #define PORT_HW_CFG_POWER_CONS_D3_MASK 0xFF000000 + #define PORT_HW_CFG_POWER_CONS_D3_SHIFT 24 + + uint32_t mac_upper; + uint32_t mac_lower; /* 0x140 */ + #define PORT_HW_CFG_UPPERMAC_MASK 0x0000FFFF + #define PORT_HW_CFG_UPPERMAC_SHIFT 0 + + + uint32_t iscsi_mac_upper; /* Upper 16 bits are always zeroes */ + uint32_t iscsi_mac_lower; + + uint32_t rdma_mac_upper; /* Upper 16 bits are always zeroes */ + uint32_t rdma_mac_lower; + + uint32_t serdes_config; + #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK 0x0000FFFF + #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT 0 + + #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK 0xFFFF0000 + #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16 + + + /* Default values: 2P-64, 4P-32 */ + uint32_t reserved; + + uint32_t vf_config; /* 0x15C */ + #define PORT_HW_CFG_VF_PCI_DEVICE_ID_MASK 0xFFFF0000 + #define PORT_HW_CFG_VF_PCI_DEVICE_ID_SHIFT 16 + + uint32_t mf_pci_id; /* 0x160 */ + #define PORT_HW_CFG_MF_PCI_DEVICE_ID_MASK 0x0000FFFF + #define PORT_HW_CFG_MF_PCI_DEVICE_ID_SHIFT 0 + + /* Controls the TX laser of the SFP+ module */ + uint32_t sfp_ctrl; /* 0x164 */ + #define PORT_HW_CFG_TX_LASER_MASK 0x000000FF + #define PORT_HW_CFG_TX_LASER_SHIFT 0 + #define PORT_HW_CFG_TX_LASER_MDIO 0x00000000 + #define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001 + #define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002 + #define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003 + #define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004 + + /* Controls the fault module LED of the SFP+ */ + #define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00 + #define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8 + #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000 + #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100 + #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200 + #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300 + #define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400 + + /* The output pin TX_DIS that controls the TX laser of the SFP+ + module. Use the PIN_CFG_XXX defines on top */ + uint32_t e3_sfp_ctrl; /* 0x168 */ + #define PORT_HW_CFG_E3_TX_LASER_MASK 0x000000FF + #define PORT_HW_CFG_E3_TX_LASER_SHIFT 0 + + /* The output pin for SFPP_TYPE which turns on the Fault module LED */ + #define PORT_HW_CFG_E3_FAULT_MDL_LED_MASK 0x0000FF00 + #define PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT 8 + + /* The input pin MOD_ABS that indicates whether SFP+ module is + present or not. Use the PIN_CFG_XXX defines on top */ + #define PORT_HW_CFG_E3_MOD_ABS_MASK 0x00FF0000 + #define PORT_HW_CFG_E3_MOD_ABS_SHIFT 16 + + /* The output pin PWRDIS_SFP_X which disable the power of the SFP+ + module. Use the PIN_CFG_XXX defines on top */ + #define PORT_HW_CFG_E3_PWR_DIS_MASK 0xFF000000 + #define PORT_HW_CFG_E3_PWR_DIS_SHIFT 24 + + /* + * The input pin which signals module transmit fault. Use the + * PIN_CFG_XXX defines on top + */ + uint32_t e3_cmn_pin_cfg; /* 0x16C */ + #define PORT_HW_CFG_E3_TX_FAULT_MASK 0x000000FF + #define PORT_HW_CFG_E3_TX_FAULT_SHIFT 0 + + /* The output pin which reset the PHY. Use the PIN_CFG_XXX defines on + top */ + #define PORT_HW_CFG_E3_PHY_RESET_MASK 0x0000FF00 + #define PORT_HW_CFG_E3_PHY_RESET_SHIFT 8 + + /* + * The output pin which powers down the PHY. 
Use the PIN_CFG_XXX + * defines on top + */ + #define PORT_HW_CFG_E3_PWR_DOWN_MASK 0x00FF0000 + #define PORT_HW_CFG_E3_PWR_DOWN_SHIFT 16 + + /* The output pin values BSC_SEL which selects the I2C for this port + in the I2C Mux */ + #define PORT_HW_CFG_E3_I2C_MUX0_MASK 0x01000000 + #define PORT_HW_CFG_E3_I2C_MUX1_MASK 0x02000000 + + + /* + * The input pin I_FAULT which indicate over-current has occurred. + * Use the PIN_CFG_XXX defines on top + */ + uint32_t e3_cmn_pin_cfg1; /* 0x170 */ + #define PORT_HW_CFG_E3_OVER_CURRENT_MASK 0x000000FF + #define PORT_HW_CFG_E3_OVER_CURRENT_SHIFT 0 + + /* pause on host ring */ + uint32_t generic_features; /* 0x174 */ + #define PORT_HW_CFG_PAUSE_ON_HOST_RING_MASK 0x00000001 + #define PORT_HW_CFG_PAUSE_ON_HOST_RING_SHIFT 0 + #define PORT_HW_CFG_PAUSE_ON_HOST_RING_DISABLED 0x00000000 + #define PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED 0x00000001 + + /* SFP+ Tx Equalization: NIC recommended and tested value is 0xBEB2 + * LOM recommended and tested value is 0xBEB2. Using a different + * value means using a value not tested by BRCM + */ + uint32_t sfi_tap_values; /* 0x178 */ + #define PORT_HW_CFG_TX_EQUALIZATION_MASK 0x0000FFFF + #define PORT_HW_CFG_TX_EQUALIZATION_SHIFT 0 + + /* SFP+ Tx driver broadcast IDRIVER: NIC recommended and tested + * value is 0x2. LOM recommended and tested value is 0x2. Using a + * different value means using a value not tested by BRCM + */ + #define PORT_HW_CFG_TX_DRV_BROADCAST_MASK 0x000F0000 + #define PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT 16 + + uint32_t reserved0[5]; /* 0x17c */ + + uint32_t aeu_int_mask; /* 0x190 */ + + uint32_t media_type; /* 0x194 */ + #define PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK 0x000000FF + #define PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT 0 + + #define PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK 0x0000FF00 + #define PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT 8 + + #define PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK 0x00FF0000 + #define PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT 16 + + /* 4 times 16 bits for all 4 lanes. In case external PHY is present + (not direct mode), those values will not take effect on the 4 XGXS + lanes. For some external PHYs (such as 8706 and 8726) the values + will be used to configure the external PHY in those cases, not + all 4 values are needed. 
*/ + uint16_t xgxs_config_rx[4]; /* 0x198 */ + uint16_t xgxs_config_tx[4]; /* 0x1A0 */ + + + /* For storing FCOE mac on shared memory */ + uint32_t fcoe_fip_mac_upper; + #define PORT_HW_CFG_FCOE_UPPERMAC_MASK 0x0000ffff + #define PORT_HW_CFG_FCOE_UPPERMAC_SHIFT 0 + uint32_t fcoe_fip_mac_lower; + + uint32_t fcoe_wwn_port_name_upper; + uint32_t fcoe_wwn_port_name_lower; + + uint32_t fcoe_wwn_node_name_upper; + uint32_t fcoe_wwn_node_name_lower; + + /* wwpn for npiv enabled */ + uint32_t wwpn_for_npiv_config; /* 0x1C0 */ + #define PORT_HW_CFG_WWPN_FOR_NPIV_ENABLED_MASK 0x00000001 + #define PORT_HW_CFG_WWPN_FOR_NPIV_ENABLED_SHIFT 0 + #define PORT_HW_CFG_WWPN_FOR_NPIV_ENABLED_DISABLED 0x00000000 + #define PORT_HW_CFG_WWPN_FOR_NPIV_ENABLED_ENABLED 0x00000001 + + /* wwpn for npiv valid addresses */ + uint32_t wwpn_for_npiv_valid_addresses; /* 0x1C4 */ + #define PORT_HW_CFG_WWPN_FOR_NPIV_ADDRESS_BITMAP_MASK 0x0000FFFF + #define PORT_HW_CFG_WWPN_FOR_NPIV_ADDRESS_BITMAP_SHIFT 0 + + struct mac_addr wwpn_for_niv_macs[16]; + + /* Reserved bits: 2272-2336 For storing FCOE mac on shared memory */ + uint32_t Reserved1[14]; + + uint32_t pf_allocation; /* 0x280 */ + /* number of vfs per PF, if 0 - sriov disabled */ + #define PORT_HW_CFG_NUMBER_OF_VFS_MASK 0x000000FF + #define PORT_HW_CFG_NUMBER_OF_VFS_SHIFT 0 + + /* Enable RJ45 magjack pair swapping on 10GBase-T PHY (0=default), + 84833 only */ + uint32_t xgbt_phy_cfg; /* 0x284 */ + #define PORT_HW_CFG_RJ45_PAIR_SWAP_MASK 0x000000FF + #define PORT_HW_CFG_RJ45_PAIR_SWAP_SHIFT 0 + + uint32_t default_cfg; /* 0x288 */ + #define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003 + #define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0 + #define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000 + #define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001 + #define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002 + #define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003 + + #define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C + #define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2 + #define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000 + #define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004 + #define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008 + #define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c + + #define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030 + #define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4 + #define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000 + #define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010 + #define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020 + #define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030 + + #define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0 + #define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6 + #define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000 + #define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040 + #define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080 + #define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0 + + /* When KR link is required to be set to force which is not + KR-compliant, this parameter determine what is the trigger for it. + When GPIO is selected, low input will force the speed. Currently + default speed is 1G. In the future, it may be widen to select the + forced speed in with another parameter. Note when force-1G is + enabled, it override option 56: Link Speed option. 
*/ + #define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00 + #define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8 + #define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800 + #define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900 + /* Enable to determine with which GPIO to reset the external phy */ + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000 + + /* Enable BAM on KR */ + #define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000 + #define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20 + #define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000 + #define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000 + + /* Enable Common Mode Sense */ + #define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000 + #define PORT_HW_CFG_ENABLE_CMS_SHIFT 21 + #define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000 + #define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000 + + /* Determine the Serdes electrical interface */ + #define PORT_HW_CFG_NET_SERDES_IF_MASK 0x0F000000 + #define PORT_HW_CFG_NET_SERDES_IF_SHIFT 24 + #define PORT_HW_CFG_NET_SERDES_IF_SGMII 0x00000000 + #define PORT_HW_CFG_NET_SERDES_IF_XFI 0x01000000 + #define PORT_HW_CFG_NET_SERDES_IF_SFI 0x02000000 + #define PORT_HW_CFG_NET_SERDES_IF_KR 0x03000000 + #define PORT_HW_CFG_NET_SERDES_IF_DXGXS 0x04000000 + #define PORT_HW_CFG_NET_SERDES_IF_KR2 0x05000000 + + /* SFP+ main TAP and post TAP volumes */ + #define PORT_HW_CFG_TAP_LEVELS_MASK 0x70000000 + #define PORT_HW_CFG_TAP_LEVELS_SHIFT 28 + #define PORT_HW_CFG_TAP_LEVELS_POST_15_MAIN_43 0x00000000 + #define PORT_HW_CFG_TAP_LEVELS_POST_14_MAIN_44 0x10000000 + #define PORT_HW_CFG_TAP_LEVELS_POST_13_MAIN_45 0x20000000 + #define PORT_HW_CFG_TAP_LEVELS_POST_12_MAIN_46 0x30000000 + #define PORT_HW_CFG_TAP_LEVELS_POST_11_MAIN_47 0x40000000 + #define PORT_HW_CFG_TAP_LEVELS_POST_10_MAIN_48 0x50000000 + + uint32_t speed_capability_mask2; /* 0x28C */ + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL 0x00000001 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_HALF 0x00000002 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_HALF 0x00000004 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL 0x00000008 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G 0x00000010 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_5G 0x00000020 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G 0x00000040 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_20G 0x00000080 + + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK 0xFFFF0000 + #define 
PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT 16 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL 0x00010000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_HALF 0x00020000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_HALF 0x00040000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL 0x00080000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G 0x00100000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_5G 0x00200000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G 0x00400000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_20G 0x00800000 + + + /* In the case where two media types (e.g. copper and fiber) are + present and electrically active at the same time, PHY Selection + will determine which of the two PHYs will be designated as the + Active PHY and used for a connection to the network. */ + uint32_t multi_phy_config; /* 0x290 */ + #define PORT_HW_CFG_PHY_SELECTION_MASK 0x00000007 + #define PORT_HW_CFG_PHY_SELECTION_SHIFT 0 + #define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT 0x00000000 + #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY 0x00000001 + #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY 0x00000002 + #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003 + #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004 + + /* When enabled, all second phy nvram parameters will be swapped + with the first phy parameters */ + #define PORT_HW_CFG_PHY_SWAPPED_MASK 0x00000008 + #define PORT_HW_CFG_PHY_SWAPPED_SHIFT 3 + #define PORT_HW_CFG_PHY_SWAPPED_DISABLED 0x00000000 + #define PORT_HW_CFG_PHY_SWAPPED_ENABLED 0x00000008 + + + /* Address of the second external phy */ + uint32_t external_phy_config2; /* 0x294 */ + #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK 0x000000FF + #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT 0 + + /* The second XGXS external PHY type */ + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK 0x0000FF00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT 8 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT 0x00000000 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8071 0x00000100 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8072 0x00000200 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8073 0x00000300 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8705 0x00000400 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8706 0x00000500 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8726 0x00000600 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8481 0x00000700 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101 0x00000800 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8727 0x00000900 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8727_NOC 0x00000a00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X84823 0x00000b00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54640 0x00000c00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X84833 0x00000d00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54618SE 0x00000e00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X8722 0x00000f00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X54616 0x00001000 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BNX2X84834 0x00001100 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00 + + + /* 4 times 16 bits for all 4 lanes. For some external PHYs (such as + 8706, 8726 and 8727) not all 4 values are needed. 
*/ + uint16_t xgxs_config2_rx[4]; /* 0x296 */ + uint16_t xgxs_config2_tx[4]; /* 0x2A0 */ + + uint32_t lane_config; + #define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000FFFF + #define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0 + /* AN and forced */ + #define PORT_HW_CFG_LANE_SWAP_CFG_01230123 0x00001b1b + /* forced only */ + #define PORT_HW_CFG_LANE_SWAP_CFG_01233210 0x00001be4 + /* forced only */ + #define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8 + /* forced only */ + #define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4 + #define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000FF + #define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0 + #define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000FF00 + #define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8 + #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK 0x0000C000 + #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT 14 + + /* Indicate whether to swap the external phy polarity */ + #define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000 + #define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000 + #define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000 + + + uint32_t external_phy_config; + #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000FF + #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT 0 + + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK 0x0000FF00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT 8 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT 0x00000000 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8071 0x00000100 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8072 0x00000200 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8073 0x00000300 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8705 0x00000400 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8706 0x00000500 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726 0x00000600 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8481 0x00000700 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727 0x00000900 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727_NOC 0x00000a00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823 0x00000b00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54640 0x00000c00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833 0x00000d00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE 0x00000e00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722 0x00000f00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54616 0x00001000 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834 0x00001100 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 + + #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK 0x00FF0000 + #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT 16 + + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xFF000000 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT 24 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT 0x00000000 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BNX2X5482 0x01000000 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD 0x02000000 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN 0xff000000 + + uint32_t speed_capability_mask; + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK 0x0000FFFF + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT 0 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL 0x00000001 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF 0x00000002 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF 0x00000004 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL 0x00000008 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_1G 0x00000010 + #define 
PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G 0x00000020 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10G 0x00000040 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_20G 0x00000080 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED 0x0000f000 + + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK 0xFFFF0000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT 16 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL 0x00010000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF 0x00020000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF 0x00040000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL 0x00080000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G 0x00100000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G 0x00200000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G 0x00400000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_20G 0x00800000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED 0xf0000000 + + /* A place to hold the original MAC address as a backup */ + uint32_t backup_mac_upper; /* 0x2B4 */ + uint32_t backup_mac_lower; /* 0x2B8 */ + +}; + + +/**************************************************************************** + * Shared Feature configuration * + ****************************************************************************/ +struct shared_feat_cfg { /* NVRAM Offset */ + + uint32_t config; /* 0x450 */ + #define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001 + + /* Use NVRAM values instead of HW default values */ + #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_MASK \ + 0x00000002 + #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED \ + 0x00000000 + #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED \ + 0x00000002 + + #define SHARED_FEAT_CFG_NCSI_ID_METHOD_MASK 0x00000008 + #define SHARED_FEAT_CFG_NCSI_ID_METHOD_SPIO 0x00000000 + #define SHARED_FEAT_CFG_NCSI_ID_METHOD_NVRAM 0x00000008 + + #define SHARED_FEAT_CFG_NCSI_ID_MASK 0x00000030 + #define SHARED_FEAT_CFG_NCSI_ID_SHIFT 4 + + /* Override the OTP back to single function mode. When using GPIO, + high means only SF, 0 is according to CLP configuration */ + #define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE 0x00000400 + + /* Act as if the FCoE license is invalid */ + #define SHARED_FEAT_CFG_PREVENT_FCOE 0x00001000 + + /* Force FLR capability to all ports */ + #define SHARED_FEAT_CFG_FORCE_FLR_CAPABILITY 0x00002000 + + /* Act as if the iSCSI license is invalid */ + #define SHARED_FEAT_CFG_PREVENT_ISCSI_MASK 0x00004000 + #define SHARED_FEAT_CFG_PREVENT_ISCSI_SHIFT 14 + #define SHARED_FEAT_CFG_PREVENT_ISCSI_DISABLED 0x00000000 + #define SHARED_FEAT_CFG_PREVENT_ISCSI_ENABLED 0x00004000 + + /* The interval in seconds between sending LLDP packets. 
Set to zero + to disable the feature */ + #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_MASK 0x00FF0000 + #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_SHIFT 16 + + /* The assigned device type ID for LLDP usage */ + #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_MASK 0xFF000000 + #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_SHIFT 24 + +}; + + +/**************************************************************************** + * Port Feature configuration * + ****************************************************************************/ +struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */ + + uint32_t config; + #define PORT_FEAT_CFG_BAR1_SIZE_MASK 0x0000000F + #define PORT_FEAT_CFG_BAR1_SIZE_SHIFT 0 + #define PORT_FEAT_CFG_BAR1_SIZE_DISABLED 0x00000000 + #define PORT_FEAT_CFG_BAR1_SIZE_64K 0x00000001 + #define PORT_FEAT_CFG_BAR1_SIZE_128K 0x00000002 + #define PORT_FEAT_CFG_BAR1_SIZE_256K 0x00000003 + #define PORT_FEAT_CFG_BAR1_SIZE_512K 0x00000004 + #define PORT_FEAT_CFG_BAR1_SIZE_1M 0x00000005 + #define PORT_FEAT_CFG_BAR1_SIZE_2M 0x00000006 + #define PORT_FEAT_CFG_BAR1_SIZE_4M 0x00000007 + #define PORT_FEAT_CFG_BAR1_SIZE_8M 0x00000008 + #define PORT_FEAT_CFG_BAR1_SIZE_16M 0x00000009 + #define PORT_FEAT_CFG_BAR1_SIZE_32M 0x0000000a + #define PORT_FEAT_CFG_BAR1_SIZE_64M 0x0000000b + #define PORT_FEAT_CFG_BAR1_SIZE_128M 0x0000000c + #define PORT_FEAT_CFG_BAR1_SIZE_256M 0x0000000d + #define PORT_FEAT_CFG_BAR1_SIZE_512M 0x0000000e + #define PORT_FEAT_CFG_BAR1_SIZE_1G 0x0000000f + #define PORT_FEAT_CFG_BAR2_SIZE_MASK 0x000000F0 + #define PORT_FEAT_CFG_BAR2_SIZE_SHIFT 4 + #define PORT_FEAT_CFG_BAR2_SIZE_DISABLED 0x00000000 + #define PORT_FEAT_CFG_BAR2_SIZE_64K 0x00000010 + #define PORT_FEAT_CFG_BAR2_SIZE_128K 0x00000020 + #define PORT_FEAT_CFG_BAR2_SIZE_256K 0x00000030 + #define PORT_FEAT_CFG_BAR2_SIZE_512K 0x00000040 + #define PORT_FEAT_CFG_BAR2_SIZE_1M 0x00000050 + #define PORT_FEAT_CFG_BAR2_SIZE_2M 0x00000060 + #define PORT_FEAT_CFG_BAR2_SIZE_4M 0x00000070 + #define PORT_FEAT_CFG_BAR2_SIZE_8M 0x00000080 + #define PORT_FEAT_CFG_BAR2_SIZE_16M 0x00000090 + #define PORT_FEAT_CFG_BAR2_SIZE_32M 0x000000a0 + #define PORT_FEAT_CFG_BAR2_SIZE_64M 0x000000b0 + #define PORT_FEAT_CFG_BAR2_SIZE_128M 0x000000c0 + #define PORT_FEAT_CFG_BAR2_SIZE_256M 0x000000d0 + #define PORT_FEAT_CFG_BAR2_SIZE_512M 0x000000e0 + #define PORT_FEAT_CFG_BAR2_SIZE_1G 0x000000f0 + + #define PORT_FEAT_CFG_DCBX_MASK 0x00000100 + #define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000 + #define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100 + + #define PORT_FEAT_CFG_AUTOGREEEN_MASK 0x00000200 + #define PORT_FEAT_CFG_AUTOGREEEN_SHIFT 9 + #define PORT_FEAT_CFG_AUTOGREEEN_DISABLED 0x00000000 + #define PORT_FEAT_CFG_AUTOGREEEN_ENABLED 0x00000200 + + #define PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK 0x00000C00 + #define PORT_FEAT_CFG_STORAGE_PERSONALITY_SHIFT 10 + #define PORT_FEAT_CFG_STORAGE_PERSONALITY_DEFAULT 0x00000000 + #define PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE 0x00000400 + #define PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI 0x00000800 + #define PORT_FEAT_CFG_STORAGE_PERSONALITY_BOTH 0x00000c00 + + #define PORT_FEATURE_EN_SIZE_MASK 0x0f000000 + #define PORT_FEATURE_EN_SIZE_SHIFT 24 + #define PORT_FEATURE_WOL_ENABLED 0x01000000 + #define PORT_FEATURE_MBA_ENABLED 0x02000000 + #define PORT_FEATURE_MFW_ENABLED 0x04000000 + + /* Advertise expansion ROM even if MBA is disabled */ + #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_MASK 0x08000000 + #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_DISABLED 0x00000000 + #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_ENABLED 0x08000000 + + /* 
Check the optic vendor via i2c against a list of approved modules + in a separate nvram image */ + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK 0xE0000000 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT 29 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT \ + 0x00000000 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER \ + 0x20000000 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG 0x40000000 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN 0x60000000 + + uint32_t wol_config; + /* Default is used when driver sets to "auto" mode */ + #define PORT_FEATURE_WOL_ACPI_UPON_MGMT 0x00000010 + + uint32_t mba_config; + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 0x00000007 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT 0 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0x00000000 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 0x00000001 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 0x00000002 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB 0x00000003 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT 0x00000004 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE 0x00000007 + + #define PORT_FEATURE_MBA_BOOT_RETRY_MASK 0x00000038 + #define PORT_FEATURE_MBA_BOOT_RETRY_SHIFT 3 + + #define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x00000400 + #define PORT_FEATURE_MBA_HOTKEY_MASK 0x00000800 + #define PORT_FEATURE_MBA_HOTKEY_CTRL_S 0x00000000 + #define PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x00000800 + + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0x000FF000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT 12 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0x00000000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x00001000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x00002000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x00003000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x00004000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x00005000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x00006000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x00007000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x00008000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0x00009000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0x0000a000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0x0000b000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0x0000c000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0x0000d000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0x0000e000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M 0x0000f000 + #define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0x00F00000 + #define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT 20 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x03000000 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT 24 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0x00000000 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x01000000 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x02000000 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x03000000 + #define PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3C000000 + #define PORT_FEATURE_MBA_LINK_SPEED_SHIFT 26 + #define PORT_FEATURE_MBA_LINK_SPEED_AUTO 0x00000000 + #define PORT_FEATURE_MBA_LINK_SPEED_10M_HALF 0x04000000 + #define PORT_FEATURE_MBA_LINK_SPEED_10M_FULL 0x08000000 + #define PORT_FEATURE_MBA_LINK_SPEED_100M_HALF 0x0c000000 + #define PORT_FEATURE_MBA_LINK_SPEED_100M_FULL 0x10000000 + #define PORT_FEATURE_MBA_LINK_SPEED_1G 0x14000000 + #define PORT_FEATURE_MBA_LINK_SPEED_2_5G 0x18000000 + #define PORT_FEATURE_MBA_LINK_SPEED_10G 0x1c000000 + #define PORT_FEATURE_MBA_LINK_SPEED_20G 0x20000000 + + uint32_t Reserved0; /* 0x460 */ + + uint32_t mba_vlan_cfg; + #define 
PORT_FEATURE_MBA_VLAN_TAG_MASK 0x0000FFFF + #define PORT_FEATURE_MBA_VLAN_TAG_SHIFT 0 + #define PORT_FEATURE_MBA_VLAN_EN 0x00010000 + + uint32_t Reserved1; + uint32_t smbus_config; + #define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe + #define PORT_FEATURE_SMBUS_ADDR_SHIFT 1 + + uint32_t vf_config; + #define PORT_FEAT_CFG_VF_BAR2_SIZE_MASK 0x0000000F + #define PORT_FEAT_CFG_VF_BAR2_SIZE_SHIFT 0 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_DISABLED 0x00000000 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_4K 0x00000001 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_8K 0x00000002 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_16K 0x00000003 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_32K 0x00000004 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_64K 0x00000005 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_128K 0x00000006 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_256K 0x00000007 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_512K 0x00000008 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_1M 0x00000009 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_2M 0x0000000a + #define PORT_FEAT_CFG_VF_BAR2_SIZE_4M 0x0000000b + #define PORT_FEAT_CFG_VF_BAR2_SIZE_8M 0x0000000c + #define PORT_FEAT_CFG_VF_BAR2_SIZE_16M 0x0000000d + #define PORT_FEAT_CFG_VF_BAR2_SIZE_32M 0x0000000e + #define PORT_FEAT_CFG_VF_BAR2_SIZE_64M 0x0000000f + + uint32_t link_config; /* Used as HW defaults for the driver */ + + #define PORT_FEATURE_FLOW_CONTROL_MASK 0x00000700 + #define PORT_FEATURE_FLOW_CONTROL_SHIFT 8 + #define PORT_FEATURE_FLOW_CONTROL_AUTO 0x00000000 + #define PORT_FEATURE_FLOW_CONTROL_TX 0x00000100 + #define PORT_FEATURE_FLOW_CONTROL_RX 0x00000200 + #define PORT_FEATURE_FLOW_CONTROL_BOTH 0x00000300 + #define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400 + #define PORT_FEATURE_FLOW_CONTROL_SAFC_RX 0x00000500 + #define PORT_FEATURE_FLOW_CONTROL_SAFC_TX 0x00000600 + #define PORT_FEATURE_FLOW_CONTROL_SAFC_BOTH 0x00000700 + + #define PORT_FEATURE_LINK_SPEED_MASK 0x000F0000 + #define PORT_FEATURE_LINK_SPEED_SHIFT 16 + #define PORT_FEATURE_LINK_SPEED_AUTO 0x00000000 + #define PORT_FEATURE_LINK_SPEED_10M_FULL 0x00010000 + #define PORT_FEATURE_LINK_SPEED_10M_HALF 0x00020000 + #define PORT_FEATURE_LINK_SPEED_100M_HALF 0x00030000 + #define PORT_FEATURE_LINK_SPEED_100M_FULL 0x00040000 + #define PORT_FEATURE_LINK_SPEED_1G 0x00050000 + #define PORT_FEATURE_LINK_SPEED_2_5G 0x00060000 + #define PORT_FEATURE_LINK_SPEED_10G_CX4 0x00070000 + #define PORT_FEATURE_LINK_SPEED_20G 0x00080000 + + #define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000 + #define PORT_FEATURE_CONNECTED_SWITCH_SHIFT 24 + /* (forced) low speed switch (< 10G) */ + #define PORT_FEATURE_CON_SWITCH_1G_SWITCH 0x00000000 + /* (forced) high speed switch (>= 10G) */ + #define PORT_FEATURE_CON_SWITCH_10G_SWITCH 0x01000000 + #define PORT_FEATURE_CON_SWITCH_AUTO_DETECT 0x02000000 + #define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT 0x03000000 + + + /* The default for MCP link configuration, + uses the same defines as link_config */ + uint32_t mfw_wol_link_cfg; + + /* The default for the driver of the second external phy, + uses the same defines as link_config */ + uint32_t link_config2; /* 0x47C */ + + /* The default for MCP of the second external phy, + uses the same defines as link_config */ + uint32_t mfw_wol_link_cfg2; /* 0x480 */ + + + /* EEE power saving mode */ + uint32_t eee_power_mode; /* 0x484 */ + #define PORT_FEAT_CFG_EEE_POWER_MODE_MASK 0x000000FF + #define PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT 0 + #define PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED 0x00000000 + #define PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED 0x00000001 + #define PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE 
0x00000002 + #define PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY 0x00000003 + + + uint32_t Reserved2[16]; /* 0x488 */ +}; + +/**************************************************************************** + * Device Information * + ****************************************************************************/ +struct shm_dev_info { /* size */ + + uint32_t bc_rev; /* 8 bits each: major, minor, build */ /* 4 */ + + struct shared_hw_cfg shared_hw_config; /* 40 */ + + struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */ + + struct shared_feat_cfg shared_feature_config; /* 4 */ + + struct port_feat_cfg port_feature_config[PORT_MAX];/* 116*2=232 */ + +}; + +struct extended_dev_info_shared_cfg { /* NVRAM OFFSET */ + + /* Threshold in celcius to start using the fan */ + uint32_t temperature_monitor1; /* 0x4000 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_THRESH_MASK 0x0000007F + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_THRESH_SHIFT 0 + + /* Threshold in celcius to shut down the board */ + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_THRESH_MASK 0x00007F00 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_THRESH_SHIFT 8 + + /* EPIO of fan temperature status */ + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_MASK 0x00FF0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_SHIFT 16 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_NA 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO0 0x00010000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO1 0x00020000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO2 0x00030000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO3 0x00040000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO4 0x00050000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO5 0x00060000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO6 0x00070000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO7 0x00080000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO8 0x00090000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO9 0x000a0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO10 0x000b0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO11 0x000c0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO12 0x000d0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO13 0x000e0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO14 0x000f0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO15 0x00100000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO16 0x00110000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO17 0x00120000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO18 0x00130000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO19 0x00140000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO20 0x00150000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO21 0x00160000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO22 0x00170000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO23 0x00180000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO24 0x00190000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO25 0x001a0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO26 0x001b0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO27 0x001c0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO28 0x001d0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO29 0x001e0000 + #define 
EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO30 0x001f0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_FAN_EPIO_EPIO31 0x00200000 + + /* EPIO of shut down temperature status */ + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_MASK 0xFF000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_SHIFT 24 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_NA 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO0 0x01000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO1 0x02000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO2 0x03000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO3 0x04000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO4 0x05000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO5 0x06000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO6 0x07000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO7 0x08000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO8 0x09000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO9 0x0a000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO10 0x0b000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO11 0x0c000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO12 0x0d000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO13 0x0e000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO14 0x0f000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO15 0x10000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO16 0x11000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO17 0x12000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO18 0x13000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO19 0x14000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO20 0x15000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO21 0x16000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO22 0x17000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO23 0x18000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO24 0x19000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO25 0x1a000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO26 0x1b000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO27 0x1c000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO28 0x1d000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO29 0x1e000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO30 0x1f000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SHUT_EPIO_EPIO31 0x20000000 + + + /* EPIO of shut down temperature status */ + uint32_t temperature_monitor2; /* 0x4004 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_PERIOD_MASK 0x0000FFFF + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_PERIOD_SHIFT 0 + + + /* MFW flavor to be used */ + uint32_t mfw_cfg; /* 0x4008 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_MFW_FLAVOR_MASK 0x000000FF + #define EXTENDED_DEV_INFO_SHARED_CFG_MFW_FLAVOR_SHIFT 0 + #define EXTENDED_DEV_INFO_SHARED_CFG_MFW_FLAVOR_NA 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_MFW_FLAVOR_A 0x00000001 + + /* Should NIC data query remain enabled upon last drv unload */ + #define EXTENDED_DEV_INFO_SHARED_CFG_OCBB_EN_LAST_DRV_MASK 0x00000100 + #define EXTENDED_DEV_INFO_SHARED_CFG_OCBB_EN_LAST_DRV_SHIFT 8 + #define EXTENDED_DEV_INFO_SHARED_CFG_OCBB_EN_LAST_DRV_DISABLED 0x00000000 + #define 
EXTENDED_DEV_INFO_SHARED_CFG_OCBB_EN_LAST_DRV_ENABLED 0x00000100 + + /* Hide DCBX feature in CCM/BACS menus */ + #define EXTENDED_DEV_INFO_SHARED_CFG_HIDE_DCBX_FEAT_MASK 0x00010000 + #define EXTENDED_DEV_INFO_SHARED_CFG_HIDE_DCBX_FEAT_SHIFT 16 + #define EXTENDED_DEV_INFO_SHARED_CFG_HIDE_DCBX_FEAT_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_HIDE_DCBX_FEAT_ENABLED 0x00010000 + + uint32_t smbus_config; /* 0x400C */ + #define EXTENDED_DEV_INFO_SHARED_CFG_SMBUS_ADDR_MASK 0x000000FF + #define EXTENDED_DEV_INFO_SHARED_CFG_SMBUS_ADDR_SHIFT 0 + + /* Switching regulator loop gain */ + uint32_t board_cfg; /* 0x4010 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_MASK 0x0000000F + #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_SHIFT 0 + #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_HW_DEFAULT 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_X2 0x00000008 + #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_X4 0x00000009 + #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_X8 0x0000000a + #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_X16 0x0000000b + #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_DIV8 0x0000000c + #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_DIV4 0x0000000d + #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_DIV2 0x0000000e + #define EXTENDED_DEV_INFO_SHARED_CFG_LOOP_GAIN_X1 0x0000000f + + /* whether shadow swim feature is supported */ + #define EXTENDED_DEV_INFO_SHARED_CFG_SHADOW_SWIM_MASK 0x00000100 + #define EXTENDED_DEV_INFO_SHARED_CFG_SHADOW_SWIM_SHIFT 8 + #define EXTENDED_DEV_INFO_SHARED_CFG_SHADOW_SWIM_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_SHADOW_SWIM_ENABLED 0x00000100 + + /* whether to show/hide SRIOV menu in CCM */ + #define EXTENDED_DEV_INFO_SHARED_CFG_SRIOV_SHOW_MENU_MASK 0x00000200 + #define EXTENDED_DEV_INFO_SHARED_CFG_SRIOV_SHOW_MENU_SHIFT 9 + #define EXTENDED_DEV_INFO_SHARED_CFG_SRIOV_SHOW_MENU 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_SRIOV_HIDE_MENU 0x00000200 + + /* Threshold in celcius for max continuous operation */ + uint32_t temperature_report; /* 0x4014 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_MCOT_MASK 0x0000007F + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_MCOT_SHIFT 0 + + /* Threshold in celcius for sensor caution */ + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SCT_MASK 0x00007F00 + #define EXTENDED_DEV_INFO_SHARED_CFG_TEMP_SCT_SHIFT 8 + + /* wwn node prefix to be used (unless value is 0) */ + uint32_t wwn_prefix; /* 0x4018 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_NODE_PREFIX0_MASK 0x000000FF + #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_NODE_PREFIX0_SHIFT 0 + + #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_NODE_PREFIX1_MASK 0x0000FF00 + #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_NODE_PREFIX1_SHIFT 8 + + /* wwn port prefix to be used (unless value is 0) */ + #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_PORT_PREFIX0_MASK 0x00FF0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_PORT_PREFIX0_SHIFT 16 + + /* wwn port prefix to be used (unless value is 0) */ + #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_PORT_PREFIX1_MASK 0xFF000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_WWN_PORT_PREFIX1_SHIFT 24 + + /* General debug nvm cfg */ + uint32_t dbg_cfg_flags; /* 0x401C */ + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_MASK 0x000FFFFF + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SHIFT 0 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_ENABLE 0x00000001 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_EN_SIGDET_FILTER 0x00000002 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SET_LP_TX_PRESET7 0x00000004 + #define 
EXTENDED_DEV_INFO_SHARED_CFG_DBG_SET_TX_ANA_DEFAULT 0x00000008 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SET_PLL_ANA_DEFAULT 0x00000010 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_FORCE_G1PLL_RETUNE 0x00000020 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SET_RX_ANA_DEFAULT 0x00000040 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_FORCE_SERDES_RX_CLK 0x00000080 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_DIS_RX_LP_EIEOS 0x00000100 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_FINALIZE_UCODE 0x00000200 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_HOLDOFF_REQ 0x00000400 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_RX_SIGDET_OVERRIDE 0x00000800 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_GP_PORG_UC_RESET 0x00001000 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SUPPRESS_COMPEN_EVT 0x00002000 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_ADJ_TXEQ_P0_P1 0x00004000 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_G3_PLL_RETUNE 0x00008000 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_SET_MAC_PHY_CTL8 0x00010000 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_DIS_MAC_G3_FRM_ERR 0x00020000 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_INFERRED_EI 0x00040000 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_GEN3_COMPLI_ENA 0x00080000 + + /* Debug signet rx threshold */ + uint32_t dbg_rx_sigdet_threshold; /* 0x4020 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_RX_SIGDET_MASK 0x00000007 + #define EXTENDED_DEV_INFO_SHARED_CFG_DBG_RX_SIGDET_SHIFT 0 + + /* Enable IFFE feature */ + uint32_t iffe_features; /* 0x4024 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_ENABLE_IFFE_MASK 0x00000001 + #define EXTENDED_DEV_INFO_SHARED_CFG_ENABLE_IFFE_SHIFT 0 + #define EXTENDED_DEV_INFO_SHARED_CFG_ENABLE_IFFE_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_ENABLE_IFFE_ENABLED 0x00000001 + + /* Allowable port enablement (bitmask for ports 3-1) */ + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_PORT_MASK 0x0000000E + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_PORT_SHIFT 1 + + /* Allow iSCSI offload override */ + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_ISCSI_MASK 0x00000010 + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_ISCSI_SHIFT 4 + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_ISCSI_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_ISCSI_ENABLED 0x00000010 + + /* Allow FCoE offload override */ + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_FCOE_MASK 0x00000020 + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_FCOE_SHIFT 5 + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_FCOE_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_FCOE_ENABLED 0x00000020 + + /* Tie to adaptor */ + #define EXTENDED_DEV_INFO_SHARED_CFG_TIE_ADAPTOR_MASK 0x00008000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TIE_ADAPTOR_SHIFT 15 + #define EXTENDED_DEV_INFO_SHARED_CFG_TIE_ADAPTOR_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TIE_ADAPTOR_ENABLED 0x00008000 + + /* Currently enabled port(s) (bitmask for ports 3-1) */ + uint32_t current_iffe_mask; /* 0x4028 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_CFG_MASK 0x0000000E + #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_CFG_SHIFT 1 + + /* Current iSCSI offload */ + #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_ISCSI_MASK 0x00000010 + #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_ISCSI_SHIFT 4 + #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_ISCSI_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_ISCSI_ENABLED 0x00000010 + + /* Current FCoE offload */ + #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_FCOE_MASK 0x00000020 + #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_FCOE_SHIFT 
5 + #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_FCOE_DISABLED 0x00000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_CURRENT_FCOE_ENABLED 0x00000020 + + /* FW set this pin to "0" (assert) these signal if either of its MAC + * or PHY specific threshold values is exceeded. + * Values are standard GPIO/EPIO pins. + */ + uint32_t threshold_pin; /* 0x402C */ + #define EXTENDED_DEV_INFO_SHARED_CFG_TCONTROL_PIN_MASK 0x000000FF + #define EXTENDED_DEV_INFO_SHARED_CFG_TCONTROL_PIN_SHIFT 0 + #define EXTENDED_DEV_INFO_SHARED_CFG_TWARNING_PIN_MASK 0x0000FF00 + #define EXTENDED_DEV_INFO_SHARED_CFG_TWARNING_PIN_SHIFT 8 + #define EXTENDED_DEV_INFO_SHARED_CFG_TCRITICAL_PIN_MASK 0x00FF0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_TCRITICAL_PIN_SHIFT 16 + + /* MAC die temperature threshold in Celsius. */ + uint32_t mac_threshold_val; /* 0x4030 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_CONTROL_MAC_THRESH_MASK 0x000000FF + #define EXTENDED_DEV_INFO_SHARED_CFG_CONTROL_MAC_THRESH_SHIFT 0 + #define EXTENDED_DEV_INFO_SHARED_CFG_WARNING_MAC_THRESH_MASK 0x0000FF00 + #define EXTENDED_DEV_INFO_SHARED_CFG_WARNING_MAC_THRESH_SHIFT 8 + #define EXTENDED_DEV_INFO_SHARED_CFG_CRITICAL_MAC_THRESH_MASK 0x00FF0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_CRITICAL_MAC_THRESH_SHIFT 16 + + /* PHY die temperature threshold in Celsius. */ + uint32_t phy_threshold_val; /* 0x4034 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_CONTROL_PHY_THRESH_MASK 0x000000FF + #define EXTENDED_DEV_INFO_SHARED_CFG_CONTROL_PHY_THRESH_SHIFT 0 + #define EXTENDED_DEV_INFO_SHARED_CFG_WARNING_PHY_THRESH_MASK 0x0000FF00 + #define EXTENDED_DEV_INFO_SHARED_CFG_WARNING_PHY_THRESH_SHIFT 8 + #define EXTENDED_DEV_INFO_SHARED_CFG_CRITICAL_PHY_THRESH_MASK 0x00FF0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_CRITICAL_PHY_THRESH_SHIFT 16 + + /* External pins to communicate with host. + * Values are standard GPIO/EPIO pins. + */ + uint32_t host_pin; /* 0x4038 */ + #define EXTENDED_DEV_INFO_SHARED_CFG_I2C_ISOLATE_MASK 0x000000FF + #define EXTENDED_DEV_INFO_SHARED_CFG_I2C_ISOLATE_SHIFT 0 + #define EXTENDED_DEV_INFO_SHARED_CFG_MEZZ_FAULT_MASK 0x0000FF00 + #define EXTENDED_DEV_INFO_SHARED_CFG_MEZZ_FAULT_SHIFT 8 + #define EXTENDED_DEV_INFO_SHARED_CFG_MEZZ_VPD_UPDATE_MASK 0x00FF0000 + #define EXTENDED_DEV_INFO_SHARED_CFG_MEZZ_VPD_UPDATE_SHIFT 16 + #define EXTENDED_DEV_INFO_SHARED_CFG_VPD_CACHE_COMP_MASK 0xFF000000 + #define EXTENDED_DEV_INFO_SHARED_CFG_VPD_CACHE_COMP_SHIFT 24 +}; + + +#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN) + #error "Missing either LITTLE_ENDIAN or BIG_ENDIAN definition." +#endif + +#define FUNC_0 0 +#define FUNC_1 1 +#define FUNC_2 2 +#define FUNC_3 3 +#define FUNC_4 4 +#define FUNC_5 5 +#define FUNC_6 6 +#define FUNC_7 7 +#define E1H_FUNC_MAX 8 +#define E2_FUNC_MAX 4 /* per path */ + +#define VN_0 0 +#define VN_1 1 +#define VN_2 2 +#define VN_3 3 +#define E1VN_MAX 1 +#define E1HVN_MAX 4 + +#define E2_VF_MAX 64 /* HC_REG_VF_CONFIGURATION_SIZE */ +/* This value (in milliseconds) determines the frequency of the driver + * issuing the PULSE message code. The firmware monitors this periodic + * pulse to determine when to switch to an OS-absent mode. */ +#define DRV_PULSE_PERIOD_MS 250 + +/* This value (in milliseconds) determines how long the driver should + * wait for an acknowledgement from the firmware before timing out. Once + * the firmware has timed out, the driver will assume there is no firmware + * running and there won't be any firmware-driver synchronization during a + * driver reset. 
*/ +#define FW_ACK_TIME_OUT_MS 5000 + +#define FW_ACK_POLL_TIME_MS 1 + +#define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS) + +#define MFW_TRACE_SIGNATURE 0x54524342 + +/**************************************************************************** + * Driver <-> FW Mailbox * + ****************************************************************************/ +struct drv_port_mb { + + uint32_t link_status; + /* Driver should update this field on any link change event */ + + #define LINK_STATUS_NONE (0<<0) + #define LINK_STATUS_LINK_FLAG_MASK 0x00000001 + #define LINK_STATUS_LINK_UP 0x00000001 + #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E + #define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_20GTFD (11<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_20GXFD (11<<1) + + #define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020 + #define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 + + #define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 + #define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080 + #define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 + + #define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 + #define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 + #define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800 + #define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000 + #define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000 + #define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000 + #define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000 + + #define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000 + #define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000 + + #define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000 + #define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000 + + #define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000 + #define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18) + #define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18) + #define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18) + #define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18) + + #define LINK_STATUS_SERDES_LINK 0x00100000 + + #define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000 + #define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000 + #define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000 + #define LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE 0x10000000 + + #define LINK_STATUS_PFC_ENABLED 0x20000000 + + #define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000 + #define LINK_STATUS_SFP_TX_FAULT 0x80000000 + + uint32_t port_stx; + + uint32_t stat_nig_timer; + + /* MCP firmware does not use this field */ + uint32_t ext_phy_fw_version; + +}; + + +struct drv_func_mb { + + uint32_t drv_mb_header; + #define DRV_MSG_CODE_MASK 0xffff0000 + #define DRV_MSG_CODE_LOAD_REQ 0x10000000 + #define DRV_MSG_CODE_LOAD_DONE 0x11000000 + 
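/*
 * Editor's note -- illustrative sketch only, not part of the imported
 * driver sources.  The drv_mb_header/fw_mb_header pair in this structure
 * implements a simple request/acknowledge mailbox: the driver writes a
 * DRV_MSG_CODE_* value OR'ed with an incrementing sequence number, then
 * polls fw_mb_header until the firmware echoes that sequence back along
 * with an FW_MSG_CODE_* response.  The helpers mb_read()/mb_write()/msleep()
 * below are hypothetical stand-ins for whatever shared-memory access and
 * delay primitives a driver provides; the #if 0 keeps the sketch out of
 * any build.
 */
#if 0	/* example usage, under the assumptions stated above */
static uint32_t
example_fw_command(volatile struct drv_func_mb *mb, uint32_t command)
{
	uint32_t seq, reply;
	int poll;

	/* Next sequence number, kept within DRV_MSG_SEQ_NUMBER_MASK. */
	seq = (mb_read(&mb->drv_mb_header) + 1) & DRV_MSG_SEQ_NUMBER_MASK;

	/* Post the command, e.g. command == DRV_MSG_CODE_LOAD_REQ. */
	mb_write(&mb->drv_mb_header, command | seq);

	/* Poll until the firmware echoes the sequence number back. */
	for (poll = 0; poll < FW_ACK_NUM_OF_POLL; poll++) {
		reply = mb_read(&mb->fw_mb_header);
		if ((reply & FW_MSG_SEQ_NUMBER_MASK) == seq)
			/* e.g. FW_MSG_CODE_DRV_LOAD_PORT on success */
			return reply & FW_MSG_CODE_MASK;
		msleep(FW_ACK_POLL_TIME_MS);
	}

	/* Timed out: caller assumes no firmware is running. */
	return 0;
}
#endif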
#define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000 + #define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000 + #define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000 + #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000 + #define DRV_MSG_CODE_DCC_OK 0x30000000 + #define DRV_MSG_CODE_DCC_FAILURE 0x31000000 + #define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000 + #define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000 + #define DRV_MSG_CODE_VALIDATE_KEY 0x70000000 + #define DRV_MSG_CODE_GET_CURR_KEY 0x80000000 + #define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000 + #define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000 + #define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000 + + /* + * The optic module verification command requires bootcode + * v5.0.6 or later, te specific optic module verification command + * requires bootcode v5.2.12 or later + */ + #define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL 0xa0000000 + #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006 + #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000 + #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234 + #define DRV_MSG_CODE_VRFY_AFEX_SUPPORTED 0xa2000000 + #define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002 + #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014 + #define REQ_BC_VER_4_MT_SUPPORTED 0x00070201 + #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201 + #define REQ_BC_VER_4_FCOE_FEATURES 0x00070209 + + #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000 + #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000 + #define REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF 0x00070401 + + #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 + + #define DRV_MSG_CODE_AFEX_DRIVER_SETMAC 0xd0000000 + #define DRV_MSG_CODE_AFEX_LISTGET_ACK 0xd1000000 + #define DRV_MSG_CODE_AFEX_LISTSET_ACK 0xd2000000 + #define DRV_MSG_CODE_AFEX_STATSGET_ACK 0xd3000000 + #define DRV_MSG_CODE_AFEX_VIFSET_ACK 0xd4000000 + + #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000 + #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000 + + #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000 + + #define DRV_MSG_CODE_RMMOD 0xdb000000 + #define REQ_BC_VER_4_RMMOD_CMD 0x0007080f + + #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 + #define REQ_BC_VER_4_SET_MF_BW 0x00060202 + #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 + + #define DRV_MSG_CODE_LINK_STATUS_CHANGED 0x01000000 + + #define DRV_MSG_CODE_INITIATE_FLR 0x02000000 + #define REQ_BC_VER_4_INITIATE_FLR 0x00070213 + + #define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000 + #define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000 + #define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000 + #define BIOS_MSG_CODE_VIRT_MAC_ISCSI 0xff040000 + + #define DRV_MSG_CODE_IMG_OFFSET_REQ 0xe2000000 + #define DRV_MSG_CODE_IMG_SIZE_REQ 0xe3000000 + + #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff + + uint32_t drv_mb_param; + #define DRV_MSG_CODE_SET_MF_BW_MIN_MASK 0x00ff0000 + #define DRV_MSG_CODE_SET_MF_BW_MAX_MASK 0xff000000 + + #define DRV_MSG_CODE_UNLOAD_NON_D3_POWER 0x00000001 + #define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET 0x00000002 + + #define DRV_MSG_CODE_LOAD_REQ_WITH_LFA 0x0000100a + #define DRV_MSG_CODE_LOAD_REQ_FORCE_LFA 0x00002000 + + #define DRV_MSG_CODE_USR_BLK_IMAGE_REQ 0x00000001 + + uint32_t fw_mb_header; + #define FW_MSG_CODE_MASK 0xffff0000 + #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 + #define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 + #define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 + /* Load common chip is supported from bc 6.0.0 */ + #define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP 0x00060000 + #define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 0x10130000 + + #define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000 + #define 
FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 + #define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000 + #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20110000 + #define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20120000 + #define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000 + #define FW_MSG_CODE_DCC_DONE 0x30100000 + #define FW_MSG_CODE_LLDP_DONE 0x40100000 + #define FW_MSG_CODE_DIAG_ENTER_DONE 0x50100000 + #define FW_MSG_CODE_DIAG_REFUSE 0x50200000 + #define FW_MSG_CODE_DIAG_EXIT_DONE 0x60100000 + #define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70100000 + #define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x70200000 + #define FW_MSG_CODE_GET_KEY_DONE 0x80100000 + #define FW_MSG_CODE_NO_KEY 0x80f00000 + #define FW_MSG_CODE_LIC_INFO_NOT_READY 0x80f80000 + #define FW_MSG_CODE_L2B_PRAM_LOADED 0x90100000 + #define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x90210000 + #define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x90220000 + #define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000 + #define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000 + #define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS 0xa0100000 + #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000 + #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000 + #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000 + #define FW_MSG_CODE_HW_SET_INVALID_IMAGE 0xb0100000 + + #define FW_MSG_CODE_AFEX_DRIVER_SETMAC_DONE 0xd0100000 + #define FW_MSG_CODE_AFEX_LISTGET_ACK 0xd1100000 + #define FW_MSG_CODE_AFEX_LISTSET_ACK 0xd2100000 + #define FW_MSG_CODE_AFEX_STATSGET_ACK 0xd3100000 + #define FW_MSG_CODE_AFEX_VIFSET_ACK 0xd4100000 + + #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000 + #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000 + + #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000 + + #define FW_MSG_CODE_RMMOD_ACK 0xdb100000 + + #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 + #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 + + #define FW_MSG_CODE_LINK_CHANGED_ACK 0x01100000 + + #define FW_MSG_CODE_FLR_ACK 0x02000000 + #define FW_MSG_CODE_FLR_NACK 0x02100000 + + #define FW_MSG_CODE_LIC_CHALLENGE 0xff010000 + #define FW_MSG_CODE_LIC_RESPONSE 0xff020000 + #define FW_MSG_CODE_VIRT_MAC_PRIM 0xff030000 + #define FW_MSG_CODE_VIRT_MAC_ISCSI 0xff040000 + + #define FW_MSG_CODE_IMG_OFFSET_RESPONSE 0xe2100000 + #define FW_MSG_CODE_IMG_SIZE_RESPONSE 0xe3100000 + + #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff + + uint32_t fw_mb_param; + + #define FW_PARAM_INVALID_IMG 0xffffffff + + uint32_t drv_pulse_mb; + #define DRV_PULSE_SEQ_MASK 0x00007fff + #define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 + /* + * The system time is in the format of + * (year-2001)*12*32 + month*32 + day. + */ + #define DRV_PULSE_ALWAYS_ALIVE 0x00008000 + /* + * Indicate to the firmware not to go into the + * OS-absent when it is not getting driver pulse. + * This is used for debugging as well for PXE(MBA). 
+ */ + + uint32_t mcp_pulse_mb; + #define MCP_PULSE_SEQ_MASK 0x00007fff + #define MCP_PULSE_ALWAYS_ALIVE 0x00008000 + /* Indicates to the driver not to assert due to lack + * of MCP response */ + #define MCP_EVENT_MASK 0xffff0000 + #define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 + + uint32_t iscsi_boot_signature; + uint32_t iscsi_boot_block_offset; + + uint32_t drv_status; + #define DRV_STATUS_PMF 0x00000001 + #define DRV_STATUS_VF_DISABLED 0x00000002 + #define DRV_STATUS_SET_MF_BW 0x00000004 + #define DRV_STATUS_LINK_EVENT 0x00000008 + + #define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00 + #define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100 + #define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200 + #define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS 0x00000400 + #define DRV_STATUS_DCC_RESERVED1 0x00000800 + #define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000 + #define DRV_STATUS_DCC_SET_PRIORITY 0x00002000 + + #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000 + #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000 + #define DRV_STATUS_AFEX_EVENT_MASK 0x03f00000 + #define DRV_STATUS_AFEX_LISTGET_REQ 0x00100000 + #define DRV_STATUS_AFEX_LISTSET_REQ 0x00200000 + #define DRV_STATUS_AFEX_STATSGET_REQ 0x00400000 + #define DRV_STATUS_AFEX_VIFSET_REQ 0x00800000 + + #define DRV_STATUS_DRV_INFO_REQ 0x04000000 + + #define DRV_STATUS_EEE_NEGOTIATION_RESULTS 0x08000000 + + uint32_t virt_mac_upper; + #define VIRT_MAC_SIGN_MASK 0xffff0000 + #define VIRT_MAC_SIGNATURE 0x564d0000 + uint32_t virt_mac_lower; + +}; + + +/**************************************************************************** + * Management firmware state * + ****************************************************************************/ +/* Allocate 440 bytes for management firmware */ +#define MGMTFW_STATE_WORD_SIZE 110 + +struct mgmtfw_state { + uint32_t opaque[MGMTFW_STATE_WORD_SIZE]; +}; + + +/**************************************************************************** + * Multi-Function configuration * + ****************************************************************************/ +struct shared_mf_cfg { + + uint32_t clp_mb; + #define SHARED_MF_CLP_SET_DEFAULT 0x00000000 + /* set by CLP */ + #define SHARED_MF_CLP_EXIT 0x00000001 + /* set by MCP */ + #define SHARED_MF_CLP_EXIT_DONE 0x00010000 + +}; + +struct port_mf_cfg { + + uint32_t dynamic_cfg; /* device control channel */ + #define PORT_MF_CFG_E1HOV_TAG_MASK 0x0000ffff + #define PORT_MF_CFG_E1HOV_TAG_SHIFT 0 + #define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK + + uint32_t reserved[1]; + +}; + +struct func_mf_cfg { + + uint32_t config; + /* E/R/I/D */ + /* function 0 of each port cannot be hidden */ + #define FUNC_MF_CFG_FUNC_HIDE 0x00000001 + + #define FUNC_MF_CFG_PROTOCOL_MASK 0x00000006 + #define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000000 + #define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000002 + #define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004 + #define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000006 + #define FUNC_MF_CFG_PROTOCOL_DEFAULT \ + FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA + + #define FUNC_MF_CFG_FUNC_DISABLED 0x00000008 + #define FUNC_MF_CFG_FUNC_DELETED 0x00000010 + + #define FUNC_MF_CFG_FUNC_BOOT_MASK 0x00000060 + #define FUNC_MF_CFG_FUNC_BOOT_BIOS_CTRL 0x00000000 + #define FUNC_MF_CFG_FUNC_BOOT_VCM_DISABLED 0x00000020 + #define FUNC_MF_CFG_FUNC_BOOT_VCM_ENABLED 0x00000040 + + /* PRI */ + /* 0 - low priority, 3 - high priority */ + #define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK 0x00000300 + #define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT 8 + #define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT 
0x00000000 + + /* MINBW, MAXBW */ + /* value range - 0..100, increments in 100Mbps */ + #define FUNC_MF_CFG_MIN_BW_MASK 0x00ff0000 + #define FUNC_MF_CFG_MIN_BW_SHIFT 16 + #define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 + #define FUNC_MF_CFG_MAX_BW_MASK 0xff000000 + #define FUNC_MF_CFG_MAX_BW_SHIFT 24 + #define FUNC_MF_CFG_MAX_BW_DEFAULT 0x64000000 + + uint32_t mac_upper; /* MAC */ + #define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff + #define FUNC_MF_CFG_UPPERMAC_SHIFT 0 + #define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK + uint32_t mac_lower; + #define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff + + uint32_t e1hov_tag; /* VNI */ + #define FUNC_MF_CFG_E1HOV_TAG_MASK 0x0000ffff + #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0 + #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK + + /* afex default VLAN ID - 12 bits */ + #define FUNC_MF_CFG_AFEX_VLAN_MASK 0x0fff0000 + #define FUNC_MF_CFG_AFEX_VLAN_SHIFT 16 + + uint32_t afex_config; + #define FUNC_MF_CFG_AFEX_COS_FILTER_MASK 0x000000ff + #define FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT 0 + #define FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK 0x0000ff00 + #define FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT 8 + #define FUNC_MF_CFG_AFEX_MBA_ENABLED_VAL 0x00000100 + #define FUNC_MF_CFG_AFEX_VLAN_MODE_MASK 0x000f0000 + #define FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT 16 + + uint32_t pf_allocation; + /* number of vfs in function, if 0 - sriov disabled */ + #define FUNC_MF_CFG_NUMBER_OF_VFS_MASK 0x000000FF + #define FUNC_MF_CFG_NUMBER_OF_VFS_SHIFT 0 +}; + +enum mf_cfg_afex_vlan_mode { + FUNC_MF_CFG_AFEX_VLAN_TRUNK_MODE = 0, + FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE, + FUNC_MF_CFG_AFEX_VLAN_TRUNK_TAG_NATIVE_MODE +}; + +/* This structure is not applicable and should not be accessed on 57711 */ +struct func_ext_cfg { + uint32_t func_cfg; + #define MACP_FUNC_CFG_FLAGS_MASK 0x0000007F + #define MACP_FUNC_CFG_FLAGS_SHIFT 0 + #define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001 + #define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002 + #define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004 + #define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008 + #define MACP_FUNC_CFG_PAUSE_ON_HOST_RING 0x00000080 + + uint32_t iscsi_mac_addr_upper; + uint32_t iscsi_mac_addr_lower; + + uint32_t fcoe_mac_addr_upper; + uint32_t fcoe_mac_addr_lower; + + uint32_t fcoe_wwn_port_name_upper; + uint32_t fcoe_wwn_port_name_lower; + + uint32_t fcoe_wwn_node_name_upper; + uint32_t fcoe_wwn_node_name_lower; + + uint32_t preserve_data; + #define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0) + #define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1) + #define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2) + #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3) + #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4) + #define MF_FUNC_CFG_PRESERVE_TX_BW (1<<5) +}; + +struct mf_cfg { + + struct shared_mf_cfg shared_mf_config; /* 0x4 */ + struct port_mf_cfg port_mf_config[NVM_PATH_MAX][PORT_MAX]; + /* 0x10*2=0x20 */ + /* for all chips, there are 8 mf functions */ + struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; /* 0x18 * 8 = 0xc0 */ + /* + * Extended configuration per function - this array does not exist and + * should not be accessed on 57711 + */ + struct func_ext_cfg func_ext_config[E1H_FUNC_MAX]; /* 0x28 * 8 = 0x140*/ +}; /* 0x224 */ + +/**************************************************************************** + * Shared Memory Region * + ****************************************************************************/ +struct shmem_region { /* SharedMem Offset (size) */ + + uint32_t validity_map[PORT_MAX]; /* 0x0 (4*2 = 0x8) */ + #define SHR_MEM_FORMAT_REV_MASK 0xff000000 + #define 
SHR_MEM_FORMAT_REV_ID ('A'<<24)
+	/* validity bits */
+	#define SHR_MEM_VALIDITY_PCI_CFG 0x00100000
+	#define SHR_MEM_VALIDITY_MB 0x00200000
+	#define SHR_MEM_VALIDITY_DEV_INFO 0x00400000
+	#define SHR_MEM_VALIDITY_RESERVED 0x00000007
+	/* One licensing bit should be set */
+	#define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
+	#define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
+	#define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
+	#define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
+	/* Active MFW */
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK 0x000001c0
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI 0x00000040
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP 0x00000080
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI 0x000000c0
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
+
+	struct shm_dev_info dev_info; /* 0x8 (0x438) */
+
+	license_key_t drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */
+
+	/* FW information (for internal FW use) */
+	uint32_t fw_info_fio_offset; /* 0x4a8 (0x4) */
+	struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */
+
+	struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */
+
+
+#ifdef BMAPI
+	/* This is a variable length array */
+	/* the number of functions depends on the chip type */
+	struct drv_func_mb func_mb[1]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */
+#else
+	/* the number of functions depends on the chip type */
+	struct drv_func_mb func_mb[]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */
+#endif /* BMAPI */
+
+}; /* 57711 = 0x7E4 | 57712 = 0x734 */
+
+/****************************************************************************
+ * Shared Memory 2 Region                                                   *
+ ****************************************************************************/
+/* The fw_flr_ack is actually built in the following way:                   */
+/* 8 bit:  PF ack                                                           */
+/* 64 bit: VF ack                                                           */
+/* 8 bit:  ios_dis_ack                                                      */
+/* In order to maintain endianness in the mailbox hsi, we want to keep using */
+/* uint32_t. The fw must have the VF right after the PF since this is how it */
+/* accesses arrays (it always expects the VF to reside after the PF, which  */
+/* makes the calculation much easier for it).                               */
+/* In order to satisfy both constraints, and keep the struct small, the code */
+/* will abuse the structure defined here to achieve the actual partition    */
+/* above                                                                    */
+/****************************************************************************/
+struct fw_flr_ack {
+	uint32_t pf_ack;
+	uint32_t vf_ack[1];
+	uint32_t iov_dis_ack;
+};
+
+struct fw_flr_mb {
+	uint32_t aggint;
+	uint32_t opgen_addr;
+	struct fw_flr_ack ack;
+};
+
+struct eee_remote_vals {
+	uint32_t tx_tw;
+	uint32_t rx_tw;
+};
+
+/**** SUPPORT FOR SHMEM ARRAYS ***
+ * The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to
+ * define arrays with storage types smaller than unsigned dwords.
+ * The macros below add generic support for SHMEM arrays with numeric elements
+ * that can span 2, 4, 8 or 16 bits. The array underlying type is a 32 bit
+ * dword array with individual bit-field elements accessed using shifts and
+ * masks.
+ *
+ */
+
+/* eb is the bitwidth of a single element */
+#define SHMEM_ARRAY_MASK(eb) ((1<<(eb))-1)
+#define SHMEM_ARRAY_ENTRY(i, eb) ((i)/(32/(eb)))
+
+/* the bit-position macro allows the user to flip the order of the array's
+ * elements on a per byte or word boundary.
+ *
+ * Example: an array with 8 entries, each 4 bits wide. This array will fit into
+ * a single dword. The diagrams below show the array order of the nibbles.
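+ * (Editorial illustration, not part of the original comment: plugging
+ * numbers into SHMEM_ARRAY_BITPOS below, element 1 of such an array with
+ * a per-byte flip, i.e. SHMEM_ARRAY_BITPOS(1, 4, 8), evaluates to
+ * ((4 - 1 - 0) * 8) + (1 * 4) = 28, so element 1 occupies bits 31:28,
+ * which matches the second diagram below.)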
+ * + * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the stadard ordering: + * + * | | | | + * 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + * | | | | + * + * SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte: + * + * | | | | + * 1 | 0 | 3 | 2 | 5 | 4 | 7 | 6 | + * | | | | + * + * SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word: + * + * | | | | + * 3 | 2 | 1 | 0 | 7 | 6 | 5 | 4 | + * | | | | + */ +#define SHMEM_ARRAY_BITPOS(i, eb, fb) \ + ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \ + (((i)%((fb)/(eb))) * (eb))) + +#define SHMEM_ARRAY_GET(a, i, eb, fb) \ + ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \ + SHMEM_ARRAY_MASK(eb)) + +#define SHMEM_ARRAY_SET(a, i, eb, fb, val) \ +do { \ + a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \ + SHMEM_ARRAY_BITPOS(i, eb, fb)); \ + a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \ + SHMEM_ARRAY_BITPOS(i, eb, fb)); \ +} while (0) + + +/****START OF DCBX STRUCTURES DECLARATIONS****/ +#define DCBX_MAX_NUM_PRI_PG_ENTRIES 8 +#define DCBX_PRI_PG_BITWIDTH 4 +#define DCBX_PRI_PG_FBITS 8 +#define DCBX_PRI_PG_GET(a, i) \ + SHMEM_ARRAY_GET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS) +#define DCBX_PRI_PG_SET(a, i, val) \ + SHMEM_ARRAY_SET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS, val) +#define DCBX_MAX_NUM_PG_BW_ENTRIES 8 +#define DCBX_BW_PG_BITWIDTH 8 +#define DCBX_PG_BW_GET(a, i) \ + SHMEM_ARRAY_GET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH) +#define DCBX_PG_BW_SET(a, i, val) \ + SHMEM_ARRAY_SET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH, val) +#define DCBX_STRICT_PRI_PG 15 +#define DCBX_MAX_APP_PROTOCOL 16 +#define DCBX_MAX_APP_LOCAL 32 +#define FCOE_APP_IDX 0 +#define ISCSI_APP_IDX 1 +#define PREDEFINED_APP_IDX_MAX 2 + + +/* Big/Little endian have the same representation. */ +struct dcbx_ets_feature { + /* + * For Admin MIB - is this feature supported by the + * driver | For Local MIB - should this feature be enabled. 
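+	 * (Editorial note: the pg_bw_tbl[] and pri_pg_tbl[] words below are
+	 * packed arrays of the kind described above. The intended access
+	 * pattern - an assumption based on the accessor macros, not stated
+	 * here - is via the helpers, e.g.
+	 *   bw = DCBX_PG_BW_GET(ets->pg_bw_tbl, pg);
+	 *   DCBX_PRI_PG_SET(ets->pri_pg_tbl, pri, pg);
+	 * where ets is a pointer to this structure, rather than direct
+	 * indexing of the underlying dwords.)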
+ */ + uint32_t enabled; + uint32_t pg_bw_tbl[2]; + uint32_t pri_pg_tbl[1]; +}; + +/* Driver structure in LE */ +struct dcbx_pfc_feature { +#ifdef __BIG_ENDIAN + uint8_t pri_en_bitmap; + #define DCBX_PFC_PRI_0 0x01 + #define DCBX_PFC_PRI_1 0x02 + #define DCBX_PFC_PRI_2 0x04 + #define DCBX_PFC_PRI_3 0x08 + #define DCBX_PFC_PRI_4 0x10 + #define DCBX_PFC_PRI_5 0x20 + #define DCBX_PFC_PRI_6 0x40 + #define DCBX_PFC_PRI_7 0x80 + uint8_t pfc_caps; + uint8_t reserved; + uint8_t enabled; +#elif defined(__LITTLE_ENDIAN) + uint8_t enabled; + uint8_t reserved; + uint8_t pfc_caps; + uint8_t pri_en_bitmap; + #define DCBX_PFC_PRI_0 0x01 + #define DCBX_PFC_PRI_1 0x02 + #define DCBX_PFC_PRI_2 0x04 + #define DCBX_PFC_PRI_3 0x08 + #define DCBX_PFC_PRI_4 0x10 + #define DCBX_PFC_PRI_5 0x20 + #define DCBX_PFC_PRI_6 0x40 + #define DCBX_PFC_PRI_7 0x80 +#endif +}; + +struct dcbx_app_priority_entry { +#ifdef __BIG_ENDIAN + uint16_t app_id; + uint8_t pri_bitmap; + uint8_t appBitfield; + #define DCBX_APP_ENTRY_VALID 0x01 + #define DCBX_APP_ENTRY_SF_MASK 0x30 + #define DCBX_APP_ENTRY_SF_SHIFT 4 + #define DCBX_APP_SF_ETH_TYPE 0x10 + #define DCBX_APP_SF_PORT 0x20 +#elif defined(__LITTLE_ENDIAN) + uint8_t appBitfield; + #define DCBX_APP_ENTRY_VALID 0x01 + #define DCBX_APP_ENTRY_SF_MASK 0x30 + #define DCBX_APP_ENTRY_SF_SHIFT 4 + #define DCBX_APP_SF_ETH_TYPE 0x10 + #define DCBX_APP_SF_PORT 0x20 + uint8_t pri_bitmap; + uint16_t app_id; +#endif +}; + + +/* FW structure in BE */ +struct dcbx_app_priority_feature { +#ifdef __BIG_ENDIAN + uint8_t reserved; + uint8_t default_pri; + uint8_t tc_supported; + uint8_t enabled; +#elif defined(__LITTLE_ENDIAN) + uint8_t enabled; + uint8_t tc_supported; + uint8_t default_pri; + uint8_t reserved; +#endif + struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL]; +}; + +/* FW structure in BE */ +struct dcbx_features { + /* PG feature */ + struct dcbx_ets_feature ets; + /* PFC feature */ + struct dcbx_pfc_feature pfc; + /* APP feature */ + struct dcbx_app_priority_feature app; +}; + +/* LLDP protocol parameters */ +/* FW structure in BE */ +struct lldp_params { +#ifdef __BIG_ENDIAN + uint8_t msg_fast_tx_interval; + uint8_t msg_tx_hold; + uint8_t msg_tx_interval; + uint8_t admin_status; + #define LLDP_TX_ONLY 0x01 + #define LLDP_RX_ONLY 0x02 + #define LLDP_TX_RX 0x03 + #define LLDP_DISABLED 0x04 + uint8_t reserved1; + uint8_t tx_fast; + uint8_t tx_crd_max; + uint8_t tx_crd; +#elif defined(__LITTLE_ENDIAN) + uint8_t admin_status; + #define LLDP_TX_ONLY 0x01 + #define LLDP_RX_ONLY 0x02 + #define LLDP_TX_RX 0x03 + #define LLDP_DISABLED 0x04 + uint8_t msg_tx_interval; + uint8_t msg_tx_hold; + uint8_t msg_fast_tx_interval; + uint8_t tx_crd; + uint8_t tx_crd_max; + uint8_t tx_fast; + uint8_t reserved1; +#endif + #define REM_CHASSIS_ID_STAT_LEN 4 + #define REM_PORT_ID_STAT_LEN 4 + /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */ + uint32_t peer_chassis_id[REM_CHASSIS_ID_STAT_LEN]; + /* Holds remote Port ID TLV header, subtype and 9B of payload. */ + uint32_t peer_port_id[REM_PORT_ID_STAT_LEN]; +}; + +struct lldp_dcbx_stat { + #define LOCAL_CHASSIS_ID_STAT_LEN 2 + #define LOCAL_PORT_ID_STAT_LEN 2 + /* Holds local Chassis ID 8B payload of constant subtype 4. */ + uint32_t local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN]; + /* Holds local Port ID 8B payload of constant subtype 3. */ + uint32_t local_port_id[LOCAL_PORT_ID_STAT_LEN]; + /* Number of DCBX frames transmitted. */ + uint32_t num_tx_dcbx_pkts; + /* Number of DCBX frames received. 
*/ + uint32_t num_rx_dcbx_pkts; +}; + +/* ADMIN MIB - DCBX local machine default configuration. */ +struct lldp_admin_mib { + uint32_t ver_cfg_flags; + #define DCBX_ETS_CONFIG_TX_ENABLED 0x00000001 + #define DCBX_PFC_CONFIG_TX_ENABLED 0x00000002 + #define DCBX_APP_CONFIG_TX_ENABLED 0x00000004 + #define DCBX_ETS_RECO_TX_ENABLED 0x00000008 + #define DCBX_ETS_RECO_VALID 0x00000010 + #define DCBX_ETS_WILLING 0x00000020 + #define DCBX_PFC_WILLING 0x00000040 + #define DCBX_APP_WILLING 0x00000080 + #define DCBX_VERSION_CEE 0x00000100 + #define DCBX_VERSION_IEEE 0x00000200 + #define DCBX_DCBX_ENABLED 0x00000400 + #define DCBX_CEE_VERSION_MASK 0x0000f000 + #define DCBX_CEE_VERSION_SHIFT 12 + #define DCBX_CEE_MAX_VERSION_MASK 0x000f0000 + #define DCBX_CEE_MAX_VERSION_SHIFT 16 + struct dcbx_features features; +}; + +/* REMOTE MIB - remote machine DCBX configuration. */ +struct lldp_remote_mib { + uint32_t prefix_seq_num; + uint32_t flags; + #define DCBX_ETS_TLV_RX 0x00000001 + #define DCBX_PFC_TLV_RX 0x00000002 + #define DCBX_APP_TLV_RX 0x00000004 + #define DCBX_ETS_RX_ERROR 0x00000010 + #define DCBX_PFC_RX_ERROR 0x00000020 + #define DCBX_APP_RX_ERROR 0x00000040 + #define DCBX_ETS_REM_WILLING 0x00000100 + #define DCBX_PFC_REM_WILLING 0x00000200 + #define DCBX_APP_REM_WILLING 0x00000400 + #define DCBX_REMOTE_ETS_RECO_VALID 0x00001000 + #define DCBX_REMOTE_MIB_VALID 0x00002000 + struct dcbx_features features; + uint32_t suffix_seq_num; +}; + +/* LOCAL MIB - operational DCBX configuration - transmitted on Tx LLDPDU. */ +struct lldp_local_mib { + uint32_t prefix_seq_num; + /* Indicates if there is mismatch with negotiation results. */ + uint32_t error; + #define DCBX_LOCAL_ETS_ERROR 0x00000001 + #define DCBX_LOCAL_PFC_ERROR 0x00000002 + #define DCBX_LOCAL_APP_ERROR 0x00000004 + #define DCBX_LOCAL_PFC_MISMATCH 0x00000010 + #define DCBX_LOCAL_APP_MISMATCH 0x00000020 + #define DCBX_REMOTE_MIB_ERROR 0x00000040 + #define DCBX_REMOTE_ETS_TLV_NOT_FOUND 0x00000080 + #define DCBX_REMOTE_PFC_TLV_NOT_FOUND 0x00000100 + #define DCBX_REMOTE_APP_TLV_NOT_FOUND 0x00000200 + struct dcbx_features features; + uint32_t suffix_seq_num; +}; + +struct lldp_local_mib_ext { + uint32_t prefix_seq_num; + /* APP TLV extension - 16 more entries for negotiation results*/ + struct dcbx_app_priority_entry app_pri_tbl_ext[DCBX_MAX_APP_PROTOCOL]; + uint32_t suffix_seq_num; +}; +/***END OF DCBX STRUCTURES DECLARATIONS***/ + +/***********************************************************/ +/* Elink section */ +/***********************************************************/ +#define SHMEM_LINK_CONFIG_SIZE 2 +struct shmem_lfa { + uint32_t req_duplex; + #define REQ_DUPLEX_PHY0_MASK 0x0000ffff + #define REQ_DUPLEX_PHY0_SHIFT 0 + #define REQ_DUPLEX_PHY1_MASK 0xffff0000 + #define REQ_DUPLEX_PHY1_SHIFT 16 + uint32_t req_flow_ctrl; + #define REQ_FLOW_CTRL_PHY0_MASK 0x0000ffff + #define REQ_FLOW_CTRL_PHY0_SHIFT 0 + #define REQ_FLOW_CTRL_PHY1_MASK 0xffff0000 + #define REQ_FLOW_CTRL_PHY1_SHIFT 16 + uint32_t req_line_speed; /* Also determine AutoNeg */ + #define REQ_LINE_SPD_PHY0_MASK 0x0000ffff + #define REQ_LINE_SPD_PHY0_SHIFT 0 + #define REQ_LINE_SPD_PHY1_MASK 0xffff0000 + #define REQ_LINE_SPD_PHY1_SHIFT 16 + uint32_t speed_cap_mask[SHMEM_LINK_CONFIG_SIZE]; + uint32_t additional_config; + #define REQ_FC_AUTO_ADV_MASK 0x0000ffff + #define REQ_FC_AUTO_ADV0_SHIFT 0 + #define NO_LFA_DUE_TO_DCC_MASK 0x00010000 + uint32_t lfa_sts; + #define LFA_LINK_FLAP_REASON_OFFSET 0 + #define LFA_LINK_FLAP_REASON_MASK 0x000000ff + #define LFA_LINK_DOWN 0x1 + #define 
LFA_LOOPBACK_ENABLED 0x2 + #define LFA_DUPLEX_MISMATCH 0x3 + #define LFA_MFW_IS_TOO_OLD 0x4 + #define LFA_LINK_SPEED_MISMATCH 0x5 + #define LFA_FLOW_CTRL_MISMATCH 0x6 + #define LFA_SPEED_CAP_MISMATCH 0x7 + #define LFA_DCC_LFA_DISABLED 0x8 + #define LFA_EEE_MISMATCH 0x9 + + #define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8 + #define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00 + + #define LINK_FLAP_COUNT_OFFSET 16 + #define LINK_FLAP_COUNT_MASK 0x00ff0000 + + #define LFA_FLAGS_MASK 0xff000000 + #define SHMEM_LFA_DONT_CLEAR_STAT (1<<24) + +}; + +struct shmem2_region { + + uint32_t size; /* 0x0000 */ + + uint32_t dcc_support; /* 0x0004 */ + #define SHMEM_DCC_SUPPORT_NONE 0x00000000 + #define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV 0x00000001 + #define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV 0x00000004 + #define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV 0x00000008 + #define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040 + #define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080 + + uint32_t ext_phy_fw_version2[PORT_MAX]; /* 0x0008 */ + /* + * For backwards compatibility, if the mf_cfg_addr does not exist + * (the size filed is smaller than 0xc) the mf_cfg resides at the + * end of struct shmem_region + */ + uint32_t mf_cfg_addr; /* 0x0010 */ + #define SHMEM_MF_CFG_ADDR_NONE 0x00000000 + + struct fw_flr_mb flr_mb; /* 0x0014 */ + uint32_t dcbx_lldp_params_offset; /* 0x0028 */ + #define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000 + uint32_t dcbx_neg_res_offset; /* 0x002c */ + #define SHMEM_DCBX_NEG_RES_NONE 0x00000000 + uint32_t dcbx_remote_mib_offset; /* 0x0030 */ + #define SHMEM_DCBX_REMOTE_MIB_NONE 0x00000000 + /* + * The other shmemX_base_addr holds the other path's shmem address + * required for example in case of common phy init, or for path1 to know + * the address of mcp debug trace which is located in offset from shmem + * of path0 + */ + uint32_t other_shmem_base_addr; /* 0x0034 */ + uint32_t other_shmem2_base_addr; /* 0x0038 */ + /* + * mcp_vf_disabled is set by the MCP to indicate the driver about VFs + * which were disabled/flred + */ + uint32_t mcp_vf_disabled[E2_VF_MAX / 32]; /* 0x003c */ + + /* + * drv_ack_vf_disabled is set by the PF driver to ack handled disabled + * VFs + */ + uint32_t drv_ack_vf_disabled[E2_FUNC_MAX][E2_VF_MAX / 32]; /* 0x0044 */ + + uint32_t dcbx_lldp_dcbx_stat_offset; /* 0x0064 */ + #define SHMEM_LLDP_DCBX_STAT_NONE 0x00000000 + + /* + * edebug_driver_if field is used to transfer messages between edebug + * app to the driver through shmem2. + * + * message format: + * bits 0-2 - function number / instance of driver to perform request + * bits 3-5 - op code / is_ack? 
+ * bits 6-63 - data + */ + uint32_t edebug_driver_if[2]; /* 0x0068 */ + #define EDEBUG_DRIVER_IF_OP_CODE_GET_PHYS_ADDR 1 + #define EDEBUG_DRIVER_IF_OP_CODE_GET_BUS_ADDR 2 + #define EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT 3 + + uint32_t nvm_retain_bitmap_addr; /* 0x0070 */ + + /* afex support of that driver */ + uint32_t afex_driver_support; /* 0x0074 */ + #define SHMEM_AFEX_VERSION_MASK 0x100f + #define SHMEM_AFEX_SUPPORTED_VERSION_ONE 0x1001 + #define SHMEM_AFEX_REDUCED_DRV_LOADED 0x8000 + + /* driver receives addr in scratchpad to which it should respond */ + uint32_t afex_scratchpad_addr_to_write[E2_FUNC_MAX]; + + /* + * generic params from MCP to driver (value depends on the msg sent + * to driver + */ + uint32_t afex_param1_to_driver[E2_FUNC_MAX]; /* 0x0088 */ + uint32_t afex_param2_to_driver[E2_FUNC_MAX]; /* 0x0098 */ + + uint32_t swim_base_addr; /* 0x0108 */ + uint32_t swim_funcs; + uint32_t swim_main_cb; + + /* + * bitmap notifying which VIF profiles stored in nvram are enabled by + * switch + */ + uint32_t afex_profiles_enabled[2]; + + /* generic flags controlled by the driver */ + uint32_t drv_flags; + #define DRV_FLAGS_DCB_CONFIGURED 0x0 + #define DRV_FLAGS_DCB_CONFIGURATION_ABORTED 0x1 + #define DRV_FLAGS_DCB_MFW_CONFIGURED 0x2 + + #define DRV_FLAGS_PORT_MASK ((1 << DRV_FLAGS_DCB_CONFIGURED) | \ + (1 << DRV_FLAGS_DCB_CONFIGURATION_ABORTED) | \ + (1 << DRV_FLAGS_DCB_MFW_CONFIGURED)) + /* Port offset*/ + #define DRV_FLAGS_P0_OFFSET 0 + #define DRV_FLAGS_P1_OFFSET 16 + #define DRV_FLAGS_GET_PORT_OFFSET(_port) ((0 == _port) ? \ + DRV_FLAGS_P0_OFFSET : \ + DRV_FLAGS_P1_OFFSET) + + #define DRV_FLAGS_GET_PORT_MASK(_port) (DRV_FLAGS_PORT_MASK << \ + DRV_FLAGS_GET_PORT_OFFSET(_port)) + + #define DRV_FLAGS_FILED_BY_PORT(_field_bit, _port) (1 << ( \ + (_field_bit) + DRV_FLAGS_GET_PORT_OFFSET(_port))) + + /* pointer to extended dev_info shared data copied from nvm image */ + uint32_t extended_dev_info_shared_addr; + uint32_t ncsi_oem_data_addr; + + uint32_t sensor_data_addr; + uint32_t buffer_block_addr; + uint32_t sensor_data_req_update_interval; + uint32_t temperature_in_half_celsius; + uint32_t glob_struct_in_host; + + uint32_t dcbx_neg_res_ext_offset; + #define SHMEM_DCBX_NEG_RES_EXT_NONE 0x00000000 + + uint32_t drv_capabilities_flag[E2_FUNC_MAX]; + #define DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED 0x00000001 + #define DRV_FLAGS_CAPABILITIES_LOADED_L2 0x00000002 + #define DRV_FLAGS_CAPABILITIES_LOADED_FCOE 0x00000004 + #define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI 0x00000008 + + uint32_t extended_dev_info_shared_cfg_size; + + uint32_t dcbx_en[PORT_MAX]; + + /* The offset points to the multi threaded meta structure */ + uint32_t multi_thread_data_offset; + + /* address of DMAable host address holding values from the drivers */ + uint32_t drv_info_host_addr_lo; + uint32_t drv_info_host_addr_hi; + + /* general values written by the MFW (such as current version) */ + uint32_t drv_info_control; + #define DRV_INFO_CONTROL_VER_MASK 0x000000ff + #define DRV_INFO_CONTROL_VER_SHIFT 0 + #define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00 + #define DRV_INFO_CONTROL_OP_CODE_SHIFT 8 + uint32_t ibft_host_addr; /* initialized by option ROM */ + + struct eee_remote_vals eee_remote_vals[PORT_MAX]; + uint32_t pf_allocation[E2_FUNC_MAX]; + #define PF_ALLOACTION_MSIX_VECTORS_MASK 0x000000ff /* real value, as PCI config space can show only maximum of 64 vectors */ + #define PF_ALLOACTION_MSIX_VECTORS_SHIFT 0 + + /* the status of EEE auto-negotiation + * bits 15:0 the configured tx-lpi entry timer value. 
Depends on bit 31. + * bits 19:16 the supported modes for EEE. + * bits 23:20 the speeds advertised for EEE. + * bits 27:24 the speeds the Link partner advertised for EEE. + * The supported/adv. modes in bits 27:19 originate from the + * SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed). + * bit 28 when 1'b1 EEE was requested. + * bit 29 when 1'b1 tx lpi was requested. + * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted iff + * 30:29 are 2'b11. + * bit 31 when 1'b0 bits 15:0 contain a PORT_FEAT_CFG_EEE_ define as + * value. When 1'b1 those bits contains a value times 16 microseconds. + */ + uint32_t eee_status[PORT_MAX]; + #define SHMEM_EEE_TIMER_MASK 0x0000ffff + #define SHMEM_EEE_SUPPORTED_MASK 0x000f0000 + #define SHMEM_EEE_SUPPORTED_SHIFT 16 + #define SHMEM_EEE_ADV_STATUS_MASK 0x00f00000 + #define SHMEM_EEE_100M_ADV (1<<0) + #define SHMEM_EEE_1G_ADV (1U<<1) + #define SHMEM_EEE_10G_ADV (1<<2) + #define SHMEM_EEE_ADV_STATUS_SHIFT 20 + #define SHMEM_EEE_LP_ADV_STATUS_MASK 0x0f000000 + #define SHMEM_EEE_LP_ADV_STATUS_SHIFT 24 + #define SHMEM_EEE_REQUESTED_BIT 0x10000000 + #define SHMEM_EEE_LPI_REQUESTED_BIT 0x20000000 + #define SHMEM_EEE_ACTIVE_BIT 0x40000000 + #define SHMEM_EEE_TIME_OUTPUT_BIT 0x80000000 + + uint32_t sizeof_port_stats; + + /* Link Flap Avoidance */ + uint32_t lfa_host_addr[PORT_MAX]; + + /* External PHY temperature in deg C. */ + uint32_t extphy_temps_in_celsius; + #define EXTPHY1_TEMP_MASK 0x0000ffff + #define EXTPHY1_TEMP_SHIFT 0 + + uint32_t ocdata_info_addr; /* Offset 0x148 */ + uint32_t drv_func_info_addr; /* Offset 0x14C */ + uint32_t drv_func_info_size; /* Offset 0x150 */ + uint32_t link_attr_sync[PORT_MAX]; /* Offset 0x154 */ + #define LINK_ATTR_SYNC_KR2_ENABLE (1<<0) +}; + + +struct emac_stats { + uint32_t rx_stat_ifhcinoctets; + uint32_t rx_stat_ifhcinbadoctets; + uint32_t rx_stat_etherstatsfragments; + uint32_t rx_stat_ifhcinucastpkts; + uint32_t rx_stat_ifhcinmulticastpkts; + uint32_t rx_stat_ifhcinbroadcastpkts; + uint32_t rx_stat_dot3statsfcserrors; + uint32_t rx_stat_dot3statsalignmenterrors; + uint32_t rx_stat_dot3statscarriersenseerrors; + uint32_t rx_stat_xonpauseframesreceived; + uint32_t rx_stat_xoffpauseframesreceived; + uint32_t rx_stat_maccontrolframesreceived; + uint32_t rx_stat_xoffstateentered; + uint32_t rx_stat_dot3statsframestoolong; + uint32_t rx_stat_etherstatsjabbers; + uint32_t rx_stat_etherstatsundersizepkts; + uint32_t rx_stat_etherstatspkts64octets; + uint32_t rx_stat_etherstatspkts65octetsto127octets; + uint32_t rx_stat_etherstatspkts128octetsto255octets; + uint32_t rx_stat_etherstatspkts256octetsto511octets; + uint32_t rx_stat_etherstatspkts512octetsto1023octets; + uint32_t rx_stat_etherstatspkts1024octetsto1522octets; + uint32_t rx_stat_etherstatspktsover1522octets; + + uint32_t rx_stat_falsecarriererrors; + + uint32_t tx_stat_ifhcoutoctets; + uint32_t tx_stat_ifhcoutbadoctets; + uint32_t tx_stat_etherstatscollisions; + uint32_t tx_stat_outxonsent; + uint32_t tx_stat_outxoffsent; + uint32_t tx_stat_flowcontroldone; + uint32_t tx_stat_dot3statssinglecollisionframes; + uint32_t tx_stat_dot3statsmultiplecollisionframes; + uint32_t tx_stat_dot3statsdeferredtransmissions; + uint32_t tx_stat_dot3statsexcessivecollisions; + uint32_t tx_stat_dot3statslatecollisions; + uint32_t tx_stat_ifhcoutucastpkts; + uint32_t tx_stat_ifhcoutmulticastpkts; + uint32_t tx_stat_ifhcoutbroadcastpkts; + uint32_t tx_stat_etherstatspkts64octets; + uint32_t tx_stat_etherstatspkts65octetsto127octets; + uint32_t 
tx_stat_etherstatspkts128octetsto255octets; + uint32_t tx_stat_etherstatspkts256octetsto511octets; + uint32_t tx_stat_etherstatspkts512octetsto1023octets; + uint32_t tx_stat_etherstatspkts1024octetsto1522octets; + uint32_t tx_stat_etherstatspktsover1522octets; + uint32_t tx_stat_dot3statsinternalmactransmiterrors; +}; + + +struct bmac1_stats { + uint32_t tx_stat_gtpkt_lo; + uint32_t tx_stat_gtpkt_hi; + uint32_t tx_stat_gtxpf_lo; + uint32_t tx_stat_gtxpf_hi; + uint32_t tx_stat_gtfcs_lo; + uint32_t tx_stat_gtfcs_hi; + uint32_t tx_stat_gtmca_lo; + uint32_t tx_stat_gtmca_hi; + uint32_t tx_stat_gtbca_lo; + uint32_t tx_stat_gtbca_hi; + uint32_t tx_stat_gtfrg_lo; + uint32_t tx_stat_gtfrg_hi; + uint32_t tx_stat_gtovr_lo; + uint32_t tx_stat_gtovr_hi; + uint32_t tx_stat_gt64_lo; + uint32_t tx_stat_gt64_hi; + uint32_t tx_stat_gt127_lo; + uint32_t tx_stat_gt127_hi; + uint32_t tx_stat_gt255_lo; + uint32_t tx_stat_gt255_hi; + uint32_t tx_stat_gt511_lo; + uint32_t tx_stat_gt511_hi; + uint32_t tx_stat_gt1023_lo; + uint32_t tx_stat_gt1023_hi; + uint32_t tx_stat_gt1518_lo; + uint32_t tx_stat_gt1518_hi; + uint32_t tx_stat_gt2047_lo; + uint32_t tx_stat_gt2047_hi; + uint32_t tx_stat_gt4095_lo; + uint32_t tx_stat_gt4095_hi; + uint32_t tx_stat_gt9216_lo; + uint32_t tx_stat_gt9216_hi; + uint32_t tx_stat_gt16383_lo; + uint32_t tx_stat_gt16383_hi; + uint32_t tx_stat_gtmax_lo; + uint32_t tx_stat_gtmax_hi; + uint32_t tx_stat_gtufl_lo; + uint32_t tx_stat_gtufl_hi; + uint32_t tx_stat_gterr_lo; + uint32_t tx_stat_gterr_hi; + uint32_t tx_stat_gtbyt_lo; + uint32_t tx_stat_gtbyt_hi; + + uint32_t rx_stat_gr64_lo; + uint32_t rx_stat_gr64_hi; + uint32_t rx_stat_gr127_lo; + uint32_t rx_stat_gr127_hi; + uint32_t rx_stat_gr255_lo; + uint32_t rx_stat_gr255_hi; + uint32_t rx_stat_gr511_lo; + uint32_t rx_stat_gr511_hi; + uint32_t rx_stat_gr1023_lo; + uint32_t rx_stat_gr1023_hi; + uint32_t rx_stat_gr1518_lo; + uint32_t rx_stat_gr1518_hi; + uint32_t rx_stat_gr2047_lo; + uint32_t rx_stat_gr2047_hi; + uint32_t rx_stat_gr4095_lo; + uint32_t rx_stat_gr4095_hi; + uint32_t rx_stat_gr9216_lo; + uint32_t rx_stat_gr9216_hi; + uint32_t rx_stat_gr16383_lo; + uint32_t rx_stat_gr16383_hi; + uint32_t rx_stat_grmax_lo; + uint32_t rx_stat_grmax_hi; + uint32_t rx_stat_grpkt_lo; + uint32_t rx_stat_grpkt_hi; + uint32_t rx_stat_grfcs_lo; + uint32_t rx_stat_grfcs_hi; + uint32_t rx_stat_grmca_lo; + uint32_t rx_stat_grmca_hi; + uint32_t rx_stat_grbca_lo; + uint32_t rx_stat_grbca_hi; + uint32_t rx_stat_grxcf_lo; + uint32_t rx_stat_grxcf_hi; + uint32_t rx_stat_grxpf_lo; + uint32_t rx_stat_grxpf_hi; + uint32_t rx_stat_grxuo_lo; + uint32_t rx_stat_grxuo_hi; + uint32_t rx_stat_grjbr_lo; + uint32_t rx_stat_grjbr_hi; + uint32_t rx_stat_grovr_lo; + uint32_t rx_stat_grovr_hi; + uint32_t rx_stat_grflr_lo; + uint32_t rx_stat_grflr_hi; + uint32_t rx_stat_grmeg_lo; + uint32_t rx_stat_grmeg_hi; + uint32_t rx_stat_grmeb_lo; + uint32_t rx_stat_grmeb_hi; + uint32_t rx_stat_grbyt_lo; + uint32_t rx_stat_grbyt_hi; + uint32_t rx_stat_grund_lo; + uint32_t rx_stat_grund_hi; + uint32_t rx_stat_grfrg_lo; + uint32_t rx_stat_grfrg_hi; + uint32_t rx_stat_grerb_lo; + uint32_t rx_stat_grerb_hi; + uint32_t rx_stat_grfre_lo; + uint32_t rx_stat_grfre_hi; + uint32_t rx_stat_gripj_lo; + uint32_t rx_stat_gripj_hi; +}; + +struct bmac2_stats { + uint32_t tx_stat_gtpk_lo; /* gtpok */ + uint32_t tx_stat_gtpk_hi; /* gtpok */ + uint32_t tx_stat_gtxpf_lo; /* gtpf */ + uint32_t tx_stat_gtxpf_hi; /* gtpf */ + uint32_t tx_stat_gtpp_lo; /* NEW BMAC2 */ + uint32_t tx_stat_gtpp_hi; /* NEW BMAC2 */ + 
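+	/*
+	 * Editorial note: as with bmac1_stats above, every BigMAC2 counter is
+	 * a 64-bit value exposed as a _lo/_hi pair of 32-bit words. A host
+	 * driver reading these would typically recombine them along the lines
+	 * of
+	 *   uint64_t gtpkt = ((uint64_t)stats->tx_stat_gtpkt1_hi << 32) |
+	 *                    stats->tx_stat_gtpkt1_lo;
+	 * (illustrative only; 'stats' is an assumed pointer to this structure).
+	 */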
uint32_t tx_stat_gtfcs_lo; + uint32_t tx_stat_gtfcs_hi; + uint32_t tx_stat_gtuca_lo; /* NEW BMAC2 */ + uint32_t tx_stat_gtuca_hi; /* NEW BMAC2 */ + uint32_t tx_stat_gtmca_lo; + uint32_t tx_stat_gtmca_hi; + uint32_t tx_stat_gtbca_lo; + uint32_t tx_stat_gtbca_hi; + uint32_t tx_stat_gtovr_lo; + uint32_t tx_stat_gtovr_hi; + uint32_t tx_stat_gtfrg_lo; + uint32_t tx_stat_gtfrg_hi; + uint32_t tx_stat_gtpkt1_lo; /* gtpkt */ + uint32_t tx_stat_gtpkt1_hi; /* gtpkt */ + uint32_t tx_stat_gt64_lo; + uint32_t tx_stat_gt64_hi; + uint32_t tx_stat_gt127_lo; + uint32_t tx_stat_gt127_hi; + uint32_t tx_stat_gt255_lo; + uint32_t tx_stat_gt255_hi; + uint32_t tx_stat_gt511_lo; + uint32_t tx_stat_gt511_hi; + uint32_t tx_stat_gt1023_lo; + uint32_t tx_stat_gt1023_hi; + uint32_t tx_stat_gt1518_lo; + uint32_t tx_stat_gt1518_hi; + uint32_t tx_stat_gt2047_lo; + uint32_t tx_stat_gt2047_hi; + uint32_t tx_stat_gt4095_lo; + uint32_t tx_stat_gt4095_hi; + uint32_t tx_stat_gt9216_lo; + uint32_t tx_stat_gt9216_hi; + uint32_t tx_stat_gt16383_lo; + uint32_t tx_stat_gt16383_hi; + uint32_t tx_stat_gtmax_lo; + uint32_t tx_stat_gtmax_hi; + uint32_t tx_stat_gtufl_lo; + uint32_t tx_stat_gtufl_hi; + uint32_t tx_stat_gterr_lo; + uint32_t tx_stat_gterr_hi; + uint32_t tx_stat_gtbyt_lo; + uint32_t tx_stat_gtbyt_hi; + + uint32_t rx_stat_gr64_lo; + uint32_t rx_stat_gr64_hi; + uint32_t rx_stat_gr127_lo; + uint32_t rx_stat_gr127_hi; + uint32_t rx_stat_gr255_lo; + uint32_t rx_stat_gr255_hi; + uint32_t rx_stat_gr511_lo; + uint32_t rx_stat_gr511_hi; + uint32_t rx_stat_gr1023_lo; + uint32_t rx_stat_gr1023_hi; + uint32_t rx_stat_gr1518_lo; + uint32_t rx_stat_gr1518_hi; + uint32_t rx_stat_gr2047_lo; + uint32_t rx_stat_gr2047_hi; + uint32_t rx_stat_gr4095_lo; + uint32_t rx_stat_gr4095_hi; + uint32_t rx_stat_gr9216_lo; + uint32_t rx_stat_gr9216_hi; + uint32_t rx_stat_gr16383_lo; + uint32_t rx_stat_gr16383_hi; + uint32_t rx_stat_grmax_lo; + uint32_t rx_stat_grmax_hi; + uint32_t rx_stat_grpkt_lo; + uint32_t rx_stat_grpkt_hi; + uint32_t rx_stat_grfcs_lo; + uint32_t rx_stat_grfcs_hi; + uint32_t rx_stat_gruca_lo; + uint32_t rx_stat_gruca_hi; + uint32_t rx_stat_grmca_lo; + uint32_t rx_stat_grmca_hi; + uint32_t rx_stat_grbca_lo; + uint32_t rx_stat_grbca_hi; + uint32_t rx_stat_grxpf_lo; /* grpf */ + uint32_t rx_stat_grxpf_hi; /* grpf */ + uint32_t rx_stat_grpp_lo; + uint32_t rx_stat_grpp_hi; + uint32_t rx_stat_grxuo_lo; /* gruo */ + uint32_t rx_stat_grxuo_hi; /* gruo */ + uint32_t rx_stat_grjbr_lo; + uint32_t rx_stat_grjbr_hi; + uint32_t rx_stat_grovr_lo; + uint32_t rx_stat_grovr_hi; + uint32_t rx_stat_grxcf_lo; /* grcf */ + uint32_t rx_stat_grxcf_hi; /* grcf */ + uint32_t rx_stat_grflr_lo; + uint32_t rx_stat_grflr_hi; + uint32_t rx_stat_grpok_lo; + uint32_t rx_stat_grpok_hi; + uint32_t rx_stat_grmeg_lo; + uint32_t rx_stat_grmeg_hi; + uint32_t rx_stat_grmeb_lo; + uint32_t rx_stat_grmeb_hi; + uint32_t rx_stat_grbyt_lo; + uint32_t rx_stat_grbyt_hi; + uint32_t rx_stat_grund_lo; + uint32_t rx_stat_grund_hi; + uint32_t rx_stat_grfrg_lo; + uint32_t rx_stat_grfrg_hi; + uint32_t rx_stat_grerb_lo; /* grerrbyt */ + uint32_t rx_stat_grerb_hi; /* grerrbyt */ + uint32_t rx_stat_grfre_lo; /* grfrerr */ + uint32_t rx_stat_grfre_hi; /* grfrerr */ + uint32_t rx_stat_gripj_lo; + uint32_t rx_stat_gripj_hi; +}; + +struct mstat_stats { + struct { + /* OTE MSTAT on E3 has a bug where this register's contents are + * actually tx_gtxpok + tx_gtxpf + (possibly)tx_gtxpp + */ + uint32_t tx_gtxpok_lo; + uint32_t tx_gtxpok_hi; + uint32_t tx_gtxpf_lo; + uint32_t tx_gtxpf_hi; + uint32_t 
tx_gtxpp_lo; + uint32_t tx_gtxpp_hi; + uint32_t tx_gtfcs_lo; + uint32_t tx_gtfcs_hi; + uint32_t tx_gtuca_lo; + uint32_t tx_gtuca_hi; + uint32_t tx_gtmca_lo; + uint32_t tx_gtmca_hi; + uint32_t tx_gtgca_lo; + uint32_t tx_gtgca_hi; + uint32_t tx_gtpkt_lo; + uint32_t tx_gtpkt_hi; + uint32_t tx_gt64_lo; + uint32_t tx_gt64_hi; + uint32_t tx_gt127_lo; + uint32_t tx_gt127_hi; + uint32_t tx_gt255_lo; + uint32_t tx_gt255_hi; + uint32_t tx_gt511_lo; + uint32_t tx_gt511_hi; + uint32_t tx_gt1023_lo; + uint32_t tx_gt1023_hi; + uint32_t tx_gt1518_lo; + uint32_t tx_gt1518_hi; + uint32_t tx_gt2047_lo; + uint32_t tx_gt2047_hi; + uint32_t tx_gt4095_lo; + uint32_t tx_gt4095_hi; + uint32_t tx_gt9216_lo; + uint32_t tx_gt9216_hi; + uint32_t tx_gt16383_lo; + uint32_t tx_gt16383_hi; + uint32_t tx_gtufl_lo; + uint32_t tx_gtufl_hi; + uint32_t tx_gterr_lo; + uint32_t tx_gterr_hi; + uint32_t tx_gtbyt_lo; + uint32_t tx_gtbyt_hi; + uint32_t tx_collisions_lo; + uint32_t tx_collisions_hi; + uint32_t tx_singlecollision_lo; + uint32_t tx_singlecollision_hi; + uint32_t tx_multiplecollisions_lo; + uint32_t tx_multiplecollisions_hi; + uint32_t tx_deferred_lo; + uint32_t tx_deferred_hi; + uint32_t tx_excessivecollisions_lo; + uint32_t tx_excessivecollisions_hi; + uint32_t tx_latecollisions_lo; + uint32_t tx_latecollisions_hi; + } stats_tx; + + struct { + uint32_t rx_gr64_lo; + uint32_t rx_gr64_hi; + uint32_t rx_gr127_lo; + uint32_t rx_gr127_hi; + uint32_t rx_gr255_lo; + uint32_t rx_gr255_hi; + uint32_t rx_gr511_lo; + uint32_t rx_gr511_hi; + uint32_t rx_gr1023_lo; + uint32_t rx_gr1023_hi; + uint32_t rx_gr1518_lo; + uint32_t rx_gr1518_hi; + uint32_t rx_gr2047_lo; + uint32_t rx_gr2047_hi; + uint32_t rx_gr4095_lo; + uint32_t rx_gr4095_hi; + uint32_t rx_gr9216_lo; + uint32_t rx_gr9216_hi; + uint32_t rx_gr16383_lo; + uint32_t rx_gr16383_hi; + uint32_t rx_grpkt_lo; + uint32_t rx_grpkt_hi; + uint32_t rx_grfcs_lo; + uint32_t rx_grfcs_hi; + uint32_t rx_gruca_lo; + uint32_t rx_gruca_hi; + uint32_t rx_grmca_lo; + uint32_t rx_grmca_hi; + uint32_t rx_grbca_lo; + uint32_t rx_grbca_hi; + uint32_t rx_grxpf_lo; + uint32_t rx_grxpf_hi; + uint32_t rx_grxpp_lo; + uint32_t rx_grxpp_hi; + uint32_t rx_grxuo_lo; + uint32_t rx_grxuo_hi; + uint32_t rx_grovr_lo; + uint32_t rx_grovr_hi; + uint32_t rx_grxcf_lo; + uint32_t rx_grxcf_hi; + uint32_t rx_grflr_lo; + uint32_t rx_grflr_hi; + uint32_t rx_grpok_lo; + uint32_t rx_grpok_hi; + uint32_t rx_grbyt_lo; + uint32_t rx_grbyt_hi; + uint32_t rx_grund_lo; + uint32_t rx_grund_hi; + uint32_t rx_grfrg_lo; + uint32_t rx_grfrg_hi; + uint32_t rx_grerb_lo; + uint32_t rx_grerb_hi; + uint32_t rx_grfre_lo; + uint32_t rx_grfre_hi; + + uint32_t rx_alignmenterrors_lo; + uint32_t rx_alignmenterrors_hi; + uint32_t rx_falsecarrier_lo; + uint32_t rx_falsecarrier_hi; + uint32_t rx_llfcmsgcnt_lo; + uint32_t rx_llfcmsgcnt_hi; + } stats_rx; +}; + +union mac_stats { + struct emac_stats emac_stats; + struct bmac1_stats bmac1_stats; + struct bmac2_stats bmac2_stats; + struct mstat_stats mstat_stats; +}; + + +struct mac_stx { + /* in_bad_octets */ + uint32_t rx_stat_ifhcinbadoctets_hi; + uint32_t rx_stat_ifhcinbadoctets_lo; + + /* out_bad_octets */ + uint32_t tx_stat_ifhcoutbadoctets_hi; + uint32_t tx_stat_ifhcoutbadoctets_lo; + + /* crc_receive_errors */ + uint32_t rx_stat_dot3statsfcserrors_hi; + uint32_t rx_stat_dot3statsfcserrors_lo; + /* alignment_errors */ + uint32_t rx_stat_dot3statsalignmenterrors_hi; + uint32_t rx_stat_dot3statsalignmenterrors_lo; + /* carrier_sense_errors */ + uint32_t rx_stat_dot3statscarriersenseerrors_hi; + 
uint32_t rx_stat_dot3statscarriersenseerrors_lo; + /* false_carrier_detections */ + uint32_t rx_stat_falsecarriererrors_hi; + uint32_t rx_stat_falsecarriererrors_lo; + + /* runt_packets_received */ + uint32_t rx_stat_etherstatsundersizepkts_hi; + uint32_t rx_stat_etherstatsundersizepkts_lo; + /* jabber_packets_received */ + uint32_t rx_stat_dot3statsframestoolong_hi; + uint32_t rx_stat_dot3statsframestoolong_lo; + + /* error_runt_packets_received */ + uint32_t rx_stat_etherstatsfragments_hi; + uint32_t rx_stat_etherstatsfragments_lo; + /* error_jabber_packets_received */ + uint32_t rx_stat_etherstatsjabbers_hi; + uint32_t rx_stat_etherstatsjabbers_lo; + + /* control_frames_received */ + uint32_t rx_stat_maccontrolframesreceived_hi; + uint32_t rx_stat_maccontrolframesreceived_lo; + uint32_t rx_stat_mac_xpf_hi; + uint32_t rx_stat_mac_xpf_lo; + uint32_t rx_stat_mac_xcf_hi; + uint32_t rx_stat_mac_xcf_lo; + + /* xoff_state_entered */ + uint32_t rx_stat_xoffstateentered_hi; + uint32_t rx_stat_xoffstateentered_lo; + /* pause_xon_frames_received */ + uint32_t rx_stat_xonpauseframesreceived_hi; + uint32_t rx_stat_xonpauseframesreceived_lo; + /* pause_xoff_frames_received */ + uint32_t rx_stat_xoffpauseframesreceived_hi; + uint32_t rx_stat_xoffpauseframesreceived_lo; + /* pause_xon_frames_transmitted */ + uint32_t tx_stat_outxonsent_hi; + uint32_t tx_stat_outxonsent_lo; + /* pause_xoff_frames_transmitted */ + uint32_t tx_stat_outxoffsent_hi; + uint32_t tx_stat_outxoffsent_lo; + /* flow_control_done */ + uint32_t tx_stat_flowcontroldone_hi; + uint32_t tx_stat_flowcontroldone_lo; + + /* ether_stats_collisions */ + uint32_t tx_stat_etherstatscollisions_hi; + uint32_t tx_stat_etherstatscollisions_lo; + /* single_collision_transmit_frames */ + uint32_t tx_stat_dot3statssinglecollisionframes_hi; + uint32_t tx_stat_dot3statssinglecollisionframes_lo; + /* multiple_collision_transmit_frames */ + uint32_t tx_stat_dot3statsmultiplecollisionframes_hi; + uint32_t tx_stat_dot3statsmultiplecollisionframes_lo; + /* deferred_transmissions */ + uint32_t tx_stat_dot3statsdeferredtransmissions_hi; + uint32_t tx_stat_dot3statsdeferredtransmissions_lo; + /* excessive_collision_frames */ + uint32_t tx_stat_dot3statsexcessivecollisions_hi; + uint32_t tx_stat_dot3statsexcessivecollisions_lo; + /* late_collision_frames */ + uint32_t tx_stat_dot3statslatecollisions_hi; + uint32_t tx_stat_dot3statslatecollisions_lo; + + /* frames_transmitted_64_bytes */ + uint32_t tx_stat_etherstatspkts64octets_hi; + uint32_t tx_stat_etherstatspkts64octets_lo; + /* frames_transmitted_65_127_bytes */ + uint32_t tx_stat_etherstatspkts65octetsto127octets_hi; + uint32_t tx_stat_etherstatspkts65octetsto127octets_lo; + /* frames_transmitted_128_255_bytes */ + uint32_t tx_stat_etherstatspkts128octetsto255octets_hi; + uint32_t tx_stat_etherstatspkts128octetsto255octets_lo; + /* frames_transmitted_256_511_bytes */ + uint32_t tx_stat_etherstatspkts256octetsto511octets_hi; + uint32_t tx_stat_etherstatspkts256octetsto511octets_lo; + /* frames_transmitted_512_1023_bytes */ + uint32_t tx_stat_etherstatspkts512octetsto1023octets_hi; + uint32_t tx_stat_etherstatspkts512octetsto1023octets_lo; + /* frames_transmitted_1024_1522_bytes */ + uint32_t tx_stat_etherstatspkts1024octetsto1522octets_hi; + uint32_t tx_stat_etherstatspkts1024octetsto1522octets_lo; + /* frames_transmitted_1523_9022_bytes */ + uint32_t tx_stat_etherstatspktsover1522octets_hi; + uint32_t tx_stat_etherstatspktsover1522octets_lo; + uint32_t tx_stat_mac_2047_hi; + uint32_t tx_stat_mac_2047_lo; 
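+	/*
+	 * Editorial note: tx_stat_mac_2047/_4095/_9216/_16383 appear to mirror
+	 * the per-frame-size MAC counters above (gt2047, gt4095, ...); like the
+	 * rest of mac_stx they are 64-bit values stored as _hi/_lo word pairs
+	 * (note that in this structure the _hi word precedes the _lo word).
+	 */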
+ uint32_t tx_stat_mac_4095_hi; + uint32_t tx_stat_mac_4095_lo; + uint32_t tx_stat_mac_9216_hi; + uint32_t tx_stat_mac_9216_lo; + uint32_t tx_stat_mac_16383_hi; + uint32_t tx_stat_mac_16383_lo; + + /* internal_mac_transmit_errors */ + uint32_t tx_stat_dot3statsinternalmactransmiterrors_hi; + uint32_t tx_stat_dot3statsinternalmactransmiterrors_lo; + + /* if_out_discards */ + uint32_t tx_stat_mac_ufl_hi; + uint32_t tx_stat_mac_ufl_lo; +}; + + +#define MAC_STX_IDX_MAX 2 + +struct host_port_stats { + uint32_t host_port_stats_counter; + + struct mac_stx mac_stx[MAC_STX_IDX_MAX]; + + uint32_t brb_drop_hi; + uint32_t brb_drop_lo; + + uint32_t not_used; /* obsolete as of MFW 7.2.1 */ + + uint32_t pfc_frames_tx_hi; + uint32_t pfc_frames_tx_lo; + uint32_t pfc_frames_rx_hi; + uint32_t pfc_frames_rx_lo; + + uint32_t eee_lpi_count_hi; + uint32_t eee_lpi_count_lo; +}; + + +struct host_func_stats { + uint32_t host_func_stats_start; + + uint32_t total_bytes_received_hi; + uint32_t total_bytes_received_lo; + + uint32_t total_bytes_transmitted_hi; + uint32_t total_bytes_transmitted_lo; + + uint32_t total_unicast_packets_received_hi; + uint32_t total_unicast_packets_received_lo; + + uint32_t total_multicast_packets_received_hi; + uint32_t total_multicast_packets_received_lo; + + uint32_t total_broadcast_packets_received_hi; + uint32_t total_broadcast_packets_received_lo; + + uint32_t total_unicast_packets_transmitted_hi; + uint32_t total_unicast_packets_transmitted_lo; + + uint32_t total_multicast_packets_transmitted_hi; + uint32_t total_multicast_packets_transmitted_lo; + + uint32_t total_broadcast_packets_transmitted_hi; + uint32_t total_broadcast_packets_transmitted_lo; + + uint32_t valid_bytes_received_hi; + uint32_t valid_bytes_received_lo; + + uint32_t host_func_stats_end; +}; + +/* VIC definitions */ +#define VICSTATST_UIF_INDEX 2 + +/* + * stats collected for afex. + * NOTE: structure is exactly as expected to be received by the switch. + * order must remain exactly as is unless protocol changes ! + */ +struct afex_stats { + uint32_t tx_unicast_frames_hi; + uint32_t tx_unicast_frames_lo; + uint32_t tx_unicast_bytes_hi; + uint32_t tx_unicast_bytes_lo; + uint32_t tx_multicast_frames_hi; + uint32_t tx_multicast_frames_lo; + uint32_t tx_multicast_bytes_hi; + uint32_t tx_multicast_bytes_lo; + uint32_t tx_broadcast_frames_hi; + uint32_t tx_broadcast_frames_lo; + uint32_t tx_broadcast_bytes_hi; + uint32_t tx_broadcast_bytes_lo; + uint32_t tx_frames_discarded_hi; + uint32_t tx_frames_discarded_lo; + uint32_t tx_frames_dropped_hi; + uint32_t tx_frames_dropped_lo; + + uint32_t rx_unicast_frames_hi; + uint32_t rx_unicast_frames_lo; + uint32_t rx_unicast_bytes_hi; + uint32_t rx_unicast_bytes_lo; + uint32_t rx_multicast_frames_hi; + uint32_t rx_multicast_frames_lo; + uint32_t rx_multicast_bytes_hi; + uint32_t rx_multicast_bytes_lo; + uint32_t rx_broadcast_frames_hi; + uint32_t rx_broadcast_frames_lo; + uint32_t rx_broadcast_bytes_hi; + uint32_t rx_broadcast_bytes_lo; + uint32_t rx_frames_discarded_hi; + uint32_t rx_frames_discarded_lo; + uint32_t rx_frames_dropped_hi; + uint32_t rx_frames_dropped_lo; +}; + +/* To maintain backward compatibility between FW and drivers, new elements */ +/* should be added to the end of the structure. */ + +/* Per Port Statistics */ +struct port_info { + uint32_t size; /* size of this structure (i.e. sizeof(port_info)) */ + uint32_t enabled; /* 0 =Disabled, 1= Enabled */ + uint32_t link_speed; /* multiplier of 100Mb */ + uint32_t wol_support; /* WoL Support (i.e. 
Non-Zero if WOL supported ) */ + uint32_t flow_control; /* 802.3X Flow Ctrl. 0=off 1=RX 2=TX 3=RX&TX.*/ + uint32_t flex10; /* Flex10 mode enabled. non zero = yes */ + uint32_t rx_drops; /* RX Discards. Counters roll over, never reset */ + uint32_t rx_errors; /* RX Errors. Physical Port Stats L95, All PFs and NC-SI. + This is flagged by Consumer as an error. */ + uint32_t rx_uncast_lo; /* RX Unicast Packets. Free running counters: */ + uint32_t rx_uncast_hi; /* RX Unicast Packets. Free running counters: */ + uint32_t rx_mcast_lo; /* RX Multicast Packets */ + uint32_t rx_mcast_hi; /* RX Multicast Packets */ + uint32_t rx_bcast_lo; /* RX Broadcast Packets */ + uint32_t rx_bcast_hi; /* RX Broadcast Packets */ + uint32_t tx_uncast_lo; /* TX Unicast Packets */ + uint32_t tx_uncast_hi; /* TX Unicast Packets */ + uint32_t tx_mcast_lo; /* TX Multicast Packets */ + uint32_t tx_mcast_hi; /* TX Multicast Packets */ + uint32_t tx_bcast_lo; /* TX Broadcast Packets */ + uint32_t tx_bcast_hi; /* TX Broadcast Packets */ + uint32_t tx_errors; /* TX Errors */ + uint32_t tx_discards; /* TX Discards */ + uint32_t rx_frames_lo; /* RX Frames received */ + uint32_t rx_frames_hi; /* RX Frames received */ + uint32_t rx_bytes_lo; /* RX Bytes received */ + uint32_t rx_bytes_hi; /* RX Bytes received */ + uint32_t tx_frames_lo; /* TX Frames sent */ + uint32_t tx_frames_hi; /* TX Frames sent */ + uint32_t tx_bytes_lo; /* TX Bytes sent */ + uint32_t tx_bytes_hi; /* TX Bytes sent */ + uint32_t link_status; /* Port P Link Status. 1:0 bit for port enabled. + 1:1 bit for link good, + 2:1 Set if link changed between last poll. */ + uint32_t tx_pfc_frames_lo; /* PFC Frames sent. */ + uint32_t tx_pfc_frames_hi; /* PFC Frames sent. */ + uint32_t rx_pfc_frames_lo; /* PFC Frames Received. */ + uint32_t rx_pfc_frames_hi; /* PFC Frames Received. */ +}; + + +#define BNX2X_5710_FW_MAJOR_VERSION 7 +#define BNX2X_5710_FW_MINOR_VERSION 2 +#define BNX2X_5710_FW_REVISION_VERSION 51 +#define BNX2X_5710_FW_ENGINEERING_VERSION 0 +#define BNX2X_5710_FW_COMPILE_FLAGS 1 + + +/* + * attention bits $$KEEP_ENDIANNESS$$ + */ +struct atten_sp_status_block +{ + uint32_t attn_bits /* 16 bit of attention signal lines */; + uint32_t attn_bits_ack /* 16 bit of attention signal ack */; + uint8_t status_block_id /* status block id */; + uint8_t reserved0 /* resreved for padding */; + uint16_t attn_bits_index /* attention bits running index */; + uint32_t reserved1 /* resreved for padding */; +}; + + +/* + * The eth aggregative context of Cstorm + */ +struct cstorm_eth_ag_context +{ + uint32_t __reserved0[10]; +}; + + +/* + * dmae command structure + */ +struct dmae_command +{ + uint32_t opcode; +#define DMAE_COMMAND_SRC (0x1<<0) /* BitField opcode Whether the source is the PCIe or the GRC. 0- The source is the PCIe 1- The source is the GRC. 
*/ +#define DMAE_COMMAND_SRC_SHIFT 0 +#define DMAE_COMMAND_DST (0x3<<1) /* BitField opcode The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */ +#define DMAE_COMMAND_DST_SHIFT 1 +#define DMAE_COMMAND_C_DST (0x1<<3) /* BitField opcode The destination of the completion: 0-PCIe 1-GRC */ +#define DMAE_COMMAND_C_DST_SHIFT 3 +#define DMAE_COMMAND_C_TYPE_ENABLE (0x1<<4) /* BitField opcode Whether to write a completion word to the completion destination: 0-Do not write a completion word 1-Write the completion word */ +#define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4 +#define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1<<5) /* BitField opcode Whether to write a CRC word to the completion destination 0-Do not write a CRC word 1-Write a CRC word */ +#define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5 +#define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7<<6) /* BitField opcode The CRC word should be taken from the DMAE GRC space from address 9+X, where X is the value in these bits. */ +#define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6 +#define DMAE_COMMAND_ENDIANITY (0x3<<9) /* BitField opcode swapping mode. */ +#define DMAE_COMMAND_ENDIANITY_SHIFT 9 +#define DMAE_COMMAND_PORT (0x1<<11) /* BitField opcode Which network port ID to present to the PCI request interface */ +#define DMAE_COMMAND_PORT_SHIFT 11 +#define DMAE_COMMAND_CRC_RESET (0x1<<12) /* BitField opcode reset crc result */ +#define DMAE_COMMAND_CRC_RESET_SHIFT 12 +#define DMAE_COMMAND_SRC_RESET (0x1<<13) /* BitField opcode reset source address in next go */ +#define DMAE_COMMAND_SRC_RESET_SHIFT 13 +#define DMAE_COMMAND_DST_RESET (0x1<<14) /* BitField opcode reset dest address in next go */ +#define DMAE_COMMAND_DST_RESET_SHIFT 14 +#define DMAE_COMMAND_E1HVN (0x3<<15) /* BitField opcode vnic number E2 and onwards source vnic */ +#define DMAE_COMMAND_E1HVN_SHIFT 15 +#define DMAE_COMMAND_DST_VN (0x3<<17) /* BitField opcode E2 and onwards dest vnic */ +#define DMAE_COMMAND_DST_VN_SHIFT 17 +#define DMAE_COMMAND_C_FUNC (0x1<<19) /* BitField opcode E2 and onwards which function gets the completion src_vn(e1hvn)-0 dst_vn-1 */ +#define DMAE_COMMAND_C_FUNC_SHIFT 19 +#define DMAE_COMMAND_ERR_POLICY (0x3<<20) /* BitField opcode E2 and onwards what to do when theres a completion and a PCI error regular-0 error indication-1 no completion-2 */ +#define DMAE_COMMAND_ERR_POLICY_SHIFT 20 +#define DMAE_COMMAND_RESERVED0 (0x3FF<<22) /* BitField opcode */ +#define DMAE_COMMAND_RESERVED0_SHIFT 22 + uint32_t src_addr_lo /* source address low/grc address */; + uint32_t src_addr_hi /* source address hi */; + uint32_t dst_addr_lo /* dest address low/grc address */; + uint32_t dst_addr_hi /* dest address hi */; +#if defined(__BIG_ENDIAN) + uint16_t opcode_iov; +#define DMAE_COMMAND_SRC_VFID (0x3F<<0) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility source VF id */ +#define DMAE_COMMAND_SRC_VFID_SHIFT 0 +#define DMAE_COMMAND_SRC_VFPF (0x1<<6) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility selects the source function PF-0, VF-1 */ +#define DMAE_COMMAND_SRC_VFPF_SHIFT 6 +#define DMAE_COMMAND_RESERVED1 (0x1<<7) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility */ +#define DMAE_COMMAND_RESERVED1_SHIFT 7 +#define DMAE_COMMAND_DST_VFID (0x3F<<8) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility destination VF id */ +#define DMAE_COMMAND_DST_VFID_SHIFT 8 +#define DMAE_COMMAND_DST_VFPF (0x1<<14) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility selects the destination function 
PF-0, VF-1 */ +#define DMAE_COMMAND_DST_VFPF_SHIFT 14 +#define DMAE_COMMAND_RESERVED2 (0x1<<15) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility */ +#define DMAE_COMMAND_RESERVED2_SHIFT 15 + uint16_t len /* copy length */; +#elif defined(__LITTLE_ENDIAN) + uint16_t len /* copy length */; + uint16_t opcode_iov; +#define DMAE_COMMAND_SRC_VFID (0x3F<<0) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility source VF id */ +#define DMAE_COMMAND_SRC_VFID_SHIFT 0 +#define DMAE_COMMAND_SRC_VFPF (0x1<<6) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility selects the source function PF-0, VF-1 */ +#define DMAE_COMMAND_SRC_VFPF_SHIFT 6 +#define DMAE_COMMAND_RESERVED1 (0x1<<7) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility */ +#define DMAE_COMMAND_RESERVED1_SHIFT 7 +#define DMAE_COMMAND_DST_VFID (0x3F<<8) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility destination VF id */ +#define DMAE_COMMAND_DST_VFID_SHIFT 8 +#define DMAE_COMMAND_DST_VFPF (0x1<<14) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility selects the destination function PF-0, VF-1 */ +#define DMAE_COMMAND_DST_VFPF_SHIFT 14 +#define DMAE_COMMAND_RESERVED2 (0x1<<15) /* BitField opcode_iovE2 and onward, set to 0 for backward compatibility */ +#define DMAE_COMMAND_RESERVED2_SHIFT 15 +#endif + uint32_t comp_addr_lo /* completion address low/grc address */; + uint32_t comp_addr_hi /* completion address hi */; + uint32_t comp_val /* value to write to completion address */; + uint32_t crc32 /* crc32 result */; + uint32_t crc32_c /* crc32_c result */; +#if defined(__BIG_ENDIAN) + uint16_t crc16_c /* crc16_c result */; + uint16_t crc16 /* crc16 result */; +#elif defined(__LITTLE_ENDIAN) + uint16_t crc16 /* crc16 result */; + uint16_t crc16_c /* crc16_c result */; +#endif +#if defined(__BIG_ENDIAN) + uint16_t reserved3; + uint16_t crc_t10 /* crc_t10 result */; +#elif defined(__LITTLE_ENDIAN) + uint16_t crc_t10 /* crc_t10 result */; + uint16_t reserved3; +#endif +#if defined(__BIG_ENDIAN) + uint16_t xsum8 /* checksum8 result */; + uint16_t xsum16 /* checksum16 result */; +#elif defined(__LITTLE_ENDIAN) + uint16_t xsum16 /* checksum16 result */; + uint16_t xsum8 /* checksum8 result */; +#endif +}; + + +/* + * common data for all protocols + */ +struct doorbell_hdr +{ + uint8_t header; +#define DOORBELL_HDR_RX (0x1<<0) /* BitField header 1 for rx doorbell, 0 for tx doorbell */ +#define DOORBELL_HDR_RX_SHIFT 0 +#define DOORBELL_HDR_DB_TYPE (0x1<<1) /* BitField header 0 for normal doorbell, 1 for advertise wnd doorbell */ +#define DOORBELL_HDR_DB_TYPE_SHIFT 1 +#define DOORBELL_HDR_DPM_SIZE (0x3<<2) /* BitField header rdma tx only: DPM transaction size specifier (64/128/256/512 bytes) */ +#define DOORBELL_HDR_DPM_SIZE_SHIFT 2 +#define DOORBELL_HDR_CONN_TYPE (0xF<<4) /* BitField header connection type */ +#define DOORBELL_HDR_CONN_TYPE_SHIFT 4 +}; + +/* + * Ethernet doorbell + */ +struct eth_tx_doorbell +{ +#if defined(__BIG_ENDIAN) + uint16_t npackets /* number of data bytes that were added in the doorbell */; + uint8_t params; +#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) /* BitField params number of buffer descriptors that were added in the doorbell */ +#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0 +#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) /* BitField params tx fin command flag */ +#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6 +#define ETH_TX_DOORBELL_SPARE (0x1<<7) /* BitField params doorbell queue spare 
flag */ +#define ETH_TX_DOORBELL_SPARE_SHIFT 7 + struct doorbell_hdr hdr; +#elif defined(__LITTLE_ENDIAN) + struct doorbell_hdr hdr; + uint8_t params; +#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) /* BitField params number of buffer descriptors that were added in the doorbell */ +#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0 +#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) /* BitField params tx fin command flag */ +#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6 +#define ETH_TX_DOORBELL_SPARE (0x1<<7) /* BitField params doorbell queue spare flag */ +#define ETH_TX_DOORBELL_SPARE_SHIFT 7 + uint16_t npackets /* number of data bytes that were added in the doorbell */; +#endif +}; + + +/* + * 3 lines. status block $$KEEP_ENDIANNESS$$ + */ +struct hc_status_block_e1x +{ + uint16_t index_values[HC_SB_MAX_INDICES_E1X] /* indices reported by cstorm */; + uint16_t running_index[HC_SB_MAX_SM] /* Status Block running indices */; + uint32_t rsrv[11]; +}; + +/* + * host status block + */ +struct host_hc_status_block_e1x +{ + struct hc_status_block_e1x sb /* fast path indices */; +}; + + +/* + * 3 lines. status block $$KEEP_ENDIANNESS$$ + */ +struct hc_status_block_e2 +{ + uint16_t index_values[HC_SB_MAX_INDICES_E2] /* indices reported by cstorm */; + uint16_t running_index[HC_SB_MAX_SM] /* Status Block running indices */; + uint32_t reserved[11]; +}; + +/* + * host status block + */ +struct host_hc_status_block_e2 +{ + struct hc_status_block_e2 sb /* fast path indices */; +}; + + +/* + * 5 lines. slow-path status block $$KEEP_ENDIANNESS$$ + */ +struct hc_sp_status_block +{ + uint16_t index_values[HC_SP_SB_MAX_INDICES] /* indices reported by cstorm */; + uint16_t running_index /* Status Block running index */; + uint16_t rsrv; + uint32_t rsrv1; +}; + +/* + * host status block + */ +struct host_sp_status_block +{ + struct atten_sp_status_block atten_status_block /* attention bits section */; + struct hc_sp_status_block sp_sb /* slow path indices */; +}; + + +/* + * IGU driver acknowledgment register + */ +union igu_ack_register +{ + struct { +#if defined(__BIG_ENDIAN) + uint16_t sb_id_and_flags; +#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0) /* BitField sb_id_and_flags 0-15: non default status blocks, 16: default status block */ +#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0 +#define IGU_ACK_REGISTER_STORM_ID (0x7<<5) /* BitField sb_id_and_flags 0-3:storm id, 4: attn status block (valid in default sb only) */ +#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5 +#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8) /* BitField sb_id_and_flags if set, acknowledges status block index */ +#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8 +#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9) /* BitField sb_id_and_flags interrupt enable/disable/nop: use IGU_INT_xxx constants */ +#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9 +#define IGU_ACK_REGISTER_RESERVED (0x1F<<11) /* BitField sb_id_and_flags */ +#define IGU_ACK_REGISTER_RESERVED_SHIFT 11 + uint16_t status_block_index /* status block index acknowledgement */; +#elif defined(__LITTLE_ENDIAN) + uint16_t status_block_index /* status block index acknowledgement */; + uint16_t sb_id_and_flags; +#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0) /* BitField sb_id_and_flags 0-15: non default status blocks, 16: default status block */ +#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0 +#define IGU_ACK_REGISTER_STORM_ID (0x7<<5) /* BitField sb_id_and_flags 0-3:storm id, 4: attn status block (valid in default sb only) */ +#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5 +#define 
IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8) /* BitField sb_id_and_flags if set, acknowledges status block index */ +#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8 +#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9) /* BitField sb_id_and_flags interrupt enable/disable/nop: use IGU_INT_xxx constants */ +#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9 +#define IGU_ACK_REGISTER_RESERVED (0x1F<<11) /* BitField sb_id_and_flags */ +#define IGU_ACK_REGISTER_RESERVED_SHIFT 11 +#endif + } sb; + uint32_t raw_data; +}; + + +/* + * IGU driver acknowledgement register + */ +struct igu_backward_compatible +{ + uint32_t sb_id_and_flags; +#define IGU_BACKWARD_COMPATIBLE_SB_INDEX (0xFFFF<<0) /* BitField sb_id_and_flags */ +#define IGU_BACKWARD_COMPATIBLE_SB_INDEX_SHIFT 0 +#define IGU_BACKWARD_COMPATIBLE_SB_SELECT (0x1F<<16) /* BitField sb_id_and_flags */ +#define IGU_BACKWARD_COMPATIBLE_SB_SELECT_SHIFT 16 +#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS (0x7<<21) /* BitField sb_id_and_flags 0-3:storm id, 4: attn status block (valid in default sb only) */ +#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS_SHIFT 21 +#define IGU_BACKWARD_COMPATIBLE_BUPDATE (0x1<<24) /* BitField sb_id_and_flags if set, acknowledges status block index */ +#define IGU_BACKWARD_COMPATIBLE_BUPDATE_SHIFT 24 +#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT (0x3<<25) /* BitField sb_id_and_flags interrupt enable/disable/nop: use IGU_INT_xxx constants */ +#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT_SHIFT 25 +#define IGU_BACKWARD_COMPATIBLE_RESERVED_0 (0x1F<<27) /* BitField sb_id_and_flags */ +#define IGU_BACKWARD_COMPATIBLE_RESERVED_0_SHIFT 27 + uint32_t reserved_2; +}; + + +/* + * IGU driver acknowledgement register + */ +struct igu_regular +{ + uint32_t sb_id_and_flags; +#define IGU_REGULAR_SB_INDEX (0xFFFFF<<0) /* BitField sb_id_and_flags */ +#define IGU_REGULAR_SB_INDEX_SHIFT 0 +#define IGU_REGULAR_RESERVED0 (0x1<<20) /* BitField sb_id_and_flags */ +#define IGU_REGULAR_RESERVED0_SHIFT 20 +#define IGU_REGULAR_SEGMENT_ACCESS (0x7<<21) /* BitField sb_id_and_flags 21-23 (use enum igu_seg_access) */ +#define IGU_REGULAR_SEGMENT_ACCESS_SHIFT 21 +#define IGU_REGULAR_BUPDATE (0x1<<24) /* BitField sb_id_and_flags */ +#define IGU_REGULAR_BUPDATE_SHIFT 24 +#define IGU_REGULAR_ENABLE_INT (0x3<<25) /* BitField sb_id_and_flags interrupt enable/disable/nop (use enum igu_int_cmd) */ +#define IGU_REGULAR_ENABLE_INT_SHIFT 25 +#define IGU_REGULAR_RESERVED_1 (0x1<<27) /* BitField sb_id_and_flags */ +#define IGU_REGULAR_RESERVED_1_SHIFT 27 +#define IGU_REGULAR_CLEANUP_TYPE (0x3<<28) /* BitField sb_id_and_flags */ +#define IGU_REGULAR_CLEANUP_TYPE_SHIFT 28 +#define IGU_REGULAR_CLEANUP_SET (0x1<<30) /* BitField sb_id_and_flags */ +#define IGU_REGULAR_CLEANUP_SET_SHIFT 30 +#define IGU_REGULAR_BCLEANUP (0x1<<31) /* BitField sb_id_and_flags */ +#define IGU_REGULAR_BCLEANUP_SHIFT 31 + uint32_t reserved_2; +}; + +/* + * IGU driver acknowledgement register + */ +union igu_consprod_reg +{ + struct igu_regular regular; + struct igu_backward_compatible backward_compatible; +}; + + +/* + * Igu control commands + */ +enum igu_ctrl_cmd +{ + IGU_CTRL_CMD_TYPE_RD, + IGU_CTRL_CMD_TYPE_WR, + MAX_IGU_CTRL_CMD}; + + +/* + * Control register for the IGU command register + */ +struct igu_ctrl_reg +{ + uint32_t ctrl_data; +#define IGU_CTRL_REG_ADDRESS (0xFFF<<0) /* BitField ctrl_data */ +#define IGU_CTRL_REG_ADDRESS_SHIFT 0 +#define IGU_CTRL_REG_FID (0x7F<<12) /* BitField ctrl_data */ +#define IGU_CTRL_REG_FID_SHIFT 12 +#define IGU_CTRL_REG_RESERVED (0x1<<19) /* BitField ctrl_data */ +#define 
IGU_CTRL_REG_RESERVED_SHIFT 19 +#define IGU_CTRL_REG_TYPE (0x1<<20) /* BitField ctrl_data (use enum igu_ctrl_cmd) */ +#define IGU_CTRL_REG_TYPE_SHIFT 20 +#define IGU_CTRL_REG_UNUSED (0x7FF<<21) /* BitField ctrl_data */ +#define IGU_CTRL_REG_UNUSED_SHIFT 21 +}; + + +/* + * Igu interrupt command + */ +enum igu_int_cmd +{ + IGU_INT_ENABLE, + IGU_INT_DISABLE, + IGU_INT_NOP, + IGU_INT_NOP2, + MAX_IGU_INT_CMD}; + + +/* + * Igu segments + */ +enum igu_seg_access +{ + IGU_SEG_ACCESS_NORM, + IGU_SEG_ACCESS_DEF, + IGU_SEG_ACCESS_ATTN, + MAX_IGU_SEG_ACCESS}; + + +/* + * Parser parsing flags field + */ +struct parsing_flags +{ + uint16_t flags; +#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE (0x1<<0) /* BitField flagscontext flags 0=non-unicast, 1=unicast (use enum prs_flags_eth_addr_type) */ +#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE_SHIFT 0 +#define PARSING_FLAGS_VLAN (0x1<<1) /* BitField flagscontext flags 0 or 1 */ +#define PARSING_FLAGS_VLAN_SHIFT 1 +#define PARSING_FLAGS_EXTRA_VLAN (0x1<<2) /* BitField flagscontext flags 0 or 1 */ +#define PARSING_FLAGS_EXTRA_VLAN_SHIFT 2 +#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL (0x3<<3) /* BitField flagscontext flags 0=un-known, 1=Ipv4, 2=Ipv6,3=LLC SNAP un-known. LLC SNAP here refers only to LLC/SNAP packets that do not have Ipv4 or Ipv6 above them. Ipv4 and Ipv6 indications are even if they are over LLC/SNAP and not directly over Ethernet (use enum prs_flags_over_eth) */ +#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT 3 +#define PARSING_FLAGS_IP_OPTIONS (0x1<<5) /* BitField flagscontext flags 0=no IP options / extension headers. 1=IP options / extension header exist */ +#define PARSING_FLAGS_IP_OPTIONS_SHIFT 5 +#define PARSING_FLAGS_FRAGMENTATION_STATUS (0x1<<6) /* BitField flagscontext flags 0=non-fragmented, 1=fragmented */ +#define PARSING_FLAGS_FRAGMENTATION_STATUS_SHIFT 6 +#define PARSING_FLAGS_OVER_IP_PROTOCOL (0x3<<7) /* BitField flagscontext flags 0=un-known, 1=TCP, 2=UDP (use enum prs_flags_over_ip) */ +#define PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT 7 +#define PARSING_FLAGS_PURE_ACK_INDICATION (0x1<<9) /* BitField flagscontext flags 0=packet with data, 1=pure-ACK (use enum prs_flags_ack_type) */ +#define PARSING_FLAGS_PURE_ACK_INDICATION_SHIFT 9 +#define PARSING_FLAGS_TCP_OPTIONS_EXIST (0x1<<10) /* BitField flagscontext flags 0=no TCP options. 
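An editor's aside on reading these definitions: every bit-field in this file comes as a pair of macros, a mask that is already shifted into position plus a matching _SHIFT constant. A minimal sketch of generic get/set helpers, shown on the parsing_flags word defined just above; the helper names are illustrative and not part of this header, and byte-order conversion of the little-endian firmware words is omitted.

// Assumes this HSI header is already included; helper names are illustrative.
#define HSI_GET(val, fld)        (((val) & (fld)) >> fld##_SHIFT)
#define HSI_SET(val, fld, v)     (((val) & ~(fld)) | (((v) << fld##_SHIFT) & (fld)))

static int pkt_is_tcp(const struct parsing_flags *pf)
{
        // PRS_FLAG_OVERIP_TCP is declared a little further down in this header.
        return HSI_GET(pf->flags, PARSING_FLAGS_OVER_IP_PROTOCOL) == PRS_FLAG_OVERIP_TCP;
}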
1=TCP options */ +#define PARSING_FLAGS_TCP_OPTIONS_EXIST_SHIFT 10 +#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG (0x1<<11) /* BitField flagscontext flags According to the TCP header options parsing */ +#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG_SHIFT 11 +#define PARSING_FLAGS_CONNECTION_MATCH (0x1<<12) /* BitField flagscontext flags connection match in searcher indication */ +#define PARSING_FLAGS_CONNECTION_MATCH_SHIFT 12 +#define PARSING_FLAGS_LLC_SNAP (0x1<<13) /* BitField flagscontext flags LLC SNAP indication */ +#define PARSING_FLAGS_LLC_SNAP_SHIFT 13 +#define PARSING_FLAGS_RESERVED0 (0x3<<14) /* BitField flagscontext flags */ +#define PARSING_FLAGS_RESERVED0_SHIFT 14 +}; + + +/* + * Parsing flags for TCP ACK type + */ +enum prs_flags_ack_type +{ + PRS_FLAG_PUREACK_PIGGY, + PRS_FLAG_PUREACK_PURE, + MAX_PRS_FLAGS_ACK_TYPE}; + + +/* + * Parsing flags for Ethernet address type + */ +enum prs_flags_eth_addr_type +{ + PRS_FLAG_ETHTYPE_NON_UNICAST, + PRS_FLAG_ETHTYPE_UNICAST, + MAX_PRS_FLAGS_ETH_ADDR_TYPE}; + + +/* + * Parsing flags for over-ethernet protocol + */ +enum prs_flags_over_eth +{ + PRS_FLAG_OVERETH_UNKNOWN, + PRS_FLAG_OVERETH_IPV4, + PRS_FLAG_OVERETH_IPV6, + PRS_FLAG_OVERETH_LLCSNAP_UNKNOWN, + MAX_PRS_FLAGS_OVER_ETH}; + + +/* + * Parsing flags for over-IP protocol + */ +enum prs_flags_over_ip +{ + PRS_FLAG_OVERIP_UNKNOWN, + PRS_FLAG_OVERIP_TCP, + PRS_FLAG_OVERIP_UDP, + MAX_PRS_FLAGS_OVER_IP}; + + +/* + * SDM operation gen command (generate aggregative interrupt) + */ +struct sdm_op_gen +{ + uint32_t command; +#define SDM_OP_GEN_COMP_PARAM (0x1F<<0) /* BitField commandcomp_param and comp_type thread ID/aggr interrupt number/counter depending on the completion type */ +#define SDM_OP_GEN_COMP_PARAM_SHIFT 0 +#define SDM_OP_GEN_COMP_TYPE (0x7<<5) /* BitField commandcomp_param and comp_type Direct messages to CM / PCI switch are not supported in operation_gen completion */ +#define SDM_OP_GEN_COMP_TYPE_SHIFT 5 +#define SDM_OP_GEN_AGG_VECT_IDX (0xFF<<8) /* BitField commandcomp_param and comp_type bit index in aggregated interrupt vector */ +#define SDM_OP_GEN_AGG_VECT_IDX_SHIFT 8 +#define SDM_OP_GEN_AGG_VECT_IDX_VALID (0x1<<16) /* BitField commandcomp_param and comp_type */ +#define SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT 16 +#define SDM_OP_GEN_RESERVED (0x7FFF<<17) /* BitField commandcomp_param and comp_type */ +#define SDM_OP_GEN_RESERVED_SHIFT 17 +}; + + +/* + * Timers connection context + */ +struct timers_block_context +{ + uint32_t __reserved_0 /* data of client 0 of the timers block*/; + uint32_t __reserved_1 /* data of client 1 of the timers block*/; + uint32_t __reserved_2 /* data of client 2 of the timers block*/; + uint32_t flags; +#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0) /* BitField flagscontext flags number of active timers running */ +#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0 +#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2) /* BitField flagscontext flags flag: is connection valid (should be set by driver to 1 in toe/iscsi connections) */ +#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2 +#define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3) /* BitField flagscontext flags */ +#define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3 +}; + + +/* + * The eth aggregative context of Tstorm + */ +struct tstorm_eth_ag_context +{ + uint32_t __reserved0[14]; +}; + + +/* + * The eth aggregative context of Ustorm + */ +struct ustorm_eth_ag_context +{ + uint32_t __reserved0; +#if defined(__BIG_ENDIAN) + uint8_t cdu_usage /* Will be used by the 
CDU for validation of the CID/connection type on doorbells. */; + uint8_t __reserved2; + uint16_t __reserved1; +#elif defined(__LITTLE_ENDIAN) + uint16_t __reserved1; + uint8_t __reserved2; + uint8_t cdu_usage /* Will be used by the CDU for validation of the CID/connection type on doorbells. */; +#endif + uint32_t __reserved3[6]; +}; + + +/* + * The eth aggregative context of Xstorm + */ +struct xstorm_eth_ag_context +{ + uint32_t reserved0; +#if defined(__BIG_ENDIAN) + uint8_t cdu_reserved /* Used by the CDU for validation and debugging */; + uint8_t reserved2; + uint16_t reserved1; +#elif defined(__LITTLE_ENDIAN) + uint16_t reserved1; + uint8_t reserved2; + uint8_t cdu_reserved /* Used by the CDU for validation and debugging */; +#endif + uint32_t reserved3[30]; +}; + + +/* + * doorbell message sent to the chip + */ +struct doorbell +{ +#if defined(__BIG_ENDIAN) + uint16_t zero_fill2 /* driver must zero this field! */; + uint8_t zero_fill1 /* driver must zero this field! */; + struct doorbell_hdr header; +#elif defined(__LITTLE_ENDIAN) + struct doorbell_hdr header; + uint8_t zero_fill1 /* driver must zero this field! */; + uint16_t zero_fill2 /* driver must zero this field! */; +#endif +}; + + +/* + * doorbell message sent to the chip + */ +struct doorbell_set_prod +{ +#if defined(__BIG_ENDIAN) + uint16_t prod /* Producer index to be set */; + uint8_t zero_fill1 /* driver must zero this field! */; + struct doorbell_hdr header; +#elif defined(__LITTLE_ENDIAN) + struct doorbell_hdr header; + uint8_t zero_fill1 /* driver must zero this field! */; + uint16_t prod /* Producer index to be set */; +#endif +}; + + +struct regpair +{ + uint32_t lo /* low word for reg-pair */; + uint32_t hi /* high word for reg-pair */; +}; + + +struct regpair_native +{ + uint32_t lo /* low word for reg-pair */; + uint32_t hi /* high word for reg-pair */; +}; + + +/* + * Classify rule opcodes in E2/E3 + */ +enum classify_rule +{ + CLASSIFY_RULE_OPCODE_MAC /* Add/remove a MAC address */, + CLASSIFY_RULE_OPCODE_VLAN /* Add/remove a VLAN */, + CLASSIFY_RULE_OPCODE_PAIR /* Add/remove a MAC-VLAN pair */, + MAX_CLASSIFY_RULE}; + + +/* + * Classify rule types in E2/E3 + */ +enum classify_rule_action_type +{ + CLASSIFY_RULE_REMOVE, + CLASSIFY_RULE_ADD, + MAX_CLASSIFY_RULE_ACTION_TYPE}; + + +/* + * client init ramrod data $$KEEP_ENDIANNESS$$ + */ +struct client_init_general_data +{ + uint8_t client_id /* client_id */; + uint8_t statistics_counter_id /* statistics counter id */; + uint8_t statistics_en_flg /* statistics en flg */; + uint8_t is_fcoe_flg /* is this an fcoe connection. (1 bit is used) */; + uint8_t activate_flg /* if 0 - the client is deactivate else the client is activate client (1 bit is used) */; + uint8_t sp_client_id /* the slow path rings client Id. 
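A sketch of how the doorbell structures above can be composed before being written to the doorbell BAR; the 32-bit write target and the connection-type argument are placeholders, only the structure layout and the DOORBELL_HDR_* bit-fields come from this header, and the zero_fill fields are left at zero as their comments require.

#include <string.h>

static void ring_rx_prod(volatile uint32_t *db_reg, uint16_t prod, uint8_t conn_type)
{
        struct doorbell_set_prod msg = { 0 };   // zero_fill1 must stay 0
        uint32_t raw;

        msg.header.header = (1 << DOORBELL_HDR_RX_SHIFT) |
                            ((conn_type << DOORBELL_HDR_CONN_TYPE_SHIFT) &
                             DOORBELL_HDR_CONN_TYPE);
        msg.prod = prod;                        // producer index to be set

        memcpy(&raw, &msg, sizeof(raw));        // 4-byte doorbell image
        *db_reg = raw;                          // hypothetical mapped doorbell register
}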
*/; + uint16_t mtu /* Host MTU from client config */; + uint8_t statistics_zero_flg /* if set FW will reset the statistic counter of this client */; + uint8_t func_id /* PCI function ID (0-71) */; + uint8_t cos /* The connection cos, if applicable */; + uint8_t traffic_type; + uint32_t reserved0; +}; + + +/* + * client init rx data $$KEEP_ENDIANNESS$$ + */ +struct client_init_rx_data +{ + uint8_t tpa_en; +#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4 (0x1<<0) /* BitField tpa_entpa_enable tpa enable flg ipv4 */ +#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4_SHIFT 0 +#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6 (0x1<<1) /* BitField tpa_entpa_enable tpa enable flg ipv6 */ +#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1 +#define CLIENT_INIT_RX_DATA_TPA_MODE (0x1<<2) /* BitField tpa_entpa_enable tpa mode (LRO or GRO) (use enum tpa_mode) */ +#define CLIENT_INIT_RX_DATA_TPA_MODE_SHIFT 2 +#define CLIENT_INIT_RX_DATA_RESERVED5 (0x1F<<3) /* BitField tpa_entpa_enable */ +#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 3 + uint8_t vmqueue_mode_en_flg /* If set, working in VMQueue mode (always consume one sge) */; + uint8_t extra_data_over_sgl_en_flg /* if set, put over sgl data from end of input message */; + uint8_t cache_line_alignment_log_size /* The log size of cache line alignment in bytes. Must be a power of 2. */; + uint8_t enable_dynamic_hc /* If set, dynamic HC is enabled */; + uint8_t max_sges_for_packet /* The maximal number of SGEs that can be used for one packet. depends on MTU and SGE size. must be 0 if SGEs are disabled */; + uint8_t client_qzone_id /* used in E2 only, to specify the HW queue zone ID used for this client rx producers */; + uint8_t drop_ip_cs_err_flg /* If set, this client drops packets with IP checksum error */; + uint8_t drop_tcp_cs_err_flg /* If set, this client drops packets with TCP checksum error */; + uint8_t drop_ttl0_flg /* If set, this client drops packets with TTL=0 */; + uint8_t drop_udp_cs_err_flg /* If set, this client drops packets with UDP checksum error */; + uint8_t inner_vlan_removal_enable_flg /* If set, inner VLAN removal is enabled for this client */; + uint8_t outer_vlan_removal_enable_flg /* If set, outer VLAN removal is enabled for this client */; + uint8_t status_block_id /* rx status block id */; + uint8_t rx_sb_index_number /* status block indices */; + uint8_t dont_verify_rings_pause_thr_flg /* If set, the rings pause thresholds will not be verified by firmware. */; + uint8_t max_tpa_queues /* maximal TPA queues allowed for this client */; + uint8_t silent_vlan_removal_flg /* if set, and the vlan is equal to requested vlan according to mask, the vlan will be remove without notifying the driver */; + uint16_t max_bytes_on_bd /* Maximum bytes that can be placed on a BD. 
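A small sketch of one plausible way to derive the two size-related fields above; the buffer size and the 2-byte IP alignment are illustrative constants, the formula is only one reading of the max_bytes_on_bd comment, and the CPU-to-little-endian conversion implied by $$KEEP_ENDIANNESS$$ is omitted.

#define RXQ_BUF_SIZE   2048u   // placeholder receive-buffer size
#define RX_IP_ALIGN    2u      // the 2 ip-alignment bytes mentioned above

static uint8_t log2_u32(uint32_t v)            // v assumed to be a power of two
{
        uint8_t log = 0;
        while (v > 1) {
                v >>= 1;
                log++;
        }
        return log;
}

static void fill_rx_sizes(struct client_init_rx_data *rx, uint32_t cache_line)
{
        rx->cache_line_alignment_log_size = log2_u32(cache_line);
        rx->max_bytes_on_bd = (uint16_t)(RXQ_BUF_SIZE - RX_IP_ALIGN - cache_line);
}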
The BD allocated size should include 2 more bytes (ip alignment) and alignment size (in case the address is not aligned) */; + uint16_t sge_buff_size /* Size of the buffers pointed by SGEs */; + uint8_t approx_mcast_engine_id /* In Everest2, if is_approx_mcast is set, this field specified which approximate multicast engine is associate with this client */; + uint8_t rss_engine_id /* In Everest2, if rss_mode is set, this field specified which RSS engine is associate with this client */; + struct regpair bd_page_base /* BD page base address at the host */; + struct regpair sge_page_base /* SGE page base address at the host */; + struct regpair cqe_page_base /* Completion queue base address */; + uint8_t is_leading_rss; + uint8_t is_approx_mcast; + uint16_t max_agg_size /* maximal size for the aggregated TPA packets, reprted by the host */; + uint16_t state; +#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL (0x1<<0) /* BitField staterx filters state drop all unicast packets */ +#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL_SHIFT 0 +#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL (0x1<<1) /* BitField staterx filters state accept all unicast packets (subject to vlan) */ +#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL_SHIFT 1 +#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED (0x1<<2) /* BitField staterx filters state accept all unmatched unicast packets (subject to vlan) */ +#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED_SHIFT 2 +#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL (0x1<<3) /* BitField staterx filters state drop all multicast packets */ +#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL_SHIFT 3 +#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL (0x1<<4) /* BitField staterx filters state accept all multicast packets (subject to vlan) */ +#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL_SHIFT 4 +#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL (0x1<<5) /* BitField staterx filters state accept all broadcast packets (subject to vlan) */ +#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL_SHIFT 5 +#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN (0x1<<6) /* BitField staterx filters state accept packets matched only by MAC (without checking vlan) */ +#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN_SHIFT 6 +#define CLIENT_INIT_RX_DATA_RESERVED2 (0x1FF<<7) /* BitField staterx filters state */ +#define CLIENT_INIT_RX_DATA_RESERVED2_SHIFT 7 + uint16_t cqe_pause_thr_low /* number of remaining cqes under which, we send pause message */; + uint16_t cqe_pause_thr_high /* number of remaining cqes above which, we send un-pause message */; + uint16_t bd_pause_thr_low /* number of remaining bds under which, we send pause message */; + uint16_t bd_pause_thr_high /* number of remaining bds above which, we send un-pause message */; + uint16_t sge_pause_thr_low /* number of remaining sges under which, we send pause message */; + uint16_t sge_pause_thr_high /* number of remaining sges above which, we send un-pause message */; + uint16_t rx_cos_mask /* the bits that will be set on pfc/ safc paket whith will be genratet when this ring is full. 
for regular flow control set this to 1 */; + uint16_t silent_vlan_value /* The vlan to compare, in case, silent vlan is set */; + uint16_t silent_vlan_mask /* The vlan mask, in case, silent vlan is set */; + uint32_t reserved6[2]; +}; + +/* + * client init tx data $$KEEP_ENDIANNESS$$ + */ +struct client_init_tx_data +{ + uint8_t enforce_security_flg /* if set, security checks will be made for this connection */; + uint8_t tx_status_block_id /* the number of status block to update */; + uint8_t tx_sb_index_number /* the index to use inside the status block */; + uint8_t tss_leading_client_id /* client ID of the leading TSS client, for TX classification source knock out */; + uint8_t tx_switching_flg /* if set, tx switching will be done to packets on this connection */; + uint8_t anti_spoofing_flg /* if set, anti spoofing check will be done to packets on this connection */; + uint16_t default_vlan /* default vlan tag (id+pri). (valid if default_vlan_flg is set) */; + struct regpair tx_bd_page_base /* BD page base address at the host for TxBdCons */; + uint16_t state; +#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL (0x1<<0) /* BitField statetx filters state accept all unicast packets (subject to vlan) */ +#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL_SHIFT 0 +#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL (0x1<<1) /* BitField statetx filters state accept all multicast packets (subject to vlan) */ +#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL_SHIFT 1 +#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL (0x1<<2) /* BitField statetx filters state accept all broadcast packets (subject to vlan) */ +#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2 +#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3) /* BitField statetx filters state accept packets matched only by MAC (without checking vlan) */ +#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3 +#define CLIENT_INIT_TX_DATA_RESERVED0 (0xFFF<<4) /* BitField statetx filters state */ +#define CLIENT_INIT_TX_DATA_RESERVED0_SHIFT 4 + uint8_t default_vlan_flg /* is default vlan valid for this client. */; + uint8_t force_default_pri_flg /* if set, force default priority */; + uint8_t tunnel_lso_inc_ip_id /* In case of LSO over IPv4 tunnel, whether to increment IP ID on external IP header or internal IP header */; + uint8_t refuse_outband_vlan_flg /* if set, the FW will not add outband vlan on packet (even if will exist on BD). */; + uint8_t tunnel_non_lso_pcsum_location /* In case of non-Lso encapsulated packets with L4 checksum offload, the pseudo checksum location - on packet or on BD. */; + uint8_t tunnel_non_lso_outer_ip_csum_location /* In case of non-Lso encapsulated packets with outer L3 ip checksum offload, the pseudo checksum location - on packet or on BD. 
*/; +}; + +/* + * client init ramrod data $$KEEP_ENDIANNESS$$ + */ +struct client_init_ramrod_data +{ + struct client_init_general_data general /* client init general data */; + struct client_init_rx_data rx /* client init rx data */; + struct client_init_tx_data tx /* client init tx data */; +}; + + +/* + * client update ramrod data $$KEEP_ENDIANNESS$$ + */ +struct client_update_ramrod_data +{ + uint8_t client_id /* the client to update */; + uint8_t func_id /* PCI function ID this client belongs to (0-71) */; + uint8_t inner_vlan_removal_enable_flg /* If set, inner VLAN removal is enabled for this client, will be change according to change flag */; + uint8_t inner_vlan_removal_change_flg /* If set, inner VLAN removal flag will be set according to the enable flag */; + uint8_t outer_vlan_removal_enable_flg /* If set, outer VLAN removal is enabled for this client, will be change according to change flag */; + uint8_t outer_vlan_removal_change_flg /* If set, outer VLAN removal flag will be set according to the enable flag */; + uint8_t anti_spoofing_enable_flg /* If set, anti spoofing is enabled for this client, will be change according to change flag */; + uint8_t anti_spoofing_change_flg /* If set, anti spoofing flag will be set according to anti spoofing flag */; + uint8_t activate_flg /* if 0 - the client is deactivate else the client is activate client (1 bit is used) */; + uint8_t activate_change_flg /* If set, activate_flg will be checked */; + uint16_t default_vlan /* default vlan tag (id+pri). (valid if default_vlan_flg is set) */; + uint8_t default_vlan_enable_flg; + uint8_t default_vlan_change_flg; + uint16_t silent_vlan_value /* The vlan to compare, in case, silent vlan is set */; + uint16_t silent_vlan_mask /* The vlan mask, in case, silent vlan is set */; + uint8_t silent_vlan_removal_flg /* if set, and the vlan is equal to requested vlan according to mask, the vlan will be remove without notifying the driver */; + uint8_t silent_vlan_change_flg; + uint8_t refuse_outband_vlan_flg /* If set, the FW will not add outband vlan on packet (even if will exist on BD). */; + uint8_t refuse_outband_vlan_change_flg /* If set, refuse_outband_vlan_flg will be updated. */; + uint8_t tx_switching_flg /* If set, tx switching will be done to packets on this connection. */; + uint8_t tx_switching_change_flg /* If set, tx_switching_flg will be updated. 
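A sketch of stitching the three sub-structures above into one client_init_ramrod_data before a client-setup ramrod is posted; the ids and DMA addresses are caller-supplied placeholders, only a few representative fields are filled, and the little-endian conversions implied by $$KEEP_ENDIANNESS$$ are again left out.

#include <string.h>

static void prepare_client_init(struct client_init_ramrod_data *d,
                                uint8_t cli_id, uint16_t mtu,
                                uint64_t rx_bd_dma, uint64_t rx_cq_dma)
{
        memset(d, 0, sizeof(*d));

        d->general.client_id    = cli_id;
        d->general.activate_flg = 1;            // activate the client
        d->general.mtu          = mtu;

        d->rx.status_block_id   = cli_id;       // placeholder status-block mapping
        d->rx.bd_page_base.lo   = (uint32_t)rx_bd_dma;
        d->rx.bd_page_base.hi   = (uint32_t)(rx_bd_dma >> 32);
        d->rx.cqe_page_base.lo  = (uint32_t)rx_cq_dma;
        d->rx.cqe_page_base.hi  = (uint32_t)(rx_cq_dma >> 32);

        d->tx.tx_status_block_id = cli_id;      // placeholder
}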
*/; + uint32_t reserved1; + uint32_t echo /* echo value to be sent to driver on event ring */; +}; + + +/* + * The eth storm context of Cstorm + */ +struct cstorm_eth_st_context +{ + uint32_t __reserved0[4]; +}; + + +struct double_regpair +{ + uint32_t regpair0_lo /* low word for reg-pair0 */; + uint32_t regpair0_hi /* high word for reg-pair0 */; + uint32_t regpair1_lo /* low word for reg-pair1 */; + uint32_t regpair1_hi /* high word for reg-pair1 */; +}; + + +/* + * Ethernet address types used in ethernet tx BDs + */ +enum eth_addr_type +{ + UNKNOWN_ADDRESS, + UNICAST_ADDRESS, + MULTICAST_ADDRESS, + BROADCAST_ADDRESS, + MAX_ETH_ADDR_TYPE +}; + + +/* + * $$KEEP_ENDIANNESS$$ + */ +struct eth_classify_cmd_header +{ + uint8_t cmd_general_data; +#define ETH_CLASSIFY_CMD_HEADER_RX_CMD (0x1<<0) /* BitField cmd_general_data should this cmd be applied for Rx */ +#define ETH_CLASSIFY_CMD_HEADER_RX_CMD_SHIFT 0 +#define ETH_CLASSIFY_CMD_HEADER_TX_CMD (0x1<<1) /* BitField cmd_general_data should this cmd be applied for Tx */ +#define ETH_CLASSIFY_CMD_HEADER_TX_CMD_SHIFT 1 +#define ETH_CLASSIFY_CMD_HEADER_OPCODE (0x3<<2) /* BitField cmd_general_data command opcode for MAC/VLAN/PAIR (use enum classify_rule) */ +#define ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT 2 +#define ETH_CLASSIFY_CMD_HEADER_IS_ADD (0x1<<4) /* BitField cmd_general_data (use enum classify_rule_action_type) */ +#define ETH_CLASSIFY_CMD_HEADER_IS_ADD_SHIFT 4 +#define ETH_CLASSIFY_CMD_HEADER_RESERVED0 (0x7<<5) /* BitField cmd_general_data */ +#define ETH_CLASSIFY_CMD_HEADER_RESERVED0_SHIFT 5 + uint8_t func_id /* the function id */; + uint8_t client_id; + uint8_t reserved1; +}; + + +/* + * header for eth classification config ramrod $$KEEP_ENDIANNESS$$ + */ +struct eth_classify_header +{ + uint8_t rule_cnt /* number of rules in classification config ramrod */; + uint8_t reserved0; + uint16_t reserved1; + uint32_t echo /* echo value to be sent to driver on event ring */; +}; + + +/* + * Command for adding/removing a MAC classification rule $$KEEP_ENDIANNESS$$ + */ +struct eth_classify_mac_cmd +{ + struct eth_classify_cmd_header header; + uint16_t reserved0; + uint16_t inner_mac; + uint16_t mac_lsb; + uint16_t mac_mid; + uint16_t mac_msb; + uint16_t reserved1; +}; + + +/* + * Command for adding/removing a MAC-VLAN pair classification rule $$KEEP_ENDIANNESS$$ + */ +struct eth_classify_pair_cmd +{ + struct eth_classify_cmd_header header; + uint16_t reserved0; + uint16_t inner_mac; + uint16_t mac_lsb; + uint16_t mac_mid; + uint16_t mac_msb; + uint16_t vlan; +}; + + +/* + * Command for adding/removing a VLAN classification rule $$KEEP_ENDIANNESS$$ + */ +struct eth_classify_vlan_cmd +{ + struct eth_classify_cmd_header header; + uint32_t reserved0; + uint32_t reserved1; + uint16_t reserved2; + uint16_t vlan; +}; + +/* + * union for eth classification rule $$KEEP_ENDIANNESS$$ + */ +union eth_classify_rule_cmd +{ + struct eth_classify_mac_cmd mac; + struct eth_classify_vlan_cmd vlan; + struct eth_classify_pair_cmd pair; +}; + +/* + * parameters for eth classification configuration ramrod $$KEEP_ENDIANNESS$$ + */ +struct eth_classify_rules_ramrod_data +{ + struct eth_classify_header header; + union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT]; +}; + + +/* + * The data contain client ID need to the ramrod $$KEEP_ENDIANNESS$$ + */ +struct eth_common_ramrod_data +{ + uint32_t client_id /* id of this client. 
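A sketch of filling one MAC classification rule from the bit-fields and enums defined above; the function and client ids are placeholders, and the two-bytes-per-word MAC packing shown here is an assumption to be checked against the driver, not something this header states.

#include <string.h>

static void add_mac_rule(struct eth_classify_mac_cmd *cmd, const uint8_t mac[6],
                         uint8_t func_id, uint8_t cli_id)
{
        memset(cmd, 0, sizeof(*cmd));

        cmd->header.cmd_general_data =
                (1 << ETH_CLASSIFY_CMD_HEADER_RX_CMD_SHIFT) |
                (1 << ETH_CLASSIFY_CMD_HEADER_TX_CMD_SHIFT) |
                (CLASSIFY_RULE_OPCODE_MAC << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT) |
                (CLASSIFY_RULE_ADD << ETH_CLASSIFY_CMD_HEADER_IS_ADD_SHIFT);
        cmd->header.func_id   = func_id;
        cmd->header.client_id = cli_id;

        // Assumed packing: two address bytes per 16-bit word, most significant first.
        cmd->mac_msb = (uint16_t)((mac[0] << 8) | mac[1]);
        cmd->mac_mid = (uint16_t)((mac[2] << 8) | mac[3]);
        cmd->mac_lsb = (uint16_t)((mac[4] << 8) | mac[5]);
}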
(5 bits are used) */; + uint32_t reserved1; +}; + + +/* + * The eth storm context of Ustorm + */ +struct ustorm_eth_st_context +{ + uint32_t reserved0[52]; +}; + +/* + * The eth storm context of Tstorm + */ +struct tstorm_eth_st_context +{ + uint32_t __reserved0[28]; +}; + +/* + * The eth storm context of Xstorm + */ +struct xstorm_eth_st_context +{ + uint32_t reserved0[60]; +}; + +/* + * Ethernet connection context + */ +struct eth_context +{ + struct ustorm_eth_st_context ustorm_st_context /* Ustorm storm context */; + struct tstorm_eth_st_context tstorm_st_context /* Tstorm storm context */; + struct xstorm_eth_ag_context xstorm_ag_context /* Xstorm aggregative context */; + struct tstorm_eth_ag_context tstorm_ag_context /* Tstorm aggregative context */; + struct cstorm_eth_ag_context cstorm_ag_context /* Cstorm aggregative context */; + struct ustorm_eth_ag_context ustorm_ag_context /* Ustorm aggregative context */; + struct timers_block_context timers_context /* Timers block context */; + struct xstorm_eth_st_context xstorm_st_context /* Xstorm storm context */; + struct cstorm_eth_st_context cstorm_st_context /* Cstorm storm context */; +}; + + +/* + * union for sgl and raw data. + */ +union eth_sgl_or_raw_data +{ + uint16_t sgl[8] /* Scatter-gather list of SGEs used by this packet. This list includes the indices of the SGEs. */; + uint32_t raw_data[4] /* raw data from Tstorm to the driver. */; +}; + +/* + * eth FP end aggregation CQE parameters struct $$KEEP_ENDIANNESS$$ + */ +struct eth_end_agg_rx_cqe +{ + uint8_t type_error_flags; +#define ETH_END_AGG_RX_CQE_TYPE (0x3<<0) /* BitField type_error_flags (use enum eth_rx_cqe_type) */ +#define ETH_END_AGG_RX_CQE_TYPE_SHIFT 0 +#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL (0x1<<2) /* BitField type_error_flags (use enum eth_rx_fp_sel) */ +#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL_SHIFT 2 +#define ETH_END_AGG_RX_CQE_RESERVED0 (0x1F<<3) /* BitField type_error_flags */ +#define ETH_END_AGG_RX_CQE_RESERVED0_SHIFT 3 + uint8_t reserved1; + uint8_t queue_index /* The aggregation queue index of this packet */; + uint8_t reserved2; + uint32_t timestamp_delta /* timestamp delta between first packet to last packet in aggregation */; + uint16_t num_of_coalesced_segs /* Num of coalesced segments. */; + uint16_t pkt_len /* Packet length */; + uint8_t pure_ack_count /* Number of pure acks coalesced. */; + uint8_t reserved3; + uint16_t reserved4; + union eth_sgl_or_raw_data sgl_or_raw_data /* union for sgl and raw data. 
*/; + uint32_t reserved5[8]; +}; + + +/* + * regular eth FP CQE parameters struct $$KEEP_ENDIANNESS$$ + */ +struct eth_fast_path_rx_cqe +{ + uint8_t type_error_flags; +#define ETH_FAST_PATH_RX_CQE_TYPE (0x3<<0) /* BitField type_error_flags (use enum eth_rx_cqe_type) */ +#define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x1<<2) /* BitField type_error_flags (use enum eth_rx_fp_sel) */ +#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 2 +#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<3) /* BitField type_error_flags Physical layer errors */ +#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 3 +#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<4) /* BitField type_error_flags IP checksum error */ +#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4 +#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5) /* BitField type_error_flags TCP/UDP checksum error */ +#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5 +#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6) /* BitField type_error_flags */ +#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6 + uint8_t status_flags; +#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0) /* BitField status_flags (use enum eth_rss_hash_type) */ +#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG (0x1<<3) /* BitField status_flags RSS hashing on/off */ +#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG_SHIFT 3 +#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG (0x1<<4) /* BitField status_flags if set to 1, this is a broadcast packet */ +#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG_SHIFT 4 +#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG (0x1<<5) /* BitField status_flags if set to 1, the MAC address was matched in the tstorm CAM search */ +#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG_SHIFT 5 +#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG (0x1<<6) /* BitField status_flags IP checksum validation was not performed (if packet is not IPv4) */ +#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG_SHIFT 6 +#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7) /* BitField status_flags TCP/UDP checksum validation was not performed (if packet is not TCP/UDP or IPv6 extheaders exist) */ +#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7 + uint8_t queue_index /* The aggregation queue index of this packet */; + uint8_t placement_offset /* Placement offset from the start of the BD, in bytes */; + uint32_t rss_hash_result /* RSS toeplitz hash result */; + uint16_t vlan_tag /* Ethernet VLAN tag field */; + uint16_t pkt_len_or_gro_seg_len /* Packet length (for non-TPA CQE) or GRO Segment Length (for TPA in GRO Mode) otherwise 0 */; + uint16_t len_on_bd /* Number of bytes placed on the BD */; + struct parsing_flags pars_flags; + union eth_sgl_or_raw_data sgl_or_raw_data /* union for sgl and raw data. 
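On the receive path a driver mostly needs the type, the error bits and the lengths out of this CQE; a minimal sketch of that inspection follows. The rx_result summary type is invented for the example, and conversion of the little-endian CQE words is omitted.

struct rx_result {                      // illustrative summary, not part of the header
        uint16_t data_len;
        uint32_t rss_hash;
        int      bad_ip_csum;
        int      bad_l4_csum;
};

static int parse_fp_cqe(const struct eth_fast_path_rx_cqe *cqe, struct rx_result *r)
{
        uint8_t type = (cqe->type_error_flags & ETH_FAST_PATH_RX_CQE_TYPE) >>
                        ETH_FAST_PATH_RX_CQE_TYPE_SHIFT;

        if (type != RX_ETH_CQE_TYPE_ETH_FASTPATH)   // enum declared further down
                return -1;

        r->data_len    = cqe->len_on_bd;
        r->rss_hash    = cqe->rss_hash_result;
        r->bad_ip_csum = !!(cqe->type_error_flags & ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG);
        r->bad_l4_csum = !!(cqe->type_error_flags & ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG);
        return 0;
}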
*/; + uint32_t reserved1[8]; +}; + + +/* + * Command for setting classification flags for a client $$KEEP_ENDIANNESS$$ + */ +struct eth_filter_rules_cmd +{ + uint8_t cmd_general_data; +#define ETH_FILTER_RULES_CMD_RX_CMD (0x1<<0) /* BitField cmd_general_data should this cmd be applied for Rx */ +#define ETH_FILTER_RULES_CMD_RX_CMD_SHIFT 0 +#define ETH_FILTER_RULES_CMD_TX_CMD (0x1<<1) /* BitField cmd_general_data should this cmd be applied for Tx */ +#define ETH_FILTER_RULES_CMD_TX_CMD_SHIFT 1 +#define ETH_FILTER_RULES_CMD_RESERVED0 (0x3F<<2) /* BitField cmd_general_data */ +#define ETH_FILTER_RULES_CMD_RESERVED0_SHIFT 2 + uint8_t func_id /* the function id */; + uint8_t client_id /* the client id */; + uint8_t reserved1; + uint16_t state; +#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL (0x1<<0) /* BitField state drop all unicast packets */ +#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL_SHIFT 0 +#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL (0x1<<1) /* BitField state accept all unicast packets (subject to vlan) */ +#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL_SHIFT 1 +#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED (0x1<<2) /* BitField state accept all unmatched unicast packets */ +#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED_SHIFT 2 +#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL (0x1<<3) /* BitField state drop all multicast packets */ +#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL_SHIFT 3 +#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL (0x1<<4) /* BitField state accept all multicast packets (subject to vlan) */ +#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL_SHIFT 4 +#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL (0x1<<5) /* BitField state accept all broadcast packets (subject to vlan) */ +#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL_SHIFT 5 +#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN (0x1<<6) /* BitField state accept packets matched only by MAC (without checking vlan) */ +#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN_SHIFT 6 +#define ETH_FILTER_RULES_CMD_RESERVED2 (0x1FF<<7) /* BitField state */ +#define ETH_FILTER_RULES_CMD_RESERVED2_SHIFT 7 + uint16_t reserved3; + struct regpair reserved4; +}; + + +/* + * parameters for eth classification filters ramrod $$KEEP_ENDIANNESS$$ + */ +struct eth_filter_rules_ramrod_data +{ + struct eth_classify_header header; + struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT]; +}; + + +/* + * parameters for eth classification configuration ramrod $$KEEP_ENDIANNESS$$ + */ +struct eth_general_rules_ramrod_data +{ + struct eth_classify_header header; + union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT]; +}; + + +/* + * The data for Halt ramrod + */ +struct eth_halt_ramrod_data +{ + uint32_t client_id /* id of this client. (5 bits are used) */; + uint32_t reserved0; +}; + + +/* + * destination and source mac address. 
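A sketch of composing a permissive (promiscuous-style) filter command from the state bits defined above; because each value macro is pre-shifted, the masks can simply be OR-ed together. Whether this exact combination is what the PMD programs is not claimed here.

#include <string.h>

static void set_promisc_filter(struct eth_filter_rules_cmd *cmd,
                               uint8_t func_id, uint8_t cli_id)
{
        memset(cmd, 0, sizeof(*cmd));

        cmd->func_id   = func_id;
        cmd->client_id = cli_id;
        cmd->cmd_general_data = ETH_FILTER_RULES_CMD_RX_CMD |
                                ETH_FILTER_RULES_CMD_TX_CMD;

        // Accept all unicast, multicast and broadcast traffic, any VLAN.
        cmd->state = ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL |
                     ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL |
                     ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL |
                     ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
}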
+ */ +struct eth_mac_addresses +{ +#if defined(__BIG_ENDIAN) + uint16_t dst_mid /* destination mac address 16 middle bits */; + uint16_t dst_lo /* destination mac address 16 low bits */; +#elif defined(__LITTLE_ENDIAN) + uint16_t dst_lo /* destination mac address 16 low bits */; + uint16_t dst_mid /* destination mac address 16 middle bits */; +#endif +#if defined(__BIG_ENDIAN) + uint16_t src_lo /* source mac address 16 low bits */; + uint16_t dst_hi /* destination mac address 16 high bits */; +#elif defined(__LITTLE_ENDIAN) + uint16_t dst_hi /* destination mac address 16 high bits */; + uint16_t src_lo /* source mac address 16 low bits */; +#endif +#if defined(__BIG_ENDIAN) + uint16_t src_hi /* source mac address 16 high bits */; + uint16_t src_mid /* source mac address 16 middle bits */; +#elif defined(__LITTLE_ENDIAN) + uint16_t src_mid /* source mac address 16 middle bits */; + uint16_t src_hi /* source mac address 16 high bits */; +#endif +}; + + +/* + * tunneling related data. + */ +struct eth_tunnel_data +{ +#if defined(__BIG_ENDIAN) + uint16_t dst_mid /* destination mac address 16 middle bits */; + uint16_t dst_lo /* destination mac address 16 low bits */; +#elif defined(__LITTLE_ENDIAN) + uint16_t dst_lo /* destination mac address 16 low bits */; + uint16_t dst_mid /* destination mac address 16 middle bits */; +#endif +#if defined(__BIG_ENDIAN) + uint16_t fw_ip_hdr_csum /* Fw Ip header checksum (with ALL ip header fields) for the outer IP header */; + uint16_t dst_hi /* destination mac address 16 high bits */; +#elif defined(__LITTLE_ENDIAN) + uint16_t dst_hi /* destination mac address 16 high bits */; + uint16_t fw_ip_hdr_csum /* Fw Ip header checksum (with ALL ip header fields) for the outer IP header */; +#endif +#if defined(__BIG_ENDIAN) + uint8_t flags; +#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0) /* BitField flags Set in case outer IP header is ipV6 */ +#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0 +#define ETH_TUNNEL_DATA_RESERVED (0x7F<<1) /* BitField flags Should be set with 0 */ +#define ETH_TUNNEL_DATA_RESERVED_SHIFT 1 + uint8_t ip_hdr_start_inner_w /* Inner IP header offset in WORDs (16-bit) from start of packet */; + uint16_t pseudo_csum /* Pseudo checksum with length field=0 */; +#elif defined(__LITTLE_ENDIAN) + uint16_t pseudo_csum /* Pseudo checksum with length field=0 */; + uint8_t ip_hdr_start_inner_w /* Inner IP header offset in WORDs (16-bit) from start of packet */; + uint8_t flags; +#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0) /* BitField flags Set in case outer IP header is ipV6 */ +#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0 +#define ETH_TUNNEL_DATA_RESERVED (0x7F<<1) /* BitField flags Should be set with 0 */ +#define ETH_TUNNEL_DATA_RESERVED_SHIFT 1 +#endif +}; + +/* + * union for mac addresses and for tunneling data. considered as tunneling data only if (tunnel_exist == 1). + */ +union eth_mac_addr_or_tunnel_data +{ + struct eth_mac_addresses mac_addr /* destination and source mac addresses. */; + struct eth_tunnel_data tunnel_data /* tunneling related data. 
*/; +}; + + +/* + * Command for setting multicast classification for a client $$KEEP_ENDIANNESS$$ + */ +struct eth_multicast_rules_cmd +{ + uint8_t cmd_general_data; +#define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0) /* BitField cmd_general_data should this cmd be applied for Rx */ +#define ETH_MULTICAST_RULES_CMD_RX_CMD_SHIFT 0 +#define ETH_MULTICAST_RULES_CMD_TX_CMD (0x1<<1) /* BitField cmd_general_data should this cmd be applied for Tx */ +#define ETH_MULTICAST_RULES_CMD_TX_CMD_SHIFT 1 +#define ETH_MULTICAST_RULES_CMD_IS_ADD (0x1<<2) /* BitField cmd_general_data 1 for add rule, 0 for remove rule */ +#define ETH_MULTICAST_RULES_CMD_IS_ADD_SHIFT 2 +#define ETH_MULTICAST_RULES_CMD_RESERVED0 (0x1F<<3) /* BitField cmd_general_data */ +#define ETH_MULTICAST_RULES_CMD_RESERVED0_SHIFT 3 + uint8_t func_id /* the function id */; + uint8_t bin_id /* the bin to add this function to (0-255) */; + uint8_t engine_id /* the approximate multicast engine id */; + uint32_t reserved2; + struct regpair reserved3; +}; + + +/* + * parameters for multicast classification ramrod $$KEEP_ENDIANNESS$$ + */ +struct eth_multicast_rules_ramrod_data +{ + struct eth_classify_header header; + struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT]; +}; + + +/* + * Place holder for ramrods protocol specific data + */ +struct ramrod_data +{ + uint32_t data_lo; + uint32_t data_hi; +}; + +/* + * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits) + */ +union eth_ramrod_data +{ + struct ramrod_data general; +}; + + +/* + * RSS toeplitz hash type, as reported in CQE + */ +enum eth_rss_hash_type +{ + DEFAULT_HASH_TYPE, + IPV4_HASH_TYPE, + TCP_IPV4_HASH_TYPE, + IPV6_HASH_TYPE, + TCP_IPV6_HASH_TYPE, + VLAN_PRI_HASH_TYPE, + E1HOV_PRI_HASH_TYPE, + DSCP_HASH_TYPE, + MAX_ETH_RSS_HASH_TYPE}; + + +/* + * Ethernet RSS mode + */ +enum eth_rss_mode +{ + ETH_RSS_MODE_DISABLED, + ETH_RSS_MODE_ESX51 /* RSS mode for Vmware ESX 5.1 (Only do RSS if packet is UDP with dst port that matches the UDP 4-tuble Destination Port mask and value) */, + ETH_RSS_MODE_REGULAR /* Regular (ndis-like) RSS */, + ETH_RSS_MODE_VLAN_PRI /* RSS based on inner-vlan priority field */, + ETH_RSS_MODE_E1HOV_PRI /* RSS based on outer-vlan priority field */, + ETH_RSS_MODE_IP_DSCP /* RSS based on IPv4 DSCP field */, + MAX_ETH_RSS_MODE}; + + +/* + * parameters for RSS update ramrod (E2) $$KEEP_ENDIANNESS$$ + */ +struct eth_rss_update_ramrod_data +{ + uint8_t rss_engine_id; + uint8_t capabilities; +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0) /* BitField capabilitiesFunction RSS capabilities configuration of the IpV4 2-tupple capability */ +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1) /* BitField capabilitiesFunction RSS capabilities configuration of the IpV4 4-tupple capability for TCP */ +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2) /* BitField capabilitiesFunction RSS capabilities configuration of the IpV4 4-tupple capability for UDP */ +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<3) /* BitField capabilitiesFunction RSS capabilities configuration of the IpV6 2-tupple capability */ +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 3 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<4) /* BitField capabilitiesFunction RSS capabilities configuration of the IpV6 4-tupple 
capability for TCP */ +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5) /* BitField capabilitiesFunction RSS capabilities configuration of the IpV6 4-tupple capability for UDP */ +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5 +#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<6) /* BitField capabilitiesFunction RSS capabilities configuration of the 5-tupple capability */ +#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 6 +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7) /* BitField capabilitiesFunction RSS capabilities if set update the rss keys */ +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7 + uint8_t rss_result_mask /* The mask for the lower byte of RSS result - defines which section of the indirection table will be used. To enable all table put here 0x7F */; + uint8_t rss_mode /* The RSS mode for this function */; + uint16_t udp_4tuple_dst_port_mask /* If UDP 4-tuple enabled, packets that match the mask and value are 4-tupled, the rest are 2-tupled. (Set to 0 to match all) */; + uint16_t udp_4tuple_dst_port_value /* If UDP 4-tuple enabled, packets that match the mask and value are 4-tupled, the rest are 2-tupled. (Set to 0 to match all) */; + uint8_t indirection_table[T_ETH_INDIRECTION_TABLE_SIZE] /* RSS indirection table */; + uint32_t rss_key[T_ETH_RSS_KEY] /* RSS key supplied as by OS */; + uint32_t echo; + uint32_t reserved3; +}; + + +/* + * The eth Rx Buffer Descriptor + */ +struct eth_rx_bd +{ + uint32_t addr_lo /* Single continuous buffer low pointer */; + uint32_t addr_hi /* Single continuous buffer high pointer */; +}; + + +/* + * Eth Rx Cqe structure- general structure for ramrods $$KEEP_ENDIANNESS$$ + */ +struct common_ramrod_eth_rx_cqe +{ + uint8_t ramrod_type; +#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x3<<0) /* BitField ramrod_type (use enum eth_rx_cqe_type) */ +#define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0 +#define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<2) /* BitField ramrod_type */ +#define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 2 +#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x1F<<3) /* BitField ramrod_type */ +#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 3 + uint8_t conn_type /* only 3 bits are used */; + uint16_t reserved1 /* protocol specific data */; + uint32_t conn_and_cmd_data; +#define COMMON_RAMROD_ETH_RX_CQE_CID (0xFFFFFF<<0) /* BitField conn_and_cmd_data */ +#define COMMON_RAMROD_ETH_RX_CQE_CID_SHIFT 0 +#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24) /* BitField conn_and_cmd_data command id of the ramrod- use RamrodCommandIdEnum */ +#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24 + struct ramrod_data protocol_data /* protocol specific data */; + uint32_t echo; + uint32_t reserved2[11]; +}; + +/* + * Rx Last CQE in page (in ETH) + */ +struct eth_rx_cqe_next_page +{ + uint32_t addr_lo /* Next page low pointer */; + uint32_t addr_hi /* Next page high pointer */; + uint32_t reserved[14]; +}; + +/* + * union for all eth rx cqe types (fix their sizes) + */ +union eth_rx_cqe +{ + struct eth_fast_path_rx_cqe fast_path_cqe; + struct common_ramrod_eth_rx_cqe ramrod_cqe; + struct eth_rx_cqe_next_page next_page_cqe; + struct eth_end_agg_rx_cqe end_agg_cqe; +}; + + +/* + * Values for RX ETH CQE type field + */ +enum eth_rx_cqe_type +{ + RX_ETH_CQE_TYPE_ETH_FASTPATH /* Fast path CQE */, + RX_ETH_CQE_TYPE_ETH_RAMROD /* Slow path CQE */, + RX_ETH_CQE_TYPE_ETH_START_AGG /* Fast path CQE */, + RX_ETH_CQE_TYPE_ETH_STOP_AGG /* Slow 
path CQE */, + MAX_ETH_RX_CQE_TYPE}; + + +/* + * Type of SGL/Raw field in ETH RX fast path CQE + */ +enum eth_rx_fp_sel +{ + ETH_FP_CQE_REGULAR /* Regular CQE- no extra data */, + ETH_FP_CQE_RAW /* Extra data is raw data- iscsi OOO */, + MAX_ETH_RX_FP_SEL}; + + +/* + * The eth Rx SGE Descriptor + */ +struct eth_rx_sge +{ + uint32_t addr_lo /* Single continuous buffer low pointer */; + uint32_t addr_hi /* Single continuous buffer high pointer */; +}; + + +/* + * common data for all protocols $$KEEP_ENDIANNESS$$ + */ +struct spe_hdr +{ + uint32_t conn_and_cmd_data; +#define SPE_HDR_CID (0xFFFFFF<<0) /* BitField conn_and_cmd_data */ +#define SPE_HDR_CID_SHIFT 0 +#define SPE_HDR_CMD_ID (0xFF<<24) /* BitField conn_and_cmd_data command id of the ramrod- use enum common_spqe_cmd_id/eth_spqe_cmd_id/toe_spqe_cmd_id */ +#define SPE_HDR_CMD_ID_SHIFT 24 + uint16_t type; +#define SPE_HDR_CONN_TYPE (0xFF<<0) /* BitField type connection type. (3 bits are used) (use enum connection_type) */ +#define SPE_HDR_CONN_TYPE_SHIFT 0 +#define SPE_HDR_FUNCTION_ID (0xFF<<8) /* BitField type */ +#define SPE_HDR_FUNCTION_ID_SHIFT 8 + uint16_t reserved1; +}; + +/* + * specific data for ethernet slow path element + */ +union eth_specific_data +{ + uint8_t protocol_data[8] /* to fix this structure size to 8 bytes */; + struct regpair client_update_ramrod_data /* The address of the data for client update ramrod */; + struct regpair client_init_ramrod_init_data /* The data for client setup ramrod */; + struct eth_halt_ramrod_data halt_ramrod_data /* Includes the client id to be deleted */; + struct regpair update_data_addr /* physical address of the eth_rss_update_ramrod_data struct, as allocated by the driver */; + struct eth_common_ramrod_data common_ramrod_data /* The data contain client ID need to the ramrod */; + struct regpair classify_cfg_addr /* physical address of the eth_classify_rules_ramrod_data struct, as allocated by the driver */; + struct regpair filter_cfg_addr /* physical address of the eth_filter_cfg_ramrod_data struct, as allocated by the driver */; + struct regpair mcast_cfg_addr /* physical address of the eth_mcast_cfg_ramrod_data struct, as allocated by the driver */; +}; + +/* + * Ethernet slow path element + */ +struct eth_spe +{ + struct spe_hdr hdr /* common data for all protocols */; + union eth_specific_data data /* data specific to ethernet protocol */; +}; + + +/* + * Ethernet command ID for slow path elements + */ +enum eth_spqe_cmd_id +{ + RAMROD_CMD_ID_ETH_UNUSED, + RAMROD_CMD_ID_ETH_CLIENT_SETUP /* Setup a new L2 client */, + RAMROD_CMD_ID_ETH_HALT /* Halt an L2 client */, + RAMROD_CMD_ID_ETH_FORWARD_SETUP /* Setup a new FW channel */, + RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP /* Setup a new Tx only queue */, + RAMROD_CMD_ID_ETH_CLIENT_UPDATE /* Update an L2 client configuration */, + RAMROD_CMD_ID_ETH_EMPTY /* Empty ramrod - used to synchronize iSCSI OOO */, + RAMROD_CMD_ID_ETH_TERMINATE /* Terminate an L2 client */, + RAMROD_CMD_ID_ETH_TPA_UPDATE /* update the tpa roles in L2 client */, + RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES /* Add/remove classification filters for L2 client (in E2/E3 only) */, + RAMROD_CMD_ID_ETH_FILTER_RULES /* Add/remove classification filters for L2 client (in E2/E3 only) */, + RAMROD_CMD_ID_ETH_MULTICAST_RULES /* Add/remove multicast classification bin (in E2/E3 only) */, + RAMROD_CMD_ID_ETH_RSS_UPDATE /* Update RSS configuration */, + RAMROD_CMD_ID_ETH_SET_MAC /* Update RSS configuration */, + MAX_ETH_SPQE_CMD_ID}; + + +/* + * eth tpa update command + */ +enum 
eth_tpa_update_command +{ + TPA_UPDATE_NONE_COMMAND /* nop command */, + TPA_UPDATE_ENABLE_COMMAND /* enable command */, + TPA_UPDATE_DISABLE_COMMAND /* disable command */, + MAX_ETH_TPA_UPDATE_COMMAND}; + + +/* + * In case of LSO over IPv4 tunnel, whether to increment IP ID on external IP header or internal IP header + */ +enum eth_tunnel_lso_inc_ip_id +{ + EXT_HEADER /* Increment IP ID of external header (HW works on external, FW works on internal */, + INT_HEADER /* Increment IP ID of internal header (HW works on internal, FW works on external */, + MAX_ETH_TUNNEL_LSO_INC_IP_ID}; + + +/* + * In case tunnel exist and L4 checksum offload (or outer ip header checksum), the pseudo checksum location, on packet or on BD. + */ +enum eth_tunnel_non_lso_csum_location +{ + CSUM_ON_PKT /* checksum is on the packet. */, + CSUM_ON_BD /* checksum is on the BD. */, + MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION}; + + +/* + * Tx regular BD structure $$KEEP_ENDIANNESS$$ + */ +struct eth_tx_bd +{ + uint32_t addr_lo /* Single continuous buffer low pointer */; + uint32_t addr_hi /* Single continuous buffer high pointer */; + uint16_t total_pkt_bytes /* Size of the entire packet, valid for non-LSO packets */; + uint16_t nbytes /* Size of the data represented by the BD */; + uint8_t reserved[4] /* keeps same size as other eth tx bd types */; +}; + + +/* + * structure for easy accessibility to assembler + */ +struct eth_tx_bd_flags +{ + uint8_t as_bitfield; +#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<0) /* BitField as_bitfield IP CKSUM flag,Relevant in START */ +#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0 +#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<1) /* BitField as_bitfield L4 CKSUM flag,Relevant in START */ +#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1 +#define ETH_TX_BD_FLAGS_VLAN_MODE (0x3<<2) /* BitField as_bitfield 00 - no vlan; 01 - inband Vlan; 10 outband Vlan (use enum eth_tx_vlan_type) */ +#define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2 +#define ETH_TX_BD_FLAGS_START_BD (0x1<<4) /* BitField as_bitfield Start of packet BD */ +#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4 +#define ETH_TX_BD_FLAGS_IS_UDP (0x1<<5) /* BitField as_bitfield flag that indicates that the current packet is a udp packet */ +#define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5 +#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6) /* BitField as_bitfield LSO flag, Relevant in START */ +#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6 +#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7) /* BitField as_bitfield set in case ipV6 packet, Relevant in START */ +#define ETH_TX_BD_FLAGS_IPV6_SHIFT 7 +}; + +/* + * The eth Tx Buffer Descriptor $$KEEP_ENDIANNESS$$ + */ +struct eth_tx_start_bd +{ + uint64_t addr; + uint16_t nbd /* Num of BDs in packet: include parsInfoBD, Relevant in START(only in Everest) */; + uint16_t nbytes /* Size of the data represented by the BD */; + uint16_t vlan_or_ethertype /* Vlan structure: vlan_id is in lsb, then cfi and then priority vlan_id 12 bits (lsb), cfi 1 bit, priority 3 bits. 
In E2, this field should be set with etherType for VFs with no vlan */; + struct eth_tx_bd_flags bd_flags; + uint8_t general_data; +#define ETH_TX_START_BD_HDR_NBDS (0xF<<0) /* BitField general_data contains the number of BDs that contain Ethernet/IP/TCP headers, for full/partial LSO modes */ +#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0 +#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4) /* BitField general_data force vlan mode according to bds (vlan mode can change accroding to global configuration) */ +#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4 +#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5) /* BitField general_data Determines the number of parsing BDs in packet. Number of parsing BDs in packet is (parse_nbds+1). */ +#define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5 +#define ETH_TX_START_BD_TUNNEL_EXIST (0x1<<7) /* BitField general_data set in case of tunneling encapsulated packet */ +#define ETH_TX_START_BD_TUNNEL_EXIST_SHIFT 7 +}; + +/* + * Tx parsing BD structure for ETH E1h $$KEEP_ENDIANNESS$$ + */ +struct eth_tx_parse_bd_e1x +{ + uint16_t global_data; +#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0) /* BitField global_data IP header Offset in WORDs from start of packet */ +#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE (0x3<<4) /* BitField global_data marks ethernet address type (use enum eth_addr_type) */ +#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT 4 +#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<6) /* BitField global_data */ +#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 6 +#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<7) /* BitField global_data */ +#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 7 +#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<8) /* BitField global_data an optional addition to ECN that protects against accidental or malicious concealment of marked packets from the TCP sender. 
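A sketch of filling the start BD for a simple single-buffer, non-LSO packet with L4 checksum offload; the DMA address, frame length and BD count are caller-supplied, the little-endian conversions are omitted, and this does not claim to reproduce the PMD's actual transmit routine.

static void fill_tx_start_bd(struct eth_tx_start_bd *bd, uint64_t dma_addr,
                             uint16_t frame_len, uint16_t vlan, uint8_t total_nbds)
{
        bd->addr    = dma_addr;                 // endianness conversion omitted
        bd->nbytes  = frame_len;
        bd->nbd     = total_nbds;               // start BD + parsing BD(s) + data BDs
        bd->vlan_or_ethertype = vlan;

        bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD |
                                   ETH_TX_BD_FLAGS_L4_CSUM;

        // One header BD in this simple case; HDR_NBDS sits at bit offset 0.
        bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
}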
*/ +#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 8 +#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x7F<<9) /* BitField global_data reserved bit, should be set with 0 */ +#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 9 + uint8_t tcp_flags; +#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0) /* BitField tcp_flagsState flags End of data flag */ +#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0 +#define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1<<1) /* BitField tcp_flagsState flags Synchronize sequence numbers flag */ +#define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1 +#define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1<<2) /* BitField tcp_flagsState flags Reset connection flag */ +#define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2 +#define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1<<3) /* BitField tcp_flagsState flags Push flag */ +#define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3 +#define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1<<4) /* BitField tcp_flagsState flags Acknowledgment number valid flag */ +#define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4 +#define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1<<5) /* BitField tcp_flagsState flags Urgent pointer valid flag */ +#define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5 +#define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1<<6) /* BitField tcp_flagsState flags ECN-Echo */ +#define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6 +#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7) /* BitField tcp_flagsState flags Congestion Window Reduced */ +#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7 + uint8_t ip_hlen_w /* IP header length in WORDs */; + uint16_t total_hlen_w /* IP+TCP+ETH */; + uint16_t tcp_pseudo_csum /* Checksum of pseudo header with length field=0 */; + uint16_t lso_mss /* for LSO mode */; + uint16_t ip_id /* for LSO mode */; + uint32_t tcp_send_seq /* for LSO mode */; +}; + +/* + * Tx parsing BD structure for ETH E2 $$KEEP_ENDIANNESS$$ + */ +struct eth_tx_parse_bd_e2 +{ + union eth_mac_addr_or_tunnel_data data /* union for mac addresses and for tunneling data. considered as tunneling data only if (tunnel_exist == 1). */; + uint32_t parsing_data; +#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W (0x7FF<<0) /* BitField parsing_data TCP/UDP header Offset in WORDs from start of packet */ +#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<11) /* BitField parsing_data TCP header size in DOUBLE WORDS */ +#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11 +#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<15) /* BitField parsing_data a flag to indicate an ipv6 packet with extension headers. 
If set on LSO packet, pseudo CS should be placed in TCP CS field without length field */ +#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 15 +#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<16) /* BitField parsing_data for LSO mode */ +#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 16 +#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE (0x3<<30) /* BitField parsing_data marks ethernet address type (use enum eth_addr_type) */ +#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT 30 +}; + +/* + * Tx 2nd parsing BD structure for ETH packet $$KEEP_ENDIANNESS$$ + */ +struct eth_tx_parse_2nd_bd +{ + uint16_t global_data; +#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0) /* BitField global_data Outer IP header offset in WORDs (16-bit) from start of packet */ +#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0 +#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x1<<4) /* BitField global_data should be set with 0 */ +#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 4 +#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5) /* BitField global_data */ +#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5 +#define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6) /* BitField global_data an optional addition to ECN that protects against accidental or malicious concealment of marked packets from the TCP sender. */ +#define ETH_TX_PARSE_2ND_BD_NS_FLG_SHIFT 6 +#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST (0x1<<7) /* BitField global_data Set in case UDP header exists in tunnel outer hedears. */ +#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7 +#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8) /* BitField global_data Outer IP header length in WORDs (16-bit). Valid only for IpV4. */ +#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8 +#define ETH_TX_PARSE_2ND_BD_RESERVED1 (0x7<<13) /* BitField global_data should be set with 0 */ +#define ETH_TX_PARSE_2ND_BD_RESERVED1_SHIFT 13 + uint16_t reserved2; + uint8_t tcp_flags; +#define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0) /* BitField tcp_flagsState flags End of data flag */ +#define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0 +#define ETH_TX_PARSE_2ND_BD_SYN_FLG (0x1<<1) /* BitField tcp_flagsState flags Synchronize sequence numbers flag */ +#define ETH_TX_PARSE_2ND_BD_SYN_FLG_SHIFT 1 +#define ETH_TX_PARSE_2ND_BD_RST_FLG (0x1<<2) /* BitField tcp_flagsState flags Reset connection flag */ +#define ETH_TX_PARSE_2ND_BD_RST_FLG_SHIFT 2 +#define ETH_TX_PARSE_2ND_BD_PSH_FLG (0x1<<3) /* BitField tcp_flagsState flags Push flag */ +#define ETH_TX_PARSE_2ND_BD_PSH_FLG_SHIFT 3 +#define ETH_TX_PARSE_2ND_BD_ACK_FLG (0x1<<4) /* BitField tcp_flagsState flags Acknowledgment number valid flag */ +#define ETH_TX_PARSE_2ND_BD_ACK_FLG_SHIFT 4 +#define ETH_TX_PARSE_2ND_BD_URG_FLG (0x1<<5) /* BitField tcp_flagsState flags Urgent pointer valid flag */ +#define ETH_TX_PARSE_2ND_BD_URG_FLG_SHIFT 5 +#define ETH_TX_PARSE_2ND_BD_ECE_FLG (0x1<<6) /* BitField tcp_flagsState flags ECN-Echo */ +#define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6 +#define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7) /* BitField tcp_flagsState flags Congestion Window Reduced */ +#define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7 + uint8_t reserved3; + uint8_t tunnel_udp_hdr_start_w /* Offset (in WORDs) from start of packet to tunnel UDP header. (if exist) */; + uint8_t fw_ip_hdr_to_payload_w /* In IpV4, the length (in WORDs) from the FW IpV4 header start to the payload start. In IpV6, the length (in WORDs) from the FW IpV6 header end to the payload start. However, if extension headers are included, their length is counted here as well. 
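A sketch of packing the E2 parsing word for a plain, non-tunnelled, non-LSO TCP frame; the header offsets are converted to the 16-bit-word and 32-bit-dword units the field comments ask for, and the caller-supplied byte counts are assumptions of the example.

#include <string.h>

static void fill_parse_bd_e2(struct eth_tx_parse_bd_e2 *pbd,
                             uint16_t l4_offset_bytes, uint8_t tcp_hdr_bytes)
{
        memset(pbd, 0, sizeof(*pbd));

        pbd->parsing_data =
                (((uint32_t)(l4_offset_bytes / 2) <<
                  ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W) |
                (((uint32_t)(tcp_hdr_bytes / 4) <<
                  ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
                 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW) |
                ((uint32_t)UNICAST_ADDRESS << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
}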
*/; + uint16_t fw_ip_csum_wo_len_flags_frag /* For the IP header which is set by the FW, the IP checksum without length, flags and fragment offset. */; + uint16_t hw_ip_id /* The IP ID to be set by HW for LSO packets in tunnel mode. */; + uint32_t tcp_send_seq /* The TCP sequence number for LSO packets. */; +}; + +/* + * The last BD in the BD memory will hold a pointer to the next BD memory + */ +struct eth_tx_next_bd +{ + uint32_t addr_lo /* Single continuous buffer low pointer */; + uint32_t addr_hi /* Single continuous buffer high pointer */; + uint8_t reserved[8] /* keeps same size as other eth tx bd types */; +}; + +/* + * union for 4 Bd types + */ +union eth_tx_bd_types +{ + struct eth_tx_start_bd start_bd /* the first bd in a packets */; + struct eth_tx_bd reg_bd /* the common bd */; + struct eth_tx_parse_bd_e1x parse_bd_e1x /* parsing info BD for e1/e1h */; + struct eth_tx_parse_bd_e2 parse_bd_e2 /* parsing info BD for e2 */; + struct eth_tx_parse_2nd_bd parse_2nd_bd /* 2nd parsing info BD */; + struct eth_tx_next_bd next_bd /* Bd that contains the address of the next page */; +}; + +/* + * array of 13 bds as appears in the eth xstorm context + */ +struct eth_tx_bds_array +{ + union eth_tx_bd_types bds[13]; +}; + + +/* + * VLAN mode on TX BDs + */ +enum eth_tx_vlan_type +{ + X_ETH_NO_VLAN, + X_ETH_OUTBAND_VLAN, + X_ETH_INBAND_VLAN, + X_ETH_FW_ADDED_VLAN /* Driver should not use this! */, + MAX_ETH_TX_VLAN_TYPE}; + + +/* + * Ethernet VLAN filtering mode in E1x + */ +enum eth_vlan_filter_mode +{ + ETH_VLAN_FILTER_ANY_VLAN /* Dont filter by vlan */, + ETH_VLAN_FILTER_SPECIFIC_VLAN /* Only the vlan_id is allowed */, + ETH_VLAN_FILTER_CLASSIFY /* Vlan will be added to CAM for classification */, + MAX_ETH_VLAN_FILTER_MODE}; + + +/* + * MAC filtering configuration command header $$KEEP_ENDIANNESS$$ + */ +struct mac_configuration_hdr +{ + uint8_t length /* number of entries valid in this command (6 bits) */; + uint8_t offset /* offset of the first entry in the list */; + uint16_t client_id /* the client id which this ramrod is sent on. 5b is used. */; + uint32_t echo /* echo value to be sent to driver on event ring */; +}; + +/* + * MAC address in list for ramrod $$KEEP_ENDIANNESS$$ + */ +struct mac_configuration_entry +{ + uint16_t lsb_mac_addr /* 2 LSB of MAC address (should be given in big endien - driver should do hton to this number!!!) */; + uint16_t middle_mac_addr /* 2 middle bytes of MAC address (should be given in big endien - driver should do hton to this number!!!) */; + uint16_t msb_mac_addr /* 2 MSB of MAC address (should be given in big endien - driver should do hton to this number!!!) */; + uint16_t vlan_id /* The inner vlan id (12b). 
Used either in vlan_in_cam for mac_valn pair or for vlan filtering */; + uint8_t pf_id /* The pf id, for multi function mode */; + uint8_t flags; +#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE (0x1<<0) /* BitField flags configures the action to be done in cam (used only is slow path handlers) (use enum set_mac_action_type) */ +#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE_SHIFT 0 +#define MAC_CONFIGURATION_ENTRY_RDMA_MAC (0x1<<1) /* BitField flags If set, this MAC also belongs to RDMA client */ +#define MAC_CONFIGURATION_ENTRY_RDMA_MAC_SHIFT 1 +#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE (0x3<<2) /* BitField flags (use enum eth_vlan_filter_mode) */ +#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE_SHIFT 2 +#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<4) /* BitField flags BitField flags 0 - cant remove vlan 1 - can remove vlan. relevant only to everest1 */ +#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 4 +#define MAC_CONFIGURATION_ENTRY_BROADCAST (0x1<<5) /* BitField flags BitField flags 0 - not broadcast 1 - broadcast. relevant only to everest1 */ +#define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5 +#define MAC_CONFIGURATION_ENTRY_RESERVED1 (0x3<<6) /* BitField flags */ +#define MAC_CONFIGURATION_ENTRY_RESERVED1_SHIFT 6 + uint16_t reserved0; + uint32_t clients_bit_vector /* Bit vector for the clients which should receive this MAC. */; +}; + +/* + * MAC filtering configuration command + */ +struct mac_configuration_cmd +{ + struct mac_configuration_hdr hdr /* header */; + struct mac_configuration_entry config_table[64] /* table of 64 MAC configuration entries: addresses and target table entries */; +}; + + +/* + * Set-MAC command type (in E1x) + */ +enum set_mac_action_type +{ + T_ETH_MAC_COMMAND_INVALIDATE, + T_ETH_MAC_COMMAND_SET, + MAX_SET_MAC_ACTION_TYPE}; + + +/* + * Ethernet TPA Modes + */ +enum tpa_mode +{ + TPA_LRO /* LRO mode TPA */, + TPA_GRO /* GRO mode TPA */, + MAX_TPA_MODE}; + + +/* + * tpa update ramrod data $$KEEP_ENDIANNESS$$ + */ +struct tpa_update_ramrod_data +{ + uint8_t update_ipv4 /* none, enable or disable */; + uint8_t update_ipv6 /* none, enable or disable */; + uint8_t client_id /* client init flow control data */; + uint8_t max_tpa_queues /* maximal TPA queues allowed for this client */; + uint8_t max_sges_for_packet /* The maximal number of SGEs that can be used for one packet. depends on MTU and SGE size. must be 0 if SGEs are disabled */; + uint8_t complete_on_both_clients /* If set and the client has different sp_client, completion will be sent to both rings */; + uint8_t dont_verify_rings_pause_thr_flg /* If set, the rings pause thresholds will not be verified by firmware. 
*/; + uint8_t tpa_mode /* TPA mode to use (LRO or GRO) */; + uint16_t sge_buff_size /* Size of the buffers pointed by SGEs */; + uint16_t max_agg_size /* maximal size for the aggregated TPA packets, reprted by the host */; + uint32_t sge_page_base_lo /* The address to fetch the next sges from (low) */; + uint32_t sge_page_base_hi /* The address to fetch the next sges from (high) */; + uint16_t sge_pause_thr_low /* number of remaining sges under which, we send pause message */; + uint16_t sge_pause_thr_high /* number of remaining sges above which, we send un-pause message */; +}; + + +/* + * approximate-match multicast filtering for E1H per function in Tstorm + */ +struct tstorm_eth_approximate_match_multicast_filtering +{ + uint32_t mcast_add_hash_bit_array[8] /* Bit array for multicast hash filtering.Each bit supports a hash function result if to accept this multicast dst address. */; +}; + + +/* + * Common configuration parameters per function in Tstorm $$KEEP_ENDIANNESS$$ + */ +struct tstorm_eth_function_common_config +{ + uint16_t config_flags; +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) /* BitField config_flagsGeneral configuration flags configuration of the port RSS IpV4 2-tupple capability */ +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0 +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1) /* BitField config_flagsGeneral configuration flags configuration of the port RSS IpV4 4-tupple capability */ +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1 +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2) /* BitField config_flagsGeneral configuration flags configuration of the port RSS IpV4 2-tupple capability */ +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2 +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3) /* BitField config_flagsGeneral configuration flags configuration of the port RSS IpV6 4-tupple capability */ +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3 +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4) /* BitField config_flagsGeneral configuration flags RSS mode of operation (use enum eth_rss_mode) */ +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4 +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<7) /* BitField config_flagsGeneral configuration flags 0 - Dont filter by vlan, 1 - Filter according to the vlans specificied in mac_filter_config */ +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 7 +#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0xFF<<8) /* BitField config_flagsGeneral configuration flags */ +#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 8 + uint8_t rss_result_mask /* The mask for the lower byte of RSS result - defines which section of the indirection table will be used. To enable all table put here 0x7F */; + uint8_t reserved1; + uint16_t vlan_id[2] /* VLANs of this function. VLAN filtering is determine according to vlan_filtering_enable. 
*/; +}; + + +/* + * MAC filtering configuration parameters per port in Tstorm $$KEEP_ENDIANNESS$$ + */ +struct tstorm_eth_mac_filter_config +{ + uint32_t ucast_drop_all /* bit vector in which the clients which drop all unicast packets are set */; + uint32_t ucast_accept_all /* bit vector in which clients that accept all unicast packets are set */; + uint32_t mcast_drop_all /* bit vector in which the clients which drop all multicast packets are set */; + uint32_t mcast_accept_all /* bit vector in which clients that accept all multicast packets are set */; + uint32_t bcast_accept_all /* bit vector in which clients that accept all broadcast packets are set */; + uint32_t vlan_filter[2] /* bit vector for VLAN filtering. Clients which enforce filtering of vlan[x] should be marked in vlan_filter[x]. The primary vlan is taken from the CAM target table. */; + uint32_t unmatched_unicast /* bit vector in which clients that accept unmatched unicast packets are set */; +}; + + +/* + * tx only queue init ramrod data $$KEEP_ENDIANNESS$$ + */ +struct tx_queue_init_ramrod_data +{ + struct client_init_general_data general /* client init general data */; + struct client_init_tx_data tx /* client init tx data */; +}; + + +/* + * Three RX producers for ETH + */ +union ustorm_eth_rx_producers +{ + struct { +#if defined(__BIG_ENDIAN) + uint16_t bd_prod /* Producer of the RX BD ring */; + uint16_t cqe_prod /* Producer of the RX CQE ring */; +#elif defined(__LITTLE_ENDIAN) + uint16_t cqe_prod /* Producer of the RX CQE ring */; + uint16_t bd_prod /* Producer of the RX BD ring */; +#endif +#if defined(__BIG_ENDIAN) + uint16_t reserved; + uint16_t sge_prod /* Producer of the RX SGE ring */; +#elif defined(__LITTLE_ENDIAN) + uint16_t sge_prod /* Producer of the RX SGE ring */; + uint16_t reserved; +#endif + } prod; + uint32_t raw_data[2]; +}; + + +/* + * The data afex vif list ramrod need $$KEEP_ENDIANNESS$$ + */ +struct afex_vif_list_ramrod_data +{ + uint8_t afex_vif_list_command /* set get, clear all a VIF list id defined by enum vif_list_rule_kind */; + uint8_t func_bit_map /* the function bit map to set */; + uint16_t vif_list_index /* the VIF list, in a per pf vector to add this function to */; + uint8_t func_to_clear /* the func id to clear in case of clear func mode */; + uint8_t echo; + uint16_t reserved1; +}; + + +/* + * cfc delete event data $$KEEP_ENDIANNESS$$ + */ +struct cfc_del_event_data +{ + uint32_t cid /* cid of deleted connection */; + uint32_t reserved0; + uint32_t reserved1; +}; + + +/* + * per-port SAFC demo variables + */ +struct cmng_flags_per_port +{ + uint32_t cmng_enables; +#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN (0x1<<0) /* BitField cmng_enablesenables flag for fairness and rate shaping between protocols, vnics and COSes if set, enable fairness between vnics */ +#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN_SHIFT 0 +#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN (0x1<<1) /* BitField cmng_enablesenables flag for fairness and rate shaping between protocols, vnics and COSes if set, enable rate shaping between vnics */ +#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN_SHIFT 1 +#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<2) /* BitField cmng_enablesenables flag for fairness and rate shaping between protocols, vnics and COSes if set, enable fairness between COSes */ +#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 2 +#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<3) /* BitField cmng_enablesenables flag for fairness and rate shaping between protocols, vnics and COSes (use enum fairness_mode) */ +#define 
CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 3 +#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0xFFFFFFF<<4) /* BitField cmng_enablesenables flag for fairness and rate shaping between protocols, vnics and COSes reserved */ +#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 4 + uint32_t __reserved1; +}; + + +/* + * per-port rate shaping variables + */ +struct rate_shaping_vars_per_port +{ + uint32_t rs_periodic_timeout /* timeout of periodic timer */; + uint32_t rs_threshold /* threshold, below which we start to stop queues */; +}; + +/* + * per-port fairness variables + */ +struct fairness_vars_per_port +{ + uint32_t upper_bound /* Quota for a protocol/vnic */; + uint32_t fair_threshold /* almost-empty threshold */; + uint32_t fairness_timeout /* timeout of fairness timer */; + uint32_t reserved0; +}; + +/* + * per-port SAFC variables + */ +struct safc_struct_per_port +{ +#if defined(__BIG_ENDIAN) + uint16_t __reserved1; + uint8_t __reserved0; + uint8_t safc_timeout_usec /* timeout to stop queues on SAFC pause command */; +#elif defined(__LITTLE_ENDIAN) + uint8_t safc_timeout_usec /* timeout to stop queues on SAFC pause command */; + uint8_t __reserved0; + uint16_t __reserved1; +#endif + uint8_t cos_to_traffic_types[MAX_COS_NUMBER] /* translate cos to service traffics types */; + uint16_t cos_to_pause_mask[NUM_OF_SAFC_BITS] /* QM pause mask for each class of service in the SAFC frame */; +}; + +/* + * Per-port congestion management variables + */ +struct cmng_struct_per_port +{ + struct rate_shaping_vars_per_port rs_vars; + struct fairness_vars_per_port fair_vars; + struct safc_struct_per_port safc_vars; + struct cmng_flags_per_port flags; +}; + +/* + * a single rate shaping counter. can be used as protocol or vnic counter + */ +struct rate_shaping_counter +{ + uint32_t quota /* Quota for a protocol/vnic */; +#if defined(__BIG_ENDIAN) + uint16_t __reserved0; + uint16_t rate /* Vnic/Protocol rate in units of Mega-bits/sec */; +#elif defined(__LITTLE_ENDIAN) + uint16_t rate /* Vnic/Protocol rate in units of Mega-bits/sec */; + uint16_t __reserved0; +#endif +}; + +/* + * per-vnic rate shaping variables + */ +struct rate_shaping_vars_per_vn +{ + struct rate_shaping_counter vn_counter /* per-vnic counter */; +}; + +/* + * per-vnic fairness variables + */ +struct fairness_vars_per_vn +{ + uint32_t cos_credit_delta[MAX_COS_NUMBER] /* used for incrementing the credit */; + uint32_t vn_credit_delta /* used for incrementing the credit */; + uint32_t __reserved0; +}; + +/* + * cmng port init state + */ +struct cmng_vnic +{ + struct rate_shaping_vars_per_vn vnic_max_rate[4]; + struct fairness_vars_per_vn vnic_min_rate[4]; +}; + +/* + * cmng port init state + */ +struct cmng_init +{ + struct cmng_struct_per_port port; + struct cmng_vnic vnic; +}; + + +/* + * driver parameters for congestion management init, all rates are in Mbps + */ +struct cmng_init_input +{ + uint32_t port_rate; + uint16_t vnic_min_rate[4] /* rates are in Mbps */; + uint16_t vnic_max_rate[4] /* rates are in Mbps */; + uint16_t cos_min_rate[MAX_COS_NUMBER] /* rates are in Mbps */; + uint16_t cos_to_pause_mask[MAX_COS_NUMBER]; + struct cmng_flags_per_port flags; +}; + + +/* + * Protocol-common command ID for slow path elements + */ +enum common_spqe_cmd_id +{ + RAMROD_CMD_ID_COMMON_UNUSED, + RAMROD_CMD_ID_COMMON_FUNCTION_START /* Start a function (for PFs only) */, + RAMROD_CMD_ID_COMMON_FUNCTION_STOP /* Stop a function (for PFs only) */, + RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE /* niv update function */, + RAMROD_CMD_ID_COMMON_CFC_DEL /* Delete a 
connection from CFC */, + RAMROD_CMD_ID_COMMON_CFC_DEL_WB /* Delete a connection from CFC (with write back) */, + RAMROD_CMD_ID_COMMON_STAT_QUERY /* Collect statistics counters */, + RAMROD_CMD_ID_COMMON_STOP_TRAFFIC /* Stop Tx traffic (before DCB updates) */, + RAMROD_CMD_ID_COMMON_START_TRAFFIC /* Start Tx traffic (after DCB updates) */, + RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS /* niv vif lists */, + RAMROD_CMD_ID_COMMON_SET_TIMESYNC /* Set Timesync Parameters (E3 Only) */, + MAX_COMMON_SPQE_CMD_ID}; + + +/* + * Per-protocol connection types + */ +enum connection_type +{ + ETH_CONNECTION_TYPE /* Ethernet */, + TOE_CONNECTION_TYPE /* TOE */, + RDMA_CONNECTION_TYPE /* RDMA */, + ISCSI_CONNECTION_TYPE /* iSCSI */, + FCOE_CONNECTION_TYPE /* FCoE */, + RESERVED_CONNECTION_TYPE_0, + RESERVED_CONNECTION_TYPE_1, + RESERVED_CONNECTION_TYPE_2, + NONE_CONNECTION_TYPE /* General- used for common slow path */, + MAX_CONNECTION_TYPE}; + + +/* + * Cos modes + */ +enum cos_mode +{ + OVERRIDE_COS /* Firmware deduce cos according to DCB */, + STATIC_COS /* Firmware has constant queues per CoS */, + FW_WRR /* Firmware keep fairness between different CoSes */, + MAX_COS_MODE}; + + +/* + * Dynamic HC counters set by the driver + */ +struct hc_dynamic_drv_counter +{ + uint32_t val[HC_SB_MAX_DYNAMIC_INDICES] /* 4 bytes * 4 indices = 2 lines */; +}; + +/* + * zone A per-queue data + */ +struct cstorm_queue_zone_data +{ + struct hc_dynamic_drv_counter hc_dyn_drv_cnt /* 4 bytes * 4 indices = 2 lines */; + struct regpair reserved[2]; +}; + + +/* + * Vf-PF channel data in cstorm ram (non-triggered zone) + */ +struct vf_pf_channel_zone_data +{ + uint32_t msg_addr_lo /* the message address on VF memory */; + uint32_t msg_addr_hi /* the message address on VF memory */; +}; + +/* + * zone for VF non-triggered data + */ +struct non_trigger_vf_zone +{ + struct vf_pf_channel_zone_data vf_pf_channel /* vf-pf channel zone data */; +}; + +/* + * Vf-PF channel trigger zone in cstorm ram + */ +struct vf_pf_channel_zone_trigger +{ + uint8_t addr_valid /* indicates that a vf-pf message is pending. MUST be set AFTER the message address. 
*/; +}; + +/* + * zone that triggers the in-bound interrupt + */ +struct trigger_vf_zone +{ +#if defined(__BIG_ENDIAN) + uint16_t reserved1; + uint8_t reserved0; + struct vf_pf_channel_zone_trigger vf_pf_channel; +#elif defined(__LITTLE_ENDIAN) + struct vf_pf_channel_zone_trigger vf_pf_channel; + uint8_t reserved0; + uint16_t reserved1; +#endif + uint32_t reserved2; +}; + +/* + * zone B per-VF data + */ +struct cstorm_vf_zone_data +{ + struct non_trigger_vf_zone non_trigger /* zone for VF non-triggered data */; + struct trigger_vf_zone trigger /* zone that triggers the in-bound interrupt */; +}; + + +/* + * Dynamic host coalescing init parameters, per state machine + */ +struct dynamic_hc_sm_config +{ + uint32_t threshold[3] /* thresholds of number of outstanding bytes */; + uint8_t shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES] /* bytes difference of each protocol is shifted right by this value */; + uint8_t hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES] /* timeout for level 0 for each protocol, in units of usec */; + uint8_t hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES] /* timeout for level 1 for each protocol, in units of usec */; + uint8_t hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES] /* timeout for level 2 for each protocol, in units of usec */; + uint8_t hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES] /* timeout for level 3 for each protocol, in units of usec */; +}; + +/* + * Dynamic host coalescing init parameters + */ +struct dynamic_hc_config +{ + struct dynamic_hc_sm_config sm_config[HC_SB_MAX_SM] /* Configuration per state machine */; +}; + + +struct e2_integ_data +{ +#if defined(__BIG_ENDIAN) + uint8_t flags; +#define E2_INTEG_DATA_TESTING_EN (0x1<<0) /* BitField flags integration testing enabled */ +#define E2_INTEG_DATA_TESTING_EN_SHIFT 0 +#define E2_INTEG_DATA_LB_TX (0x1<<1) /* BitField flags flag indicating this connection will transmit on loopback */ +#define E2_INTEG_DATA_LB_TX_SHIFT 1 +#define E2_INTEG_DATA_COS_TX (0x1<<2) /* BitField flags flag indicating this connection will transmit according to cos field */ +#define E2_INTEG_DATA_COS_TX_SHIFT 2 +#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3) /* BitField flags flag indicating this connection will activate the opportunistic QM credit flow */ +#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3 +#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4) /* BitField flags flag indicating this connection will release the door bell queue (DQ) */ +#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4 +#define E2_INTEG_DATA_RESERVED (0x7<<5) /* BitField flags */ +#define E2_INTEG_DATA_RESERVED_SHIFT 5 + uint8_t cos /* cos of the connection (relevant only in cos transmitting connections, when cosTx is set */; + uint8_t voq /* voq to return credit on. Normally equal to port (i.e. always 0 in E2 operational connections). in cos tests equal to cos. in loopback tests equal to LB_PORT (=4) */; + uint8_t pbf_queue /* pbf queue to transmit on. Normally equal to port (i.e. always 0 in E2 operational connections). in cos tests equal to cos. in loopback tests equal to LB_PORT (=4) */; +#elif defined(__LITTLE_ENDIAN) + uint8_t pbf_queue /* pbf queue to transmit on. Normally equal to port (i.e. always 0 in E2 operational connections). in cos tests equal to cos. in loopback tests equal to LB_PORT (=4) */; + uint8_t voq /* voq to return credit on. Normally equal to port (i.e. always 0 in E2 operational connections). in cos tests equal to cos. 
in loopback tests equal to LB_PORT (=4) */; + uint8_t cos /* cos of the connection (relevant only in cos transmitting connections, when cosTx is set */; + uint8_t flags; +#define E2_INTEG_DATA_TESTING_EN (0x1<<0) /* BitField flags integration testing enabled */ +#define E2_INTEG_DATA_TESTING_EN_SHIFT 0 +#define E2_INTEG_DATA_LB_TX (0x1<<1) /* BitField flags flag indicating this connection will transmit on loopback */ +#define E2_INTEG_DATA_LB_TX_SHIFT 1 +#define E2_INTEG_DATA_COS_TX (0x1<<2) /* BitField flags flag indicating this connection will transmit according to cos field */ +#define E2_INTEG_DATA_COS_TX_SHIFT 2 +#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3) /* BitField flags flag indicating this connection will activate the opportunistic QM credit flow */ +#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3 +#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4) /* BitField flags flag indicating this connection will release the door bell queue (DQ) */ +#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4 +#define E2_INTEG_DATA_RESERVED (0x7<<5) /* BitField flags */ +#define E2_INTEG_DATA_RESERVED_SHIFT 5 +#endif +#if defined(__BIG_ENDIAN) + uint16_t reserved3; + uint8_t reserved2; + uint8_t ramEn /* context area reserved for reading enable bit from ram */; +#elif defined(__LITTLE_ENDIAN) + uint8_t ramEn /* context area reserved for reading enable bit from ram */; + uint8_t reserved2; + uint16_t reserved3; +#endif +}; + + +/* + * set mac event data $$KEEP_ENDIANNESS$$ + */ +struct eth_event_data +{ + uint32_t echo /* set mac echo data to return to driver */; + uint32_t reserved0; + uint32_t reserved1; +}; + + +/* + * pf-vf event data $$KEEP_ENDIANNESS$$ + */ +struct vf_pf_event_data +{ + uint8_t vf_id /* VF ID (0-63) */; + uint8_t reserved0; + uint16_t reserved1; + uint32_t msg_addr_lo /* message address on Vf (low 32 bits) */; + uint32_t msg_addr_hi /* message address on Vf (high 32 bits) */; +}; + +/* + * VF FLR event data $$KEEP_ENDIANNESS$$ + */ +struct vf_flr_event_data +{ + uint8_t vf_id /* VF ID (0-63) */; + uint8_t reserved0; + uint16_t reserved1; + uint32_t reserved2; + uint32_t reserved3; +}; + +/* + * malicious VF event data $$KEEP_ENDIANNESS$$ + */ +struct malicious_vf_event_data +{ + uint8_t vf_id /* VF ID (0-63) */; + uint8_t err_id /* reason for malicious notification */; + uint16_t reserved1; + uint32_t reserved2; + uint32_t reserved3; +}; + +/* + * vif list event data $$KEEP_ENDIANNESS$$ + */ +struct vif_list_event_data +{ + uint8_t func_bit_map /* bit map of pf indice */; + uint8_t echo; + uint16_t reserved0; + uint32_t reserved1; + uint32_t reserved2; +}; + +/* + * function update event data $$KEEP_ENDIANNESS$$ + */ +struct function_update_event_data +{ + uint8_t echo; + uint8_t reserved; + uint16_t reserved0; + uint32_t reserved1; + uint32_t reserved2; +}; + +/* + * union for all event ring message types + */ +union event_data +{ + struct vf_pf_event_data vf_pf_event /* vf-pf event data */; + struct eth_event_data eth_event /* set mac event data */; + struct cfc_del_event_data cfc_del_event /* cfc delete event data */; + struct vf_flr_event_data vf_flr_event /* vf flr event data */; + struct malicious_vf_event_data malicious_vf_event /* malicious vf event data */; + struct vif_list_event_data vif_list_event /* vif list event data */; + struct function_update_event_data function_update_event /* function update event data */; +}; + + +/* + * per PF event ring data + */ +struct event_ring_data +{ + struct regpair_native base_addr /* ring base address */; +#if defined(__BIG_ENDIAN) + 
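+ /*
+  * As with the other per-PF structures in this file, the status block
+  * index/id pair and the event ring producer are mirrored between the
+  * big-endian and little-endian layouts below.
+  */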
uint8_t index_id /* index ID within the status block */; + uint8_t sb_id /* status block ID */; + uint16_t producer /* event ring producer */; +#elif defined(__LITTLE_ENDIAN) + uint16_t producer /* event ring producer */; + uint8_t sb_id /* status block ID */; + uint8_t index_id /* index ID within the status block */; +#endif + uint32_t reserved0; +}; + + +/* + * event ring message element (each element is 128 bits) $$KEEP_ENDIANNESS$$ + */ +struct event_ring_msg +{ + uint8_t opcode; + uint8_t error /* error on the mesasage */; + uint16_t reserved1; + union event_data data /* message data (96 bits data) */; +}; + +/* + * event ring next page element (128 bits) + */ +struct event_ring_next +{ + struct regpair addr /* Address of the next page of the ring */; + uint32_t reserved[2]; +}; + +/* + * union for event ring element types (each element is 128 bits) + */ +union event_ring_elem +{ + struct event_ring_msg message /* event ring message */; + struct event_ring_next next_page /* event ring next page */; +}; + + +/* + * Common event ring opcodes + */ +enum event_ring_opcode +{ + EVENT_RING_OPCODE_VF_PF_CHANNEL, + EVENT_RING_OPCODE_FUNCTION_START /* Start a function (for PFs only) */, + EVENT_RING_OPCODE_FUNCTION_STOP /* Stop a function (for PFs only) */, + EVENT_RING_OPCODE_CFC_DEL /* Delete a connection from CFC */, + EVENT_RING_OPCODE_CFC_DEL_WB /* Delete a connection from CFC (with write back) */, + EVENT_RING_OPCODE_STAT_QUERY /* Collect statistics counters */, + EVENT_RING_OPCODE_STOP_TRAFFIC /* Stop Tx traffic (before DCB updates) */, + EVENT_RING_OPCODE_START_TRAFFIC /* Start Tx traffic (after DCB updates) */, + EVENT_RING_OPCODE_VF_FLR /* VF FLR indication for PF */, + EVENT_RING_OPCODE_MALICIOUS_VF /* Malicious VF operation detected */, + EVENT_RING_OPCODE_FORWARD_SETUP /* Initialize forward channel */, + EVENT_RING_OPCODE_RSS_UPDATE_RULES /* Update RSS configuration */, + EVENT_RING_OPCODE_FUNCTION_UPDATE /* function update */, + EVENT_RING_OPCODE_AFEX_VIF_LISTS /* event ring opcode niv vif lists */, + EVENT_RING_OPCODE_SET_MAC /* Add/remove MAC (in E1x only) */, + EVENT_RING_OPCODE_CLASSIFICATION_RULES /* Add/remove MAC or VLAN (in E2/E3 only) */, + EVENT_RING_OPCODE_FILTERS_RULES /* Add/remove classification filters for L2 client (in E2/E3 only) */, + EVENT_RING_OPCODE_MULTICAST_RULES /* Add/remove multicast classification bin (in E2/E3 only) */, + EVENT_RING_OPCODE_SET_TIMESYNC /* Set Timesync Parameters (E3 Only) */, + MAX_EVENT_RING_OPCODE}; + + +/* + * Modes for fairness algorithm + */ +enum fairness_mode +{ + FAIRNESS_COS_WRR_MODE /* Weighted round robin mode (used in Google) */, + FAIRNESS_COS_ETS_MODE /* ETS mode (used in FCoE) */, + MAX_FAIRNESS_MODE}; + + +/* + * Priority and cos $$KEEP_ENDIANNESS$$ + */ +struct priority_cos +{ + uint8_t priority /* Priority */; + uint8_t cos /* Cos */; + uint16_t reserved1; +}; + +/* + * The data for flow control configuration $$KEEP_ENDIANNESS$$ + */ +struct flow_control_configuration +{ + struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES] /* traffic_type to priority cos */; + uint8_t dcb_enabled /* If DCB mode is enabled then traffic class to priority array is fully initialized and there must be inner VLAN */; + uint8_t dcb_version /* DCB version Increase by one on each DCB update */; + uint8_t dont_add_pri_0 /* In case, the priority is 0, and the packet has no vlan, the firmware wont add vlan */; + uint8_t reserved1; + uint32_t reserved2; +}; + + +/* + * $$KEEP_ENDIANNESS$$ + */ +struct function_start_data +{ + 
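+ /*
+  * FUNCTION_START ramrod data: carries the function mode (cf. enum
+  * mf_mode), the SD VLAN tag or NIV VIF id for the corresponding
+  * multi-function modes, the network CoS mode (cf. enum cos_mode),
+  * the DMAE command id used for FW DMAE transactions and the GRE
+  * tunneling/RSS settings (E2/E3 only).
+  */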
uint8_t function_mode /* the function mode */; + uint8_t allow_npar_tx_switching /* If set, inter-pf tx switching is allowed in Switch Independant function mode. (E2/E3 Only) */; + uint16_t sd_vlan_tag /* value of Vlan in case of switch depended multi-function mode */; + uint16_t vif_id /* value of VIF id in case of NIV multi-function mode */; + uint8_t path_id; + uint8_t network_cos_mode /* The cos mode for network traffic. */; + uint8_t dmae_cmd_id /* The DMAE command id to use for FW DMAE transactions */; + uint8_t gre_tunnel_mode /* GRE Tunnel Mode to enable on the Function (E2/E3 Only) */; + uint8_t gre_tunnel_rss /* Type of RSS to perform on GRE Tunneled packets */; + uint8_t nvgre_clss_en /* If set, NVGRE tunneled packets are classified according to their inner MAC (gre_mode must be NVGRE_TUNNEL) */; + uint16_t reserved1[2]; +}; + + +/* + * $$KEEP_ENDIANNESS$$ + */ +struct function_update_data +{ + uint8_t vif_id_change_flg /* If set, vif_id will be checked */; + uint8_t afex_default_vlan_change_flg /* If set, afex_default_vlan will be checked */; + uint8_t allowed_priorities_change_flg /* If set, allowed_priorities will be checked */; + uint8_t network_cos_mode_change_flg /* If set, network_cos_mode will be checked */; + uint16_t vif_id /* value of VIF id in case of NIV multi-function mode */; + uint16_t afex_default_vlan /* value of default Vlan in case of NIV mf */; + uint8_t allowed_priorities /* bit vector of allowed Vlan priorities for this VIF */; + uint8_t network_cos_mode /* The cos mode for network traffic. */; + uint8_t lb_mode_en_change_flg /* If set, lb_mode_en will be checked */; + uint8_t lb_mode_en /* If set, niv loopback mode will be enabled */; + uint8_t tx_switch_suspend_change_flg /* If set, tx_switch_suspend will be checked */; + uint8_t tx_switch_suspend /* If set, TX switching TO this function will be disabled and packets will be dropped */; + uint8_t echo; + uint8_t reserved1; + uint8_t update_gre_cfg_flg /* If set, GRE config for the function will be updated according to the gre_tunnel_rss and nvgre_clss_en fields */; + uint8_t gre_tunnel_mode /* GRE Tunnel Mode to enable on the Function (E2/E3 Only) */; + uint8_t gre_tunnel_rss /* Type of RSS to perform on GRE Tunneled packets */; + uint8_t nvgre_clss_en /* If set, NVGRE tunneled packets are classified according to their inner MAC (gre_mode must be NVGRE_TUNNEL) */; + uint32_t reserved3; +}; + + +/* + * FW version stored in the Xstorm RAM + */ +struct fw_version +{ +#if defined(__BIG_ENDIAN) + uint8_t engineering /* firmware current engineering version */; + uint8_t revision /* firmware current revision version */; + uint8_t minor /* firmware current minor version */; + uint8_t major /* firmware current major version */; +#elif defined(__LITTLE_ENDIAN) + uint8_t major /* firmware current major version */; + uint8_t minor /* firmware current minor version */; + uint8_t revision /* firmware current revision version */; + uint8_t engineering /* firmware current engineering version */; +#endif + uint32_t flags; +#define FW_VERSION_OPTIMIZED (0x1<<0) /* BitField flags if set, this is optimized ASM */ +#define FW_VERSION_OPTIMIZED_SHIFT 0 +#define FW_VERSION_BIG_ENDIEN (0x1<<1) /* BitField flags if set, this is big-endien ASM */ +#define FW_VERSION_BIG_ENDIEN_SHIFT 1 +#define FW_VERSION_CHIP_VERSION (0x3<<2) /* BitField flags 1 - E1H */ +#define FW_VERSION_CHIP_VERSION_SHIFT 2 +#define __FW_VERSION_RESERVED (0xFFFFFFF<<4) /* BitField flags */ +#define __FW_VERSION_RESERVED_SHIFT 4 +}; + + +/* + * GRE RSS Mode + 
*/ +enum gre_rss_mode +{ + GRE_OUTER_HEADERS_RSS /* RSS for GRE Packets is performed on the outer headers */, + GRE_INNER_HEADERS_RSS /* RSS for GRE Packets is performed on the inner headers */, + NVGRE_KEY_ENTROPY_RSS /* RSS for NVGRE Packets is done based on a hash containing the entropy bits from the GRE Key Field (gre_tunnel must be NVGRE_TUNNEL) */, + MAX_GRE_RSS_MODE}; + + +/* + * GRE Tunnel Mode + */ +enum gre_tunnel_type +{ + NO_GRE_TUNNEL, + NVGRE_TUNNEL /* NV-GRE Tunneling Microsoft L2 over GRE. GRE header contains mandatory Key Field. */, + L2GRE_TUNNEL /* L2-GRE Tunneling General L2 over GRE. GRE can contain Key field with Tenant ID and Sequence Field */, + IPGRE_TUNNEL /* IP-GRE Tunneling IP over GRE. GRE may contain Key field with Tenant ID, Sequence Field and/or Checksum Field */, + MAX_GRE_TUNNEL_TYPE}; + + +/* + * Dynamic Host-Coalescing - Driver(host) counters + */ +struct hc_dynamic_sb_drv_counters +{ + uint32_t dynamic_hc_drv_counter[HC_SB_MAX_DYNAMIC_INDICES] /* Dynamic HC counters written by drivers */; +}; + + +/* + * 2 bytes. configuration/state parameters for a single protocol index + */ +struct hc_index_data +{ +#if defined(__BIG_ENDIAN) + uint8_t flags; +#define HC_INDEX_DATA_SM_ID (0x1<<0) /* BitField flags Index to a state machine. Can be 0 or 1 */ +#define HC_INDEX_DATA_SM_ID_SHIFT 0 +#define HC_INDEX_DATA_HC_ENABLED (0x1<<1) /* BitField flags if set, host coalescing would be done for this index */ +#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1 +#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2) /* BitField flags if set, dynamic HC will be done for this index */ +#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2 +#define HC_INDEX_DATA_RESERVE (0x1F<<3) /* BitField flags */ +#define HC_INDEX_DATA_RESERVE_SHIFT 3 + uint8_t timeout /* the timeout values for this index. Units are 4 usec */; +#elif defined(__LITTLE_ENDIAN) + uint8_t timeout /* the timeout values for this index. Units are 4 usec */; + uint8_t flags; +#define HC_INDEX_DATA_SM_ID (0x1<<0) /* BitField flags Index to a state machine. Can be 0 or 1 */ +#define HC_INDEX_DATA_SM_ID_SHIFT 0 +#define HC_INDEX_DATA_HC_ENABLED (0x1<<1) /* BitField flags if set, host coalescing would be done for this index */ +#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1 +#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2) /* BitField flags if set, dynamic HC will be done for this index */ +#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2 +#define HC_INDEX_DATA_RESERVE (0x1F<<3) /* BitField flags */ +#define HC_INDEX_DATA_RESERVE_SHIFT 3 +#endif +}; + + +/* + * HC state-machine + */ +struct hc_status_block_sm +{ +#if defined(__BIG_ENDIAN) + uint8_t igu_seg_id; + uint8_t igu_sb_id /* sb_id within the IGU */; + uint8_t timer_value /* Determines the time_to_expire */; + uint8_t __flags; +#elif defined(__LITTLE_ENDIAN) + uint8_t __flags; + uint8_t timer_value /* Determines the time_to_expire */; + uint8_t igu_sb_id /* sb_id within the IGU */; + uint8_t igu_seg_id; +#endif + uint32_t time_to_expire /* The time in which it expects to wake up */; +}; + +/* + * hold PCI identification variables- used in various places in firmware + */ +struct pci_entity +{ +#if defined(__BIG_ENDIAN) + uint8_t vf_valid /* If set, this is a VF, otherwise it is PF */; + uint8_t vf_id /* VF ID (0-63). Value of 0xFF means VF not valid */; + uint8_t vnic_id /* Virtual NIC ID (0-3) */; + uint8_t pf_id /* PCI physical function number (0-7). The LSB of this field is the port ID */; +#elif defined(__LITTLE_ENDIAN) + uint8_t pf_id /* PCI physical function number (0-7). 
The LSB of this field is the port ID */; + uint8_t vnic_id /* Virtual NIC ID (0-3) */; + uint8_t vf_id /* VF ID (0-63). Value of 0xFF means VF not valid */; + uint8_t vf_valid /* If set, this is a VF, otherwise it is PF */; +#endif +}; + +/* + * The fast-path status block meta-data, common to all chips + */ +struct hc_sb_data +{ + struct regpair_native host_sb_addr /* Host status block address */; + struct hc_status_block_sm state_machine[HC_SB_MAX_SM] /* Holds the state machines of the status block */; + struct pci_entity p_func /* vnic / port of the status block to be set by the driver */; +#if defined(__BIG_ENDIAN) + uint8_t rsrv0; + uint8_t state; + uint8_t dhc_qzone_id /* used in E2 only, to specify the HW queue zone ID used for this status block dynamic HC counters */; + uint8_t same_igu_sb_1b /* Indicate that both state-machines acts like single sm */; +#elif defined(__LITTLE_ENDIAN) + uint8_t same_igu_sb_1b /* Indicate that both state-machines acts like single sm */; + uint8_t dhc_qzone_id /* used in E2 only, to specify the HW queue zone ID used for this status block dynamic HC counters */; + uint8_t state; + uint8_t rsrv0; +#endif + struct regpair_native rsrv1[2]; +}; + + +/* + * Segment types for host coaslescing + */ +enum hc_segment +{ + HC_REGULAR_SEGMENT, + HC_DEFAULT_SEGMENT, + MAX_HC_SEGMENT}; + + +/* + * The fast-path status block meta-data + */ +struct hc_sp_status_block_data +{ + struct regpair_native host_sb_addr /* Host status block address */; +#if defined(__BIG_ENDIAN) + uint8_t rsrv1; + uint8_t state; + uint8_t igu_seg_id /* segment id of the IGU */; + uint8_t igu_sb_id /* sb_id within the IGU */; +#elif defined(__LITTLE_ENDIAN) + uint8_t igu_sb_id /* sb_id within the IGU */; + uint8_t igu_seg_id /* segment id of the IGU */; + uint8_t state; + uint8_t rsrv1; +#endif + struct pci_entity p_func /* vnic / port of the status block to be set by the driver */; +}; + + +/* + * The fast-path status block meta-data + */ +struct hc_status_block_data_e1x +{ + struct hc_index_data index_data[HC_SB_MAX_INDICES_E1X] /* configuration/state parameters for a single protocol index */; + struct hc_sb_data common /* The fast-path status block meta-data, common to all chips */; +}; + + +/* + * The fast-path status block meta-data + */ +struct hc_status_block_data_e2 +{ + struct hc_index_data index_data[HC_SB_MAX_INDICES_E2] /* configuration/state parameters for a single protocol index */; + struct hc_sb_data common /* The fast-path status block meta-data, common to all chips */; +}; + + +/* + * IGU block operartion modes (in Everest2) + */ +enum igu_mode +{ + HC_IGU_BC_MODE /* Backward compatible mode */, + HC_IGU_NBC_MODE /* Non-backward compatible mode */, + MAX_IGU_MODE}; + + +/* + * IP versions + */ +enum ip_ver +{ + IP_V4, + IP_V6, + MAX_IP_VER}; + + +/* + * Malicious VF error ID + */ +enum malicious_vf_error_id +{ + VF_PF_CHANNEL_NOT_READY /* Writing to VF/PF channel when it is not ready */, + ETH_ILLEGAL_BD_LENGTHS /* TX BD lengths error was detected */, + ETH_PACKET_TOO_SHORT /* TX packet is shorter then reported on BDs */, + ETH_PAYLOAD_TOO_BIG /* TX packet is greater then MTU */, + ETH_ILLEGAL_ETH_TYPE /* TX packet reported without VLAN but eth type is 0x8100 */, + ETH_ILLEGAL_LSO_HDR_LEN /* LSO header length on BDs and on hdr_nbd do not match */, + ETH_TOO_MANY_BDS /* Tx packet has too many BDs */, + ETH_ZERO_HDR_NBDS /* hdr_nbds field is zero */, + ETH_START_BD_NOT_SET /* start_bd should be set on first TX BD in packet */, + ETH_ILLEGAL_PARSE_NBDS /* Tx packet with 
parse_nbds field which is not legal */, + ETH_IPV6_AND_CHECKSUM /* Tx packet with IP checksum on IPv6 */, + ETH_VLAN_FLG_INCORRECT /* Tx packet with incorrect VLAN flag */, + ETH_ILLEGAL_LSO_MSS /* Tx LSO packet with illegal MSS value */, + ETH_TUNNEL_NOT_SUPPORTED /* Tunneling packets are not supported in current connection */, + MAX_MALICIOUS_VF_ERROR_ID}; + + +/* + * Multi-function modes + */ +enum mf_mode +{ + SINGLE_FUNCTION, + MULTI_FUNCTION_SD /* Switch dependent (vlan based) */, + MULTI_FUNCTION_SI /* Switch independent (mac based) */, + MULTI_FUNCTION_AFEX /* Switch dependent (niv based) */, + MAX_MF_MODE}; + + +/* + * Protocol-common statistics collected by the Tstorm (per pf) $$KEEP_ENDIANNESS$$ + */ +struct tstorm_per_pf_stats +{ + struct regpair rcv_error_bytes /* number of bytes received with errors */; +}; + +/* + * $$KEEP_ENDIANNESS$$ + */ +struct per_pf_stats +{ + struct tstorm_per_pf_stats tstorm_pf_statistics; +}; + + +/* + * Protocol-common statistics collected by the Tstorm (per port) $$KEEP_ENDIANNESS$$ + */ +struct tstorm_per_port_stats +{ + uint32_t mac_discard /* number of packets with mac errors */; + uint32_t mac_filter_discard /* the number of good frames dropped because of no perfect match to MAC/VLAN address */; + uint32_t brb_truncate_discard /* the number of packtes that were dropped because they were truncated in BRB */; + uint32_t mf_tag_discard /* the number of good frames dropped because of no match to the outer vlan/VNtag */; + uint32_t packet_drop /* general packet drop conter- incremented for every packet drop */; + uint32_t reserved; +}; + +/* + * $$KEEP_ENDIANNESS$$ + */ +struct per_port_stats +{ + struct tstorm_per_port_stats tstorm_port_statistics; +}; + + +/* + * Protocol-common statistics collected by the Tstorm (per client) $$KEEP_ENDIANNESS$$ + */ +struct tstorm_per_queue_stats +{ + struct regpair rcv_ucast_bytes /* number of bytes in unicast packets received without errors and pass the filter */; + uint32_t rcv_ucast_pkts /* number of unicast packets received without errors and pass the filter */; + uint32_t checksum_discard /* number of total packets received with checksum error */; + struct regpair rcv_bcast_bytes /* number of bytes in broadcast packets received without errors and pass the filter */; + uint32_t rcv_bcast_pkts /* number of packets in broadcast packets received without errors and pass the filter */; + uint32_t pkts_too_big_discard /* number of too long packets received */; + struct regpair rcv_mcast_bytes /* number of bytes in multicast packets received without errors and pass the filter */; + uint32_t rcv_mcast_pkts /* number of packets in multicast packets received without errors and pass the filter */; + uint32_t ttl0_discard /* the number of good frames dropped because of TTL=0 */; + uint16_t no_buff_discard; + uint16_t reserved0; + uint32_t reserved1; +}; + +/* + * Protocol-common statistics collected by the Ustorm (per client) $$KEEP_ENDIANNESS$$ + */ +struct ustorm_per_queue_stats +{ + struct regpair ucast_no_buff_bytes /* the number of unicast bytes received from network dropped because of no buffer at host */; + struct regpair mcast_no_buff_bytes /* the number of multicast bytes received from network dropped because of no buffer at host */; + struct regpair bcast_no_buff_bytes /* the number of broadcast bytes received from network dropped because of no buffer at host */; + uint32_t ucast_no_buff_pkts /* the number of unicast frames received from network dropped because of no buffer at host */; + uint32_t 
mcast_no_buff_pkts /* the number of unicast frames received from network dropped because of no buffer at host */; + uint32_t bcast_no_buff_pkts /* the number of unicast frames received from network dropped because of no buffer at host */; + uint32_t coalesced_pkts /* the number of packets coalesced in all aggregations */; + struct regpair coalesced_bytes /* the number of bytes coalesced in all aggregations */; + uint32_t coalesced_events /* the number of aggregations */; + uint32_t coalesced_aborts /* the number of exception which avoid aggregation */; +}; + +/* + * Protocol-common statistics collected by the Xstorm (per client) $$KEEP_ENDIANNESS$$ + */ +struct xstorm_per_queue_stats +{ + struct regpair ucast_bytes_sent /* number of total bytes sent without errors */; + struct regpair mcast_bytes_sent /* number of total bytes sent without errors */; + struct regpair bcast_bytes_sent /* number of total bytes sent without errors */; + uint32_t ucast_pkts_sent /* number of total packets sent without errors */; + uint32_t mcast_pkts_sent /* number of total packets sent without errors */; + uint32_t bcast_pkts_sent /* number of total packets sent without errors */; + uint32_t error_drop_pkts /* number of total packets drooped due to errors */; +}; + +/* + * $$KEEP_ENDIANNESS$$ + */ +struct per_queue_stats +{ + struct tstorm_per_queue_stats tstorm_queue_statistics; + struct ustorm_per_queue_stats ustorm_queue_statistics; + struct xstorm_per_queue_stats xstorm_queue_statistics; +}; + + +/* + * FW version stored in first line of pram $$KEEP_ENDIANNESS$$ + */ +struct pram_fw_version +{ + uint8_t major /* firmware current major version */; + uint8_t minor /* firmware current minor version */; + uint8_t revision /* firmware current revision version */; + uint8_t engineering /* firmware current engineering version */; + uint8_t flags; +#define PRAM_FW_VERSION_OPTIMIZED (0x1<<0) /* BitField flags if set, this is optimized ASM */ +#define PRAM_FW_VERSION_OPTIMIZED_SHIFT 0 +#define PRAM_FW_VERSION_STORM_ID (0x3<<1) /* BitField flags storm_id identification */ +#define PRAM_FW_VERSION_STORM_ID_SHIFT 1 +#define PRAM_FW_VERSION_BIG_ENDIEN (0x1<<3) /* BitField flags if set, this is big-endien ASM */ +#define PRAM_FW_VERSION_BIG_ENDIEN_SHIFT 3 +#define PRAM_FW_VERSION_CHIP_VERSION (0x3<<4) /* BitField flags 1 - E1H */ +#define PRAM_FW_VERSION_CHIP_VERSION_SHIFT 4 +#define __PRAM_FW_VERSION_RESERVED0 (0x3<<6) /* BitField flags */ +#define __PRAM_FW_VERSION_RESERVED0_SHIFT 6 +}; + + +/* + * Ethernet slow path element + */ +union protocol_common_specific_data +{ + uint8_t protocol_data[8] /* to fix this structure size to 8 bytes */; + struct regpair phy_address /* SPE physical address */; + struct regpair mac_config_addr /* physical address of the MAC configuration command, as allocated by the driver */; + struct afex_vif_list_ramrod_data afex_vif_list_data /* The data afex vif list ramrod need */; +}; + +/* + * The send queue element + */ +struct protocol_common_spe +{ + struct spe_hdr hdr /* SPE header */; + union protocol_common_specific_data data /* data specific to common protocol */; +}; + + +/* + * The data for the Set Timesync Ramrod $$KEEP_ENDIANNESS$$ + */ +struct set_timesync_ramrod_data +{ + uint8_t drift_adjust_cmd /* Timesync Drift Adjust Command */; + uint8_t offset_cmd /* Timesync Offset Command */; + uint8_t add_sub_drift_adjust_value /* Whether to add(1)/subtract(0) Drift Adjust Value from the Offset */; + uint8_t drift_adjust_value /* Drift Adjust Value (in ns) */; + uint32_t 
drift_adjust_period /* Drift Adjust Period (in us) */; + struct regpair offset_delta /* Timesync Offset Delta (in ns) */; +}; + + +/* + * The send queue element + */ +struct slow_path_element +{ + struct spe_hdr hdr /* common data for all protocols */; + struct regpair protocol_data /* additional data specific to the protocol */; +}; + + +/* + * Protocol-common statistics counter $$KEEP_ENDIANNESS$$ + */ +struct stats_counter +{ + uint16_t xstats_counter /* xstorm statistics counter */; + uint16_t reserved0; + uint32_t reserved1; + uint16_t tstats_counter /* tstorm statistics counter */; + uint16_t reserved2; + uint32_t reserved3; + uint16_t ustats_counter /* ustorm statistics counter */; + uint16_t reserved4; + uint32_t reserved5; + uint16_t cstats_counter /* ustorm statistics counter */; + uint16_t reserved6; + uint32_t reserved7; +}; + + +/* + * $$KEEP_ENDIANNESS$$ + */ +struct stats_query_entry +{ + uint8_t kind; + uint8_t index /* queue index */; + uint16_t funcID /* the func the statistic will send to */; + uint32_t reserved; + struct regpair address /* pxp address */; +}; + +/* + * statistic command $$KEEP_ENDIANNESS$$ + */ +struct stats_query_cmd_group +{ + struct stats_query_entry query[STATS_QUERY_CMD_COUNT]; +}; + + +/* + * statistic command header $$KEEP_ENDIANNESS$$ + */ +struct stats_query_header +{ + uint8_t cmd_num /* command number */; + uint8_t reserved0; + uint16_t drv_stats_counter; + uint32_t reserved1; + struct regpair stats_counters_addrs /* stats counter */; +}; + + +/* + * Types of statistcis query entry + */ +enum stats_query_type +{ + STATS_TYPE_QUEUE, + STATS_TYPE_PORT, + STATS_TYPE_PF, + STATS_TYPE_TOE, + STATS_TYPE_FCOE, + MAX_STATS_QUERY_TYPE}; + + +/* + * Indicate of the function status block state + */ +enum status_block_state +{ + SB_DISABLED, + SB_ENABLED, + SB_CLEANED, + MAX_STATUS_BLOCK_STATE}; + + +/* + * Storm IDs (including attentions for IGU related enums) + */ +enum storm_id +{ + USTORM_ID, + CSTORM_ID, + XSTORM_ID, + TSTORM_ID, + ATTENTION_ID, + MAX_STORM_ID}; + + +/* + * Taffic types used in ETS and flow control algorithms + */ +enum traffic_type +{ + LLFC_TRAFFIC_TYPE_NW /* Networking */, + LLFC_TRAFFIC_TYPE_FCOE /* FCoE */, + LLFC_TRAFFIC_TYPE_ISCSI /* iSCSI */, + MAX_TRAFFIC_TYPE}; + + +/* + * zone A per-queue data + */ +struct tstorm_queue_zone_data +{ + struct regpair reserved[4]; +}; + + +/* + * zone B per-VF data + */ +struct tstorm_vf_zone_data +{ + struct regpair reserved; +}; + + +/* + * Add or Subtract Value for Set Timesync Ramrod + */ +enum ts_add_sub_value +{ + TS_SUB_VALUE /* Subtract Value */, + TS_ADD_VALUE /* Add Value */, + MAX_TS_ADD_SUB_VALUE}; + + +/* + * Drift-Adjust Commands for Set Timesync Ramrod + */ +enum ts_drift_adjust_cmd +{ + TS_DRIFT_ADJUST_KEEP /* Keep Drift-Adjust at current values */, + TS_DRIFT_ADJUST_SET /* Set Drift-Adjust */, + TS_DRIFT_ADJUST_RESET /* Reset Drift-Adjust */, + MAX_TS_DRIFT_ADJUST_CMD}; + + +/* + * Offset Commands for Set Timesync Ramrod + */ +enum ts_offset_cmd +{ + TS_OFFSET_KEEP /* Keep Offset at current values */, + TS_OFFSET_INC /* Increase Offset by Offset Delta */, + TS_OFFSET_DEC /* Decrease Offset by Offset Delta */, + MAX_TS_OFFSET_CMD}; + + +/* + * zone A per-queue data + */ +struct ustorm_queue_zone_data +{ + union ustorm_eth_rx_producers eth_rx_producers /* ETH RX rings producers */; + struct regpair reserved[3]; +}; + + +/* + * zone B per-VF data + */ +struct ustorm_vf_zone_data +{ + struct regpair reserved; +}; + + +/* + * data per VF-PF channel + */ +struct vf_pf_channel_data 
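+/*
+ * Per VF-PF channel bookkeeping: state follows enum vf_pf_channel_state
+ * below (ready / waiting for ack) and valid is cleared once the VF is
+ * identified as malicious.
+ */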
+{ +#if defined(__BIG_ENDIAN) + uint16_t reserved0; + uint8_t valid /* flag for channel validity. (cleared when identify a VF as malicious) */; + uint8_t state /* channel state (ready / waiting for ack) */; +#elif defined(__LITTLE_ENDIAN) + uint8_t state /* channel state (ready / waiting for ack) */; + uint8_t valid /* flag for channel validity. (cleared when identify a VF as malicious) */; + uint16_t reserved0; +#endif + uint32_t reserved1; +}; + + +/* + * State of VF-PF channel + */ +enum vf_pf_channel_state +{ + VF_PF_CHANNEL_STATE_READY /* Channel is ready to accept a message from VF */, + VF_PF_CHANNEL_STATE_WAITING_FOR_ACK /* Channel waits for an ACK from PF */, + MAX_VF_PF_CHANNEL_STATE}; + + +/* + * vif_list_rule_kind + */ +enum vif_list_rule_kind +{ + VIF_LIST_RULE_SET, + VIF_LIST_RULE_GET, + VIF_LIST_RULE_CLEAR_ALL, + VIF_LIST_RULE_CLEAR_FUNC, + MAX_VIF_LIST_RULE_KIND}; + + +/* + * zone A per-queue data + */ +struct xstorm_queue_zone_data +{ + struct regpair reserved[4]; +}; + + +/* + * zone B per-VF data + */ +struct xstorm_vf_zone_data +{ + struct regpair reserved; +}; + + +#endif /* ECORE_HSI_H */ diff --git a/drivers/net/bnx2x/ecore_init.h b/drivers/net/bnx2x/ecore_init.h new file mode 100644 index 00000000..d25e2803 --- /dev/null +++ b/drivers/net/bnx2x/ecore_init.h @@ -0,0 +1,819 @@ +/*- + * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * + * Eric Davis <edavis@broadcom.com> + * David Christensen <davidch@broadcom.com> + * Gary Zambrano <zambrano@broadcom.com> + * + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.bnx2x_pmd for copyright and licensing details. + */ + +#ifndef ECORE_INIT_H +#define ECORE_INIT_H + +/* Init operation types and structures */ +enum { + OP_RD = 0x1, /* read a single register */ + OP_WR, /* write a single register */ + OP_SW, /* copy a string to the device */ + OP_ZR, /* clear memory */ + OP_ZP, /* unzip then copy with DMAE */ + OP_WR_64, /* write 64 bit pattern */ + OP_WB, /* copy a string using DMAE */ + OP_WB_ZR, /* Clear a string using DMAE or indirect-wr */ + OP_IF_MODE_OR, /* Skip the following ops if all init modes don't match */ + OP_IF_MODE_AND, /* Skip the following ops if any init modes don't match */ + OP_IF_PHASE, + OP_RT, + OP_DELAY, + OP_VERIFY, + OP_MAX +}; + +enum { + STAGE_START, + STAGE_END, +}; + +/* Returns the index of start or end of a specific block stage in ops array*/ +#define BLOCK_OPS_IDX(block, stage, end) \ + (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end)) + + +/* structs for the various opcodes */ +struct raw_op { + uint32_t op:8; + uint32_t offset:24; + uint32_t raw_data; +}; + +struct op_read { + uint32_t op:8; + uint32_t offset:24; + uint32_t val; +}; + +struct op_write { + uint32_t op:8; + uint32_t offset:24; + uint32_t val; +}; + +struct op_arr_write { + uint32_t op:8; + uint32_t offset:24; +#ifdef __BIG_ENDIAN + uint16_t data_len; + uint16_t data_off; +#else /* __LITTLE_ENDIAN */ + uint16_t data_off; + uint16_t data_len; +#endif +}; + +struct op_zero { + uint32_t op:8; + uint32_t offset:24; + uint32_t len; +}; + +struct op_if_mode { + uint32_t op:8; + uint32_t cmd_offset:24; + uint32_t mode_bit_map; +}; + +struct op_if_phase { + uint32_t op:8; + uint32_t cmd_offset:24; + uint32_t phase_bit_map; +}; + +struct op_delay { + uint32_t op:8; + uint32_t reserved:24; + uint32_t delay; +}; + +union init_op { + struct op_read read; + struct op_write write; + struct 
op_arr_write arr_wr; + struct op_zero zero; + struct raw_op raw; + struct op_if_mode if_mode; + struct op_if_phase if_phase; + struct op_delay delay; +}; + + +/* Init Phases */ +enum { + PHASE_COMMON, + PHASE_PORT0, + PHASE_PORT1, + PHASE_PF0, + PHASE_PF1, + PHASE_PF2, + PHASE_PF3, + PHASE_PF4, + PHASE_PF5, + PHASE_PF6, + PHASE_PF7, + NUM_OF_INIT_PHASES +}; + +/* Init Modes */ +enum { + MODE_ASIC = 0x00000001, + MODE_FPGA = 0x00000002, + MODE_EMUL = 0x00000004, + MODE_E2 = 0x00000008, + MODE_E3 = 0x00000010, + MODE_PORT2 = 0x00000020, + MODE_PORT4 = 0x00000040, + MODE_SF = 0x00000080, + MODE_MF = 0x00000100, + MODE_MF_SD = 0x00000200, + MODE_MF_SI = 0x00000400, + MODE_MF_AFEX = 0x00000800, + MODE_E3_A0 = 0x00001000, + MODE_E3_B0 = 0x00002000, + MODE_COS3 = 0x00004000, + MODE_COS6 = 0x00008000, + MODE_LITTLE_ENDIAN = 0x00010000, + MODE_BIG_ENDIAN = 0x00020000, +}; + +/* Init Blocks */ +enum { + BLOCK_ATC, + BLOCK_BRB1, + BLOCK_CCM, + BLOCK_CDU, + BLOCK_CFC, + BLOCK_CSDM, + BLOCK_CSEM, + BLOCK_DBG, + BLOCK_DMAE, + BLOCK_DORQ, + BLOCK_HC, + BLOCK_IGU, + BLOCK_MISC, + BLOCK_NIG, + BLOCK_PBF, + BLOCK_PGLUE_B, + BLOCK_PRS, + BLOCK_PXP2, + BLOCK_PXP, + BLOCK_QM, + BLOCK_SRC, + BLOCK_TCM, + BLOCK_TM, + BLOCK_TSDM, + BLOCK_TSEM, + BLOCK_UCM, + BLOCK_UPB, + BLOCK_USDM, + BLOCK_USEM, + BLOCK_XCM, + BLOCK_XPB, + BLOCK_XSDM, + BLOCK_XSEM, + BLOCK_MISC_AEU, + NUM_OF_INIT_BLOCKS +}; + + + + + + + + +/* Vnics per mode */ +#define ECORE_PORT2_MODE_NUM_VNICS 4 + + +/* QM queue numbers */ +#define ECORE_ETH_Q 0 +#define ECORE_TOE_Q 3 +#define ECORE_TOE_ACK_Q 6 +#define ECORE_ISCSI_Q 9 +#define ECORE_ISCSI_ACK_Q 11 +#define ECORE_FCOE_Q 10 + +/* Vnics per mode */ +#define ECORE_PORT4_MODE_NUM_VNICS 2 + +/* COS offset for port1 in E3 B0 4port mode */ +#define ECORE_E3B0_PORT1_COS_OFFSET 3 + +/* QM Register addresses */ +#define ECORE_Q_VOQ_REG_ADDR(pf_q_num)\ + (QM_REG_QVOQIDX_0 + 4 * (pf_q_num)) +#define ECORE_VOQ_Q_REG_ADDR(cos, pf_q_num)\ + (QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5))) +#define ECORE_Q_CMDQ_REG_ADDR(pf_q_num)\ + (QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4)) + +/* extracts the QM queue number for the specified port and vnic */ +#define ECORE_PF_Q_NUM(q_num, port, vnic)\ + ((((port) << 1) | (vnic)) * 16 + (q_num)) + + +/* Maps the specified queue to the specified COS */ +static inline void ecore_map_q_cos(struct bnx2x_softc *sc, uint32_t q_num, uint32_t new_cos) +{ + /* find current COS mapping */ + uint32_t curr_cos = REG_RD(sc, QM_REG_QVOQIDX_0 + q_num * 4); + + /* check if queue->COS mapping has changed */ + if (curr_cos != new_cos) { + uint32_t num_vnics = ECORE_PORT2_MODE_NUM_VNICS; + uint32_t reg_addr, reg_bit_map, vnic; + + /* update parameters for 4port mode */ + if (INIT_MODE_FLAGS(sc) & MODE_PORT4) { + num_vnics = ECORE_PORT4_MODE_NUM_VNICS; + if (PORT_ID(sc)) { + curr_cos += ECORE_E3B0_PORT1_COS_OFFSET; + new_cos += ECORE_E3B0_PORT1_COS_OFFSET; + } + } + + /* change queue mapping for each VNIC */ + for (vnic = 0; vnic < num_vnics; vnic++) { + uint32_t pf_q_num = + ECORE_PF_Q_NUM(q_num, PORT_ID(sc), vnic); + uint32_t q_bit_map = 1 << (pf_q_num & 0x1f); + + /* overwrite queue->VOQ mapping */ + REG_WR(sc, ECORE_Q_VOQ_REG_ADDR(pf_q_num), new_cos); + + /* clear queue bit from current COS bit map */ + reg_addr = ECORE_VOQ_Q_REG_ADDR(curr_cos, pf_q_num); + reg_bit_map = REG_RD(sc, reg_addr); + REG_WR(sc, reg_addr, reg_bit_map & (~q_bit_map)); + + /* set queue bit in new COS bit map */ + reg_addr = ECORE_VOQ_Q_REG_ADDR(new_cos, pf_q_num); + reg_bit_map = REG_RD(sc, 
reg_addr); + REG_WR(sc, reg_addr, reg_bit_map | q_bit_map); + + /* set/clear queue bit in command-queue bit map + (E2/E3A0 only, valid COS values are 0/1) */ + if (!(INIT_MODE_FLAGS(sc) & MODE_E3_B0)) { + reg_addr = ECORE_Q_CMDQ_REG_ADDR(pf_q_num); + reg_bit_map = REG_RD(sc, reg_addr); + q_bit_map = 1 << (2 * (pf_q_num & 0xf)); + reg_bit_map = new_cos ? + (reg_bit_map | q_bit_map) : + (reg_bit_map & (~q_bit_map)); + REG_WR(sc, reg_addr, reg_bit_map); + } + } + } +} + +/* Configures the QM according to the specified per-traffic-type COSes */ +static inline void ecore_dcb_config_qm(struct bnx2x_softc *sc, enum cos_mode mode, + struct priority_cos *traffic_cos) +{ + ecore_map_q_cos(sc, ECORE_FCOE_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos); + ecore_map_q_cos(sc, ECORE_ISCSI_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos); + ecore_map_q_cos(sc, ECORE_ISCSI_ACK_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos); + if (mode != STATIC_COS) { + /* required only in OVERRIDE_COS mode */ + ecore_map_q_cos(sc, ECORE_ETH_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); + ecore_map_q_cos(sc, ECORE_TOE_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); + ecore_map_q_cos(sc, ECORE_TOE_ACK_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); + } +} + + +/* + * congestion managment port init api description + * the api works as follows: + * the driver should pass the cmng_init_input struct, the port_init function + * will prepare the required internal ram structure which will be passed back + * to the driver (cmng_init) that will write it into the internal ram. + * + * IMPORTANT REMARKS: + * 1. the cmng_init struct does not represent the contiguous internal ram + * structure. the driver should use the XSTORM_CMNG_PERPORT_VARS_OFFSET + * offset in order to write the port sub struct and the + * PFID_FROM_PORT_AND_VNIC offset for writing the vnic sub struct (in other + * words - don't use memcpy!). + * 2. although the cmng_init struct is filled for the maximal vnic number + * possible, the driver should only write the valid vnics into the internal + * ram according to the appropriate port mode. + */ +#define BITS_TO_BYTES(x) ((x)/8) + +/* CMNG constants, as derived from system spec calculations */ + +/* default MIN rate in case VNIC min rate is configured to zero- 100Mbps */ +#define DEF_MIN_RATE 100 + +/* resolution of the rate shaping timer - 400 usec */ +#define RS_PERIODIC_TIMEOUT_USEC 400 + +/* + * number of bytes in single QM arbitration cycle - + * coefficient for calculating the fairness timer + */ +#define QM_ARB_BYTES 160000 + +/* resolution of Min algorithm 1:100 */ +#define MIN_RES 100 + +/* + * how many bytes above threshold for + * the minimal credit of Min algorithm + */ +#define MIN_ABOVE_THRESH 32768 + +/* + * Fairness algorithm integration time coefficient - + * for calculating the actual Tfair + */ +#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES) + +/* Memory of fairness algorithm - 2 cycles */ +#define FAIR_MEM 2 +#define SAFC_TIMEOUT_USEC 52 + +#define SDM_TICKS 4 + + +static inline void ecore_init_max(const struct cmng_init_input *input_data, + uint32_t r_param, struct cmng_init *ram_data) +{ + uint32_t vnic; + struct cmng_vnic *vdata = &ram_data->vnic; + struct cmng_struct_per_port *pdata = &ram_data->port; + /* + * rate shaping per-port variables + * 100 micro seconds in SDM ticks = 25 + * since each tick is 4 microSeconds + */ + + pdata->rs_vars.rs_periodic_timeout = + RS_PERIODIC_TIMEOUT_USEC / SDM_TICKS; + + /* this is the threshold below which no timer arming will occur. 
+ * 1.25 coefficient is for the threshold to be a little bigger + * then the real time to compensate for timer in-accuracy + */ + pdata->rs_vars.rs_threshold = + (5 * RS_PERIODIC_TIMEOUT_USEC * r_param)/4; + + /* rate shaping per-vnic variables */ + for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++) { + /* global vnic counter */ + vdata->vnic_max_rate[vnic].vn_counter.rate = + input_data->vnic_max_rate[vnic]; + /* + * maximal Mbps for this vnic + * the quota in each timer period - number of bytes + * transmitted in this period + */ + vdata->vnic_max_rate[vnic].vn_counter.quota = + RS_PERIODIC_TIMEOUT_USEC * + (uint32_t)vdata->vnic_max_rate[vnic].vn_counter.rate / 8; + } + +} + +static inline void ecore_init_max_per_vn(uint16_t vnic_max_rate, + struct rate_shaping_vars_per_vn *ram_data) +{ + /* global vnic counter */ + ram_data->vn_counter.rate = vnic_max_rate; + + /* + * maximal Mbps for this vnic + * the quota in each timer period - number of bytes + * transmitted in this period + */ + ram_data->vn_counter.quota = + RS_PERIODIC_TIMEOUT_USEC * (uint32_t)vnic_max_rate / 8; +} + +static inline void ecore_init_min(const struct cmng_init_input *input_data, + uint32_t r_param, struct cmng_init *ram_data) +{ + uint32_t vnic, fair_periodic_timeout_usec, vnicWeightSum, tFair; + struct cmng_vnic *vdata = &ram_data->vnic; + struct cmng_struct_per_port *pdata = &ram_data->port; + + /* this is the resolution of the fairness timer */ + fair_periodic_timeout_usec = QM_ARB_BYTES / r_param; + + /* + * fairness per-port variables + * for 10G it is 1000usec. for 1G it is 10000usec. + */ + tFair = T_FAIR_COEF / input_data->port_rate; + + /* this is the threshold below which we won't arm the timer anymore */ + pdata->fair_vars.fair_threshold = QM_ARB_BYTES; + + /* + * we multiply by 1e3/8 to get bytes/msec. We don't want the credits + * to pass a credit of the T_FAIR*FAIR_MEM (algorithm resolution) + */ + pdata->fair_vars.upper_bound = r_param * tFair * FAIR_MEM; + + /* since each tick is 4 microSeconds */ + pdata->fair_vars.fairness_timeout = + fair_periodic_timeout_usec / SDM_TICKS; + + /* calculate sum of weights */ + vnicWeightSum = 0; + + for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++) + vnicWeightSum += input_data->vnic_min_rate[vnic]; + + /* global vnic counter */ + if (vnicWeightSum > 0) { + /* fairness per-vnic variables */ + for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++) { + /* + * this is the credit for each period of the fairness + * algorithm - number of bytes in T_FAIR (this vnic + * share of the port rate) + */ + vdata->vnic_min_rate[vnic].vn_credit_delta = + ((uint32_t)(input_data->vnic_min_rate[vnic]) * 100 * + (T_FAIR_COEF / (8 * 100 * vnicWeightSum))); + if (vdata->vnic_min_rate[vnic].vn_credit_delta < + pdata->fair_vars.fair_threshold + + MIN_ABOVE_THRESH) { + vdata->vnic_min_rate[vnic].vn_credit_delta = + pdata->fair_vars.fair_threshold + + MIN_ABOVE_THRESH; + } + } + } +} + +static inline void ecore_init_fw_wrr(const struct cmng_init_input *input_data, + struct cmng_init *ram_data) +{ + uint32_t vnic, cos; + uint32_t cosWeightSum = 0; + struct cmng_vnic *vdata = &ram_data->vnic; + struct cmng_struct_per_port *pdata = &ram_data->port; + + for (cos = 0; cos < MAX_COS_NUMBER; cos++) + cosWeightSum += input_data->cos_min_rate[cos]; + + if (cosWeightSum > 0) { + + for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++) { + /* + * Since cos and vnic shouldn't work together the rate + * to divide between the coses is the port rate. 
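+ *
+ * (Editorial note, not part of the original ecore code: the
+ * expression below reduces to
+ * cos_min_rate[cos] / cosWeightSum * T_FAIR_COEF / 8, i.e. each CoS
+ * receives a byte credit proportional to its share of the configured
+ * weights over one T_FAIR window; the paired factors of 100 cancel
+ * and only affect integer rounding. The result is then clamped to at
+ * least fair_threshold + MIN_ABOVE_THRESH, presumably so that a
+ * low-weight CoS still gets a usable minimum credit.)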
+ */ + uint32_t *ccd = vdata->vnic_min_rate[vnic].cos_credit_delta; + for (cos = 0; cos < MAX_COS_NUMBER; cos++) { + /* + * this is the credit for each period of + * the fairness algorithm - number of bytes + * in T_FAIR (this cos share of the vnic rate) + */ + ccd[cos] = + ((uint32_t)input_data->cos_min_rate[cos] * 100 * + (T_FAIR_COEF / (8 * 100 * cosWeightSum))); + if (ccd[cos] < pdata->fair_vars.fair_threshold + + MIN_ABOVE_THRESH) { + ccd[cos] = + pdata->fair_vars.fair_threshold + + MIN_ABOVE_THRESH; + } + } + } + } +} + +static inline void ecore_init_safc(struct cmng_init *ram_data) +{ + /* in microSeconds */ + ram_data->port.safc_vars.safc_timeout_usec = SAFC_TIMEOUT_USEC; +} + +/* Congestion management port init */ +static inline void ecore_init_cmng(const struct cmng_init_input *input_data, + struct cmng_init *ram_data) +{ + uint32_t r_param; + ECORE_MEMSET(ram_data, 0,sizeof(struct cmng_init)); + + ram_data->port.flags = input_data->flags; + + /* + * number of bytes transmitted in a rate of 10Gbps + * in one usec = 1.25KB. + */ + r_param = BITS_TO_BYTES(input_data->port_rate); + ecore_init_max(input_data, r_param, ram_data); + ecore_init_min(input_data, r_param, ram_data); + ecore_init_fw_wrr(input_data, ram_data); + ecore_init_safc(ram_data); +} + + + + +/* Returns the index of start or end of a specific block stage in ops array*/ +#define BLOCK_OPS_IDX(block, stage, end) \ + (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end)) + + +#define INITOP_SET 0 /* set the HW directly */ +#define INITOP_CLEAR 1 /* clear the HW directly */ +#define INITOP_INIT 2 /* set the init-value array */ + +/**************************************************************************** +* ILT management +****************************************************************************/ +struct ilt_line { + ecore_dma_addr_t page_mapping; + void *page; + uint32_t size; +}; + +struct ilt_client_info { + uint32_t page_size; + uint16_t start; + uint16_t end; + uint16_t client_num; + uint16_t flags; +#define ILT_CLIENT_SKIP_INIT 0x1 +#define ILT_CLIENT_SKIP_MEM 0x2 +}; + +struct ecore_ilt { + uint32_t start_line; + struct ilt_line *lines; + struct ilt_client_info clients[4]; +#define ILT_CLIENT_CDU 0 +#define ILT_CLIENT_QM 1 +#define ILT_CLIENT_SRC 2 +#define ILT_CLIENT_TM 3 +}; + +/**************************************************************************** +* SRC configuration +****************************************************************************/ +struct src_ent { + uint8_t opaque[56]; + uint64_t next; +}; + +/**************************************************************************** +* Parity configuration +****************************************************************************/ +#define BLOCK_PRTY_INFO(block, en_mask, m1h, m2, m3) \ +{ \ + block##_REG_##block##_PRTY_MASK, \ + block##_REG_##block##_PRTY_STS_CLR, \ + en_mask, {m1h, m2, m3}, #block \ +} + +#define BLOCK_PRTY_INFO_0(block, en_mask, m1h, m2, m3) \ +{ \ + block##_REG_##block##_PRTY_MASK_0, \ + block##_REG_##block##_PRTY_STS_CLR_0, \ + en_mask, {m1h, m2, m3}, #block"_0" \ +} + +#define BLOCK_PRTY_INFO_1(block, en_mask, m1h, m2, m3) \ +{ \ + block##_REG_##block##_PRTY_MASK_1, \ + block##_REG_##block##_PRTY_STS_CLR_1, \ + en_mask, {m1h, m2, m3}, #block"_1" \ +} + +static const struct { + uint32_t mask_addr; + uint32_t sts_clr_addr; + uint32_t en_mask; /* Mask to enable parity attentions */ + struct { + uint32_t e1h; /* 57711 */ + uint32_t e2; /* 57712 */ + uint32_t e3; /* 578xx */ + } reg_mask; /* Register mask (all valid bits) */ + 
char name[8]; /* Block's longest name is 7 characters long + * (name + suffix) + */ +} ecore_blocks_parity_data[] = { + /* bit 19 masked */ + /* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */ + /* bit 5,18,20-31 */ + /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */ + /* bit 5 */ + /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */ + /* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */ + /* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */ + + /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't + * want to handle "system kill" flow at the moment. + */ + BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x7ffffff, + 0x7ffffff), + BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff), + BLOCK_PRTY_INFO_1(PXP2, 0x1ffffff, 0x7f, 0x7ff, 0x1ffffff), + BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0, 0), + BLOCK_PRTY_INFO(NIG, 0xffffffff, 0xffffffff, 0, 0), + BLOCK_PRTY_INFO_0(NIG, 0xffffffff, 0, 0xffffffff, 0xffffffff), + BLOCK_PRTY_INFO_1(NIG, 0xffff, 0, 0xff, 0xffff), + BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1), + BLOCK_PRTY_INFO(QM, 0, 0xfff, 0xfff, 0xfff), + BLOCK_PRTY_INFO(ATC, 0x1f, 0, 0x1f, 0x1f), + BLOCK_PRTY_INFO(PGLUE_B, 0x3, 0, 0x3, 0x3), + BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3), + {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, + GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0xf, + {0xf, 0xf, 0xf}, "UPB"}, + {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, + GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0, + {0xf, 0xf, 0xf}, "XPB"}, + BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7), + BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f), + BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0x3f), + BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1), + BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf), + BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf), + BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff), + BLOCK_PRTY_INFO(PBF, 0, 0x3ffff, 0xfffff, 0xfffffff), + BLOCK_PRTY_INFO(TM, 0, 0x7f, 0x7f, 0x7f), + BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(TCM, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff), + BLOCK_PRTY_INFO(CCM, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff), + BLOCK_PRTY_INFO(UCM, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff), + BLOCK_PRTY_INFO(XCM, 0, 0x3fffffff, 0x3fffffff, 0x3fffffff), + BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), + BLOCK_PRTY_INFO_1(TSEM, 0, 0x1f, 0x3f, 0x3f), + BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), + BLOCK_PRTY_INFO_1(USEM, 0, 0x1f, 0x1f, 0x1f), + BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), + BLOCK_PRTY_INFO_1(CSEM, 0, 0x1f, 0x1f, 0x1f), + BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), + BLOCK_PRTY_INFO_1(XSEM, 0, 0x1f, 0x3f, 0x3f), +}; + + +/* [28] MCP Latched rom_parity + * [29] MCP Latched ump_rx_parity + * [30] MCP Latched ump_tx_parity + * [31] MCP Latched scpad_parity + */ +#define MISC_AEU_ENABLE_MCP_PRTY_BITS \ + (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) + +/* Below registers control the MCP parity attention output. When + * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are + * enabled, when cleared - disabled. 
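+ *
+ * (Editorial note, not part of the original ecore code:
+ * ecore_set_mcp_parity() below walks this six-entry list, covering
+ * the ENABLE4 registers routed to function 0/1, NIG 0/1 and PXP 0/1,
+ * and read-modify-writes each one, either ORing in or masking out
+ * the four MCP latched-parity bits defined above.)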
+ */ +static const uint32_t mcp_attn_ctl_regs[] = { + MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, + MISC_REG_AEU_ENABLE4_NIG_0, + MISC_REG_AEU_ENABLE4_PXP_0, + MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0, + MISC_REG_AEU_ENABLE4_NIG_1, + MISC_REG_AEU_ENABLE4_PXP_1 +}; + +static inline void ecore_set_mcp_parity(struct bnx2x_softc *sc, uint8_t enable) +{ + uint32_t i; + uint32_t reg_val; + + for (i = 0; i < ARRSIZE(mcp_attn_ctl_regs); i++) { + reg_val = REG_RD(sc, mcp_attn_ctl_regs[i]); + + if (enable) + reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS; + else + reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS; + + REG_WR(sc, mcp_attn_ctl_regs[i], reg_val); + } +} + +static inline uint32_t ecore_parity_reg_mask(struct bnx2x_softc *sc, int idx) +{ + if (CHIP_IS_E1H(sc)) + return ecore_blocks_parity_data[idx].reg_mask.e1h; + else if (CHIP_IS_E2(sc)) + return ecore_blocks_parity_data[idx].reg_mask.e2; + else /* CHIP_IS_E3 */ + return ecore_blocks_parity_data[idx].reg_mask.e3; +} + +static inline void ecore_disable_blocks_parity(struct bnx2x_softc *sc) +{ + uint32_t i; + + for (i = 0; i < ARRSIZE(ecore_blocks_parity_data); i++) { + uint32_t dis_mask = ecore_parity_reg_mask(sc, i); + + if (dis_mask) { + REG_WR(sc, ecore_blocks_parity_data[i].mask_addr, + dis_mask); + ECORE_MSG("Setting parity mask " + "for %s to\t\t0x%x", + ecore_blocks_parity_data[i].name, dis_mask); + } + } + + /* Disable MCP parity attentions */ + ecore_set_mcp_parity(sc, FALSE); +} + +/** + * Clear the parity error status registers. + */ +static inline void ecore_clear_blocks_parity(struct bnx2x_softc *sc) +{ + uint32_t i; + uint32_t reg_val, mcp_aeu_bits = + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY | + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY; + + /* Clear SEM_FAST parities */ + REG_WR(sc, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); + REG_WR(sc, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); + REG_WR(sc, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); + REG_WR(sc, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); + + for (i = 0; i < ARRSIZE(ecore_blocks_parity_data); i++) { + uint32_t reg_mask = ecore_parity_reg_mask(sc, i); + + if (reg_mask) { + reg_val = REG_RD(sc, ecore_blocks_parity_data[i]. 
+ sts_clr_addr); + if (reg_val & reg_mask) + ECORE_MSG("Parity errors in %s: 0x%x", + ecore_blocks_parity_data[i].name, + reg_val & reg_mask); + } + } + + /* Check if there were parity attentions in MCP */ + reg_val = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_MCP); + if (reg_val & mcp_aeu_bits) + ECORE_MSG("Parity error in MCP: 0x%x", + reg_val & mcp_aeu_bits); + + /* Clear parity attentions in MCP: + * [7] clears Latched rom_parity + * [8] clears Latched ump_rx_parity + * [9] clears Latched ump_tx_parity + * [10] clears Latched scpad_parity (both ports) + */ + REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780); +} + +static inline void ecore_enable_blocks_parity(struct bnx2x_softc *sc) +{ + uint32_t i; + + for (i = 0; i < ARRSIZE(ecore_blocks_parity_data); i++) { + uint32_t reg_mask = ecore_parity_reg_mask(sc, i); + + if (reg_mask) + REG_WR(sc, ecore_blocks_parity_data[i].mask_addr, + ecore_blocks_parity_data[i].en_mask & reg_mask); + } + + /* Enable MCP parity attentions */ + ecore_set_mcp_parity(sc, TRUE); +} + + +#endif /* ECORE_INIT_H */ diff --git a/drivers/net/bnx2x/ecore_init_ops.h b/drivers/net/bnx2x/ecore_init_ops.h new file mode 100644 index 00000000..b6f98324 --- /dev/null +++ b/drivers/net/bnx2x/ecore_init_ops.h @@ -0,0 +1,865 @@ +/*- + * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * + * Eric Davis <edavis@broadcom.com> + * David Christensen <davidch@broadcom.com> + * Gary Zambrano <zambrano@broadcom.com> + * + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.bnx2x_pmd for copyright and licensing details. + */ + +#ifndef ECORE_INIT_OPS_H +#define ECORE_INIT_OPS_H + +static int ecore_gunzip(struct bnx2x_softc *sc, const uint8_t *zbuf, int len); +static void ecore_write_dmae_phys_len(struct bnx2x_softc *sc, + ecore_dma_addr_t phys_addr, uint32_t addr, + uint32_t len); + +static void ecore_init_str_wr(struct bnx2x_softc *sc, uint32_t addr, + const uint32_t *data, uint32_t len) +{ + uint32_t i; + + for (i = 0; i < len; i++) + REG_WR(sc, addr + i*4, data[i]); +} + +static void ecore_write_big_buf(struct bnx2x_softc *sc, uint32_t addr, uint32_t len) +{ + if (DMAE_READY(sc)) + ecore_write_dmae_phys_len(sc, GUNZIP_PHYS(sc), addr, len); + + else ecore_init_str_wr(sc, addr, GUNZIP_BUF(sc), len); +} + +static void ecore_init_fill(struct bnx2x_softc *sc, uint32_t addr, int fill, + uint32_t len) +{ + uint32_t buf_len = (((len*4) > FW_BUF_SIZE) ? 
FW_BUF_SIZE : (len*4)); + uint32_t buf_len32 = buf_len/4; + uint32_t i; + + ECORE_MEMSET(GUNZIP_BUF(sc), (uint8_t)fill, buf_len); + + for (i = 0; i < len; i += buf_len32) { + uint32_t cur_len = min(buf_len32, len - i); + + ecore_write_big_buf(sc, addr + i*4, cur_len); + } +} + +static void ecore_write_big_buf_wb(struct bnx2x_softc *sc, uint32_t addr, uint32_t len) +{ + if (DMAE_READY(sc)) + ecore_write_dmae_phys_len(sc, GUNZIP_PHYS(sc), addr, len); + + else ecore_init_str_wr(sc, addr, GUNZIP_BUF(sc), len); +} + +static void ecore_init_wr_64(struct bnx2x_softc *sc, uint32_t addr, + const uint32_t *data, uint32_t len64) +{ + uint32_t buf_len32 = FW_BUF_SIZE/4; + uint32_t len = len64*2; + uint64_t data64 = 0; + uint32_t i; + + /* 64 bit value is in a blob: first low DWORD, then high DWORD */ + data64 = HILO_U64((*(data + 1)), (*data)); + + len64 = min((uint32_t)(FW_BUF_SIZE/8), len64); + for (i = 0; i < len64; i++) { + uint64_t *pdata = ((uint64_t *)(GUNZIP_BUF(sc))) + i; + + *pdata = data64; + } + + for (i = 0; i < len; i += buf_len32) { + uint32_t cur_len = min(buf_len32, len - i); + + ecore_write_big_buf_wb(sc, addr + i*4, cur_len); + } +} + +/********************************************************* + There are different blobs for each PRAM section. + In addition, each blob write operation is divided into a few operations + in order to decrease the amount of phys. contiguous buffer needed. + Thus, when we select a blob the address may be with some offset + from the beginning of PRAM section. + The same holds for the INT_TABLE sections. +**********************************************************/ +#define IF_IS_INT_TABLE_ADDR(base, addr) \ + if (((base) <= (addr)) && ((base) + 0x400 >= (addr))) + +#define IF_IS_PRAM_ADDR(base, addr) \ + if (((base) <= (addr)) && ((base) + 0x40000 >= (addr))) + +static const uint8_t *ecore_sel_blob(struct bnx2x_softc *sc, uint32_t addr, + const uint8_t *data) +{ + IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr) + data = INIT_TSEM_INT_TABLE_DATA(sc); + else + IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr) + data = INIT_CSEM_INT_TABLE_DATA(sc); + else + IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr) + data = INIT_USEM_INT_TABLE_DATA(sc); + else + IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr) + data = INIT_XSEM_INT_TABLE_DATA(sc); + else + IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr) + data = INIT_TSEM_PRAM_DATA(sc); + else + IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr) + data = INIT_CSEM_PRAM_DATA(sc); + else + IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr) + data = INIT_USEM_PRAM_DATA(sc); + else + IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr) + data = INIT_XSEM_PRAM_DATA(sc); + + return data; +} + +static void ecore_init_wr_wb(struct bnx2x_softc *sc, uint32_t addr, + const uint32_t *data, uint32_t len) +{ + if (DMAE_READY(sc)) + VIRT_WR_DMAE_LEN(sc, data, addr, len, 0); + + else ecore_init_str_wr(sc, addr, data, len); +} + +static void ecore_wr_64(struct bnx2x_softc *sc, uint32_t reg, uint32_t val_lo, + uint32_t val_hi) +{ + uint32_t wb_write[2]; + + wb_write[0] = val_lo; + wb_write[1] = val_hi; + REG_WR_DMAE_LEN(sc, reg, wb_write, 2); +} + +static void ecore_init_wr_zp(struct bnx2x_softc *sc, uint32_t addr, uint32_t len, + uint32_t blob_off) +{ + const uint8_t *data = NULL; + int rc; + uint32_t i; + + data = ecore_sel_blob(sc, addr, data) + blob_off*4; + + rc = ecore_gunzip(sc, data, len); + if (rc) + return; + + /* gunzip_outlen is in dwords */ + len = GUNZIP_OUTLEN(sc); + for (i = 0; i < len; i++) + ((uint32_t *)GUNZIP_BUF(sc))[i] = (uint32_t) + ECORE_CPU_TO_LE32(((uint32_t 
*)GUNZIP_BUF(sc))[i]); + + ecore_write_big_buf_wb(sc, addr, len); +} + +static void ecore_init_block(struct bnx2x_softc *sc, uint32_t block, uint32_t stage) +{ + uint16_t op_start = + INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage, + STAGE_START)]; + uint16_t op_end = + INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage, + STAGE_END)]; + const union init_op *op; + uint32_t op_idx, op_type, addr, len; + const uint32_t *data, *data_base; + + /* If empty block */ + if (op_start == op_end) + return; + + data_base = INIT_DATA(sc); + + for (op_idx = op_start; op_idx < op_end; op_idx++) { + + op = (const union init_op *)&(INIT_OPS(sc)[op_idx]); + /* Get generic data */ + op_type = op->raw.op; + addr = op->raw.offset; + /* Get data that's used for OP_SW, OP_WB, OP_FW, OP_ZP and + * OP_WR64 (we assume that op_arr_write and op_write have the + * same structure). + */ + len = op->arr_wr.data_len; + data = data_base + op->arr_wr.data_off; + + switch (op_type) { + case OP_RD: + REG_RD(sc, addr); + break; + case OP_WR: + REG_WR(sc, addr, op->write.val); + break; + case OP_SW: + ecore_init_str_wr(sc, addr, data, len); + break; + case OP_WB: + ecore_init_wr_wb(sc, addr, data, len); + break; + case OP_ZR: + case OP_WB_ZR: + ecore_init_fill(sc, addr, 0, op->zero.len); + break; + case OP_ZP: + ecore_init_wr_zp(sc, addr, len, op->arr_wr.data_off); + break; + case OP_WR_64: + ecore_init_wr_64(sc, addr, data, len); + break; + case OP_IF_MODE_AND: + /* if any of the flags doesn't match, skip the + * conditional block. + */ + if ((INIT_MODE_FLAGS(sc) & + op->if_mode.mode_bit_map) != + op->if_mode.mode_bit_map) + op_idx += op->if_mode.cmd_offset; + break; + case OP_IF_MODE_OR: + /* if all the flags don't match, skip the conditional + * block. + */ + if ((INIT_MODE_FLAGS(sc) & + op->if_mode.mode_bit_map) == 0) + op_idx += op->if_mode.cmd_offset; + break; + /* the following opcodes are unused at the moment. */ + case OP_IF_PHASE: + case OP_RT: + case OP_DELAY: + case OP_VERIFY: + default: + /* Should never get here! */ + + break; + } + } +} + + +/**************************************************************************** +* PXP Arbiter +****************************************************************************/ +/* + * This code configures the PCI read/write arbiter + * which implements a weighted round robin + * between the virtual queues in the chip. + * + * The values were derived for each PCI max payload and max request size. + * since max payload and max request size are only known at run time, + * this is done as a separate init stage. 
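+ *
+ * (Editorial note, not part of the original ecore code: w_order and
+ * r_order index the tables below and encode the PCIe max payload and
+ * max read request size respectively; per the "MPS w_order" table
+ * further down, 128 bytes maps to 0, 256 to 1 and 512 or more to 2,
+ * and reads presumably get one extra step up to MAX_RD_ORD = 3 for
+ * 1024-byte requests. Each arb_line entry supplies the l/add/ubound
+ * triplet that ecore_init_pxp_arb() writes to the matching per-queue
+ * PXP2 bandwidth registers.)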
+ */ + +#define NUM_WR_Q 13 +#define NUM_RD_Q 29 +#define MAX_RD_ORD 3 +#define MAX_WR_ORD 2 + +/* configuration for one arbiter queue */ +struct arb_line { + int l; + int add; + int ubound; +}; + +/* derived configuration for each read queue for each max request size */ +static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = { +/* 1 */ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} }, + { {4, 8, 4}, {4, 8, 4}, {4, 8, 4}, {4, 8, 4} }, + { {4, 3, 3}, {4, 3, 3}, {4, 3, 3}, {4, 3, 3} }, + { {8, 3, 6}, {16, 3, 11}, {16, 3, 11}, {16, 3, 11} }, + { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, +/* 10 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 64, 6}, {16, 64, 11}, {32, 64, 21}, {32, 64, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, +/* 20 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} } +}; + +/* derived configuration for each write queue for each max request size */ +static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = { +/* 1 */ { {4, 6, 3}, {4, 6, 3}, {4, 6, 3} }, + { {4, 2, 3}, {4, 2, 3}, {4, 2, 3} }, + { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} }, + { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} }, + { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} }, + { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} }, + { {8, 64, 25}, {16, 64, 25}, {32, 64, 25} }, + { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} }, + { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} }, +/* 10 */{ {8, 9, 6}, {16, 9, 11}, {32, 9, 21} }, + { {8, 47, 19}, {16, 47, 19}, {32, 47, 21} }, + { {8, 9, 6}, {16, 9, 11}, {16, 9, 11} }, + { {8, 64, 25}, {16, 64, 41}, {32, 64, 81} } +}; + +/* register addresses for read queues */ +static const struct arb_line read_arb_addr[NUM_RD_Q-1] = { +/* 1 */ {PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0, + PXP2_REG_RQ_BW_RD_UBOUND0}, + {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1, + PXP2_REG_PSWRQ_BW_UB1}, + {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2, + PXP2_REG_PSWRQ_BW_UB2}, + {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3, + PXP2_REG_PSWRQ_BW_UB3}, + {PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4, + PXP2_REG_RQ_BW_RD_UBOUND4}, + {PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5, + PXP2_REG_RQ_BW_RD_UBOUND5}, + {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6, + PXP2_REG_PSWRQ_BW_UB6}, + {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7, + PXP2_REG_PSWRQ_BW_UB7}, + {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8, + PXP2_REG_PSWRQ_BW_UB8}, +/* 10 
*/{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9, + PXP2_REG_PSWRQ_BW_UB9}, + {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10, + PXP2_REG_PSWRQ_BW_UB10}, + {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11, + PXP2_REG_PSWRQ_BW_UB11}, + {PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12, + PXP2_REG_RQ_BW_RD_UBOUND12}, + {PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13, + PXP2_REG_RQ_BW_RD_UBOUND13}, + {PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14, + PXP2_REG_RQ_BW_RD_UBOUND14}, + {PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15, + PXP2_REG_RQ_BW_RD_UBOUND15}, + {PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16, + PXP2_REG_RQ_BW_RD_UBOUND16}, + {PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17, + PXP2_REG_RQ_BW_RD_UBOUND17}, + {PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18, + PXP2_REG_RQ_BW_RD_UBOUND18}, +/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19, + PXP2_REG_RQ_BW_RD_UBOUND19}, + {PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20, + PXP2_REG_RQ_BW_RD_UBOUND20}, + {PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22, + PXP2_REG_RQ_BW_RD_UBOUND22}, + {PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23, + PXP2_REG_RQ_BW_RD_UBOUND23}, + {PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24, + PXP2_REG_RQ_BW_RD_UBOUND24}, + {PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25, + PXP2_REG_RQ_BW_RD_UBOUND25}, + {PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26, + PXP2_REG_RQ_BW_RD_UBOUND26}, + {PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27, + PXP2_REG_RQ_BW_RD_UBOUND27}, + {PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28, + PXP2_REG_PSWRQ_BW_UB28} +}; + +/* register addresses for write queues */ +static const struct arb_line write_arb_addr[NUM_WR_Q-1] = { +/* 1 */ {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1, + PXP2_REG_PSWRQ_BW_UB1}, + {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2, + PXP2_REG_PSWRQ_BW_UB2}, + {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3, + PXP2_REG_PSWRQ_BW_UB3}, + {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6, + PXP2_REG_PSWRQ_BW_UB6}, + {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7, + PXP2_REG_PSWRQ_BW_UB7}, + {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8, + PXP2_REG_PSWRQ_BW_UB8}, + {PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9, + PXP2_REG_PSWRQ_BW_UB9}, + {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10, + PXP2_REG_PSWRQ_BW_UB10}, + {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11, + PXP2_REG_PSWRQ_BW_UB11}, +/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28, + PXP2_REG_PSWRQ_BW_UB28}, + {PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29, + PXP2_REG_RQ_BW_WR_UBOUND29}, + {PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30, + PXP2_REG_RQ_BW_WR_UBOUND30} +}; + +static void ecore_init_pxp_arb(struct bnx2x_softc *sc, int r_order, + int w_order) +{ + uint32_t val, i; + + if (r_order > MAX_RD_ORD) { + ECORE_MSG("read order of %d order adjusted to %d", + r_order, MAX_RD_ORD); + r_order = MAX_RD_ORD; + } + if (w_order > MAX_WR_ORD) { + ECORE_MSG("write order of %d order adjusted to %d", + w_order, MAX_WR_ORD); + w_order = MAX_WR_ORD; + } + if (CHIP_REV_IS_FPGA(sc)) { + ECORE_MSG("write order adjusted to 1 for FPGA"); + w_order = 0; + } + ECORE_MSG("read order %d write order %d", r_order, w_order); + + for (i = 0; i < NUM_RD_Q-1; i++) { + REG_WR(sc, read_arb_addr[i].l, read_arb_data[i][r_order].l); + REG_WR(sc, read_arb_addr[i].add, + read_arb_data[i][r_order].add); + REG_WR(sc, read_arb_addr[i].ubound, + read_arb_data[i][r_order].ubound); + } + + for (i = 0; i < NUM_WR_Q-1; i++) { + if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) || + (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) { + + REG_WR(sc, 
write_arb_addr[i].l, + write_arb_data[i][w_order].l); + + REG_WR(sc, write_arb_addr[i].add, + write_arb_data[i][w_order].add); + + REG_WR(sc, write_arb_addr[i].ubound, + write_arb_data[i][w_order].ubound); + } else { + + val = REG_RD(sc, write_arb_addr[i].l); + REG_WR(sc, write_arb_addr[i].l, + val | (write_arb_data[i][w_order].l << 10)); + + val = REG_RD(sc, write_arb_addr[i].add); + REG_WR(sc, write_arb_addr[i].add, + val | (write_arb_data[i][w_order].add << 10)); + + val = REG_RD(sc, write_arb_addr[i].ubound); + REG_WR(sc, write_arb_addr[i].ubound, + val | (write_arb_data[i][w_order].ubound << 7)); + } + } + + val = write_arb_data[NUM_WR_Q-1][w_order].add; + val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10; + val += write_arb_data[NUM_WR_Q-1][w_order].l << 17; + REG_WR(sc, PXP2_REG_PSWRQ_BW_RD, val); + + val = read_arb_data[NUM_RD_Q-1][r_order].add; + val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10; + val += read_arb_data[NUM_RD_Q-1][r_order].l << 17; + REG_WR(sc, PXP2_REG_PSWRQ_BW_WR, val); + + REG_WR(sc, PXP2_REG_RQ_WR_MBS0, w_order); + REG_WR(sc, PXP2_REG_RQ_WR_MBS1, w_order); + REG_WR(sc, PXP2_REG_RQ_RD_MBS0, r_order); + REG_WR(sc, PXP2_REG_RQ_RD_MBS1, r_order); + + if (CHIP_IS_E1H(sc) && (r_order == MAX_RD_ORD)) + REG_WR(sc, PXP2_REG_RQ_PDR_LIMIT, 0xe00); + + if (CHIP_IS_E3(sc)) + REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order)); + else if (CHIP_IS_E2(sc)) + REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order)); + else + REG_WR(sc, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order)); + + /* MPS w_order optimal TH presently TH + * 128 0 0 2 + * 256 1 1 3 + * >=512 2 2 3 + */ + /* DMAE is special */ + if (!CHIP_IS_E1H(sc)) { + /* E2 can use optimal TH */ + val = w_order; + REG_WR(sc, PXP2_REG_WR_DMAE_MPS, val); + } else { + val = ((w_order == 0) ? 2 : 3); + REG_WR(sc, PXP2_REG_WR_DMAE_MPS, 2); + } + + REG_WR(sc, PXP2_REG_WR_HC_MPS, val); + REG_WR(sc, PXP2_REG_WR_USDM_MPS, val); + REG_WR(sc, PXP2_REG_WR_CSDM_MPS, val); + REG_WR(sc, PXP2_REG_WR_TSDM_MPS, val); + REG_WR(sc, PXP2_REG_WR_XSDM_MPS, val); + REG_WR(sc, PXP2_REG_WR_QM_MPS, val); + REG_WR(sc, PXP2_REG_WR_TM_MPS, val); + REG_WR(sc, PXP2_REG_WR_SRC_MPS, val); + REG_WR(sc, PXP2_REG_WR_DBG_MPS, val); + REG_WR(sc, PXP2_REG_WR_CDU_MPS, val); + + /* Validate number of tags suppoted by device */ +#define PCIE_REG_PCIER_TL_HDR_FC_ST 0x2980 + val = REG_RD(sc, PCIE_REG_PCIER_TL_HDR_FC_ST); + val &= 0xFF; + if (val <= 0x20) + REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x20); +} + +/**************************************************************************** +* ILT management +****************************************************************************/ +/* + * This codes hides the low level HW interaction for ILT management and + * configuration. The API consists of a shadow ILT table which is set by the + * driver and a set of routines to use it to configure the HW. 
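+ *
+ * (Editorial illustration, not part of the original ecore code: with
+ * the ILT_ADDR1()/ILT_ADDR2() macros defined below, a 4KB-aligned
+ * page mapping such as 0x1234567000 is programmed as two 32-bit
+ * writes, ILT_ADDR1 = 0x01234567 (address bits 12..43) and
+ * ILT_ADDR2 = 0x00100000 (address bits 44 and up, plus the valid
+ * bit), which is exactly what ecore_ilt_line_wr() hands to
+ * ecore_wr_64() for each ILT line.)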
+ * + */ + +/* ILT HW init operations */ + +/* ILT memory management operations */ +#define ILT_MEMOP_ALLOC 0 +#define ILT_MEMOP_FREE 1 + +/* the phys address is shifted right 12 bits and has an added + * 1=valid bit added to the 53rd bit + * then since this is a wide register(TM) + * we split it into two 32 bit writes + */ +#define ILT_ADDR1(x) ((uint32_t)(((uint64_t)x >> 12) & 0xFFFFFFFF)) +#define ILT_ADDR2(x) ((uint32_t)((1 << 20) | ((uint64_t)x >> 44))) +#define ILT_RANGE(f, l) (((l) << 10) | f) + +static int ecore_ilt_line_mem_op(struct bnx2x_softc *sc, + struct ilt_line *line, uint32_t size, uint8_t memop, int cli_num, int i) +{ +#define ECORE_ILT_NAMESIZE 10 + char str[ECORE_ILT_NAMESIZE]; + + if (memop == ILT_MEMOP_FREE) { + ECORE_ILT_FREE(line->page, line->page_mapping, line->size); + return 0; + } + snprintf(str, ECORE_ILT_NAMESIZE, "ILT_%d_%d", cli_num, i); + ECORE_ILT_ZALLOC(line->page, &line->page_mapping, size, str); + if (!line->page) + return -1; + line->size = size; + return 0; +} + + +static int ecore_ilt_client_mem_op(struct bnx2x_softc *sc, int cli_num, + uint8_t memop) +{ + int i, rc = 0; + struct ecore_ilt *ilt = SC_ILT(sc); + struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; + + if (!ilt || !ilt->lines) + return -1; + + if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM)) + return 0; + + for (i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) { + rc = ecore_ilt_line_mem_op(sc, &ilt->lines[i], + ilt_cli->page_size, memop, cli_num, i); + } + return rc; +} + +static inline int ecore_ilt_mem_op_cnic(struct bnx2x_softc *sc, uint8_t memop) +{ + int rc = 0; + + if (CONFIGURE_NIC_MODE(sc)) + rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_SRC, memop); + if (!rc) + rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_TM, memop); + + return rc; +} + +static int ecore_ilt_mem_op(struct bnx2x_softc *sc, uint8_t memop) +{ + int rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_CDU, memop); + if (!rc) + rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_QM, memop); + if (!rc && CNIC_SUPPORT(sc) && !CONFIGURE_NIC_MODE(sc)) + rc = ecore_ilt_client_mem_op(sc, ILT_CLIENT_SRC, memop); + + return rc; +} + +static void ecore_ilt_line_wr(struct bnx2x_softc *sc, int abs_idx, + ecore_dma_addr_t page_mapping) +{ + uint32_t reg; + + reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8; + + ecore_wr_64(sc, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping)); +} + +static void ecore_ilt_line_init_op(struct bnx2x_softc *sc, + struct ecore_ilt *ilt, int idx, uint8_t initop) +{ + ecore_dma_addr_t null_mapping; + int abs_idx = ilt->start_line + idx; + + switch (initop) { + case INITOP_INIT: + /* set in the init-value array */ + case INITOP_SET: + ecore_ilt_line_wr(sc, abs_idx, ilt->lines[idx].page_mapping); + break; + case INITOP_CLEAR: + null_mapping = 0; + ecore_ilt_line_wr(sc, abs_idx, null_mapping); + break; + } +} + +static void ecore_ilt_boundry_init_op(struct bnx2x_softc *sc, + struct ilt_client_info *ilt_cli, + uint32_t ilt_start) +{ + uint32_t start_reg = 0; + uint32_t end_reg = 0; + + /* The boundary is either SET or INIT, + CLEAR => SET and for now SET ~~ INIT */ + + /* find the appropriate regs */ + switch (ilt_cli->client_num) { + case ILT_CLIENT_CDU: + start_reg = PXP2_REG_RQ_CDU_FIRST_ILT; + end_reg = PXP2_REG_RQ_CDU_LAST_ILT; + break; + case ILT_CLIENT_QM: + start_reg = PXP2_REG_RQ_QM_FIRST_ILT; + end_reg = PXP2_REG_RQ_QM_LAST_ILT; + break; + case ILT_CLIENT_SRC: + start_reg = PXP2_REG_RQ_SRC_FIRST_ILT; + end_reg = PXP2_REG_RQ_SRC_LAST_ILT; + break; + case ILT_CLIENT_TM: + start_reg = 
PXP2_REG_RQ_TM_FIRST_ILT; + end_reg = PXP2_REG_RQ_TM_LAST_ILT; + break; + } + REG_WR(sc, start_reg, (ilt_start + ilt_cli->start)); + REG_WR(sc, end_reg, (ilt_start + ilt_cli->end)); +} + +static void ecore_ilt_client_init_op_ilt(struct bnx2x_softc *sc, + struct ecore_ilt *ilt, + struct ilt_client_info *ilt_cli, + uint8_t initop) +{ + int i; + + if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT) + return; + + for (i = ilt_cli->start; i <= ilt_cli->end; i++) + ecore_ilt_line_init_op(sc, ilt, i, initop); + + /* init/clear the ILT boundries */ + ecore_ilt_boundry_init_op(sc, ilt_cli, ilt->start_line); +} + +static void ecore_ilt_client_init_op(struct bnx2x_softc *sc, + struct ilt_client_info *ilt_cli, uint8_t initop) +{ + struct ecore_ilt *ilt = SC_ILT(sc); + + ecore_ilt_client_init_op_ilt(sc, ilt, ilt_cli, initop); +} + +static void ecore_ilt_client_id_init_op(struct bnx2x_softc *sc, + int cli_num, uint8_t initop) +{ + struct ecore_ilt *ilt = SC_ILT(sc); + struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; + + ecore_ilt_client_init_op(sc, ilt_cli, initop); +} + +static inline void ecore_ilt_init_op_cnic(struct bnx2x_softc *sc, uint8_t initop) +{ + if (CONFIGURE_NIC_MODE(sc)) + ecore_ilt_client_id_init_op(sc, ILT_CLIENT_SRC, initop); + ecore_ilt_client_id_init_op(sc, ILT_CLIENT_TM, initop); +} + +static void ecore_ilt_init_op(struct bnx2x_softc *sc, uint8_t initop) +{ + ecore_ilt_client_id_init_op(sc, ILT_CLIENT_CDU, initop); + ecore_ilt_client_id_init_op(sc, ILT_CLIENT_QM, initop); + if (CNIC_SUPPORT(sc) && !CONFIGURE_NIC_MODE(sc)) + ecore_ilt_client_id_init_op(sc, ILT_CLIENT_SRC, initop); +} + +static void ecore_ilt_init_client_psz(struct bnx2x_softc *sc, int cli_num, + uint32_t psz_reg, uint8_t initop) +{ + struct ecore_ilt *ilt = SC_ILT(sc); + struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; + + if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT) + return; + + switch (initop) { + case INITOP_INIT: + /* set in the init-value array */ + case INITOP_SET: + REG_WR(sc, psz_reg, ILOG2(ilt_cli->page_size >> 12)); + break; + case INITOP_CLEAR: + break; + } +} + +/* + * called during init common stage, ilt clients should be initialized + * prioir to calling this function + */ +static void ecore_ilt_init_page_size(struct bnx2x_softc *sc, uint8_t initop) +{ + ecore_ilt_init_client_psz(sc, ILT_CLIENT_CDU, + PXP2_REG_RQ_CDU_P_SIZE, initop); + ecore_ilt_init_client_psz(sc, ILT_CLIENT_QM, + PXP2_REG_RQ_QM_P_SIZE, initop); + ecore_ilt_init_client_psz(sc, ILT_CLIENT_SRC, + PXP2_REG_RQ_SRC_P_SIZE, initop); + ecore_ilt_init_client_psz(sc, ILT_CLIENT_TM, + PXP2_REG_RQ_TM_P_SIZE, initop); +} + +/**************************************************************************** +* QM initializations +****************************************************************************/ +#define QM_QUEUES_PER_FUNC 16 +#define QM_INIT_MIN_CID_COUNT 31 +#define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT) + +/* called during init port stage */ +static void ecore_qm_init_cid_count(struct bnx2x_softc *sc, int qm_cid_count, + uint8_t initop) +{ + int port = SC_PORT(sc); + + if (QM_INIT(qm_cid_count)) { + switch (initop) { + case INITOP_INIT: + /* set in the init-value array */ + case INITOP_SET: + REG_WR(sc, QM_REG_CONNNUM_0 + port*4, + qm_cid_count/16 - 1); + break; + case INITOP_CLEAR: + break; + } + } +} + +static void ecore_qm_set_ptr_table(struct bnx2x_softc *sc, int qm_cid_count, + uint32_t base_reg, uint32_t reg) +{ + int i; + uint32_t wb_data[2] = {0, 0}; + for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) { + REG_WR(sc, 
base_reg + i*4, + qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC)); + ecore_init_wr_wb(sc, reg + i*8, + wb_data, 2); + } +} + +/* called during init common stage */ +static void ecore_qm_init_ptr_table(struct bnx2x_softc *sc, int qm_cid_count, + uint8_t initop) +{ + if (!QM_INIT(qm_cid_count)) + return; + + switch (initop) { + case INITOP_INIT: + /* set in the init-value array */ + case INITOP_SET: + ecore_qm_set_ptr_table(sc, qm_cid_count, + QM_REG_BASEADDR, QM_REG_PTRTBL); + if (CHIP_IS_E1H(sc)) + ecore_qm_set_ptr_table(sc, qm_cid_count, + QM_REG_BASEADDR_EXT_A, + QM_REG_PTRTBL_EXT_A); + break; + case INITOP_CLEAR: + break; + } +} + +/**************************************************************************** +* SRC initializations +****************************************************************************/ +#ifdef ECORE_L5 +/* called during init func stage */ +static void ecore_src_init_t2(struct bnx2x_softc *sc, struct src_ent *t2, + ecore_dma_addr_t t2_mapping, int src_cid_count) +{ + int i; + int port = SC_PORT(sc); + + /* Initialize T2 */ + for (i = 0; i < src_cid_count-1; i++) + t2[i].next = (uint64_t)(t2_mapping + + (i+1)*sizeof(struct src_ent)); + + /* tell the searcher where the T2 table is */ + REG_WR(sc, SRC_REG_COUNTFREE0 + port*4, src_cid_count); + + ecore_wr_64(sc, SRC_REG_FIRSTFREE0 + port*16, + U64_LO(t2_mapping), U64_HI(t2_mapping)); + + ecore_wr_64(sc, SRC_REG_LASTFREE0 + port*16, + U64_LO((uint64_t)t2_mapping + + (src_cid_count-1) * sizeof(struct src_ent)), + U64_HI((uint64_t)t2_mapping + + (src_cid_count-1) * sizeof(struct src_ent))); +} +#endif +#endif /* ECORE_INIT_OPS_H */ diff --git a/drivers/net/bnx2x/ecore_mfw_req.h b/drivers/net/bnx2x/ecore_mfw_req.h new file mode 100644 index 00000000..57529097 --- /dev/null +++ b/drivers/net/bnx2x/ecore_mfw_req.h @@ -0,0 +1,187 @@ +/*- + * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * + * Eric Davis <edavis@broadcom.com> + * David Christensen <davidch@broadcom.com> + * Gary Zambrano <zambrano@broadcom.com> + * + * Copyright (c) 2014-2015 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.bnx2x_pmd for copyright and licensing details. 
+ */ + +#ifndef ECORE_MFW_REQ_H +#define ECORE_MFW_REQ_H + + + +#define PORT_0 0 +#define PORT_1 1 +#define PORT_MAX 2 +#define NVM_PATH_MAX 2 + +/* FCoE capabilities required from the driver */ +struct fcoe_capabilities { + uint32_t capability1; + /* Maximum number of I/Os per connection */ + #define FCOE_IOS_PER_CONNECTION_MASK 0x0000ffff + #define FCOE_IOS_PER_CONNECTION_SHIFT 0 + /* Maximum number of Logins per port */ + #define FCOE_LOGINS_PER_PORT_MASK 0xffff0000 + #define FCOE_LOGINS_PER_PORT_SHIFT 16 + + uint32_t capability2; + /* Maximum number of exchanges */ + #define FCOE_NUMBER_OF_EXCHANGES_MASK 0x0000ffff + #define FCOE_NUMBER_OF_EXCHANGES_SHIFT 0 + /* Maximum NPIV WWN per port */ + #define FCOE_NPIV_WWN_PER_PORT_MASK 0xffff0000 + #define FCOE_NPIV_WWN_PER_PORT_SHIFT 16 + + uint32_t capability3; + /* Maximum number of targets supported */ + #define FCOE_TARGETS_SUPPORTED_MASK 0x0000ffff + #define FCOE_TARGETS_SUPPORTED_SHIFT 0 + /* Maximum number of outstanding commands across all connections */ + #define FCOE_OUTSTANDING_COMMANDS_MASK 0xffff0000 + #define FCOE_OUTSTANDING_COMMANDS_SHIFT 16 + + uint32_t capability4; + #define FCOE_CAPABILITY4_STATEFUL 0x00000001 + #define FCOE_CAPABILITY4_STATELESS 0x00000002 + #define FCOE_CAPABILITY4_CAPABILITIES_REPORTED_VALID 0x00000004 +}; + +struct glob_ncsi_oem_data +{ + uint32_t driver_version; + uint32_t unused[3]; + struct fcoe_capabilities fcoe_features[NVM_PATH_MAX][PORT_MAX]; +}; + +/* current drv_info version */ +#define DRV_INFO_CUR_VER 2 + +/* drv_info op codes supported */ +enum drv_info_opcode { + ETH_STATS_OPCODE, + FCOE_STATS_OPCODE, + ISCSI_STATS_OPCODE +}; + +#define ETH_STAT_INFO_VERSION_LEN 12 +/* Per PCI Function Ethernet Statistics required from the driver */ +struct eth_stats_info { + /* Function's Driver Version. padded to 12 */ + char version[ETH_STAT_INFO_VERSION_LEN]; + /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */ + uint8_t mac_local[8]; + uint8_t mac_add1[8]; /* Additional Programmed MAC Addr 1. */ + uint8_t mac_add2[8]; /* Additional Programmed MAC Addr 2. */ + uint32_t mtu_size; /* MTU Size. Note : Negotiated MTU */ + uint32_t feature_flags; /* Feature_Flags. */ +#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01 +#define FEATURE_ETH_LSO_MASK 0x02 +#define FEATURE_ETH_BOOTMODE_MASK 0x1C +#define FEATURE_ETH_BOOTMODE_SHIFT 2 +#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2) +#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2) +#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2) +#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2) +#define FEATURE_ETH_TOE_MASK 0x20 + uint32_t lso_max_size; /* LSO MaxOffloadSize. */ + uint32_t lso_min_seg_cnt; /* LSO MinSegmentCount. */ + /* Num Offloaded Connections TCP_IPv4. */ + uint32_t ipv4_ofld_cnt; + /* Num Offloaded Connections TCP_IPv6. */ + uint32_t ipv6_ofld_cnt; + uint32_t promiscuous_mode; /* Promiscuous Mode. non-zero true */ + uint32_t txq_size; /* TX Descriptors Queue Size */ + uint32_t rxq_size; /* RX Descriptors Queue Size */ + /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */ + uint32_t txq_avg_depth; + /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */ + uint32_t rxq_avg_depth; + /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/ + uint32_t iov_offload; + /* Number of NetQueue/VMQ Config'd. */ + uint32_t netq_cnt; + uint32_t vf_cnt; /* Num VF assigned to this PF. */ +}; + +/* Per PCI Function FCOE Statistics required from the driver */ +struct fcoe_stats_info { + uint8_t version[12]; /* Function's Driver Version. 
*/ + uint8_t mac_local[8]; /* Locally Admin Addr. */ + uint8_t mac_add1[8]; /* Additional Programmed MAC Addr 1. */ + uint8_t mac_add2[8]; /* Additional Programmed MAC Addr 2. */ + /* QoS Priority (per 802.1p). 0-7255 */ + uint32_t qos_priority; + uint32_t txq_size; /* FCoE TX Descriptors Queue Size. */ + uint32_t rxq_size; /* FCoE RX Descriptors Queue Size. */ + /* FCoE TX Descriptor Queue Avg Depth. */ + uint32_t txq_avg_depth; + /* FCoE RX Descriptors Queue Avg Depth. */ + uint32_t rxq_avg_depth; + uint32_t rx_frames_lo; /* FCoE RX Frames received. */ + uint32_t rx_frames_hi; /* FCoE RX Frames received. */ + uint32_t rx_bytes_lo; /* FCoE RX Bytes received. */ + uint32_t rx_bytes_hi; /* FCoE RX Bytes received. */ + uint32_t tx_frames_lo; /* FCoE TX Frames sent. */ + uint32_t tx_frames_hi; /* FCoE TX Frames sent. */ + uint32_t tx_bytes_lo; /* FCoE TX Bytes sent. */ + uint32_t tx_bytes_hi; /* FCoE TX Bytes sent. */ + uint32_t rx_fcs_errors; /* number of receive packets with FCS errors */ + uint32_t rx_fc_crc_errors; /* number of FC frames with CRC errors*/ + uint32_t fip_login_failures; /* number of FCoE/FIP Login failures */ +}; + +/* Per PCI Function iSCSI Statistics required from the driver*/ +struct iscsi_stats_info { + uint8_t version[12]; /* Function's Driver Version. */ + uint8_t mac_local[8]; /* Locally Admin iSCSI MAC Addr. */ + uint8_t mac_add1[8]; /* Additional Programmed MAC Addr 1. */ + /* QoS Priority (per 802.1p). 0-7255 */ + uint32_t qos_priority; + + uint8_t initiator_name[64]; /* iSCSI Boot Initiator Node name. */ + + uint8_t ww_port_name[64]; /* iSCSI World wide port name */ + + uint8_t boot_target_name[64];/* iSCSI Boot Target Name. */ + + uint8_t boot_target_ip[16]; /* iSCSI Boot Target IP. */ + uint32_t boot_target_portal; /* iSCSI Boot Target Portal. */ + uint8_t boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */ + uint32_t max_frame_size; /* Max Frame Size. bytes */ + uint32_t txq_size; /* PDU TX Descriptors Queue Size. */ + uint32_t rxq_size; /* PDU RX Descriptors Queue Size. */ + + uint32_t txq_avg_depth; /*PDU TX Descriptor Queue Avg Depth. */ + uint32_t rxq_avg_depth; /*PDU RX Descriptors Queue Avg Depth. */ + uint32_t rx_pdus_lo; /* iSCSI PDUs received. */ + uint32_t rx_pdus_hi; /* iSCSI PDUs received. */ + + uint32_t rx_bytes_lo; /* iSCSI RX Bytes received. */ + uint32_t rx_bytes_hi; /* iSCSI RX Bytes received. */ + uint32_t tx_pdus_lo; /* iSCSI PDUs sent. */ + uint32_t tx_pdus_hi; /* iSCSI PDUs sent. */ + + uint32_t tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */ + uint32_t tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */ + uint32_t pcp_prior_map_tbl; /*C-PCP to S-PCP Priority MapTable. + 9 nibbles, the position of each nibble + represents the C-PCP value, the value + of the nibble = S-PCP value.*/ +}; + +union drv_info_to_mcp { + struct eth_stats_info ether_stat; + struct fcoe_stats_info fcoe_stat; + struct iscsi_stats_info iscsi_stat; +}; + + +#endif /* ECORE_MFW_REQ_H */ diff --git a/drivers/net/bnx2x/ecore_reg.h b/drivers/net/bnx2x/ecore_reg.h new file mode 100644 index 00000000..d8203b45 --- /dev/null +++ b/drivers/net/bnx2x/ecore_reg.h @@ -0,0 +1,3644 @@ +/*- + * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * + * Eric Davis <edavis@broadcom.com> + * David Christensen <davidch@broadcom.com> + * Gary Zambrano <zambrano@broadcom.com> + * + * Copyright (c) 2014-2015 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.bnx2x_pmd for copyright and licensing details. 
+ */ + +#ifndef ECORE_REG_H +#define ECORE_REG_H + + +#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR \ + (0x1<<0) +#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS \ + (0x1<<2) +#define ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU \ + (0x1<<5) +#define ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT \ + (0x1<<3) +#define ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR \ + (0x1<<4) +#define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND \ + (0x1<<1) +#define ATC_REG_ATC_INIT_DONE \ + 0x1100bcUL +#define ATC_REG_ATC_INT_STS_CLR \ + 0x1101c0UL +#define ATC_REG_ATC_PRTY_MASK \ + 0x1101d8UL +#define ATC_REG_ATC_PRTY_STS_CLR \ + 0x1101d0UL +#define BRB1_REG_BRB1_INT_MASK \ + 0x60128UL +#define BRB1_REG_BRB1_PRTY_MASK \ + 0x60138UL +#define BRB1_REG_BRB1_PRTY_STS_CLR \ + 0x60130UL +#define BRB1_REG_MAC_GUARANTIED_0 \ + 0x601e8UL +#define BRB1_REG_MAC_GUARANTIED_1 \ + 0x60240UL +#define BRB1_REG_NUM_OF_FULL_BLOCKS \ + 0x60090UL +#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 \ + 0x60078UL +#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 \ + 0x60068UL +#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 \ + 0x60094UL +#define CCM_REG_CCM_INT_MASK \ + 0xd01e4UL +#define CCM_REG_CCM_PRTY_MASK \ + 0xd01f4UL +#define CCM_REG_CCM_PRTY_STS_CLR \ + 0xd01ecUL +#define CDU_REG_CDU_GLOBAL_PARAMS \ + 0x101020UL +#define CDU_REG_CDU_INT_MASK \ + 0x10103cUL +#define CDU_REG_CDU_PRTY_MASK \ + 0x10104cUL +#define CDU_REG_CDU_PRTY_STS_CLR \ + 0x101044UL +#define CFC_REG_AC_INIT_DONE \ + 0x104078UL +#define CFC_REG_CAM_INIT_DONE \ + 0x10407cUL +#define CFC_REG_CFC_INT_MASK \ + 0x104108UL +#define CFC_REG_CFC_INT_STS_CLR \ + 0x104100UL +#define CFC_REG_CFC_PRTY_MASK \ + 0x104118UL +#define CFC_REG_CFC_PRTY_STS_CLR \ + 0x104110UL +#define CFC_REG_DEBUG0 \ + 0x104050UL +#define CFC_REG_INIT_REG \ + 0x10404cUL +#define CFC_REG_LL_INIT_DONE \ + 0x104074UL +#define CFC_REG_NUM_LCIDS_INSIDE_PF \ + 0x104120UL +#define CFC_REG_STRONG_ENABLE_PF \ + 0x104128UL +#define CFC_REG_WEAK_ENABLE_PF \ + 0x104124UL +#define CSDM_REG_CSDM_INT_MASK_0 \ + 0xc229cUL +#define CSDM_REG_CSDM_INT_MASK_1 \ + 0xc22acUL +#define CSDM_REG_CSDM_PRTY_MASK \ + 0xc22bcUL +#define CSDM_REG_CSDM_PRTY_STS_CLR \ + 0xc22b4UL +#define CSEM_REG_CSEM_INT_MASK_0 \ + 0x200110UL +#define CSEM_REG_CSEM_INT_MASK_1 \ + 0x200120UL +#define CSEM_REG_CSEM_PRTY_MASK_0 \ + 0x200130UL +#define CSEM_REG_CSEM_PRTY_MASK_1 \ + 0x200140UL +#define CSEM_REG_CSEM_PRTY_STS_CLR_0 \ + 0x200128UL +#define CSEM_REG_CSEM_PRTY_STS_CLR_1 \ + 0x200138UL +#define CSEM_REG_FAST_MEMORY \ + 0x220000UL +#define CSEM_REG_INT_TABLE \ + 0x200400UL +#define CSEM_REG_PASSIVE_BUFFER \ + 0x202000UL +#define CSEM_REG_PRAM \ + 0x240000UL +#define CSEM_REG_VFPF_ERR_NUM \ + 0x200380UL +#define DBG_REG_DBG_PRTY_MASK \ + 0xc0a8UL +#define DBG_REG_DBG_PRTY_STS_CLR \ + 0xc0a0UL +#define DMAE_REG_BACKWARD_COMP_EN \ + 0x10207cUL +#define DMAE_REG_CMD_MEM \ + 0x102400UL +#define DMAE_REG_DMAE_INT_MASK \ + 0x102054UL +#define DMAE_REG_DMAE_PRTY_MASK \ + 0x102064UL +#define DMAE_REG_DMAE_PRTY_STS_CLR \ + 0x10205cUL +#define DMAE_REG_GO_C0 \ + 0x102080UL +#define DMAE_REG_GO_C1 \ + 0x102084UL +#define DMAE_REG_GO_C10 \ + 0x102088UL +#define DMAE_REG_GO_C11 \ + 0x10208cUL +#define DMAE_REG_GO_C12 \ + 0x102090UL +#define DMAE_REG_GO_C13 \ + 0x102094UL +#define DMAE_REG_GO_C14 \ + 0x102098UL +#define DMAE_REG_GO_C15 \ + 0x10209cUL +#define DMAE_REG_GO_C2 \ + 0x1020a0UL +#define DMAE_REG_GO_C3 \ + 0x1020a4UL +#define DMAE_REG_GO_C4 \ + 0x1020a8UL +#define DMAE_REG_GO_C5 \ + 0x1020acUL +#define DMAE_REG_GO_C6 \ + 0x1020b0UL +#define DMAE_REG_GO_C7 \ + 0x1020b4UL +#define 
DMAE_REG_GO_C8 \ + 0x1020b8UL +#define DMAE_REG_GO_C9 \ + 0x1020bcUL +#define DORQ_REG_DORQ_INT_MASK \ + 0x170180UL +#define DORQ_REG_DORQ_INT_STS_CLR \ + 0x170178UL +#define DORQ_REG_DORQ_PRTY_MASK \ + 0x170190UL +#define DORQ_REG_DORQ_PRTY_STS_CLR \ + 0x170188UL +#define DORQ_REG_DPM_CID_OFST \ + 0x170030UL +#define DORQ_REG_MAX_RVFID_SIZE \ + 0x1701ecUL +#define DORQ_REG_NORM_CID_OFST \ + 0x17002cUL +#define DORQ_REG_PF_USAGE_CNT \ + 0x1701d0UL +#define DORQ_REG_VF_NORM_CID_BASE \ + 0x1701a0UL +#define DORQ_REG_VF_NORM_CID_OFST \ + 0x1701f4UL +#define DORQ_REG_VF_NORM_CID_WND_SIZE \ + 0x1701a4UL +#define DORQ_REG_VF_NORM_MAX_CID_COUNT \ + 0x1701e4UL +#define DORQ_REG_VF_NORM_VF_BASE \ + 0x1701a8UL +#define DORQ_REG_VF_TYPE_MASK_0 \ + 0x170218UL +#define DORQ_REG_VF_TYPE_MAX_MCID_0 \ + 0x1702d8UL +#define DORQ_REG_VF_TYPE_MIN_MCID_0 \ + 0x170298UL +#define DORQ_REG_VF_TYPE_VALUE_0 \ + 0x170258UL +#define DORQ_REG_VF_USAGE_CNT \ + 0x170320UL +#define DORQ_REG_VF_USAGE_CT_LIMIT \ + 0x170340UL +#define HC_CONFIG_0_REG_ATTN_BIT_EN_0 \ + (0x1<<4) +#define HC_CONFIG_0_REG_BLOCK_DISABLE_0 \ + (0x1<<0) +#define HC_CONFIG_0_REG_INT_LINE_EN_0 \ + (0x1<<3) +#define HC_CONFIG_0_REG_MSI_ATTN_EN_0 \ + (0x1<<7) +#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 \ + (0x1<<2) +#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 \ + (0x1<<1) +#define HC_CONFIG_1_REG_BLOCK_DISABLE_1 \ + (0x1<<0) +#define HC_REG_ATTN_MSG0_ADDR_L \ + 0x108018UL +#define HC_REG_ATTN_MSG1_ADDR_L \ + 0x108020UL +#define HC_REG_COMMAND_REG \ + 0x108180UL +#define HC_REG_CONFIG_0 \ + 0x108000UL +#define HC_REG_CONFIG_1 \ + 0x108004UL +#define HC_REG_HC_PRTY_MASK \ + 0x1080a0UL +#define HC_REG_HC_PRTY_STS_CLR \ + 0x108098UL +#define HC_REG_INT_MASK \ + 0x108108UL +#define HC_REG_LEADING_EDGE_0 \ + 0x108040UL +#define HC_REG_MAIN_MEMORY \ + 0x108800UL +#define HC_REG_MAIN_MEMORY_SIZE \ + 152 +#define HC_REG_TRAILING_EDGE_0 \ + 0x108044UL +#define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN \ + (0x1<<1) +#define IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE \ + (0x1<<0) +#define IGU_REG_ATTENTION_ACK_BITS \ + 0x130108UL +#define IGU_REG_ATTN_MSG_ADDR_H \ + 0x13011cUL +#define IGU_REG_ATTN_MSG_ADDR_L \ + 0x130120UL +#define IGU_REG_BLOCK_CONFIGURATION \ + 0x130000UL +#define IGU_REG_COMMAND_REG_32LSB_DATA \ + 0x130124UL +#define IGU_REG_COMMAND_REG_CTRL \ + 0x13012cUL +#define IGU_REG_CSTORM_TYPE_0_SB_CLEANUP \ + 0x130200UL +#define IGU_REG_IGU_PRTY_MASK \ + 0x1300a8UL +#define IGU_REG_IGU_PRTY_STS_CLR \ + 0x1300a0UL +#define IGU_REG_LEADING_EDGE_LATCH \ + 0x130134UL +#define IGU_REG_MAPPING_MEMORY \ + 0x131000UL +#define IGU_REG_MAPPING_MEMORY_SIZE \ + 136 +#define IGU_REG_PBA_STATUS_LSB \ + 0x130138UL +#define IGU_REG_PBA_STATUS_MSB \ + 0x13013cUL +#define IGU_REG_PCI_PF_MSIX_EN \ + 0x130144UL +#define IGU_REG_PCI_PF_MSIX_FUNC_MASK \ + 0x130148UL +#define IGU_REG_PCI_PF_MSI_EN \ + 0x130140UL +#define IGU_REG_PENDING_BITS_STATUS \ + 0x130300UL +#define IGU_REG_PF_CONFIGURATION \ + 0x130154UL +#define IGU_REG_PROD_CONS_MEMORY \ + 0x132000UL +#define IGU_REG_RESET_MEMORIES \ + 0x130158UL +#define IGU_REG_SB_INT_BEFORE_MASK_LSB \ + 0x13015cUL +#define IGU_REG_SB_INT_BEFORE_MASK_MSB \ + 0x130160UL +#define IGU_REG_SB_MASK_LSB \ + 0x130164UL +#define IGU_REG_SB_MASK_MSB \ + 0x130168UL +#define IGU_REG_STATISTIC_NUM_MESSAGE_SENT \ + 0x130800UL +#define IGU_REG_TRAILING_EDGE_LATCH \ + 0x130104UL +#define IGU_REG_VF_CONFIGURATION \ + 0x130170UL +#define MCP_REG_MCPR_ACCESS_LOCK \ + 0x8009c +#define MCP_REG_MCPR_GP_INPUTS \ + 0x800c0 +#define 
MCP_REG_MCPR_GP_OENABLE \ + 0x800c8 +#define MCP_REG_MCPR_GP_OUTPUTS \ + 0x800c4 +#define MCP_REG_MCPR_IMC_COMMAND \ + 0x85900 +#define MCP_REG_MCPR_IMC_DATAREG0 \ + 0x85920 +#define MCP_REG_MCPR_IMC_SLAVE_CONTROL \ + 0x85904 +#define MCP_REG_MCPR_NVM_ACCESS_ENABLE \ + 0x86424 +#define MCP_REG_MCPR_NVM_ADDR \ + 0x8640c +#define MCP_REG_MCPR_NVM_CFG4 \ + 0x8642c +#define MCP_REG_MCPR_NVM_COMMAND \ + 0x86400 +#define MCP_REG_MCPR_NVM_READ \ + 0x86410 +#define MCP_REG_MCPR_NVM_SW_ARB \ + 0x86420 +#define MCP_REG_MCPR_NVM_WRITE \ + 0x86408 +#define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK \ + (0x1<<1) +#define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK \ + (0x1<<0) +#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 \ + 0xa42cUL +#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 \ + 0xa438UL +#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 \ + 0xa444UL +#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 \ + 0xa450UL +#define MISC_REG_AEU_AFTER_INVERT_4_MCP \ + 0xa458UL +#define MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 \ + 0xa700UL +#define MISC_REG_AEU_CLR_LATCH_SIGNAL \ + 0xa45cUL +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0 \ + 0xa06cUL +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1 \ + 0xa07cUL +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2 \ + 0xa08cUL +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 \ + 0xa10cUL +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 \ + 0xa11cUL +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 \ + 0xa12cUL +#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 \ + 0xa078UL +#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0 \ + 0xa118UL +#define MISC_REG_AEU_ENABLE4_NIG_0 \ + 0xa0f8UL +#define MISC_REG_AEU_ENABLE4_NIG_1 \ + 0xa198UL +#define MISC_REG_AEU_ENABLE4_PXP_0 \ + 0xa108UL +#define MISC_REG_AEU_ENABLE4_PXP_1 \ + 0xa1a8UL +#define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 \ + 0xa688UL +#define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 \ + 0xa6b0UL +#define MISC_REG_AEU_GENERAL_ATTN_0 \ + 0xa000UL +#define MISC_REG_AEU_GENERAL_ATTN_1 \ + 0xa004UL +#define MISC_REG_AEU_GENERAL_ATTN_10 \ + 0xa028UL +#define MISC_REG_AEU_GENERAL_ATTN_11 \ + 0xa02cUL +#define MISC_REG_AEU_GENERAL_ATTN_12 \ + 0xa030UL +#define MISC_REG_AEU_GENERAL_ATTN_2 \ + 0xa008UL +#define MISC_REG_AEU_GENERAL_ATTN_3 \ + 0xa00cUL +#define MISC_REG_AEU_GENERAL_ATTN_4 \ + 0xa010UL +#define MISC_REG_AEU_GENERAL_ATTN_5 \ + 0xa014UL +#define MISC_REG_AEU_GENERAL_ATTN_6 \ + 0xa018UL +#define MISC_REG_AEU_GENERAL_ATTN_7 \ + 0xa01cUL +#define MISC_REG_AEU_GENERAL_ATTN_8 \ + 0xa020UL +#define MISC_REG_AEU_GENERAL_ATTN_9 \ + 0xa024UL +#define MISC_REG_AEU_GENERAL_MASK \ + 0xa61cUL +#define MISC_REG_AEU_MASK_ATTN_FUNC_0 \ + 0xa060UL +#define MISC_REG_AEU_MASK_ATTN_FUNC_1 \ + 0xa064UL +#define MISC_REG_BOND_ID \ + 0xa400UL +#define MISC_REG_CHIP_NUM \ + 0xa408UL +#define MISC_REG_CHIP_REV \ + 0xa40cUL +#define MISC_REG_CHIP_TYPE \ + 0xac60UL +#define MISC_REG_CHIP_TYPE_57811_MASK \ + (1<<1) +#define MISC_REG_CPMU_LP_DR_ENABLE \ + 0xa858UL +#define MISC_REG_CPMU_LP_FW_ENABLE_P0 \ + 0xa84cUL +#define MISC_REG_CPMU_LP_IDLE_THR_P0 \ + 0xa8a0UL +#define MISC_REG_CPMU_LP_MASK_ENT_P0 \ + 0xa880UL +#define MISC_REG_CPMU_LP_MASK_EXT_P0 \ + 0xa888UL +#define MISC_REG_CPMU_LP_SM_ENT_CNT_P0 \ + 0xa8b8UL +#define MISC_REG_CPMU_LP_SM_ENT_CNT_P1 \ + 0xa8bcUL +#define MISC_REG_DRIVER_CONTROL_1 \ + 0xa510UL +#define MISC_REG_DRIVER_CONTROL_7 \ + 0xa3c8UL +#define MISC_REG_FOUR_PORT_PATH_SWAP \ + 0xa75cUL +#define MISC_REG_FOUR_PORT_PATH_SWAP_OVWR \ + 0xa738UL +#define MISC_REG_FOUR_PORT_PORT_SWAP \ + 0xa754UL +#define MISC_REG_FOUR_PORT_PORT_SWAP_OVWR \ + 0xa734UL +#define MISC_REG_GENERIC_CR_0 \ + 0xa460UL 
+#define MISC_REG_GENERIC_CR_1 \ + 0xa464UL +#define MISC_REG_GENERIC_POR_1 \ + 0xa474UL +#define MISC_REG_GEN_PURP_HWG \ + 0xa9a0UL +#define MISC_REG_GPIO \ + 0xa490UL +#define MISC_REG_GPIO_EVENT_EN \ + 0xa2bcUL +#define MISC_REG_GPIO_INT \ + 0xa494UL +#define MISC_REG_GRC_RSV_ATTN \ + 0xa3c0UL +#define MISC_REG_GRC_TIMEOUT_ATTN \ + 0xa3c4UL +#define MISC_REG_LCPLL_E40_PWRDWN \ + 0xaa74UL +#define MISC_REG_LCPLL_E40_RESETB_ANA \ + 0xaa78UL +#define MISC_REG_LCPLL_E40_RESETB_DIG \ + 0xaa7cUL +#define MISC_REG_MISC_INT_MASK \ + 0xa388UL +#define MISC_REG_MISC_PRTY_MASK \ + 0xa398UL +#define MISC_REG_MISC_PRTY_STS_CLR \ + 0xa390UL +#define MISC_REG_PORT4MODE_EN \ + 0xa750UL +#define MISC_REG_PORT4MODE_EN_OVWR \ + 0xa720UL +#define MISC_REG_RESET_REG_1 \ + 0xa580UL +#define MISC_REG_RESET_REG_2 \ + 0xa590UL +#define MISC_REG_SHARED_MEM_ADDR \ + 0xa2b4UL +#define MISC_REG_SPIO \ + 0xa4fcUL +#define MISC_REG_SPIO_EVENT_EN \ + 0xa2b8UL +#define MISC_REG_SPIO_INT \ + 0xa500UL +#define MISC_REG_TWO_PORT_PATH_SWAP \ + 0xa758UL +#define MISC_REG_TWO_PORT_PATH_SWAP_OVWR \ + 0xa72cUL +#define MISC_REG_UNPREPARED \ + 0xa424UL +#define MISC_REG_WC0_CTRL_PHY_ADDR \ + 0xa9ccUL +#define MISC_REG_WC0_RESET \ + 0xac30UL +#define MISC_REG_XMAC_CORE_PORT_MODE \ + 0xa964UL +#define MISC_REG_XMAC_PHY_PORT_MODE \ + 0xa960UL +#define MSTAT_REG_RX_STAT_GR64_LO \ + 0x200UL +#define MSTAT_REG_TX_STAT_GTXPOK_LO \ + 0UL +#define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN \ + (0x1<<0) +#define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN \ + (0x1<<0) +#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT \ + (0x1<<0) +#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS \ + (0x1<<9) +#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G \ + (0x1<<15) +#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS \ + (0xf<<18) +#define NIG_REG_BMAC0_IN_EN \ + 0x100acUL +#define NIG_REG_BMAC0_OUT_EN \ + 0x100e0UL +#define NIG_REG_BMAC0_PAUSE_OUT_EN \ + 0x10110UL +#define NIG_REG_BMAC0_REGS_OUT_EN \ + 0x100e8UL +#define NIG_REG_BRB0_PAUSE_IN_EN \ + 0x100c4UL +#define NIG_REG_BRB1_PAUSE_IN_EN \ + 0x100c8UL +#define NIG_REG_DEBUG_PACKET_LB \ + 0x10800UL +#define NIG_REG_EGRESS_DRAIN0_MODE \ + 0x10060UL +#define NIG_REG_EGRESS_EMAC0_OUT_EN \ + 0x10120UL +#define NIG_REG_EGRESS_EMAC0_PORT \ + 0x10058UL +#define NIG_REG_EMAC0_IN_EN \ + 0x100a4UL +#define NIG_REG_EMAC0_PAUSE_OUT_EN \ + 0x10118UL +#define NIG_REG_EMAC0_STATUS_MISC_MI_INT \ + 0x10494UL +#define NIG_REG_INGRESS_BMAC0_MEM \ + 0x10c00UL +#define NIG_REG_INGRESS_BMAC1_MEM \ + 0x11000UL +#define NIG_REG_INGRESS_EOP_LB_EMPTY \ + 0x104e0UL +#define NIG_REG_INGRESS_EOP_LB_FIFO \ + 0x104e4UL +#define NIG_REG_LATCH_BC_0 \ + 0x16210UL +#define NIG_REG_LATCH_STATUS_0 \ + 0x18000UL +#define NIG_REG_LED_10G_P0 \ + 0x10320UL +#define NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 \ + 0x10318UL +#define NIG_REG_LED_CONTROL_BLINK_RATE_P0 \ + 0x10310UL +#define NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 \ + 0x10308UL +#define NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 \ + 0x102f8UL +#define NIG_REG_LED_CONTROL_TRAFFIC_P0 \ + 0x10300UL +#define NIG_REG_LED_MODE_P0 \ + 0x102f0UL +#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 \ + 0x16070UL +#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 \ + 0x16074UL +#define NIG_REG_LLFC_ENABLE_0 \ + 0x16208UL +#define NIG_REG_LLFC_ENABLE_1 \ + 0x1620cUL +#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 \ + 0x16058UL +#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 \ + 0x1605cUL +#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 \ + 0x16060UL +#define 
NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 \ + 0x16064UL +#define NIG_REG_LLFC_OUT_EN_0 \ + 0x160c8UL +#define NIG_REG_LLFC_OUT_EN_1 \ + 0x160ccUL +#define NIG_REG_LLH0_BRB1_DRV_MASK \ + 0x10244UL +#define NIG_REG_LLH0_BRB1_DRV_MASK_MF \ + 0x16048UL +#define NIG_REG_LLH0_BRB1_NOT_MCP \ + 0x1025cUL +#define NIG_REG_LLH0_CLS_TYPE \ + 0x16080UL +#define NIG_REG_LLH0_FUNC_EN \ + 0x160fcUL +#define NIG_REG_LLH0_FUNC_MEM \ + 0x16180UL +#define NIG_REG_LLH0_FUNC_MEM_ENABLE \ + 0x16140UL +#define NIG_REG_LLH0_FUNC_VLAN_ID \ + 0x16100UL +#define NIG_REG_LLH0_XCM_MASK \ + 0x10130UL +#define NIG_REG_LLH1_BRB1_NOT_MCP \ + 0x102dcUL +#define NIG_REG_LLH1_CLS_TYPE \ + 0x16084UL +#define NIG_REG_LLH1_FUNC_MEM \ + 0x161c0UL +#define NIG_REG_LLH1_FUNC_MEM_ENABLE \ + 0x16160UL +#define NIG_REG_LLH1_FUNC_MEM_SIZE \ + 16 +#define NIG_REG_LLH1_MF_MODE \ + 0x18614UL +#define NIG_REG_LLH1_XCM_MASK \ + 0x10134UL +#define NIG_REG_LLH_E1HOV_MODE \ + 0x160d8UL +#define NIG_REG_LLH_MF_MODE \ + 0x16024UL +#define NIG_REG_MASK_INTERRUPT_PORT0 \ + 0x10330UL +#define NIG_REG_MASK_INTERRUPT_PORT1 \ + 0x10334UL +#define NIG_REG_NIG_EMAC0_EN \ + 0x1003cUL +#define NIG_REG_NIG_INGRESS_EMAC0_NO_CRC \ + 0x10044UL +#define NIG_REG_NIG_INT_STS_CLR_0 \ + 0x103b4UL +#define NIG_REG_NIG_PRTY_MASK \ + 0x103dcUL +#define NIG_REG_NIG_PRTY_MASK_0 \ + 0x183c8UL +#define NIG_REG_NIG_PRTY_MASK_1 \ + 0x183d8UL +#define NIG_REG_NIG_PRTY_STS_CLR \ + 0x103d4UL +#define NIG_REG_NIG_PRTY_STS_CLR_0 \ + 0x183c0UL +#define NIG_REG_NIG_PRTY_STS_CLR_1 \ + 0x183d0UL +#define NIG_REG_P0_HDRS_AFTER_BASIC \ + 0x18038UL +#define NIG_REG_P0_HWPFC_ENABLE \ + 0x18078UL +#define NIG_REG_P0_LLH_FUNC_MEM2 \ + 0x18480UL +#define NIG_REG_P0_MAC_IN_EN \ + 0x185acUL +#define NIG_REG_P0_MAC_OUT_EN \ + 0x185b0UL +#define NIG_REG_P0_MAC_PAUSE_OUT_EN \ + 0x185b4UL +#define NIG_REG_P0_PKT_PRIORITY_TO_COS \ + 0x18054UL +#define NIG_REG_P0_RX_COS0_PRIORITY_MASK \ + 0x18058UL +#define NIG_REG_P0_RX_COS1_PRIORITY_MASK \ + 0x1805cUL +#define NIG_REG_P0_RX_COS2_PRIORITY_MASK \ + 0x186b0UL +#define NIG_REG_P0_RX_COS3_PRIORITY_MASK \ + 0x186b4UL +#define NIG_REG_P0_RX_COS4_PRIORITY_MASK \ + 0x186b8UL +#define NIG_REG_P0_RX_COS5_PRIORITY_MASK \ + 0x186bcUL +#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP \ + 0x180f0UL +#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB \ + 0x18688UL +#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB \ + 0x1868cUL +#define NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT \ + 0x180e8UL +#define NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ \ + 0x180ecUL +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0 \ + 0x1810cUL +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1 \ + 0x18110UL +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2 \ + 0x18114UL +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3 \ + 0x18118UL +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4 \ + 0x1811cUL +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5 \ + 0x186a0UL +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6 \ + 0x186a4UL +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7 \ + 0x186a8UL +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8 \ + 0x186acUL +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0 \ + 0x180f8UL +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1 \ + 0x180fcUL +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2 \ + 0x18100UL +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3 \ + 0x18104UL +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4 \ + 0x18108UL +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5 \ + 0x18690UL +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6 \ + 0x18694UL +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7 \ + 0x18698UL +#define 
NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8 \ + 0x1869cUL +#define NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS \ + 0x180f4UL +#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT \ + 0x180e4UL +#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB \ + 0x18680UL +#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB \ + 0x18684UL +#define NIG_REG_P1_HDRS_AFTER_BASIC \ + 0x1818cUL +#define NIG_REG_P1_HWPFC_ENABLE \ + 0x181d0UL +#define NIG_REG_P1_LLH_FUNC_MEM2 \ + 0x184c0UL +#define NIG_REG_P1_MAC_IN_EN \ + 0x185c0UL +#define NIG_REG_P1_MAC_OUT_EN \ + 0x185c4UL +#define NIG_REG_P1_MAC_PAUSE_OUT_EN \ + 0x185c8UL +#define NIG_REG_P1_PKT_PRIORITY_TO_COS \ + 0x181a8UL +#define NIG_REG_P1_RX_COS0_PRIORITY_MASK \ + 0x181acUL +#define NIG_REG_P1_RX_COS1_PRIORITY_MASK \ + 0x181b0UL +#define NIG_REG_P1_RX_COS2_PRIORITY_MASK \ + 0x186f8UL +#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB \ + 0x186e8UL +#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB \ + 0x186ecUL +#define NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT \ + 0x18234UL +#define NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ \ + 0x18238UL +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 \ + 0x18258UL +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 \ + 0x1825cUL +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 \ + 0x18260UL +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 \ + 0x18264UL +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 \ + 0x18268UL +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 \ + 0x186f4UL +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 \ + 0x18244UL +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 \ + 0x18248UL +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 \ + 0x1824cUL +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 \ + 0x18250UL +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 \ + 0x18254UL +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 \ + 0x186f0UL +#define NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS \ + 0x18240UL +#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB \ + 0x186e0UL +#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB \ + 0x186e4UL +#define NIG_REG_PAUSE_ENABLE_0 \ + 0x160c0UL +#define NIG_REG_PAUSE_ENABLE_1 \ + 0x160c4UL +#define NIG_REG_PORT_SWAP \ + 0x10394UL +#define NIG_REG_PPP_ENABLE_0 \ + 0x160b0UL +#define NIG_REG_PPP_ENABLE_1 \ + 0x160b4UL +#define NIG_REG_PRS_REQ_IN_EN \ + 0x100b8UL +#define NIG_REG_SERDES0_CTRL_MD_DEVAD \ + 0x10370UL +#define NIG_REG_SERDES0_CTRL_MD_ST \ + 0x1036cUL +#define NIG_REG_SERDES0_CTRL_PHY_ADDR \ + 0x10374UL +#define NIG_REG_SERDES0_STATUS_LINK_STATUS \ + 0x10578UL +#define NIG_REG_STAT0_BRB_DISCARD \ + 0x105f0UL +#define NIG_REG_STAT0_BRB_TRUNCATE \ + 0x105f8UL +#define NIG_REG_STAT0_EGRESS_MAC_PKT0 \ + 0x10750UL +#define NIG_REG_STAT0_EGRESS_MAC_PKT1 \ + 0x10760UL +#define NIG_REG_STAT1_BRB_DISCARD \ + 0x10628UL +#define NIG_REG_STAT1_EGRESS_MAC_PKT0 \ + 0x107a0UL +#define NIG_REG_STAT1_EGRESS_MAC_PKT1 \ + 0x107b0UL +#define NIG_REG_STAT2_BRB_OCTET \ + 0x107e0UL +#define NIG_REG_STATUS_INTERRUPT_PORT0 \ + 0x10328UL +#define NIG_REG_STRAP_OVERRIDE \ + 0x10398UL +#define NIG_REG_XCM0_OUT_EN \ + 0x100f0UL +#define NIG_REG_XCM1_OUT_EN \ + 0x100f4UL +#define NIG_REG_XGXS0_CTRL_MD_DEVAD \ + 0x1033cUL +#define NIG_REG_XGXS0_CTRL_MD_ST \ + 0x10338UL +#define NIG_REG_XGXS0_CTRL_PHY_ADDR \ + 0x10340UL +#define NIG_REG_XGXS0_STATUS_LINK10G \ + 0x10680UL +#define NIG_REG_XGXS0_STATUS_LINK_STATUS \ + 0x10684UL +#define NIG_REG_XGXS_LANE_SEL_P0 \ + 0x102e8UL +#define NIG_REG_XGXS_SERDES0_MODE_SEL \ + 0x102e0UL +#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT \ + (0x1<<0) +#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS \ + (0x1<<9) +#define 
NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G \ + (0x1<<15) +#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS \ + (0xf<<18) +#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE \ + 18 +#define PBF_REG_COS0_UPPER_BOUND \ + 0x15c05cUL +#define PBF_REG_COS0_UPPER_BOUND_P0 \ + 0x15c2ccUL +#define PBF_REG_COS0_UPPER_BOUND_P1 \ + 0x15c2e4UL +#define PBF_REG_COS0_WEIGHT \ + 0x15c054UL +#define PBF_REG_COS0_WEIGHT_P0 \ + 0x15c2a8UL +#define PBF_REG_COS0_WEIGHT_P1 \ + 0x15c2c0UL +#define PBF_REG_COS1_UPPER_BOUND \ + 0x15c060UL +#define PBF_REG_COS1_WEIGHT \ + 0x15c058UL +#define PBF_REG_COS1_WEIGHT_P0 \ + 0x15c2acUL +#define PBF_REG_COS1_WEIGHT_P1 \ + 0x15c2c4UL +#define PBF_REG_COS2_WEIGHT_P0 \ + 0x15c2b0UL +#define PBF_REG_COS2_WEIGHT_P1 \ + 0x15c2c8UL +#define PBF_REG_COS3_WEIGHT_P0 \ + 0x15c2b4UL +#define PBF_REG_COS4_WEIGHT_P0 \ + 0x15c2b8UL +#define PBF_REG_COS5_WEIGHT_P0 \ + 0x15c2bcUL +#define PBF_REG_CREDIT_LB_Q \ + 0x140338UL +#define PBF_REG_CREDIT_Q0 \ + 0x14033cUL +#define PBF_REG_CREDIT_Q1 \ + 0x140340UL +#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 \ + 0x14005cUL +#define PBF_REG_DISABLE_PF \ + 0x1402e8UL +#define PBF_REG_DISABLE_VF \ + 0x1402ecUL +#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0 \ + 0x15c288UL +#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1 \ + 0x15c28cUL +#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 \ + 0x15c278UL +#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 \ + 0x15c27cUL +#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 \ + 0x15c280UL +#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 \ + 0x15c284UL +#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 \ + 0x15c2a0UL +#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 \ + 0x15c2a4UL +#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 \ + 0x15c270UL +#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 \ + 0x15c274UL +#define PBF_REG_ETS_ENABLED \ + 0x15c050UL +#define PBF_REG_HDRS_AFTER_BASIC \ + 0x15c0a8UL +#define PBF_REG_HDRS_AFTER_TAG_0 \ + 0x15c0b8UL +#define PBF_REG_HIGH_PRIORITY_COS_NUM \ + 0x15c04cUL +#define PBF_REG_INIT_CRD_LB_Q \ + 0x15c248UL +#define PBF_REG_INIT_CRD_Q0 \ + 0x15c230UL +#define PBF_REG_INIT_CRD_Q1 \ + 0x15c234UL +#define PBF_REG_INIT_P0 \ + 0x140004UL +#define PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q \ + 0x140354UL +#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 \ + 0x140358UL +#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 \ + 0x14035cUL +#define PBF_REG_MUST_HAVE_HDRS \ + 0x15c0c4UL +#define PBF_REG_NUM_STRICT_ARB_SLOTS \ + 0x15c064UL +#define PBF_REG_P0_ARB_THRSH \ + 0x1400e4UL +#define PBF_REG_P0_CREDIT \ + 0x140200UL +#define PBF_REG_P0_INIT_CRD \ + 0x1400d0UL +#define PBF_REG_P0_INTERNAL_CRD_FREED_CNT \ + 0x140308UL +#define PBF_REG_P0_PAUSE_ENABLE \ + 0x140014UL +#define PBF_REG_P0_TQ_LINES_FREED_CNT \ + 0x1402f0UL +#define PBF_REG_P0_TQ_OCCUPANCY \ + 0x1402fcUL +#define PBF_REG_P1_CREDIT \ + 0x140208UL +#define PBF_REG_P1_INIT_CRD \ + 0x1400d4UL +#define PBF_REG_P1_INTERNAL_CRD_FREED_CNT \ + 0x14030cUL +#define PBF_REG_P1_TQ_LINES_FREED_CNT \ + 0x1402f4UL +#define PBF_REG_P1_TQ_OCCUPANCY \ + 0x140300UL +#define PBF_REG_P4_CREDIT \ + 0x140210UL +#define PBF_REG_P4_INIT_CRD \ + 0x1400e0UL +#define PBF_REG_P4_INTERNAL_CRD_FREED_CNT \ + 0x140310UL +#define PBF_REG_P4_TQ_LINES_FREED_CNT \ + 0x1402f8UL +#define PBF_REG_P4_TQ_OCCUPANCY \ + 0x140304UL +#define PBF_REG_PBF_INT_MASK \ + 0x1401d4UL +#define PBF_REG_PBF_PRTY_MASK \ + 0x1401e4UL +#define PBF_REG_PBF_PRTY_STS_CLR \ + 0x1401dcUL +#define PBF_REG_TAG_ETHERTYPE_0 \ + 0x15c090UL +#define PBF_REG_TAG_LEN_0 \ + 0x15c09cUL +#define 
PBF_REG_TQ_LINES_FREED_CNT_LB_Q \ + 0x14038cUL +#define PBF_REG_TQ_LINES_FREED_CNT_Q0 \ + 0x140390UL +#define PBF_REG_TQ_LINES_FREED_CNT_Q1 \ + 0x140394UL +#define PBF_REG_TQ_OCCUPANCY_LB_Q \ + 0x1403a8UL +#define PBF_REG_TQ_OCCUPANCY_Q0 \ + 0x1403acUL +#define PBF_REG_TQ_OCCUPANCY_Q1 \ + 0x1403b0UL +#define PB_REG_PB_INT_MASK \ + 0x28UL +#define PB_REG_PB_PRTY_MASK \ + 0x38UL +#define PB_REG_PB_PRTY_STS_CLR \ + 0x30UL +#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR \ + (0x1<<0) +#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW \ + (0x1<<8) +#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR \ + (0x1<<1) +#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN \ + (0x1<<6) +#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN \ + (0x1<<7) +#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN \ + (0x1<<4) +#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN \ + (0x1<<3) +#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN \ + (0x1<<5) +#define PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN \ + (0x1<<2) +#define PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR \ + 0x9418UL +#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR \ + 0x9478UL +#define PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR \ + 0x947cUL +#define PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR \ + 0x9480UL +#define PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR \ + 0x9474UL +#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \ + 0x942cUL +#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ \ + 0x9430UL +#define PGLUE_B_REG_INTERNAL_VFID_ENABLE \ + 0x9438UL +#define PGLUE_B_REG_PGLUE_B_INT_STS \ + 0x9298UL +#define PGLUE_B_REG_PGLUE_B_INT_STS_CLR \ + 0x929cUL +#define PGLUE_B_REG_PGLUE_B_PRTY_MASK \ + 0x92b4UL +#define PGLUE_B_REG_PGLUE_B_PRTY_STS_CLR \ + 0x92acUL +#define PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR \ + 0x9458UL +#define PGLUE_B_REG_TAGS_63_32 \ + 0x9244UL +#define PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR \ + 0x9470UL +#define PRS_REG_A_PRSU_20 \ + 0x40134UL +#define PRS_REG_CFC_SEARCH_INITIAL_CREDIT \ + 0x4011cUL +#define PRS_REG_E1HOV_MODE \ + 0x401c8UL +#define PRS_REG_HDRS_AFTER_BASIC \ + 0x40238UL +#define PRS_REG_HDRS_AFTER_BASIC_PORT_0 \ + 0x40270UL +#define PRS_REG_HDRS_AFTER_BASIC_PORT_1 \ + 0x40290UL +#define PRS_REG_HDRS_AFTER_TAG_0 \ + 0x40248UL +#define PRS_REG_HDRS_AFTER_TAG_0_PORT_0 \ + 0x40280UL +#define PRS_REG_HDRS_AFTER_TAG_0_PORT_1 \ + 0x402a0UL +#define PRS_REG_MUST_HAVE_HDRS \ + 0x40254UL +#define PRS_REG_MUST_HAVE_HDRS_PORT_0 \ + 0x4028cUL +#define PRS_REG_MUST_HAVE_HDRS_PORT_1 \ + 0x402acUL +#define PRS_REG_NIC_MODE \ + 0x40138UL +#define PRS_REG_NUM_OF_PACKETS \ + 0x40124UL +#define PRS_REG_PRS_PRTY_MASK \ + 0x401a4UL +#define PRS_REG_PRS_PRTY_STS_CLR \ + 0x4019cUL +#define PRS_REG_TAG_ETHERTYPE_0 \ + 0x401d4UL +#define PRS_REG_TAG_LEN_0 \ + 0x4022cUL +#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT \ + (0x1<<19) +#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF \ + (0x1<<20) +#define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN \ + (0x1<<22) +#define PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED \ + (0x1<<23) +#define PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED \ + (0x1<<24) +#define PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR \ + (0x1<<7) +#define PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR \ + (0x1<<7) +#define PXP2_REG_PGL_ADDR_88_F0 \ + 0x120534UL +#define PXP2_REG_PGL_ADDR_88_F1 \ + 0x120544UL +#define PXP2_REG_PGL_ADDR_8C_F0 \ + 0x120538UL +#define PXP2_REG_PGL_ADDR_8C_F1 \ + 0x120548UL +#define PXP2_REG_PGL_ADDR_90_F0 \ + 0x12053cUL +#define PXP2_REG_PGL_ADDR_90_F1 \ + 0x12054cUL +#define PXP2_REG_PGL_ADDR_94_F0 \ + 
0x120540UL +#define PXP2_REG_PGL_ADDR_94_F1 \ + 0x120550UL +#define PXP2_REG_PGL_EXP_ROM2 \ + 0x120808UL +#define PXP2_REG_PGL_PRETEND_FUNC_F0 \ + 0x120674UL +#define PXP2_REG_PGL_PRETEND_FUNC_F1 \ + 0x120678UL +#define PXP2_REG_PGL_TAGS_LIMIT \ + 0x1205a8UL +#define PXP2_REG_PSWRQ_BW_ADD1 \ + 0x1201c0UL +#define PXP2_REG_PSWRQ_BW_ADD10 \ + 0x1201e4UL +#define PXP2_REG_PSWRQ_BW_ADD11 \ + 0x1201e8UL +#define PXP2_REG_PSWRQ_BW_ADD2 \ + 0x1201c4UL +#define PXP2_REG_PSWRQ_BW_ADD28 \ + 0x120228UL +#define PXP2_REG_PSWRQ_BW_ADD3 \ + 0x1201c8UL +#define PXP2_REG_PSWRQ_BW_ADD6 \ + 0x1201d4UL +#define PXP2_REG_PSWRQ_BW_ADD7 \ + 0x1201d8UL +#define PXP2_REG_PSWRQ_BW_ADD8 \ + 0x1201dcUL +#define PXP2_REG_PSWRQ_BW_ADD9 \ + 0x1201e0UL +#define PXP2_REG_PSWRQ_BW_L1 \ + 0x1202b0UL +#define PXP2_REG_PSWRQ_BW_L10 \ + 0x1202d4UL +#define PXP2_REG_PSWRQ_BW_L11 \ + 0x1202d8UL +#define PXP2_REG_PSWRQ_BW_L2 \ + 0x1202b4UL +#define PXP2_REG_PSWRQ_BW_L28 \ + 0x120318UL +#define PXP2_REG_PSWRQ_BW_L3 \ + 0x1202b8UL +#define PXP2_REG_PSWRQ_BW_L6 \ + 0x1202c4UL +#define PXP2_REG_PSWRQ_BW_L7 \ + 0x1202c8UL +#define PXP2_REG_PSWRQ_BW_L8 \ + 0x1202ccUL +#define PXP2_REG_PSWRQ_BW_L9 \ + 0x1202d0UL +#define PXP2_REG_PSWRQ_BW_RD \ + 0x120324UL +#define PXP2_REG_PSWRQ_BW_UB1 \ + 0x120238UL +#define PXP2_REG_PSWRQ_BW_UB10 \ + 0x12025cUL +#define PXP2_REG_PSWRQ_BW_UB11 \ + 0x120260UL +#define PXP2_REG_PSWRQ_BW_UB2 \ + 0x12023cUL +#define PXP2_REG_PSWRQ_BW_UB28 \ + 0x1202a0UL +#define PXP2_REG_PSWRQ_BW_UB3 \ + 0x120240UL +#define PXP2_REG_PSWRQ_BW_UB6 \ + 0x12024cUL +#define PXP2_REG_PSWRQ_BW_UB7 \ + 0x120250UL +#define PXP2_REG_PSWRQ_BW_UB8 \ + 0x120254UL +#define PXP2_REG_PSWRQ_BW_UB9 \ + 0x120258UL +#define PXP2_REG_PSWRQ_BW_WR \ + 0x120328UL +#define PXP2_REG_PSWRQ_CDU0_L2P \ + 0x120000UL +#define PXP2_REG_PSWRQ_QM0_L2P \ + 0x120038UL +#define PXP2_REG_PSWRQ_SRC0_L2P \ + 0x120054UL +#define PXP2_REG_PSWRQ_TM0_L2P \ + 0x12001cUL +#define PXP2_REG_PXP2_INT_MASK_0 \ + 0x120578UL +#define PXP2_REG_PXP2_INT_MASK_1 \ + 0x120614UL +#define PXP2_REG_PXP2_INT_STS_0 \ + 0x12056cUL +#define PXP2_REG_PXP2_INT_STS_1 \ + 0x120608UL +#define PXP2_REG_PXP2_INT_STS_CLR_0 \ + 0x120570UL +#define PXP2_REG_PXP2_PRTY_MASK_0 \ + 0x120588UL +#define PXP2_REG_PXP2_PRTY_MASK_1 \ + 0x120598UL +#define PXP2_REG_PXP2_PRTY_STS_CLR_0 \ + 0x120580UL +#define PXP2_REG_PXP2_PRTY_STS_CLR_1 \ + 0x120590UL +#define PXP2_REG_RD_BLK_CNT \ + 0x120418UL +#define PXP2_REG_RD_CDURD_SWAP_MODE \ + 0x120404UL +#define PXP2_REG_RD_DISABLE_INPUTS \ + 0x120374UL +#define PXP2_REG_RD_INIT_DONE \ + 0x120370UL +#define PXP2_REG_RD_PBF_SWAP_MODE \ + 0x1203f4UL +#define PXP2_REG_RD_PORT_IS_IDLE_0 \ + 0x12041cUL +#define PXP2_REG_RD_PORT_IS_IDLE_1 \ + 0x120420UL +#define PXP2_REG_RD_QM_SWAP_MODE \ + 0x1203f8UL +#define PXP2_REG_RD_SRC_SWAP_MODE \ + 0x120400UL +#define PXP2_REG_RD_SR_CNT \ + 0x120414UL +#define PXP2_REG_RD_START_INIT \ + 0x12036cUL +#define PXP2_REG_RD_TM_SWAP_MODE \ + 0x1203fcUL +#define PXP2_REG_RQ_BW_RD_ADD0 \ + 0x1201bcUL +#define PXP2_REG_RQ_BW_RD_ADD12 \ + 0x1201ecUL +#define PXP2_REG_RQ_BW_RD_ADD13 \ + 0x1201f0UL +#define PXP2_REG_RQ_BW_RD_ADD14 \ + 0x1201f4UL +#define PXP2_REG_RQ_BW_RD_ADD15 \ + 0x1201f8UL +#define PXP2_REG_RQ_BW_RD_ADD16 \ + 0x1201fcUL +#define PXP2_REG_RQ_BW_RD_ADD17 \ + 0x120200UL +#define PXP2_REG_RQ_BW_RD_ADD18 \ + 0x120204UL +#define PXP2_REG_RQ_BW_RD_ADD19 \ + 0x120208UL +#define PXP2_REG_RQ_BW_RD_ADD20 \ + 0x12020cUL +#define PXP2_REG_RQ_BW_RD_ADD22 \ + 0x120210UL +#define PXP2_REG_RQ_BW_RD_ADD23 \ + 0x120214UL +#define 
PXP2_REG_RQ_BW_RD_ADD24 \ + 0x120218UL +#define PXP2_REG_RQ_BW_RD_ADD25 \ + 0x12021cUL +#define PXP2_REG_RQ_BW_RD_ADD26 \ + 0x120220UL +#define PXP2_REG_RQ_BW_RD_ADD27 \ + 0x120224UL +#define PXP2_REG_RQ_BW_RD_ADD4 \ + 0x1201ccUL +#define PXP2_REG_RQ_BW_RD_ADD5 \ + 0x1201d0UL +#define PXP2_REG_RQ_BW_RD_L0 \ + 0x1202acUL +#define PXP2_REG_RQ_BW_RD_L12 \ + 0x1202dcUL +#define PXP2_REG_RQ_BW_RD_L13 \ + 0x1202e0UL +#define PXP2_REG_RQ_BW_RD_L14 \ + 0x1202e4UL +#define PXP2_REG_RQ_BW_RD_L15 \ + 0x1202e8UL +#define PXP2_REG_RQ_BW_RD_L16 \ + 0x1202ecUL +#define PXP2_REG_RQ_BW_RD_L17 \ + 0x1202f0UL +#define PXP2_REG_RQ_BW_RD_L18 \ + 0x1202f4UL +#define PXP2_REG_RQ_BW_RD_L19 \ + 0x1202f8UL +#define PXP2_REG_RQ_BW_RD_L20 \ + 0x1202fcUL +#define PXP2_REG_RQ_BW_RD_L22 \ + 0x120300UL +#define PXP2_REG_RQ_BW_RD_L23 \ + 0x120304UL +#define PXP2_REG_RQ_BW_RD_L24 \ + 0x120308UL +#define PXP2_REG_RQ_BW_RD_L25 \ + 0x12030cUL +#define PXP2_REG_RQ_BW_RD_L26 \ + 0x120310UL +#define PXP2_REG_RQ_BW_RD_L27 \ + 0x120314UL +#define PXP2_REG_RQ_BW_RD_L4 \ + 0x1202bcUL +#define PXP2_REG_RQ_BW_RD_L5 \ + 0x1202c0UL +#define PXP2_REG_RQ_BW_RD_UBOUND0 \ + 0x120234UL +#define PXP2_REG_RQ_BW_RD_UBOUND12 \ + 0x120264UL +#define PXP2_REG_RQ_BW_RD_UBOUND13 \ + 0x120268UL +#define PXP2_REG_RQ_BW_RD_UBOUND14 \ + 0x12026cUL +#define PXP2_REG_RQ_BW_RD_UBOUND15 \ + 0x120270UL +#define PXP2_REG_RQ_BW_RD_UBOUND16 \ + 0x120274UL +#define PXP2_REG_RQ_BW_RD_UBOUND17 \ + 0x120278UL +#define PXP2_REG_RQ_BW_RD_UBOUND18 \ + 0x12027cUL +#define PXP2_REG_RQ_BW_RD_UBOUND19 \ + 0x120280UL +#define PXP2_REG_RQ_BW_RD_UBOUND20 \ + 0x120284UL +#define PXP2_REG_RQ_BW_RD_UBOUND22 \ + 0x120288UL +#define PXP2_REG_RQ_BW_RD_UBOUND23 \ + 0x12028cUL +#define PXP2_REG_RQ_BW_RD_UBOUND24 \ + 0x120290UL +#define PXP2_REG_RQ_BW_RD_UBOUND25 \ + 0x120294UL +#define PXP2_REG_RQ_BW_RD_UBOUND26 \ + 0x120298UL +#define PXP2_REG_RQ_BW_RD_UBOUND27 \ + 0x12029cUL +#define PXP2_REG_RQ_BW_RD_UBOUND4 \ + 0x120244UL +#define PXP2_REG_RQ_BW_RD_UBOUND5 \ + 0x120248UL +#define PXP2_REG_RQ_BW_WR_ADD29 \ + 0x12022cUL +#define PXP2_REG_RQ_BW_WR_ADD30 \ + 0x120230UL +#define PXP2_REG_RQ_BW_WR_L29 \ + 0x12031cUL +#define PXP2_REG_RQ_BW_WR_L30 \ + 0x120320UL +#define PXP2_REG_RQ_BW_WR_UBOUND29 \ + 0x1202a4UL +#define PXP2_REG_RQ_BW_WR_UBOUND30 \ + 0x1202a8UL +#define PXP2_REG_RQ_CDU_ENDIAN_M \ + 0x1201a0UL +#define PXP2_REG_RQ_CDU_FIRST_ILT \ + 0x12061cUL +#define PXP2_REG_RQ_CDU_LAST_ILT \ + 0x120620UL +#define PXP2_REG_RQ_CDU_P_SIZE \ + 0x120018UL +#define PXP2_REG_RQ_CFG_DONE \ + 0x1201b4UL +#define PXP2_REG_RQ_DBG_ENDIAN_M \ + 0x1201a4UL +#define PXP2_REG_RQ_DISABLE_INPUTS \ + 0x120330UL +#define PXP2_REG_RQ_DRAM_ALIGN \ + 0x1205b0UL +#define PXP2_REG_RQ_DRAM_ALIGN_RD \ + 0x12092cUL +#define PXP2_REG_RQ_DRAM_ALIGN_SEL \ + 0x120930UL +#define PXP2_REG_RQ_HC_ENDIAN_M \ + 0x1201a8UL +#define PXP2_REG_RQ_ONCHIP_AT \ + 0x122000UL +#define PXP2_REG_RQ_ONCHIP_AT_B0 \ + 0x128000UL +#define PXP2_REG_RQ_PDR_LIMIT \ + 0x12033cUL +#define PXP2_REG_RQ_QM_ENDIAN_M \ + 0x120194UL +#define PXP2_REG_RQ_QM_FIRST_ILT \ + 0x120634UL +#define PXP2_REG_RQ_QM_LAST_ILT \ + 0x120638UL +#define PXP2_REG_RQ_QM_P_SIZE \ + 0x120050UL +#define PXP2_REG_RQ_RBC_DONE \ + 0x1201b0UL +#define PXP2_REG_RQ_RD_MBS0 \ + 0x120160UL +#define PXP2_REG_RQ_RD_MBS1 \ + 0x120168UL +#define PXP2_REG_RQ_SRC_ENDIAN_M \ + 0x12019cUL +#define PXP2_REG_RQ_SRC_FIRST_ILT \ + 0x12063cUL +#define PXP2_REG_RQ_SRC_LAST_ILT \ + 0x120640UL +#define PXP2_REG_RQ_SRC_P_SIZE \ + 0x12006cUL +#define PXP2_REG_RQ_TM_ENDIAN_M \ + 0x120198UL 
+#define PXP2_REG_RQ_TM_FIRST_ILT \ + 0x120644UL +#define PXP2_REG_RQ_TM_LAST_ILT \ + 0x120648UL +#define PXP2_REG_RQ_TM_P_SIZE \ + 0x120034UL +#define PXP2_REG_RQ_WR_MBS0 \ + 0x12015cUL +#define PXP2_REG_RQ_WR_MBS1 \ + 0x120164UL +#define PXP2_REG_WR_CDU_MPS \ + 0x1205f0UL +#define PXP2_REG_WR_CSDM_MPS \ + 0x1205d0UL +#define PXP2_REG_WR_DBG_MPS \ + 0x1205e8UL +#define PXP2_REG_WR_DMAE_MPS \ + 0x1205ecUL +#define PXP2_REG_WR_HC_MPS \ + 0x1205c8UL +#define PXP2_REG_WR_QM_MPS \ + 0x1205dcUL +#define PXP2_REG_WR_SRC_MPS \ + 0x1205e4UL +#define PXP2_REG_WR_TM_MPS \ + 0x1205e0UL +#define PXP2_REG_WR_TSDM_MPS \ + 0x1205d4UL +#define PXP2_REG_WR_USDMDP_TH \ + 0x120348UL +#define PXP2_REG_WR_USDM_MPS \ + 0x1205ccUL +#define PXP2_REG_WR_XSDM_MPS \ + 0x1205d8UL +#define PXP_REG_HST_DISCARD_DOORBELLS \ + 0x1030a4UL +#define PXP_REG_HST_DISCARD_INTERNAL_WRITES \ + 0x1030a8UL +#define PXP_REG_HST_ZONE_PERMISSION_TABLE \ + 0x103400UL +#define PXP_REG_PXP_INT_MASK_0 \ + 0x103074UL +#define PXP_REG_PXP_INT_MASK_1 \ + 0x103084UL +#define PXP_REG_PXP_INT_STS_CLR_0 \ + 0x10306cUL +#define PXP_REG_PXP_INT_STS_CLR_1 \ + 0x10307cUL +#define PXP_REG_PXP_PRTY_MASK \ + 0x103094UL +#define PXP_REG_PXP_PRTY_STS_CLR \ + 0x10308cUL +#define QM_REG_BASEADDR \ + 0x168900UL +#define QM_REG_BASEADDR_EXT_A \ + 0x16e100UL +#define QM_REG_BYTECRDCMDQ_0 \ + 0x16e6e8UL +#define QM_REG_CONNNUM_0 \ + 0x168020UL +#define QM_REG_PF_EN \ + 0x16e70cUL +#define QM_REG_PF_USG_CNT_0 \ + 0x16e040UL +#define QM_REG_PTRTBL \ + 0x168a00UL +#define QM_REG_PTRTBL_EXT_A \ + 0x16e200UL +#define QM_REG_QM_INT_MASK \ + 0x168444UL +#define QM_REG_QM_PRTY_MASK \ + 0x168454UL +#define QM_REG_QM_PRTY_STS_CLR \ + 0x16844cUL +#define QM_REG_QVOQIDX_0 \ + 0x1680f4UL +#define QM_REG_SOFT_RESET \ + 0x168428UL +#define QM_REG_VOQQMASK_0_LSB \ + 0x168240UL +#define SEM_FAST_REG_PARITY_RST \ + 0x18840UL +#define SRC_REG_COUNTFREE0 \ + 0x40500UL +#define SRC_REG_FIRSTFREE0 \ + 0x40510UL +#define SRC_REG_KEYSEARCH_0 \ + 0x40458UL +#define SRC_REG_KEYSEARCH_1 \ + 0x4045cUL +#define SRC_REG_KEYSEARCH_2 \ + 0x40460UL +#define SRC_REG_KEYSEARCH_3 \ + 0x40464UL +#define SRC_REG_KEYSEARCH_4 \ + 0x40468UL +#define SRC_REG_KEYSEARCH_5 \ + 0x4046cUL +#define SRC_REG_KEYSEARCH_6 \ + 0x40470UL +#define SRC_REG_KEYSEARCH_7 \ + 0x40474UL +#define SRC_REG_KEYSEARCH_8 \ + 0x40478UL +#define SRC_REG_KEYSEARCH_9 \ + 0x4047cUL +#define SRC_REG_LASTFREE0 \ + 0x40530UL +#define SRC_REG_NUMBER_HASH_BITS0 \ + 0x40400UL +#define SRC_REG_SOFT_RST \ + 0x4049cUL +#define SRC_REG_SRC_PRTY_MASK \ + 0x404c8UL +#define SRC_REG_SRC_PRTY_STS_CLR \ + 0x404c0UL +#define TCM_REG_PRS_IFEN \ + 0x50020UL +#define TCM_REG_TCM_INT_MASK \ + 0x501dcUL +#define TCM_REG_TCM_PRTY_MASK \ + 0x501ecUL +#define TCM_REG_TCM_PRTY_STS_CLR \ + 0x501e4UL +#define TM_REG_EN_LINEAR0_TIMER \ + 0x164014UL +#define TM_REG_LIN0_MAX_ACTIVE_CID \ + 0x164048UL +#define TM_REG_LIN0_NUM_SCANS \ + 0x1640a0UL +#define TM_REG_LIN0_SCAN_ON \ + 0x1640d0UL +#define TM_REG_LIN0_SCAN_TIME \ + 0x16403cUL +#define TM_REG_LIN0_VNIC_UC \ + 0x164128UL +#define TM_REG_TM_INT_MASK \ + 0x1640fcUL +#define TM_REG_TM_PRTY_MASK \ + 0x16410cUL +#define TM_REG_TM_PRTY_STS_CLR \ + 0x164104UL +#define TSDM_REG_ENABLE_IN1 \ + 0x42238UL +#define TSDM_REG_TSDM_INT_MASK_0 \ + 0x4229cUL +#define TSDM_REG_TSDM_INT_MASK_1 \ + 0x422acUL +#define TSDM_REG_TSDM_PRTY_MASK \ + 0x422bcUL +#define TSDM_REG_TSDM_PRTY_STS_CLR \ + 0x422b4UL +#define TSEM_REG_FAST_MEMORY \ + 0x1a0000UL +#define TSEM_REG_INT_TABLE \ + 0x180400UL +#define TSEM_REG_PASSIVE_BUFFER \ 
+ 0x181000UL +#define TSEM_REG_PRAM \ + 0x1c0000UL +#define TSEM_REG_TSEM_INT_MASK_0 \ + 0x180100UL +#define TSEM_REG_TSEM_INT_MASK_1 \ + 0x180110UL +#define TSEM_REG_TSEM_PRTY_MASK_0 \ + 0x180120UL +#define TSEM_REG_TSEM_PRTY_MASK_1 \ + 0x180130UL +#define TSEM_REG_TSEM_PRTY_STS_CLR_0 \ + 0x180118UL +#define TSEM_REG_TSEM_PRTY_STS_CLR_1 \ + 0x180128UL +#define TSEM_REG_VFPF_ERR_NUM \ + 0x180380UL +#define UCM_REG_UCM_INT_MASK \ + 0xe01d4UL +#define UCM_REG_UCM_PRTY_MASK \ + 0xe01e4UL +#define UCM_REG_UCM_PRTY_STS_CLR \ + 0xe01dcUL +#define UMAC_COMMAND_CONFIG_REG_HD_ENA \ + (0x1<<10) +#define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE \ + (0x1<<28) +#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA \ + (0x1<<15) +#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK \ + (0x1<<24) +#define UMAC_COMMAND_CONFIG_REG_PAD_EN \ + (0x1<<5) +#define UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE \ + (0x1<<8) +#define UMAC_COMMAND_CONFIG_REG_PROMIS_EN \ + (0x1<<4) +#define UMAC_COMMAND_CONFIG_REG_RX_ENA \ + (0x1<<1) +#define UMAC_COMMAND_CONFIG_REG_SW_RESET \ + (0x1<<13) +#define UMAC_COMMAND_CONFIG_REG_TX_ENA \ + (0x1<<0) +#define UMAC_REG_COMMAND_CONFIG \ + 0x8UL +#define UMAC_REG_EEE_WAKE_TIMER \ + 0x6cUL +#define UMAC_REG_MAC_ADDR0 \ + 0xcUL +#define UMAC_REG_MAC_ADDR1 \ + 0x10UL +#define UMAC_REG_MAXFR \ + 0x14UL +#define UMAC_REG_UMAC_EEE_CTRL \ + 0x64UL +#define UMAC_UMAC_EEE_CTRL_REG_EEE_EN \ + (0x1<<3) +#define USDM_REG_USDM_INT_MASK_0 \ + 0xc42a0UL +#define USDM_REG_USDM_INT_MASK_1 \ + 0xc42b0UL +#define USDM_REG_USDM_PRTY_MASK \ + 0xc42c0UL +#define USDM_REG_USDM_PRTY_STS_CLR \ + 0xc42b8UL +#define USEM_REG_FAST_MEMORY \ + 0x320000UL +#define USEM_REG_INT_TABLE \ + 0x300400UL +#define USEM_REG_PASSIVE_BUFFER \ + 0x302000UL +#define USEM_REG_PRAM \ + 0x340000UL +#define USEM_REG_USEM_INT_MASK_0 \ + 0x300110UL +#define USEM_REG_USEM_INT_MASK_1 \ + 0x300120UL +#define USEM_REG_USEM_PRTY_MASK_0 \ + 0x300130UL +#define USEM_REG_USEM_PRTY_MASK_1 \ + 0x300140UL +#define USEM_REG_USEM_PRTY_STS_CLR_0 \ + 0x300128UL +#define USEM_REG_USEM_PRTY_STS_CLR_1 \ + 0x300138UL +#define USEM_REG_VFPF_ERR_NUM \ + 0x300380UL +#define VFC_MEMORIES_RST_REG_CAM_RST \ + (0x1<<0) +#define VFC_MEMORIES_RST_REG_RAM_RST \ + (0x1<<1) +#define VFC_REG_MEMORIES_RST \ + 0x1943cUL +#define XCM_REG_XCM_INT_MASK \ + 0x202b4UL +#define XCM_REG_XCM_PRTY_MASK \ + 0x202c4UL +#define XCM_REG_XCM_PRTY_STS_CLR \ + 0x202bcUL +#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS \ + (0x1<<0) +#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS \ + (0x1<<1) +#define XMAC_CTRL_REG_LINE_LOCAL_LPBK \ + (0x1<<2) +#define XMAC_CTRL_REG_RX_EN \ + (0x1<<1) +#define XMAC_CTRL_REG_SOFT_RESET \ + (0x1<<6) +#define XMAC_CTRL_REG_TX_EN \ + (0x1<<0) +#define XMAC_CTRL_REG_XLGMII_ALIGN_ENB \ + (0x1<<7) +#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN \ + (0x1<<18) +#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN \ + (0x1<<17) +#define XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON \ + (0x1<<1) +#define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN \ + (0x1<<0) +#define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN \ + (0x1<<3) +#define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN \ + (0x1<<4) +#define XMAC_PFC_CTRL_HI_REG_TX_PFC_EN \ + (0x1<<5) +#define XMAC_REG_CLEAR_RX_LSS_STATUS \ + 0x60UL +#define XMAC_REG_CTRL \ + 0UL +#define XMAC_REG_CTRL_SA_HI \ + 0x2cUL +#define XMAC_REG_CTRL_SA_LO \ + 0x28UL +#define XMAC_REG_EEE_CTRL \ + 0xd8UL +#define XMAC_REG_EEE_TIMERS_HI \ + 0xe4UL +#define XMAC_REG_PAUSE_CTRL \ + 0x68UL +#define XMAC_REG_PFC_CTRL \ + 0x70UL +#define XMAC_REG_PFC_CTRL_HI \ + 0x74UL +#define 
XMAC_REG_RX_LSS_CTRL \ + 0x50UL +#define XMAC_REG_RX_LSS_STATUS \ + 0x58UL +#define XMAC_REG_RX_MAX_SIZE \ + 0x40UL +#define XMAC_REG_TX_CTRL \ + 0x20UL +#define XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE \ + (0x1<<0) +#define XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE \ + (0x1<<1) +#define XSDM_REG_OPERATION_GEN \ + 0x1664c4UL +#define XSDM_REG_XSDM_INT_MASK_0 \ + 0x16629cUL +#define XSDM_REG_XSDM_INT_MASK_1 \ + 0x1662acUL +#define XSDM_REG_XSDM_PRTY_MASK \ + 0x1662bcUL +#define XSDM_REG_XSDM_PRTY_STS_CLR \ + 0x1662b4UL +#define XSEM_REG_FAST_MEMORY \ + 0x2a0000UL +#define XSEM_REG_INT_TABLE \ + 0x280400UL +#define XSEM_REG_PASSIVE_BUFFER \ + 0x282000UL +#define XSEM_REG_PRAM \ + 0x2c0000UL +#define XSEM_REG_VFPF_ERR_NUM \ + 0x280380UL +#define XSEM_REG_XSEM_INT_MASK_0 \ + 0x280110UL +#define XSEM_REG_XSEM_INT_MASK_1 \ + 0x280120UL +#define XSEM_REG_XSEM_PRTY_MASK_0 \ + 0x280130UL +#define XSEM_REG_XSEM_PRTY_MASK_1 \ + 0x280140UL +#define XSEM_REG_XSEM_PRTY_STS_CLR_0 \ + 0x280128UL +#define XSEM_REG_XSEM_PRTY_STS_CLR_1 \ + 0x280138UL +#define MCPR_ACCESS_LOCK_LOCK (1L<<31) +#define MCPR_IMC_COMMAND_ENABLE (1L<<31) +#define MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT 16 +#define MCPR_IMC_COMMAND_OPERATION_BITSHIFT 28 +#define MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT 8 +#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0) +#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1) +#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0) +#define MCPR_NVM_CFG4_FLASH_SIZE (0x7L<<0) +#define MCPR_NVM_COMMAND_DOIT (1L<<4) +#define MCPR_NVM_COMMAND_DONE (1L<<3) +#define MCPR_NVM_COMMAND_FIRST (1L<<7) +#define MCPR_NVM_COMMAND_LAST (1L<<8) +#define MCPR_NVM_COMMAND_WR (1L<<5) +#define MCPR_NVM_SW_ARB_ARB_ARB1 (1L<<9) +#define MCPR_NVM_SW_ARB_ARB_REQ_CLR1 (1L<<5) +#define MCPR_NVM_SW_ARB_ARB_REQ_SET1 (1L<<1) + + +#define BIGMAC_REGISTER_BMAC_CONTROL (0x00<<3) +#define BIGMAC_REGISTER_BMAC_XGXS_CONTROL (0x01<<3) +#define BIGMAC_REGISTER_CNT_MAX_SIZE (0x05<<3) +#define BIGMAC_REGISTER_RX_CONTROL (0x21<<3) +#define BIGMAC_REGISTER_RX_LLFC_MSG_FLDS (0x46<<3) +#define BIGMAC_REGISTER_RX_LSS_STATUS (0x43<<3) +#define BIGMAC_REGISTER_RX_MAX_SIZE (0x23<<3) +#define BIGMAC_REGISTER_RX_STAT_GR64 (0x26<<3) +#define BIGMAC_REGISTER_RX_STAT_GRIPJ (0x42<<3) +#define BIGMAC_REGISTER_TX_CONTROL (0x07<<3) +#define BIGMAC_REGISTER_TX_MAX_SIZE (0x09<<3) +#define BIGMAC_REGISTER_TX_PAUSE_THRESHOLD (0x0A<<3) +#define BIGMAC_REGISTER_TX_SOURCE_ADDR (0x08<<3) +#define BIGMAC_REGISTER_TX_STAT_GTBYT (0x20<<3) +#define BIGMAC_REGISTER_TX_STAT_GTPKT (0x0C<<3) +#define BIGMAC2_REGISTER_BMAC_CONTROL (0x00<<3) +#define BIGMAC2_REGISTER_BMAC_XGXS_CONTROL (0x01<<3) +#define BIGMAC2_REGISTER_CNT_MAX_SIZE (0x05<<3) +#define BIGMAC2_REGISTER_PFC_CONTROL (0x06<<3) +#define BIGMAC2_REGISTER_RX_CONTROL (0x3A<<3) +#define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS (0x62<<3) +#define BIGMAC2_REGISTER_RX_LSS_STAT (0x3E<<3) +#define BIGMAC2_REGISTER_RX_MAX_SIZE (0x3C<<3) +#define BIGMAC2_REGISTER_RX_STAT_GR64 (0x40<<3) +#define BIGMAC2_REGISTER_RX_STAT_GRIPJ (0x5f<<3) +#define BIGMAC2_REGISTER_TX_CONTROL (0x1C<<3) +#define BIGMAC2_REGISTER_TX_MAX_SIZE (0x1E<<3) +#define BIGMAC2_REGISTER_TX_PAUSE_CONTROL (0x20<<3) +#define BIGMAC2_REGISTER_TX_SOURCE_ADDR (0x1D<<3) +#define BIGMAC2_REGISTER_TX_STAT_GTBYT (0x39<<3) +#define BIGMAC2_REGISTER_TX_STAT_GTPOK (0x22<<3) + + +#define EMAC_LED_1000MB_OVERRIDE (1L<<1) +#define EMAC_LED_100MB_OVERRIDE (1L<<2) +#define EMAC_LED_10MB_OVERRIDE (1L<<3) +#define EMAC_LED_OVERRIDE (1L<<0) +#define EMAC_MDIO_COMM_COMMAND_ADDRESS (0L<<26) +#define 
EMAC_MDIO_COMM_COMMAND_READ_22 (2L<<26) +#define EMAC_MDIO_COMM_COMMAND_READ_45 (3L<<26) +#define EMAC_MDIO_COMM_COMMAND_WRITE_22 (1L<<26) +#define EMAC_MDIO_COMM_COMMAND_WRITE_45 (1L<<26) +#define EMAC_MDIO_COMM_DATA (0xffffL<<0) +#define EMAC_MDIO_COMM_START_BUSY (1L<<29) +#define EMAC_MDIO_MODE_AUTO_POLL (1L<<4) +#define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31) +#define EMAC_MDIO_MODE_CLOCK_CNT (0x3ffL<<16) +#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16 +#define EMAC_MDIO_STATUS_10MB (1L<<1) +#define EMAC_MODE_25G_MODE (1L<<5) +#define EMAC_MODE_HALF_DUPLEX (1L<<1) +#define EMAC_MODE_PORT_GMII (2L<<2) +#define EMAC_MODE_PORT_MII (1L<<2) +#define EMAC_MODE_PORT_MII_10M (3L<<2) +#define EMAC_MODE_RESET (1L<<0) +#define EMAC_REG_EMAC_LED 0xc +#define EMAC_REG_EMAC_MAC_MATCH 0x10 +#define EMAC_REG_EMAC_MDIO_COMM 0xac +#define EMAC_REG_EMAC_MDIO_MODE 0xb4 +#define EMAC_REG_EMAC_MDIO_STATUS 0xb0 +#define EMAC_REG_EMAC_MODE 0x0 +#define EMAC_REG_EMAC_RX_MODE 0xc8 +#define EMAC_REG_EMAC_RX_MTU_SIZE 0x9c +#define EMAC_REG_EMAC_RX_STAT_AC 0x180 +#define EMAC_REG_EMAC_RX_STAT_AC_28 0x1f4 +#define EMAC_REG_EMAC_RX_STAT_AC_COUNT 23 +#define EMAC_REG_EMAC_TX_MODE 0xbc +#define EMAC_REG_EMAC_TX_STAT_AC 0x280 +#define EMAC_REG_EMAC_TX_STAT_AC_COUNT 22 +#define EMAC_REG_RX_PFC_MODE 0x320 +#define EMAC_REG_RX_PFC_MODE_PRIORITIES (1L<<2) +#define EMAC_REG_RX_PFC_MODE_RX_EN (1L<<1) +#define EMAC_REG_RX_PFC_MODE_TX_EN (1L<<0) +#define EMAC_REG_RX_PFC_PARAM 0x324 +#define EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT 0 +#define EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT 16 +#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD 0x328 +#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT (0xffff<<0) +#define EMAC_REG_RX_PFC_STATS_XOFF_SENT 0x330 +#define EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT (0xffff<<0) +#define EMAC_REG_RX_PFC_STATS_XON_RCVD 0x32c +#define EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT (0xffff<<0) +#define EMAC_REG_RX_PFC_STATS_XON_SENT 0x334 +#define EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT (0xffff<<0) +#define EMAC_RX_MODE_FLOW_EN (1L<<2) +#define EMAC_RX_MODE_KEEP_MAC_CONTROL (1L<<3) +#define EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10) +#define EMAC_RX_MODE_PROMISCUOUS (1L<<8) +#define EMAC_RX_MODE_RESET (1L<<0) +#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31) +#define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3) +#define EMAC_TX_MODE_FLOW_EN (1L<<4) +#define EMAC_TX_MODE_RESET (1L<<0) + + +#define MISC_REGISTERS_GPIO_0 0 +#define MISC_REGISTERS_GPIO_1 1 +#define MISC_REGISTERS_GPIO_2 2 +#define MISC_REGISTERS_GPIO_3 3 +#define MISC_REGISTERS_GPIO_CLR_POS 16 +#define MISC_REGISTERS_GPIO_FLOAT (0xffL<<24) +#define MISC_REGISTERS_GPIO_FLOAT_POS 24 +#define MISC_REGISTERS_GPIO_HIGH 1 +#define MISC_REGISTERS_GPIO_INPUT_HI_Z 2 +#define MISC_REGISTERS_GPIO_INT_CLR_POS 24 +#define MISC_REGISTERS_GPIO_INT_OUTPUT_CLR 0 +#define MISC_REGISTERS_GPIO_INT_OUTPUT_SET 1 +#define MISC_REGISTERS_GPIO_INT_SET_POS 16 +#define MISC_REGISTERS_GPIO_LOW 0 +#define MISC_REGISTERS_GPIO_OUTPUT_HIGH 1 +#define MISC_REGISTERS_GPIO_OUTPUT_LOW 0 +#define MISC_REGISTERS_GPIO_PORT_SHIFT 4 +#define MISC_REGISTERS_GPIO_SET_POS 8 +#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588 +#define MISC_REGISTERS_RESET_REG_1_RST_BRB1 (0x1<<0) +#define MISC_REGISTERS_RESET_REG_1_RST_DORQ \ + (0x1<<19) +#define MISC_REGISTERS_RESET_REG_1_RST_HC \ + (0x1<<29) +#define MISC_REGISTERS_RESET_REG_1_RST_PXP \ + (0x1<<26) +#define MISC_REGISTERS_RESET_REG_1_RST_PXPV \ + (0x1<<27) +#define MISC_REGISTERS_RESET_REG_1_RST_QM \ + (0x1<<17) +#define MISC_REGISTERS_RESET_REG_1_SET 0x584 +#define 
MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 +#define MISC_REGISTERS_RESET_REG_2_MSTAT0 \ + (0x1<<24) +#define MISC_REGISTERS_RESET_REG_2_MSTAT1 \ + (0x1<<25) +#define MISC_REGISTERS_RESET_REG_2_PGLC \ + (0x1<<19) +#define MISC_REGISTERS_RESET_REG_2_RST_ATC \ + (0x1<<17) +#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0) +#define MISC_REGISTERS_RESET_REG_2_RST_BMAC1 (0x1<<1) +#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0 (0x1<<2) +#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE \ + (0x1<<14) +#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1 (0x1<<3) +#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE \ + (0x1<<15) +#define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4) +#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6) +#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE (0x1<<8) +#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU (0x1<<7) +#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5) +#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE \ + (0x1<<11) +#define MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO \ + (0x1<<13) +#define MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR \ + (0x1<<16) +#define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9) +#define MISC_REGISTERS_RESET_REG_2_SET 0x594 +#define MISC_REGISTERS_RESET_REG_2_UMAC0 \ + (0x1<<20) +#define MISC_REGISTERS_RESET_REG_2_UMAC1 \ + (0x1<<21) +#define MISC_REGISTERS_RESET_REG_2_XMAC \ + (0x1<<22) +#define MISC_REGISTERS_RESET_REG_2_XMAC_SOFT \ + (0x1<<23) +#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8 +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN (0x1<<2) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD (0x1<<3) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW (0x1<<0) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ (0x1<<5) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN (0x1<<6) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD (0x1<<7) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW (0x1<<4) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB (0x1<<8) +#define MISC_REGISTERS_RESET_REG_3_SET 0x5a4 +#define MISC_SPIO_CLR_POS 16 +#define MISC_SPIO_FLOAT (0xffL<<24) +#define MISC_SPIO_FLOAT_POS 24 +#define MISC_SPIO_INPUT_HI_Z 2 +#define MISC_SPIO_INT_OLD_SET_POS 16 +#define MISC_SPIO_OUTPUT_HIGH 1 +#define MISC_SPIO_OUTPUT_LOW 0 +#define MISC_SPIO_SET_POS 8 +#define MISC_SPIO_SPIO4 0x10 +#define MISC_SPIO_SPIO5 0x20 +#define HW_LOCK_MAX_RESOURCE_VALUE 31 +#define HW_LOCK_RESOURCE_DRV_FLAGS 10 +#define HW_LOCK_RESOURCE_GPIO 1 +#define HW_LOCK_RESOURCE_NVRAM 12 +#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3 +#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8 +#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9 +#define HW_LOCK_RESOURCE_RECOVERY_REG 11 +#define HW_LOCK_RESOURCE_RESET 5 +#define HW_LOCK_RESOURCE_SPIO 2 + + +#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) +#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) +#define AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT (0x1<<19) +#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18) +#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1<<31) +#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1<<30) +#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (0x1<<9) +#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR (0x1<<8) +#define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT (0x1<<7) +#define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR (0x1<<6) +#define 
AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT (0x1<<29) +#define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR (0x1<<28) +#define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT (0x1<<1) +#define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR (0x1<<0) +#define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR (0x1<<18) +#define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT (0x1<<11) +#define AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR (0x1<<10) +#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT (0x1<<13) +#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR (0x1<<12) +#define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 (0x1<<2) +#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (0x1<<12) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (0x1<<28) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1UL<<31) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (0x1<<29) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (0x1<<30) +#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (0x1<<15) +#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (0x1<<14) +#define AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR (0x1<<14) +#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (0x1<<20) +#define AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT (0x1UL<<31) +#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (0x1<<30) +#define AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR (0x1<<0) +#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT (0x1<<2) +#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR (0x1<<3) +#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (0x1<<5) +#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR (0x1<<4) +#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (0x1<<3) +#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (0x1<<2) +#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (0x1<<3) +#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (0x1<<2) +#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (0x1<<22) +#define AEU_INPUTS_ATTN_BITS_SPIO5 (0x1<<15) +#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (0x1<<27) +#define AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR (0x1<<26) +#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (0x1<<5) +#define AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR (0x1<<4) +#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (0x1<<25) +#define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR (0x1<<24) +#define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT (0x1<<29) +#define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR (0x1<<28) +#define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT (0x1<<23) +#define AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR (0x1<<22) +#define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT (0x1<<27) +#define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR (0x1<<26) +#define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT (0x1<<21) +#define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR (0x1<<20) +#define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT (0x1<<25) +#define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR (0x1<<24) +#define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR (0x1<<16) +#define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT (0x1<<9) +#define AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR (0x1<<8) +#define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT (0x1<<7) +#define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR (0x1<<6) +#define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT (0x1<<11) +#define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR (0x1<<10) +#define HW_PRTY_ASSERT_SET_0 \ +(AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\ + 
AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR) +#define HW_PRTY_ASSERT_SET_1 \ +(AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR) +#define HW_PRTY_ASSERT_SET_2 \ +(AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR) +#define HW_PRTY_ASSERT_SET_3 \ +(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) +#define HW_PRTY_ASSERT_SET_4 \ +(AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR) +#define HW_INTERRUT_ASSERT_SET_0 \ +(AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT) +#define HW_INTERRUT_ASSERT_SET_1 \ +(AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT) +#define HW_INTERRUT_ASSERT_SET_2 \ +(AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT) + + +#define RESERVED_GENERAL_ATTENTION_BIT_0 0 + +#define EVEREST_GEN_ATTN_IN_USE_MASK 0x7ffe0 +#define EVEREST_LATCHED_ATTN_IN_USE_MASK 0xffe00000 + +#define RESERVED_GENERAL_ATTENTION_BIT_6 6 +#define RESERVED_GENERAL_ATTENTION_BIT_7 7 +#define RESERVED_GENERAL_ATTENTION_BIT_8 8 +#define RESERVED_GENERAL_ATTENTION_BIT_9 9 +#define RESERVED_GENERAL_ATTENTION_BIT_10 10 +#define RESERVED_GENERAL_ATTENTION_BIT_11 11 +#define RESERVED_GENERAL_ATTENTION_BIT_12 12 +#define RESERVED_GENERAL_ATTENTION_BIT_13 13 +#define RESERVED_GENERAL_ATTENTION_BIT_14 14 +#define RESERVED_GENERAL_ATTENTION_BIT_15 15 +#define RESERVED_GENERAL_ATTENTION_BIT_16 16 +#define RESERVED_GENERAL_ATTENTION_BIT_17 17 +#define RESERVED_GENERAL_ATTENTION_BIT_18 18 +#define RESERVED_GENERAL_ATTENTION_BIT_19 19 +#define RESERVED_GENERAL_ATTENTION_BIT_20 20 +#define RESERVED_GENERAL_ATTENTION_BIT_21 21 + +/* storm 
asserts attention bits */ +#define TSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_7 +#define USTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_8 +#define CSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_9 +#define XSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_10 + +/* mcp error attention bit */ +#define MCP_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_11 + +/*E1H NIG status sync attention mapped to group 4-7*/ +#define LINK_SYNC_ATTENTION_BIT_FUNC_0 RESERVED_GENERAL_ATTENTION_BIT_12 +#define LINK_SYNC_ATTENTION_BIT_FUNC_1 RESERVED_GENERAL_ATTENTION_BIT_13 +#define LINK_SYNC_ATTENTION_BIT_FUNC_2 RESERVED_GENERAL_ATTENTION_BIT_14 +#define LINK_SYNC_ATTENTION_BIT_FUNC_3 RESERVED_GENERAL_ATTENTION_BIT_15 +#define LINK_SYNC_ATTENTION_BIT_FUNC_4 RESERVED_GENERAL_ATTENTION_BIT_16 +#define LINK_SYNC_ATTENTION_BIT_FUNC_5 RESERVED_GENERAL_ATTENTION_BIT_17 +#define LINK_SYNC_ATTENTION_BIT_FUNC_6 RESERVED_GENERAL_ATTENTION_BIT_18 +#define LINK_SYNC_ATTENTION_BIT_FUNC_7 RESERVED_GENERAL_ATTENTION_BIT_19 + + /* Used For Error Recovery: changing this will require more \ + changes in code that assume + * error recovery uses general attn bit20 ! */ +#define ERROR_RECOVERY_ATTENTION_BIT \ + RESERVED_GENERAL_ATTENTION_BIT_20 +#define RESERVED_ATTENTION_BIT \ + RESERVED_GENERAL_ATTENTION_BIT_21 + +#define LATCHED_ATTN_RBCR 23 +#define LATCHED_ATTN_RBCT 24 +#define LATCHED_ATTN_RBCN 25 +#define LATCHED_ATTN_RBCU 26 +#define LATCHED_ATTN_RBCP 27 +#define LATCHED_ATTN_TIMEOUT_GRC 28 +#define LATCHED_ATTN_RSVD_GRC 29 +#define LATCHED_ATTN_ROM_PARITY_MCP 30 +#define LATCHED_ATTN_UM_RX_PARITY_MCP 31 +#define LATCHED_ATTN_UM_TX_PARITY_MCP 32 +#define LATCHED_ATTN_SCPAD_PARITY_MCP 33 + +#define GENERAL_ATTEN_WORD(atten_name) ((94 + atten_name) / 32) +#define GENERAL_ATTEN_OFFSET(atten_name) (1UL << ((94 + atten_name) % 32)) + + +/* + * This file defines GRC base address for every block. + * This file is included by chipsim, asm microcode and cpp microcode. + * These values are used in Design.xml on regBase attribute + * Use the base with the generated offsets of specific registers. 
+ */ + +#define GRCBASE_PXPCS 0x000000 +#define GRCBASE_PCICONFIG 0x002000 +#define GRCBASE_PCIREG 0x002400 +#define GRCBASE_EMAC0 0x008000 +#define GRCBASE_EMAC1 0x008400 +#define GRCBASE_DBU 0x008800 +#define GRCBASE_PGLUE_B 0x009000 +#define GRCBASE_MISC 0x00A000 +#define GRCBASE_DBG 0x00C000 +#define GRCBASE_NIG 0x010000 +#define GRCBASE_XCM 0x020000 +#define GRCBASE_PRS 0x040000 +#define GRCBASE_SRCH 0x040400 +#define GRCBASE_TSDM 0x042000 +#define GRCBASE_TCM 0x050000 +#define GRCBASE_BRB1 0x060000 +#define GRCBASE_MCP 0x080000 +#define GRCBASE_UPB 0x0C1000 +#define GRCBASE_CSDM 0x0C2000 +#define GRCBASE_USDM 0x0C4000 +#define GRCBASE_CCM 0x0D0000 +#define GRCBASE_UCM 0x0E0000 +#define GRCBASE_CDU 0x101000 +#define GRCBASE_DMAE 0x102000 +#define GRCBASE_PXP 0x103000 +#define GRCBASE_CFC 0x104000 +#define GRCBASE_HC 0x108000 +#define GRCBASE_ATC 0x110000 +#define GRCBASE_PXP2 0x120000 +#define GRCBASE_IGU 0x130000 +#define GRCBASE_PBF 0x140000 +#define GRCBASE_UMAC0 0x160000 +#define GRCBASE_UMAC1 0x160400 +#define GRCBASE_XPB 0x161000 +#define GRCBASE_MSTAT0 0x162000 +#define GRCBASE_MSTAT1 0x162800 +#define GRCBASE_XMAC0 0x163000 +#define GRCBASE_XMAC1 0x163800 +#define GRCBASE_TIMERS 0x164000 +#define GRCBASE_XSDM 0x166000 +#define GRCBASE_QM 0x168000 +#define GRCBASE_QM_4PORT 0x168000 +#define GRCBASE_DQ 0x170000 +#define GRCBASE_TSEM 0x180000 +#define GRCBASE_CSEM 0x200000 +#define GRCBASE_XSEM 0x280000 +#define GRCBASE_XSEM_4PORT 0x280000 +#define GRCBASE_USEM 0x300000 +#define GRCBASE_MCP_A 0x380000 +#define GRCBASE_MISC_AEU GRCBASE_MISC +#define GRCBASE_Tstorm GRCBASE_TSEM +#define GRCBASE_Cstorm GRCBASE_CSEM +#define GRCBASE_Xstorm GRCBASE_XSEM +#define GRCBASE_Ustorm GRCBASE_USEM + + +/* offset of configuration space in the pci core register */ +#define PCICFG_OFFSET 0x2000 +#define PCICFG_VENDOR_ID_OFFSET 0x00 +#define PCICFG_DEVICE_ID_OFFSET 0x02 +#define PCICFG_COMMAND_OFFSET 0x04 +#define PCICFG_COMMAND_IO_SPACE (1<<0) +#define PCICFG_COMMAND_MEM_SPACE (1<<1) +#define PCICFG_COMMAND_BUS_MASTER (1<<2) +#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3) +#define PCICFG_COMMAND_MWI_CYCLES (1<<4) +#define PCICFG_COMMAND_VGA_SNOOP (1<<5) +#define PCICFG_COMMAND_PERR_ENA (1<<6) +#define PCICFG_COMMAND_STEPPING (1<<7) +#define PCICFG_COMMAND_SERR_ENA (1<<8) +#define PCICFG_COMMAND_FAST_B2B (1<<9) +#define PCICFG_COMMAND_INT_DISABLE (1<<10) +#define PCICFG_COMMAND_RESERVED (0x1f<<11) +#define PCICFG_STATUS_OFFSET 0x06 +#define PCICFG_REVISION_ID_OFFSET 0x08 +#define PCICFG_REVESION_ID_MASK 0xff +#define PCICFG_REVESION_ID_ERROR_VAL 0xff +#define PCICFG_CACHE_LINE_SIZE 0x0c +#define PCICFG_LATENCY_TIMER 0x0d +#define PCICFG_HEADER_TYPE 0x0e +#define PCICFG_HEADER_TYPE_NORMAL 0 +#define PCICFG_HEADER_TYPE_BRIDGE 1 +#define PCICFG_HEADER_TYPE_CARDBUS 2 +#define PCICFG_BAR_1_LOW 0x10 +#define PCICFG_BAR_1_HIGH 0x14 +#define PCICFG_BAR_2_LOW 0x18 +#define PCICFG_BAR_2_HIGH 0x1c +#define PCICFG_BAR_3_LOW 0x20 +#define PCICFG_BAR_3_HIGH 0x24 +#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c +#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e +#define PCICFG_INT_LINE 0x3c +#define PCICFG_INT_PIN 0x3d +#define PCICFG_PM_CAPABILITY 0x48 +#define PCICFG_PM_CAPABILITY_VERSION (0x3<<16) +#define PCICFG_PM_CAPABILITY_CLOCK (1<<19) +#define PCICFG_PM_CAPABILITY_RESERVED (1<<20) +#define PCICFG_PM_CAPABILITY_DSI (1<<21) +#define PCICFG_PM_CAPABILITY_AUX_CURRENT (0x7<<22) +#define PCICFG_PM_CAPABILITY_D1_SUPPORT (1<<25) +#define PCICFG_PM_CAPABILITY_D2_SUPPORT (1<<26) +#define PCICFG_PM_CAPABILITY_PME_IN_D0 
(1<<27) +#define PCICFG_PM_CAPABILITY_PME_IN_D1 (1<<28) +#define PCICFG_PM_CAPABILITY_PME_IN_D2 (1<<29) +#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT (1<<30) +#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD (1<<31) +#define PCICFG_PM_CSR_OFFSET 0x4c +#define PCICFG_PM_CSR_STATE (0x3<<0) +#define PCICFG_PM_CSR_PME_ENABLE (1<<8) +#define PCICFG_PM_CSR_PME_STATUS (1<<15) +#define PCICFG_VPD_FLAG_ADDR_OFFSET 0x50 +#define PCICFG_VPD_DATA_OFFSET 0x54 +#define PCICFG_MSI_CAP_ID_OFFSET 0x58 +#define PCICFG_MSI_CONTROL_ENABLE (0x1<<16) +#define PCICFG_MSI_CONTROL_MCAP (0x7<<17) +#define PCICFG_MSI_CONTROL_MENA (0x7<<20) +#define PCICFG_MSI_CONTROL_64_BIT_ADDR_CAP (0x1<<23) +#define PCICFG_MSI_CONTROL_MSI_PVMASK_CAPABLE (0x1<<24) +#define PCICFG_MSI_ADDR_LOW_OFFSET 0x5c +#define PCICFG_MSI_ADDR_HIGH_OFFSET 0x60 +#define PCICFG_MSI_DATA_OFFSET 0x64 +#define PCICFG_GRC_ADDRESS 0x78 +#define PCICFG_GRC_DATA 0x80 +#define PCICFG_ME_REGISTER 0x98 +#define PCICFG_MSIX_CAP_ID_OFFSET 0xa0 +#define PCICFG_MSIX_CONTROL_TABLE_SIZE (0x7ff<<16) +#define PCICFG_MSIX_CONTROL_RESERVED (0x7<<27) +#define PCICFG_MSIX_CONTROL_FUNC_MASK (0x1<<30) +#define PCICFG_MSIX_CONTROL_MSIX_ENABLE (0x1<<31) + +#define PCICFG_DEVICE_CONTROL 0xb4 +#define PCICFG_DEVICE_CONTROL_NP_TRANSACTION_PEND (1<<21) +#define PCICFG_DEVICE_STATUS 0xb6 +#define PCICFG_DEVICE_STATUS_CORR_ERR_DET (1<<0) +#define PCICFG_DEVICE_STATUS_NON_FATAL_ERR_DET (1<<1) +#define PCICFG_DEVICE_STATUS_FATAL_ERR_DET (1<<2) +#define PCICFG_DEVICE_STATUS_UNSUP_REQ_DET (1<<3) +#define PCICFG_DEVICE_STATUS_AUX_PWR_DET (1<<4) +#define PCICFG_DEVICE_STATUS_NO_PEND (1<<5) +#define PCICFG_LINK_CONTROL 0xbc + + +/* config_2 offset */ +#define GRC_CONFIG_2_SIZE_REG 0x408 +#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0) +#define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_256K (3L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_512K (4L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_1M (5L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_2M (6L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_4M (7L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_8M (8L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_16M (9L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_32M (10L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_64M (11L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_128M (12L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0) +#define PCI_CONFIG_2_BAR1_64ENA (1L<<4) +#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5) +#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6) +#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7) +#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_8K (3L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_16K (4L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_32K (5L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_64K (6L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_128K (7L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_256K (8L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_512K (9L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_1M (10L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_2M (11L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_4M (12L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8) +#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16) +#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17) + +/* 
config_3 offset */ +#define GRC_CONFIG_3_SIZE_REG 0x40c +#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0) +#define PCI_CONFIG_3_FORCE_PME (1L<<24) +#define PCI_CONFIG_3_PME_STATUS (1L<<25) +#define PCI_CONFIG_3_PME_ENABLE (1L<<26) +#define PCI_CONFIG_3_PM_STATE (0x3L<<27) +#define PCI_CONFIG_3_VAUX_PRESET (1L<<30) +#define PCI_CONFIG_3_PCI_POWER (1L<<31) + +#define GRC_REG_DEVICE_CONTROL 0x4d8 +#define PCIE_SRIOV_DISABLE_IN_PROGRESS \ + (1 << 29) /*When VF Enable is cleared(after it was previously set), + this register will read a value of 1, indicating that all the + VFs that belong to this PF should be flushed. + Software should clear this bit within 1 second of VF Enable + being set by writing a 1 to it, so that VFs are visible to the system again. + WC */ +#define PCIE_FLR_IN_PROGRESS \ + (1 << 27) /*When FLR is initiated, this register will read a \ + value of 1 indicating that the + Function is in FLR state. Func can be brought out of FLR state either by + writing 1 to this register (at least 50 ms after FLR was initiated), + or it can also be cleared automatically after 55 ms if auto_clear bit + in private reg space is set. This bit also exists in VF register space + WC */ + +#define GRC_BAR2_CONFIG 0x4e0 +#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0) +#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0) +#define PCI_CONFIG_2_BAR2_64ENA (1L<<4) + +#define GRC_BAR3_CONFIG 0x4f4 +#define PCI_CONFIG_2_BAR3_SIZE (0xfL<<0) +#define PCI_CONFIG_2_BAR3_SIZE_DISABLED (0L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_64K (1L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_128K (2L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_256K (3L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_512K (4L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_1M (5L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_2M (6L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_4M (7L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_8M (8L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_16M (9L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_32M (10L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_64M (11L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_128M (12L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_256M (13L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_512M (14L<<0) +#define PCI_CONFIG_2_BAR3_SIZE_1G (15L<<0) +#define PCI_CONFIG_2_BAR3_64ENA (1L<<4) + +#define PCI_PM_DATA_A 0x410 +#define PCI_PM_DATA_B 0x414 +#define PCI_ID_VAL1 0x434 +#define PCI_ID_VAL2 0x438 +#define PCI_ID_VAL3 0x43c +#define PCI_ID_VAL3_REVISION_ID_ERROR (0xffL<<24) + + +#define GRC_CONFIG_REG_VF_BAR_REG_1 0x608 +#define GRC_CONFIG_REG_VF_BAR_REG_BAR0_SIZE 0xf + +#define GRC_CONFIG_REG_VF_MSIX_CONTROL 0x61C +#define GRC_CR_VF_MSIX_CTRL_VF_MSIX_TBL_SIZE_MASK \ + 0x3F /*This field resides in VF only and does not exist in PF. + This register controls the read value of the MSIX_CONTROL[10:0] register + in the VF configuration space. A value of "00000000011" indicates + a table size of 4. 
The value is controlled by IOV_MSIX_TBL_SIZ + define in version.v */ + +#define GRC_CONFIG_REG_PF_INIT_VF 0x624 +#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK \ + 0xf /*First VF_NUM for PF is encoded in this register. + The number of VFs assigned to a PF is assumed to be a multiple of 8. + Software should program these bits based on Total Number of VFs \ + programmed for each PF. + Since registers from 0x000-0x7ff are spilt across functions, each PF will have + the same location for the same 4 bits*/ + +#define PXPCS_TL_CONTROL_5 0x814 +#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/ +#define PXPCS_TL_CONTROL_5_BOUNDARY4K_ERR_ATTN (1 << 28) /*WC*/ +#define PXPCS_TL_CONTROL_5_MRRS_ERR_ATTN (1 << 27) /*WC*/ +#define PXPCS_TL_CONTROL_5_MPS_ERR_ATTN (1 << 26) /*WC*/ +#define PXPCS_TL_CONTROL_5_TTX_BRIDGE_FORWARD_ERR (1 << 25) /*WC*/ +#define PXPCS_TL_CONTROL_5_TTX_TXINTF_OVERFLOW (1 << 24) /*WC*/ +#define PXPCS_TL_CONTROL_5_PHY_ERR_ATTN (1 << 23) /*RO*/ +#define PXPCS_TL_CONTROL_5_DL_ERR_ATTN (1 << 22) /*RO*/ +#define PXPCS_TL_CONTROL_5_TTX_ERR_NP_TAG_IN_USE (1 << 21) /*WC*/ +#define PXPCS_TL_CONTROL_5_TRX_ERR_UNEXP_RTAG (1 << 20) /*WC*/ +#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT1 (1 << 19) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 (1 << 18) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_ECRC1 (1 << 17) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP1 (1 << 16) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW1 (1 << 15) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL1 (1 << 14) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT1 (1 << 13) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT1 (1 << 12) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL1 (1 << 11) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP1 (1 << 10) /*WC*/ +#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT (1 << 9) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT (1 << 8) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_ECRC (1 << 7) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP (1 << 6) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW (1 << 5) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL (1 << 4) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT (1 << 3) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT (1 << 2) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL (1 << 1) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP (1 << 0) /*WC*/ + + +#define PXPCS_TL_FUNC345_STAT 0x854 +#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT4 (1 << 29) /* WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 \ + (1 << 28) /* Unsupported Request Error Status in function4, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_ECRC4 \ + (1 << 27) /* ECRC Error TLP Status Status in function 4, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP4 \ + (1 << 26) /* Malformed TLP Status Status in function 4, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW4 \ + (1 << 25) /* Receiver Overflow Status Status in function 4, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL4 \ + (1 << 24) /* Unexpected Completion Status Status in function 4, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT4 \ + (1 << 23) /* Receive UR Statusin function 4. If set, generate \ + pcie_err_attn output when this error is seen. 
WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT4 \ + (1 << 22) /* Completer Timeout Status Status in function 4, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL4 \ + (1 << 21) /* Flow Control Protocol Error Status Status in \ + function 4, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP4 \ + (1 << 20) /* Poisoned Error Status Status in function 4, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT3 (1 << 19) /* WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 \ + (1 << 18) /* Unsupported Request Error Status in function3, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_ECRC3 \ + (1 << 17) /* ECRC Error TLP Status Status in function 3, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP3 \ + (1 << 16) /* Malformed TLP Status Status in function 3, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW3 \ + (1 << 15) /* Receiver Overflow Status Status in function 3, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL3 \ + (1 << 14) /* Unexpected Completion Status Status in function 3, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT3 \ + (1 << 13) /* Receive UR Statusin function 3. If set, generate \ + pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT3 \ + (1 << 12) /* Completer Timeout Status Status in function 3, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL3 \ + (1 << 11) /* Flow Control Protocol Error Status Status in \ + function 3, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP3 \ + (1 << 10) /* Poisoned Error Status Status in function 3, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT2 (1 << 9) /* WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2 \ + (1 << 8) /* Unsupported Request Error Status for Function 2, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_ECRC2 \ + (1 << 7) /* ECRC Error TLP Status Status for Function 2, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP2 \ + (1 << 6) /* Malformed TLP Status Status for Function 2, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW2 \ + (1 << 5) /* Receiver Overflow Status Status for Function 2, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL2 \ + (1 << 4) /* Unexpected Completion Status Status for Function 2, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2 \ + (1 << 3) /* Receive UR Statusfor Function 2. If set, generate \ + pcie_err_attn output when this error is seen. 
WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2 \ + (1 << 2) /* Completer Timeout Status Status for Function 2, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL2 \ + (1 << 1) /* Flow Control Protocol Error Status Status for \ + Function 2, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP2 \ + (1 << 0) /* Poisoned Error Status Status for Function 2, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ + + +#define PXPCS_TL_FUNC678_STAT 0x85C +#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT7 (1 << 29) /* WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 \ + (1 << 28) /* Unsupported Request Error Status in function7, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_ECRC7 \ + (1 << 27) /* ECRC Error TLP Status Status in function 7, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP7 \ + (1 << 26) /* Malformed TLP Status Status in function 7, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW7 \ + (1 << 25) /* Receiver Overflow Status Status in function 7, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL7 \ + (1 << 24) /* Unexpected Completion Status Status in function 7, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT7 \ + (1 << 23) /* Receive UR Statusin function 7. If set, generate \ + pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT7 \ + (1 << 22) /* Completer Timeout Status Status in function 7, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL7 \ + (1 << 21) /* Flow Control Protocol Error Status Status in \ + function 7, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP7 \ + (1 << 20) /* Poisoned Error Status Status in function 7, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT6 (1 << 19) /* WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 \ + (1 << 18) /* Unsupported Request Error Status in function6, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_ECRC6 \ + (1 << 17) /* ECRC Error TLP Status Status in function 6, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP6 \ + (1 << 16) /* Malformed TLP Status Status in function 6, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW6 \ + (1 << 15) /* Receiver Overflow Status Status in function 6, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL6 \ + (1 << 14) /* Unexpected Completion Status Status in function 6, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT6 \ + (1 << 13) /* Receive UR Statusin function 6. If set, generate \ + pcie_err_attn output when this error is seen. 
WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT6 \ + (1 << 12) /* Completer Timeout Status Status in function 6, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL6 \ + (1 << 11) /* Flow Control Protocol Error Status Status in \ + function 6, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP6 \ + (1 << 10) /* Poisoned Error Status Status in function 6, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT5 (1 << 9) /* WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5 \ + (1 << 8) /* Unsupported Request Error Status for Function 5, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_ECRC5 \ + (1 << 7) /* ECRC Error TLP Status Status for Function 5, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP5 \ + (1 << 6) /* Malformed TLP Status Status for Function 5, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW5 \ + (1 << 5) /* Receiver Overflow Status Status for Function 5, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL5 \ + (1 << 4) /* Unexpected Completion Status Status for Function 5, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5 \ + (1 << 3) /* Receive UR Statusfor Function 5. If set, generate \ + pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5 \ + (1 << 2) /* Completer Timeout Status Status for Function 5, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL5 \ + (1 << 1) /* Flow Control Protocol Error Status Status for \ + Function 5, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP5 \ + (1 << 0) /* Poisoned Error Status Status for Function 5, if set, \ + generate pcie_err_attn output when this error is seen.. 
WC */ + + +#define BAR_USTRORM_INTMEM 0x400000 +#define BAR_CSTRORM_INTMEM 0x410000 +#define BAR_XSTRORM_INTMEM 0x420000 +#define BAR_TSTRORM_INTMEM 0x430000 + +/* for accessing the IGU in case of status block ACK */ +#define BAR_IGU_INTMEM 0x440000 + +#define BAR_DOORBELL_OFFSET 0x800000 + +#define BAR_ME_REGISTER 0x450000 +#define ME_REG_PF_NUM_SHIFT 0 +#define ME_REG_PF_NUM \ + (7L<<ME_REG_PF_NUM_SHIFT) /* Relative PF Num */ +#define ME_REG_VF_VALID (1<<8) +#define ME_REG_VF_NUM_SHIFT 9 +#define ME_REG_VF_NUM_MASK (0x3f<<ME_REG_VF_NUM_SHIFT) +#define VF_ID(x) ((x & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT) +#define ME_REG_VF_ERR (0x1<<3) +#define ME_REG_ABS_PF_NUM_SHIFT 16 +#define ME_REG_ABS_PF_NUM \ + (7L<<ME_REG_ABS_PF_NUM_SHIFT) /* Absolute PF Num */ + + +#define PXP_VF_ADRR_NUM_QUEUES 136 +#define PXP_ADDR_QUEUE_SIZE 32 +#define PXP_ADDR_REG_SIZE 512 + + +#define PXP_VF_ADDR_IGU_START 0 +#define PXP_VF_ADDR_IGU_SIZE (0x3000) +#define PXP_VF_ADDR_IGU_END \ + ((PXP_VF_ADDR_IGU_START) + (PXP_VF_ADDR_IGU_SIZE) - 1) + +#define PXP_VF_ADDR_USDM_QUEUES_START 0x3000 +#define PXP_VF_ADDR_USDM_QUEUES_SIZE \ + (PXP_VF_ADRR_NUM_QUEUES * PXP_ADDR_QUEUE_SIZE) +#define PXP_VF_ADDR_USDM_QUEUES_END \ + ((PXP_VF_ADDR_USDM_QUEUES_START) + (PXP_VF_ADDR_USDM_QUEUES_SIZE) - 1) + +#define PXP_VF_ADDR_CSDM_QUEUES_START 0x4100 +#define PXP_VF_ADDR_CSDM_QUEUES_SIZE \ + (PXP_VF_ADRR_NUM_QUEUES * PXP_ADDR_QUEUE_SIZE) +#define PXP_VF_ADDR_CSDM_QUEUES_END \ + ((PXP_VF_ADDR_CSDM_QUEUES_START) + (PXP_VF_ADDR_CSDM_QUEUES_SIZE) - 1) + +#define PXP_VF_ADDR_XSDM_QUEUES_START 0x5200 +#define PXP_VF_ADDR_XSDM_QUEUES_SIZE \ + (PXP_VF_ADRR_NUM_QUEUES * PXP_ADDR_QUEUE_SIZE) +#define PXP_VF_ADDR_XSDM_QUEUES_END \ + ((PXP_VF_ADDR_XSDM_QUEUES_START) + (PXP_VF_ADDR_XSDM_QUEUES_SIZE) - 1) + +#define PXP_VF_ADDR_TSDM_QUEUES_START 0x6300 +#define PXP_VF_ADDR_TSDM_QUEUES_SIZE \ + (PXP_VF_ADRR_NUM_QUEUES * PXP_ADDR_QUEUE_SIZE) +#define PXP_VF_ADDR_TSDM_QUEUES_END \ + ((PXP_VF_ADDR_TSDM_QUEUES_START) + (PXP_VF_ADDR_TSDM_QUEUES_SIZE) - 1) + +#define PXP_VF_ADDR_USDM_GLOBAL_START 0x7400 +#define PXP_VF_ADDR_USDM_GLOBAL_SIZE (PXP_ADDR_REG_SIZE) +#define PXP_VF_ADDR_USDM_GLOBAL_END \ + ((PXP_VF_ADDR_USDM_GLOBAL_START) + (PXP_VF_ADDR_USDM_GLOBAL_SIZE) - 1) + +#define PXP_VF_ADDR_CSDM_GLOBAL_START 0x7600 +#define PXP_VF_ADDR_CSDM_GLOBAL_SIZE (PXP_ADDR_REG_SIZE) +#define PXP_VF_ADDR_CSDM_GLOBAL_END \ + ((PXP_VF_ADDR_CSDM_GLOBAL_START) + (PXP_VF_ADDR_CSDM_GLOBAL_SIZE) - 1) + +#define PXP_VF_ADDR_XSDM_GLOBAL_START 0x7800 +#define PXP_VF_ADDR_XSDM_GLOBAL_SIZE (PXP_ADDR_REG_SIZE) +#define PXP_VF_ADDR_XSDM_GLOBAL_END \ + ((PXP_VF_ADDR_XSDM_GLOBAL_START) + (PXP_VF_ADDR_XSDM_GLOBAL_SIZE) - 1) + +#define PXP_VF_ADDR_TSDM_GLOBAL_START 0x7a00 +#define PXP_VF_ADDR_TSDM_GLOBAL_SIZE (PXP_ADDR_REG_SIZE) +#define PXP_VF_ADDR_TSDM_GLOBAL_END \ + ((PXP_VF_ADDR_TSDM_GLOBAL_START) + (PXP_VF_ADDR_TSDM_GLOBAL_SIZE) - 1) + +#define PXP_VF_ADDR_DB_START 0x7c00 +#define PXP_VF_ADDR_DB_SIZE (0x200) +#define PXP_VF_ADDR_DB_END \ + ((PXP_VF_ADDR_DB_START) + (PXP_VF_ADDR_DB_SIZE) - 1) + +#define PXP_VF_ADDR_GRC_START 0x7e00 +#define PXP_VF_ADDR_GRC_SIZE (0x200) +#define PXP_VF_ADDR_GRC_END \ + ((PXP_VF_ADDR_GRC_START) + (PXP_VF_ADDR_GRC_SIZE) - 1) + +#define PXP_VF_ADDR_DORQ_START (0x0) +#define PXP_VF_ADDR_DORQ_SIZE (0xffffffff) +#define PXP_VF_ADDR_DORQ_END (0xffffffff) + +#define PXP_BAR_GRC 0 +#define PXP_BAR_TSDM 0 +#define PXP_BAR_USDM 0 +#define PXP_BAR_XSDM 0 +#define PXP_BAR_CSDM 0 +#define PXP_BAR_IGU 0 +#define PXP_BAR_DQ 1 + +#define PXP_VF_BAR_IGU 0 
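
Editor's note on the ME register block above: a word read from BAR_ME_REGISTER packs the relative PF number, the absolute PF number and, when ME_REG_VF_VALID is set, the VF number extracted by VF_ID(). The following is only a minimal decoding sketch built on those defines; the struct me_ident and me_reg_decode() names are illustrative and do not exist in the driver sources.

/* Illustrative only: decode a BAR_ME_REGISTER value with the ME_REG_*
 * masks defined above. Names below are editorial placeholders. */
#include <stdint.h>
#include <stdbool.h>

struct me_ident {
	uint8_t rel_pf;   /* relative PF number (ME_REG_PF_NUM field) */
	uint8_t abs_pf;   /* absolute PF number (ME_REG_ABS_PF_NUM field) */
	bool    is_vf;    /* true when ME_REG_VF_VALID is set */
	uint8_t vf_num;   /* meaningful only when is_vf is true */
};

static inline struct me_ident me_reg_decode(uint32_t me_reg)
{
	struct me_ident id;

	id.rel_pf = (me_reg & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT;
	id.abs_pf = (me_reg & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT;
	id.is_vf  = (me_reg & ME_REG_VF_VALID) != 0;
	id.vf_num = id.is_vf ? VF_ID(me_reg) : 0;

	return id;
}
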
+#define PXP_VF_BAR_USDM_QUEUES 0 +#define PXP_VF_BAR_TSDM_QUEUES 0 +#define PXP_VF_BAR_XSDM_QUEUES 0 +#define PXP_VF_BAR_CSDM_QUEUES 0 +#define PXP_VF_BAR_USDM_GLOBAL 0 +#define PXP_VF_BAR_TSDM_GLOBAL 0 +#define PXP_VF_BAR_XSDM_GLOBAL 0 +#define PXP_VF_BAR_CSDM_GLOBAL 0 +#define PXP_VF_BAR_DB 0 +#define PXP_VF_BAR_GRC 0 +#define PXP_VF_BAR_DORQ 1 + +/* PCI CAPABILITIES*/ + +#define PCI_CAP_PCIE 0x10 /*PCIe capability ID*/ + +#define PCIE_DEV_CAPS 0x04 + +#define PCIE_DEV_CTRL 0x08 +#define PCIE_DEV_CTRL_FLR 0x8000; + +#define PCIE_DEV_STATUS 0x0A + +#define PCI_CAP_MSIX 0x11 /*MSI-X capability ID*/ +#define PCI_MSIX_CONTROL_SHIFT 16 +#define PCI_MSIX_TABLE_SIZE_MASK 0x07FF +#define PCI_MSIX_TABLE_ENABLE_MASK 0x8000 + + +#define MDIO_REG_BANK_CL73_IEEEB0 0x0 +#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0 +#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200 +#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN 0x1000 +#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST 0x8000 + +#define MDIO_REG_BANK_CL73_IEEEB1 0x10 +#define MDIO_CL73_IEEEB1_AN_ADV1 0x00 +#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE 0x0400 +#define MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC 0x0800 +#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH 0x0C00 +#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK 0x0C00 +#define MDIO_CL73_IEEEB1_AN_ADV2 0x01 +#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M 0x0000 +#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX 0x0020 +#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 0x0040 +#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR 0x0080 +#define MDIO_CL73_IEEEB1_AN_LP_ADV1 0x03 +#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE 0x0400 +#define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC 0x0800 +#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH 0x0C00 +#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK 0x0C00 +#define MDIO_CL73_IEEEB1_AN_LP_ADV2 0x04 + +#define MDIO_REG_BANK_RX0 0x80b0 +#define MDIO_RX0_RX_STATUS 0x10 +#define MDIO_RX0_RX_STATUS_SIGDET 0x8000 +#define MDIO_RX0_RX_STATUS_RX_SEQ_DONE 0x1000 +#define MDIO_RX0_RX_EQ_BOOST 0x1c +#define MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7 +#define MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL 0x10 + +#define MDIO_REG_BANK_RX1 0x80c0 +#define MDIO_RX1_RX_EQ_BOOST 0x1c +#define MDIO_RX1_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7 +#define MDIO_RX1_RX_EQ_BOOST_OFFSET_CTRL 0x10 + +#define MDIO_REG_BANK_RX2 0x80d0 +#define MDIO_RX2_RX_EQ_BOOST 0x1c +#define MDIO_RX2_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7 +#define MDIO_RX2_RX_EQ_BOOST_OFFSET_CTRL 0x10 + +#define MDIO_REG_BANK_RX3 0x80e0 +#define MDIO_RX3_RX_EQ_BOOST 0x1c +#define MDIO_RX3_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7 +#define MDIO_RX3_RX_EQ_BOOST_OFFSET_CTRL 0x10 + +#define MDIO_REG_BANK_RX_ALL 0x80f0 +#define MDIO_RX_ALL_RX_EQ_BOOST 0x1c +#define MDIO_RX_ALL_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7 +#define MDIO_RX_ALL_RX_EQ_BOOST_OFFSET_CTRL 0x10 + +#define MDIO_REG_BANK_TX0 0x8060 +#define MDIO_TX0_TX_DRIVER 0x17 +#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000 +#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12 +#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00 +#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8 +#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0 +#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4 +#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e +#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1 +#define MDIO_TX0_TX_DRIVER_ICBUF1T 1 + +#define MDIO_REG_BANK_TX1 0x8070 +#define MDIO_TX1_TX_DRIVER 0x17 +#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000 +#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12 +#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00 +#define 
MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8 +#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0 +#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4 +#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e +#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1 +#define MDIO_TX0_TX_DRIVER_ICBUF1T 1 + +#define MDIO_REG_BANK_TX2 0x8080 +#define MDIO_TX2_TX_DRIVER 0x17 +#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000 +#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12 +#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00 +#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8 +#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0 +#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4 +#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e +#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1 +#define MDIO_TX0_TX_DRIVER_ICBUF1T 1 + +#define MDIO_REG_BANK_TX3 0x8090 +#define MDIO_TX3_TX_DRIVER 0x17 +#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000 +#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12 +#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00 +#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8 +#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0 +#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4 +#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e +#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1 +#define MDIO_TX0_TX_DRIVER_ICBUF1T 1 + +#define MDIO_REG_BANK_XGXS_BLOCK0 0x8000 +#define MDIO_BLOCK0_XGXS_CONTROL 0x10 + +#define MDIO_REG_BANK_XGXS_BLOCK1 0x8010 +#define MDIO_BLOCK1_LANE_CTRL0 0x15 +#define MDIO_BLOCK1_LANE_CTRL1 0x16 +#define MDIO_BLOCK1_LANE_CTRL2 0x17 +#define MDIO_BLOCK1_LANE_PRBS 0x19 + +#define MDIO_REG_BANK_XGXS_BLOCK2 0x8100 +#define MDIO_XGXS_BLOCK2_RX_LN_SWAP 0x10 +#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE 0x8000 +#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE 0x4000 +#define MDIO_XGXS_BLOCK2_TX_LN_SWAP 0x11 +#define MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE 0x8000 +#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G 0x14 +#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS 0x0001 +#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS 0x0010 +#define MDIO_XGXS_BLOCK2_TEST_MODE_LANE 0x15 + +#define MDIO_REG_BANK_GP_STATUS 0x8120 +#define MDIO_GP_STATUS_TOP_AN_STATUS1 0x1B +#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE 0x0001 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE 0x0002 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS 0x0004 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS 0x0008 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE 0x0010 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_LP_NP_BAM_ABLE 0x0020 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE 0x0040 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE 0x0080 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK 0x3f00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M 0x0000 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M 0x0100 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G 0x0200 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G 0x0300 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G 0x0400 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G 0x0500 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG 0x0600 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 0x0700 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG 0x0800 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G 0x0900 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G 0x0A00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G 0x0B00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G 0x0C00 +#define 
MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX 0x0D00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 0x0E00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR 0x0F00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI 0x1B00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS 0x1E00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI 0x1F00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2 0x3900 + + +#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130 +#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS 0x10 +#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK 0x8000 +#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL 0x11 +#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN 0x1 +#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK 0x13 +#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT (0xb71<<1) + +#define MDIO_REG_BANK_SERDES_DIGITAL 0x8300 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1 0x10 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE 0x0001 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_TBI_IF 0x0002 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN 0x0004 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT 0x0008 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET 0x0010 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE 0x0020 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2 0x11 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN 0x0001 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 0x0040 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1 0x14 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SGMII 0x0001 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_LINK 0x0002 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX 0x0004 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK 0x0018 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 3 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_2_5G 0x0018 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G 0x0010 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M 0x0008 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M 0x0000 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2 0x15 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED 0x0002 +#define MDIO_SERDES_DIGITAL_MISC1 0x18 +#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK 0xE000 +#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M 0x0000 +#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_100M 0x2000 +#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_125M 0x4000 +#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M 0x6000 +#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_187_5M 0x8000 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL 0x0010 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK 0x000f +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_2_5G 0x0000 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_5G 0x0001 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_6G 0x0002 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_HIG 0x0003 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4 0x0004 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12G 0x0005 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12_5G 0x0006 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G 0x0007 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_15G 0x0008 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_16G 0x0009 + +#define MDIO_REG_BANK_OVER_1G 0x8320 +#define MDIO_OVER_1G_DIGCTL_3_4 0x14 +#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_MASK 0xffe0 +#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_SHIFT 5 +#define 
MDIO_OVER_1G_UP1 0x19 +#define MDIO_OVER_1G_UP1_2_5G 0x0001 +#define MDIO_OVER_1G_UP1_5G 0x0002 +#define MDIO_OVER_1G_UP1_6G 0x0004 +#define MDIO_OVER_1G_UP1_10G 0x0010 +#define MDIO_OVER_1G_UP1_10GH 0x0008 +#define MDIO_OVER_1G_UP1_12G 0x0020 +#define MDIO_OVER_1G_UP1_12_5G 0x0040 +#define MDIO_OVER_1G_UP1_13G 0x0080 +#define MDIO_OVER_1G_UP1_15G 0x0100 +#define MDIO_OVER_1G_UP1_16G 0x0200 +#define MDIO_OVER_1G_UP2 0x1A +#define MDIO_OVER_1G_UP2_IPREDRIVER_MASK 0x0007 +#define MDIO_OVER_1G_UP2_IDRIVER_MASK 0x0038 +#define MDIO_OVER_1G_UP2_PREEMPHASIS_MASK 0x03C0 +#define MDIO_OVER_1G_UP3 0x1B +#define MDIO_OVER_1G_UP3_HIGIG2 0x0001 +#define MDIO_OVER_1G_LP_UP1 0x1C +#define MDIO_OVER_1G_LP_UP2 0x1D +#define MDIO_OVER_1G_LP_UP2_MR_ADV_OVER_1G_MASK 0x03ff +#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK 0x0780 +#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT 7 +#define MDIO_OVER_1G_LP_UP3 0x1E + +#define MDIO_REG_BANK_REMOTE_PHY 0x8330 +#define MDIO_REMOTE_PHY_MISC_RX_STATUS 0x10 +#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG 0x0010 +#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG 0x0600 + +#define MDIO_REG_BANK_BAM_NEXT_PAGE 0x8350 +#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL 0x10 +#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE 0x0001 +#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN 0x0002 + +#define MDIO_REG_BANK_CL73_USERB0 0x8370 +#define MDIO_CL73_USERB0_CL73_UCTRL 0x10 +#define MDIO_CL73_USERB0_CL73_UCTRL_USTAT1_MUXSEL 0x0002 +#define MDIO_CL73_USERB0_CL73_USTAT1 0x11 +#define MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK 0x0100 +#define MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37 0x0400 +#define MDIO_CL73_USERB0_CL73_BAM_CTRL1 0x12 +#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN 0x8000 +#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN 0x4000 +#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN 0x2000 +#define MDIO_CL73_USERB0_CL73_BAM_CTRL3 0x14 +#define MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR 0x0001 + +#define MDIO_REG_BANK_AER_BLOCK 0xFFD0 +#define MDIO_AER_BLOCK_AER_REG 0x1E + +#define MDIO_REG_BANK_COMBO_IEEE0 0xFFE0 +#define MDIO_COMBO_IEEE0_MII_CONTROL 0x10 +#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK 0x2040 +#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_10 0x0000 +#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100 0x2000 +#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000 0x0040 +#define MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX 0x0100 +#define MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN 0x0200 +#define MDIO_COMBO_IEEO_MII_CONTROL_AN_EN 0x1000 +#define MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK 0x4000 +#define MDIO_COMBO_IEEO_MII_CONTROL_RESET 0x8000 +#define MDIO_COMBO_IEEE0_MII_STATUS 0x11 +#define MDIO_COMBO_IEEE0_MII_STATUS_LINK_PASS 0x0004 +#define MDIO_COMBO_IEEE0_MII_STATUS_AUTONEG_COMPLETE 0x0020 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV 0x14 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX 0x0020 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_HALF_DUPLEX 0x0040 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK 0x0180 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE 0x0000 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC 0x0080 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC 0x0100 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH 0x0180 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_NEXT_PAGE 0x8000 +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1 0x15 +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_NEXT_PAGE 0x8000 +#define 
MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_ACK 0x4000 +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_MASK 0x0180 +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_NONE 0x0000 +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_BOTH 0x0180 +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_HALF_DUP_CAP 0x0040 +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_FULL_DUP_CAP 0x0020 +/*WhenthelinkpartnerisinSGMIImode(bit0=1), then +bit15=link, bit12=duplex, bits11:10=speed, bit14=acknowledge. +Theotherbitsarereservedandshouldbezero*/ +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_SGMII_MODE 0x0001 + + +#define MDIO_PMA_DEVAD 0x1 +/*ieee*/ +#define MDIO_PMA_REG_CTRL 0x0 +#define MDIO_PMA_REG_STATUS 0x1 +#define MDIO_PMA_REG_10G_CTRL2 0x7 +#define MDIO_PMA_REG_TX_DISABLE 0x0009 +#define MDIO_PMA_REG_RX_SD 0xa +/*bnx2x*/ +#define MDIO_PMA_REG_BNX2X_CTRL 0x0096 +#define MDIO_PMA_REG_FEC_CTRL 0x00ab +#define MDIO_PMA_LASI_RXCTRL 0x9000 +#define MDIO_PMA_LASI_TXCTRL 0x9001 +#define MDIO_PMA_LASI_CTRL 0x9002 +#define MDIO_PMA_LASI_RXSTAT 0x9003 +#define MDIO_PMA_LASI_TXSTAT 0x9004 +#define MDIO_PMA_LASI_STAT 0x9005 +#define MDIO_PMA_REG_PHY_IDENTIFIER 0xc800 +#define MDIO_PMA_REG_DIGITAL_CTRL 0xc808 +#define MDIO_PMA_REG_DIGITAL_STATUS 0xc809 +#define MDIO_PMA_REG_TX_POWER_DOWN 0xca02 +#define MDIO_PMA_REG_CMU_PLL_BYPASS 0xca09 +#define MDIO_PMA_REG_MISC_CTRL 0xca0a +#define MDIO_PMA_REG_GEN_CTRL 0xca10 +#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188 +#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a +#define MDIO_PMA_REG_M8051_MSGIN_REG 0xca12 +#define MDIO_PMA_REG_M8051_MSGOUT_REG 0xca13 +#define MDIO_PMA_REG_ROM_VER1 0xca19 +#define MDIO_PMA_REG_ROM_VER2 0xca1a +#define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b +#define MDIO_PMA_REG_PLL_BANDWIDTH 0xca1d +#define MDIO_PMA_REG_PLL_CTRL 0xca1e +#define MDIO_PMA_REG_MISC_CTRL0 0xca23 +#define MDIO_PMA_REG_LRM_MODE 0xca3f +#define MDIO_PMA_REG_CDR_BANDWIDTH 0xca46 +#define MDIO_PMA_REG_MISC_CTRL1 0xca85 + +#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL 0x8000 +#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK 0x000c +#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE 0x0000 +#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE 0x0004 +#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IN_PROGRESS 0x0008 +#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_FAILED 0x000c +#define MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT 0x8002 +#define MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR 0x8003 +#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF 0xc820 +#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK 0xff +#define MDIO_PMA_REG_8726_TX_CTRL1 0xca01 +#define MDIO_PMA_REG_8726_TX_CTRL2 0xca05 + +#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR 0x8005 +#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF 0x8007 +#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff +#define MDIO_PMA_REG_8727_MISC_CTRL 0x8309 +#define MDIO_PMA_REG_8727_TX_CTRL1 0xca02 +#define MDIO_PMA_REG_8727_TX_CTRL2 0xca05 +#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808 +#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e +#define MDIO_PMA_REG_8727_PCS_GP 0xc842 +#define MDIO_PMA_REG_8727_OPT_CFG_REG 0xc8e4 + +#define MDIO_AN_REG_8727_MISC_CTRL 0x8309 +#define MDIO_PMA_REG_8073_CHIP_REV 0xc801 +#define MDIO_PMA_REG_8073_SPEED_LINK_STATUS 0xc820 +#define MDIO_PMA_REG_8073_XAUI_WA 0xc841 +#define MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL 0xcd08 + +#define MDIO_PMA_REG_7101_RESET 0xc000 +#define MDIO_PMA_REG_7107_LED_CNTL 0xc007 +#define MDIO_PMA_REG_7107_LINK_LED_CNTL 0xc009 +#define MDIO_PMA_REG_7101_VER1 0xc026 
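
Editor's note on the SFP two-wire (I2C) registers above: the two-bit status field of MDIO_PMA_REG_SFP_TWO_WIRE_CTRL reports idle, in-progress, complete, or failed. Below is a hedged polling sketch that uses only those masks; mdio_pma_read() stands in for whatever clause-45 MDIO read helper the caller supplies (device address MDIO_PMA_DEVAD) and is not a function from this codebase.

/* Illustrative only: wait for an SFP two-wire transaction to finish by
 * polling the status field in MDIO_PMA_REG_SFP_TWO_WIRE_CTRL. */
#include <stdint.h>

/* Assumed clause-45 read helper provided by the caller (not part of
 * this driver): returns 0 on success and fills *val. */
int mdio_pma_read(void *phy, uint16_t reg, uint16_t *val);

static int sfp_two_wire_wait(void *phy, int max_polls)
{
	uint16_t ctrl;
	int i, rc;

	for (i = 0; i < max_polls; i++) {
		rc = mdio_pma_read(phy, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &ctrl);
		if (rc)
			return rc;

		switch (ctrl & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) {
		case MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE:
			return 0;	/* transaction done */
		case MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_FAILED:
			return -1;	/* controller reported failure */
		default:
			/* IDLE or IN_PROGRESS: keep polling */
			break;
		}
	}
	return -1;	/* timed out */
}
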
+#define MDIO_PMA_REG_7101_VER2 0xc027 + +#define MDIO_PMA_REG_8481_PMD_SIGNAL 0xa811 +#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c +#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f +#define MDIO_PMA_REG_8481_LED3_MASK 0xa832 +#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834 +#define MDIO_PMA_REG_8481_LED5_MASK 0xa838 +#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835 +#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b +#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK 0x800 +#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT 11 + + +#define MDIO_WIS_DEVAD 0x2 +/*bnx2x*/ +#define MDIO_WIS_REG_LASI_CNTL 0x9002 +#define MDIO_WIS_REG_LASI_STATUS 0x9005 + +#define MDIO_PCS_DEVAD 0x3 +#define MDIO_PCS_REG_STATUS 0x0020 +#define MDIO_PCS_REG_LASI_STATUS 0x9005 +#define MDIO_PCS_REG_7101_DSP_ACCESS 0xD000 +#define MDIO_PCS_REG_7101_SPI_MUX 0xD008 +#define MDIO_PCS_REG_7101_SPI_CTRL_ADDR 0xE12A +#define MDIO_PCS_REG_7101_SPI_RESET_BIT (5) +#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR 0xE02A +#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_WRITE_ENABLE_CMD (6) +#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_BULK_ERASE_CMD (0xC7) +#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_PAGE_PROGRAM_CMD (2) +#define MDIO_PCS_REG_7101_SPI_BYTES_TO_TRANSFER_ADDR 0xE028 + + +#define MDIO_XS_DEVAD 0x4 +#define MDIO_XS_REG_STATUS 0x0001 +#define MDIO_XS_PLL_SEQUENCER 0x8000 +#define MDIO_XS_SFX7101_XGXS_TEST1 0xc00a + +#define MDIO_XS_8706_REG_BANK_RX0 0x80bc +#define MDIO_XS_8706_REG_BANK_RX1 0x80cc +#define MDIO_XS_8706_REG_BANK_RX2 0x80dc +#define MDIO_XS_8706_REG_BANK_RX3 0x80ec +#define MDIO_XS_8706_REG_BANK_RXA 0x80fc + +#define MDIO_XS_REG_8073_RX_CTRL_PCIE 0x80FA + +#define MDIO_AN_DEVAD 0x7 +/*ieee*/ +#define MDIO_AN_REG_CTRL 0x0000 +#define MDIO_AN_REG_STATUS 0x0001 +#define MDIO_AN_REG_STATUS_AN_COMPLETE 0x0020 +#define MDIO_AN_REG_ADV_PAUSE 0x0010 +#define MDIO_AN_REG_ADV_PAUSE_PAUSE 0x0400 +#define MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC 0x0800 +#define MDIO_AN_REG_ADV_PAUSE_BOTH 0x0C00 +#define MDIO_AN_REG_ADV_PAUSE_MASK 0x0C00 +#define MDIO_AN_REG_ADV 0x0011 +#define MDIO_AN_REG_ADV2 0x0012 +#define MDIO_AN_REG_LP_AUTO_NEG 0x0013 +#define MDIO_AN_REG_LP_AUTO_NEG2 0x0014 +#define MDIO_AN_REG_MASTER_STATUS 0x0021 +#define MDIO_AN_REG_EEE_ADV 0x003c +#define MDIO_AN_REG_LP_EEE_ADV 0x003d +/*bnx2x*/ +#define MDIO_AN_REG_LINK_STATUS 0x8304 +#define MDIO_AN_REG_CL37_CL73 0x8370 +#define MDIO_AN_REG_CL37_AN 0xffe0 +#define MDIO_AN_REG_CL37_FC_LD 0xffe4 +#define MDIO_AN_REG_CL37_FC_LP 0xffe5 +#define MDIO_AN_REG_1000T_STATUS 0xffea + +#define MDIO_AN_REG_8073_2_5G 0x8329 +#define MDIO_AN_REG_8073_BAM 0x8350 + +#define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL 0x0020 +#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0 +#define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G 0x40 +#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1 +#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4 +#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6 +#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9 +#define MDIO_AN_REG_8481_1G_100T_EXT_CTRL 0xfff0 +#define MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF 0x0008 +#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5 +#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7 +#define MDIO_AN_REG_8481_AUX_CTRL 0xfff8 +#define MDIO_AN_REG_8481_LEGACY_SHADOW 0xfffc + +/* BNX2X84823 only */ +#define MDIO_CTL_DEVAD 0x1e +#define MDIO_CTL_REG_84823_MEDIA 0x401a +#define MDIO_CTL_REG_84823_MEDIA_MAC_MASK 0x0018 + /* These pins configure the BNX2X84823 interface to MAC after reset. 
*/ +#define MDIO_CTL_REG_84823_CTRL_MAC_XFI 0x0008 +#define MDIO_CTL_REG_84823_MEDIA_MAC_XAUI_M 0x0010 + /* These pins configure the BNX2X84823 interface to Line after reset. */ +#define MDIO_CTL_REG_84823_MEDIA_LINE_MASK 0x0060 +#define MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L 0x0020 +#define MDIO_CTL_REG_84823_MEDIA_LINE_XFI 0x0040 + /* When this pin is active high during reset, 10GBASE-T core is power + * down, When it is active low the 10GBASE-T is power up + */ +#define MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN 0x0080 +#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK 0x0100 +#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000 +#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100 +#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000 +#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005 +#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080 +#define MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH 0xa82b +#define MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ 0x2f +#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3 +#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec +#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080 + +/* BNX2X84833 only */ +#define MDIO_84833_TOP_CFG_FW_REV 0x400f +#define MDIO_84833_TOP_CFG_FW_EEE 0x10b1 +#define MDIO_84833_TOP_CFG_FW_NO_EEE 0x1f81 +#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a +#define MDIO_84833_SUPER_ISOLATE 0x8000 +/* These are mailbox register set used by 84833. */ +#define MDIO_84833_TOP_CFG_SCRATCH_REG0 0x4005 +#define MDIO_84833_TOP_CFG_SCRATCH_REG1 0x4006 +#define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007 +#define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008 +#define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009 +#define MDIO_84833_TOP_CFG_SCRATCH_REG26 0x4037 +#define MDIO_84833_TOP_CFG_SCRATCH_REG27 0x4038 +#define MDIO_84833_TOP_CFG_SCRATCH_REG28 0x4039 +#define MDIO_84833_TOP_CFG_SCRATCH_REG29 0x403a +#define MDIO_84833_TOP_CFG_SCRATCH_REG30 0x403b +#define MDIO_84833_TOP_CFG_SCRATCH_REG31 0x403c +#define MDIO_84833_CMD_HDLR_COMMAND MDIO_84833_TOP_CFG_SCRATCH_REG0 +#define MDIO_84833_CMD_HDLR_STATUS MDIO_84833_TOP_CFG_SCRATCH_REG26 +#define MDIO_84833_CMD_HDLR_DATA1 MDIO_84833_TOP_CFG_SCRATCH_REG27 +#define MDIO_84833_CMD_HDLR_DATA2 MDIO_84833_TOP_CFG_SCRATCH_REG28 +#define MDIO_84833_CMD_HDLR_DATA3 MDIO_84833_TOP_CFG_SCRATCH_REG29 +#define MDIO_84833_CMD_HDLR_DATA4 MDIO_84833_TOP_CFG_SCRATCH_REG30 +#define MDIO_84833_CMD_HDLR_DATA5 MDIO_84833_TOP_CFG_SCRATCH_REG31 + +/* Mailbox command set used by 84833. */ +#define PHY84833_CMD_SET_PAIR_SWAP 0x8001 +#define PHY84833_CMD_GET_EEE_MODE 0x8008 +#define PHY84833_CMD_SET_EEE_MODE 0x8009 +#define PHY84833_CMD_GET_CURRENT_TEMP 0x8031 +/* Mailbox status set used by 84833. 
*/ +#define PHY84833_STATUS_CMD_RECEIVED 0x0001 +#define PHY84833_STATUS_CMD_IN_PROGRESS 0x0002 +#define PHY84833_STATUS_CMD_COMPLETE_PASS 0x0004 +#define PHY84833_STATUS_CMD_COMPLETE_ERROR 0x0008 +#define PHY84833_STATUS_CMD_OPEN_FOR_CMDS 0x0010 +#define PHY84833_STATUS_CMD_SYSTEM_BOOT 0x0020 +#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040 +#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080 +#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5 + + +/* Warpcore clause 45 addressing */ +#define MDIO_WC_DEVAD 0x3 +#define MDIO_WC_REG_IEEE0BLK_MIICNTL 0x0 +#define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7 +#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10 +#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11 +#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12 +#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000 +#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000 +#define MDIO_WC_REG_PCS_STATUS2 0x0021 +#define MDIO_WC_REG_PMD_KR_CONTROL 0x0096 +#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000 +#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e +#define MDIO_WC_REG_XGXSBLK1_DESKEW 0x8010 +#define MDIO_WC_REG_XGXSBLK1_LANECTRL0 0x8015 +#define MDIO_WC_REG_XGXSBLK1_LANECTRL1 0x8016 +#define MDIO_WC_REG_XGXSBLK1_LANECTRL2 0x8017 +#define MDIO_WC_REG_XGXSBLK1_LANECTRL3 0x8018 +#define MDIO_WC_REG_XGXSBLK1_LANETEST0 0x801a +#define MDIO_WC_REG_TX0_ANA_CTRL0 0x8061 +#define MDIO_WC_REG_TX1_ANA_CTRL0 0x8071 +#define MDIO_WC_REG_TX2_ANA_CTRL0 0x8081 +#define MDIO_WC_REG_TX3_ANA_CTRL0 0x8091 +#define MDIO_WC_REG_TX0_TX_DRIVER 0x8067 +#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET 0x04 +#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_MASK 0x00f0 +#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET 0x08 +#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00 +#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET 0x0c +#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_MASK 0x7000 +#define MDIO_WC_REG_TX1_TX_DRIVER 0x8077 +#define MDIO_WC_REG_TX2_TX_DRIVER 0x8087 +#define MDIO_WC_REG_TX3_TX_DRIVER 0x8097 +#define MDIO_WC_REG_RX0_ANARXCONTROL1G 0x80b9 +#define MDIO_WC_REG_RX2_ANARXCONTROL1G 0x80d9 +#define MDIO_WC_REG_RX0_PCI_CTRL 0x80ba +#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca +#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da +#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea +#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104 +#define MDIO_WC_REG_XGXS_STATUS3 0x8129 +#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130 +#define MDIO_WC_REG_PAR_DET_10G_CTRL 0x8131 +#define MDIO_WC_REG_XGXS_STATUS4 0x813c +#define MDIO_WC_REG_XGXS_X2_CONTROL2 0x8141 +#define MDIO_WC_REG_XGXS_X2_CONTROL3 0x8142 +#define MDIO_WC_REG_XGXS_RX_LN_SWAP1 0x816B +#define MDIO_WC_REG_XGXS_TX_LN_SWAP1 0x8169 +#define MDIO_WC_REG_GP2_STATUS_GP_2_0 0x81d0 +#define MDIO_WC_REG_GP2_STATUS_GP_2_1 0x81d1 +#define MDIO_WC_REG_GP2_STATUS_GP_2_2 0x81d2 +#define MDIO_WC_REG_GP2_STATUS_GP_2_3 0x81d3 +#define MDIO_WC_REG_GP2_STATUS_GP_2_4 0x81d4 +#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL 0x1000 +#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CMPL 0x0100 +#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP 0x0010 +#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CAP 0x1 +#define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP 0x81EE +#define MDIO_WC_REG_UC_INFO_B1_VERSION 0x81F0 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE 0x81F2 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE0_OFFSET 0x0 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT 0x0 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_OPT_LR 0x1 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC 0x2 +#define 
MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_XLAUI 0x3 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_LONG_CH_6G 0x4 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE1_OFFSET 0x4 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE2_OFFSET 0x8 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE3_OFFSET 0xc +#define MDIO_WC_REG_UC_INFO_B1_CRC 0x81FE +#define MDIO_WC_REG_DSC1B0_UC_CTRL 0x820e +#define MDIO_WC_REG_DSC1B0_UC_CTRL_RDY4CMD (1<<7) +#define MDIO_WC_REG_DSC_SMC 0x8213 +#define MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0 0x821e +#define MDIO_WC_REG_TX_FIR_TAP 0x82e2 +#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET 0x00 +#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_MASK 0x000f +#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET 0x04 +#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_MASK 0x03f0 +#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET 0x0a +#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK 0x7c00 +#define MDIO_WC_REG_TX_FIR_TAP_ENABLE 0x8000 +#define MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP 0x82e2 +#define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL 0x82e3 +#define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL 0x82e6 +#define MDIO_WC_REG_CL72_USERB0_CL72_BR_DEF_CTRL 0x82e7 +#define MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL 0x82e8 +#define MDIO_WC_REG_CL72_USERB0_CL72_MISC4_CONTROL 0x82ec +#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1 0x8300 +#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2 0x8301 +#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3 0x8302 +#define MDIO_WC_REG_SERDESDIGITAL_STATUS1000X1 0x8304 +#define MDIO_WC_REG_SERDESDIGITAL_MISC1 0x8308 +#define MDIO_WC_REG_SERDESDIGITAL_MISC2 0x8309 +#define MDIO_WC_REG_DIGITAL3_UP1 0x8329 +#define MDIO_WC_REG_DIGITAL3_LP_UP1 0x832c +#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c +#define MDIO_WC_REG_DIGITAL4_MISC5 0x833e +#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345 +#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349 +#define MDIO_WC_REG_DIGITAL5_LINK_STATUS 0x834d +#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e +#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350 +#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368 +#define MDIO_WC_REG_CL73_USERB0_CTRL 0x8370 +#define MDIO_WC_REG_CL73_USERB0_USTAT 0x8371 +#define MDIO_WC_REG_CL73_BAM_CTRL1 0x8372 +#define MDIO_WC_REG_CL73_BAM_CTRL2 0x8373 +#define MDIO_WC_REG_CL73_BAM_CTRL3 0x8374 +#define MDIO_WC_REG_CL73_BAM_CODE_FIELD 0x837b +#define MDIO_WC_REG_EEE_COMBO_CONTROL0 0x8390 +#define MDIO_WC_REG_TX66_CONTROL 0x83b0 +#define MDIO_WC_REG_RX66_CONTROL 0x83c0 +#define MDIO_WC_REG_RX66_SCW0 0x83c2 +#define MDIO_WC_REG_RX66_SCW1 0x83c3 +#define MDIO_WC_REG_RX66_SCW2 0x83c4 +#define MDIO_WC_REG_RX66_SCW3 0x83c5 +#define MDIO_WC_REG_RX66_SCW0_MASK 0x83c6 +#define MDIO_WC_REG_RX66_SCW1_MASK 0x83c7 +#define MDIO_WC_REG_RX66_SCW2_MASK 0x83c8 +#define MDIO_WC_REG_RX66_SCW3_MASK 0x83c9 +#define MDIO_WC_REG_FX100_CTRL1 0x8400 +#define MDIO_WC_REG_FX100_CTRL3 0x8402 +#define MDIO_WC_REG_CL82_USERB1_TX_CTRL5 0x8436 +#define MDIO_WC_REG_CL82_USERB1_TX_CTRL6 0x8437 +#define MDIO_WC_REG_CL82_USERB1_TX_CTRL7 0x8438 +#define MDIO_WC_REG_CL82_USERB1_TX_CTRL9 0x8439 +#define MDIO_WC_REG_CL82_USERB1_RX_CTRL10 0x843a +#define MDIO_WC_REG_CL82_USERB1_RX_CTRL11 0x843b +#define MDIO_WC_REG_ETA_CL73_OUI1 0x8453 +#define MDIO_WC_REG_ETA_CL73_OUI2 0x8454 +#define MDIO_WC_REG_ETA_CL73_OUI3 0x8455 +#define MDIO_WC_REG_ETA_CL73_LD_BAM_CODE 0x8456 +#define MDIO_WC_REG_ETA_CL73_LD_UD_CODE 0x8457 +#define MDIO_WC_REG_MICROBLK_CMD 0xffc2 +#define MDIO_WC_REG_MICROBLK_DL_STATUS 0xffc5 +#define MDIO_WC_REG_MICROBLK_CMD3 0xffcc + +#define MDIO_WC_REG_AERBLK_AER 0xffde +#define 
MDIO_WC_REG_COMBO_IEEE0_MIICTRL 0xffe0 +#define MDIO_WC_REG_COMBO_IEEE0_MIIISTAT 0xffe1 + +#define MDIO_WC0_XGXS_BLK2_LANE_RESET 0x810A +#define MDIO_WC0_XGXS_BLK2_LANE_RESET_RX_BITSHIFT 0 +#define MDIO_WC0_XGXS_BLK2_LANE_RESET_TX_BITSHIFT 4 + +#define MDIO_WC0_XGXS_BLK6_XGXS_X2_CONTROL2 0x8141 + +#define DIGITAL5_ACTUAL_SPEED_TX_MASK 0x003f + +/* 54618se */ +#define MDIO_REG_GPHY_MII_STATUS 0x1 +#define MDIO_REG_GPHY_PHYID_LSB 0x3 +#define MDIO_REG_GPHY_CL45_ADDR_REG 0xd +#define MDIO_REG_GPHY_CL45_REG_WRITE 0x4000 +#define MDIO_REG_GPHY_CL45_REG_READ 0xc000 +#define MDIO_REG_GPHY_CL45_DATA_REG 0xe +#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e +#define MDIO_REG_GPHY_EXP_ACCESS_GATE 0x15 +#define MDIO_REG_GPHY_EXP_ACCESS 0x17 +#define MDIO_REG_GPHY_EXP_ACCESS_TOP 0xd00 +#define MDIO_REG_GPHY_EXP_TOP_2K_BUF 0x40 +#define MDIO_REG_GPHY_AUX_STATUS 0x19 +#define MDIO_REG_INTR_STATUS 0x1a +#define MDIO_REG_INTR_MASK 0x1b +#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1) +#define MDIO_REG_GPHY_SHADOW 0x1c +#define MDIO_REG_GPHY_SHADOW_LED_SEL1 (0x0d << 10) +#define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10) +#define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15) +#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10) +#define MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD (0x1 << 8) + + +#define IGU_FUNC_BASE 0x0400 + +#define IGU_ADDR_MSIX 0x0000 +#define IGU_ADDR_INT_ACK 0x0200 +#define IGU_ADDR_PROD_UPD 0x0201 +#define IGU_ADDR_ATTN_BITS_UPD 0x0202 +#define IGU_ADDR_ATTN_BITS_SET 0x0203 +#define IGU_ADDR_ATTN_BITS_CLR 0x0204 +#define IGU_ADDR_COALESCE_NOW 0x0205 +#define IGU_ADDR_SIMD_MASK 0x0206 +#define IGU_ADDR_SIMD_NOMASK 0x0207 +#define IGU_ADDR_MSI_CTL 0x0210 +#define IGU_ADDR_MSI_ADDR_LO 0x0211 +#define IGU_ADDR_MSI_ADDR_HI 0x0212 +#define IGU_ADDR_MSI_DATA 0x0213 + + +#define IGU_USE_REGISTER_ustorm_type_0_sb_cleanup 0 +#define IGU_USE_REGISTER_ustorm_type_1_sb_cleanup 1 +#define IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 2 +#define IGU_USE_REGISTER_cstorm_type_1_sb_cleanup 3 + +#define COMMAND_REG_INT_ACK 0x0 +#define COMMAND_REG_PROD_UPD 0x4 +#define COMMAND_REG_ATTN_BITS_UPD 0x8 +#define COMMAND_REG_ATTN_BITS_SET 0xc +#define COMMAND_REG_ATTN_BITS_CLR 0x10 +#define COMMAND_REG_COALESCE_NOW 0x14 +#define COMMAND_REG_SIMD_MASK 0x18 +#define COMMAND_REG_SIMD_NOMASK 0x1c + + +#define IGU_MEM_BASE 0x0000 + +#define IGU_MEM_MSIX_BASE 0x0000 +#define IGU_MEM_MSIX_UPPER 0x007f +#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff + +#define IGU_MEM_PBA_MSIX_BASE 0x0200 +#define IGU_MEM_PBA_MSIX_UPPER 0x0200 + +#define IGU_CMD_BACKWARD_COMP_PROD_UPD 0x0201 +#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff + +#define IGU_CMD_INT_ACK_BASE 0x0400 +#define IGU_CMD_INT_ACK_UPPER \ + (IGU_CMD_INT_ACK_BASE + MAX_SB_PER_PATH - 1) +#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x04ff + +#define IGU_CMD_E2_PROD_UPD_BASE 0x0500 +#define IGU_CMD_E2_PROD_UPD_UPPER \ + (IGU_CMD_E2_PROD_UPD_BASE + MAX_SB_PER_PATH - 1) +#define IGU_CMD_E2_PROD_UPD_RESERVED_UPPER 0x059f + +#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05a0 +#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05a1 +#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05a2 + +#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05a3 +#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05a4 +#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05a5 +#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05a6 + + +#define IGU_REG_RESERVED_UPPER 0x05ff + +#define IGU_SEG_IDX_ATTN 2 +#define IGU_SEG_IDX_DEFAULT 1 +/* Fields of IGU PF CONFIGRATION REGISTER */ +#define IGU_PF_CONF_FUNC_EN (0x1<<0) /* function enable */ +#define IGU_PF_CONF_MSI_MSIX_EN (0x1<<1) /* 
MSI/MSIX enable */ +#define IGU_PF_CONF_INT_LINE_EN (0x1<<2) /* INT enable */ +#define IGU_PF_CONF_ATTN_BIT_EN (0x1<<3) /* attention enable */ +#define IGU_PF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */ +#define IGU_PF_CONF_SIMD_MODE (0x1<<5) /* simd all ones mode */ + +/* Fields of IGU VF CONFIGRATION REGISTER */ +#define IGU_VF_CONF_FUNC_EN (0x1<<0) /* function enable */ +#define IGU_VF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */ +#define IGU_VF_CONF_PARENT_MASK (0x3<<2) /* Parent PF */ +#define IGU_VF_CONF_PARENT_SHIFT 2 /* Parent PF */ +#define IGU_VF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */ + + +#define IGU_BC_DSB_NUM_SEGS 5 +#define IGU_BC_NDSB_NUM_SEGS 2 +#define IGU_NORM_DSB_NUM_SEGS 2 +#define IGU_NORM_NDSB_NUM_SEGS 1 +#define IGU_BC_BASE_DSB_PROD 128 +#define IGU_NORM_BASE_DSB_PROD 136 + + /* FID (if VF - [6] = 0; [5:0] = VF number; if PF - [6] = 1; \ + [5:2] = 0; [1:0] = PF number) */ +#define IGU_FID_ENCODE_IS_PF (0x1<<6) +#define IGU_FID_ENCODE_IS_PF_SHIFT 6 +#define IGU_FID_VF_NUM_MASK (0x3f) +#define IGU_FID_PF_NUM_MASK (0x7) + +#define IGU_REG_MAPPING_MEMORY_VALID (1<<0) +#define IGU_REG_MAPPING_MEMORY_VECTOR_MASK (0x3F<<1) +#define IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT 1 +#define IGU_REG_MAPPING_MEMORY_FID_MASK (0x7F<<7) +#define IGU_REG_MAPPING_MEMORY_FID_SHIFT 7 + + +#define CDU_REGION_NUMBER_XCM_AG 2 +#define CDU_REGION_NUMBER_UCM_AG 4 + + +/* String-to-compress [31:8] = CID (all 24 bits) + * String-to-compress [7:4] = Region + * String-to-compress [3:0] = Type + */ +#define CDU_VALID_DATA(_cid, _region, _type) \ + (((_cid) << 8) | (((_region)&0xf)<<4) | (((_type)&0xf))) +#define CDU_CRC8(_cid, _region, _type) \ + (ecore_calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff)) +#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type) \ + (0x80 | ((CDU_CRC8(_cid, _region, _type)) & 0x7f)) +#define CDU_RSRVD_VALUE_TYPE_B(_crc, _type) \ + (0x80 | ((_type)&0xf << 3) | ((CDU_CRC8(_cid, _region, _type)) & 0x7)) +#define CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(_val) ((_val) & ~0x80) + +#endif /* ECORE_REG_H */ diff --git a/drivers/net/bnx2x/ecore_sp.c b/drivers/net/bnx2x/ecore_sp.c new file mode 100644 index 00000000..e6fecd88 --- /dev/null +++ b/drivers/net/bnx2x/ecore_sp.c @@ -0,0 +1,5430 @@ +/*- + * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * + * Eric Davis <edavis@broadcom.com> + * David Christensen <davidch@broadcom.com> + * Gary Zambrano <zambrano@broadcom.com> + * + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.bnx2x_pmd for copyright and licensing details. 
+ */ + +#include "bnx2x.h" +#include "ecore_init.h" + +/**** Exe Queue interfaces ****/ + +/** + * ecore_exe_queue_init - init the Exe Queue object + * + * @o: pointer to the object + * @exe_len: length + * @owner: pointer to the owner + * @validate: validate function pointer + * @optimize: optimize function pointer + * @exec: execute function pointer + * @get: get function pointer + */ +static void +ecore_exe_queue_init(struct bnx2x_softc *sc __rte_unused, + struct ecore_exe_queue_obj *o, + int exe_len, + union ecore_qable_obj *owner, + exe_q_validate validate, + exe_q_remove remove, + exe_q_optimize optimize, exe_q_execute exec, exe_q_get get) +{ + ECORE_MEMSET(o, 0, sizeof(*o)); + + ECORE_LIST_INIT(&o->exe_queue); + ECORE_LIST_INIT(&o->pending_comp); + + ECORE_SPIN_LOCK_INIT(&o->lock, sc); + + o->exe_chunk_len = exe_len; + o->owner = owner; + + /* Owner specific callbacks */ + o->validate = validate; + o->remove = remove; + o->optimize = optimize; + o->execute = exec; + o->get = get; + + ECORE_MSG("Setup the execution queue with the chunk length of %d", + exe_len); +} + +static void ecore_exe_queue_free_elem(struct bnx2x_softc *sc __rte_unused, + struct ecore_exeq_elem *elem) +{ + ECORE_MSG("Deleting an exe_queue element"); + ECORE_FREE(sc, elem, sizeof(*elem)); +} + +static inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o) +{ + struct ecore_exeq_elem *elem; + int cnt = 0; + + ECORE_SPIN_LOCK_BH(&o->lock); + + ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link, + struct ecore_exeq_elem) cnt++; + + ECORE_SPIN_UNLOCK_BH(&o->lock); + + return cnt; +} + +/** + * ecore_exe_queue_add - add a new element to the execution queue + * + * @sc: driver handle + * @o: queue + * @cmd: new command to add + * @restore: true - do not optimize the command + * + * If the element is optimized or is illegal, frees it. + */ +static int ecore_exe_queue_add(struct bnx2x_softc *sc, + struct ecore_exe_queue_obj *o, + struct ecore_exeq_elem *elem, int restore) +{ + int rc; + + ECORE_SPIN_LOCK_BH(&o->lock); + + if (!restore) { + /* Try to cancel this element queue */ + rc = o->optimize(sc, o->owner, elem); + if (rc) + goto free_and_exit; + + /* Check if this request is ok */ + rc = o->validate(sc, o->owner, elem); + if (rc) { + ECORE_MSG("Preamble failed: %d", rc); + goto free_and_exit; + } + } + + /* If so, add it to the execution queue */ + ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue); + + ECORE_SPIN_UNLOCK_BH(&o->lock); + + return ECORE_SUCCESS; + +free_and_exit: + ecore_exe_queue_free_elem(sc, elem); + + ECORE_SPIN_UNLOCK_BH(&o->lock); + + return rc; +} + +static void __ecore_exe_queue_reset_pending(struct bnx2x_softc *sc, struct ecore_exe_queue_obj + *o) +{ + struct ecore_exeq_elem *elem; + + while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) { + elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp, + struct ecore_exeq_elem, link); + + ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp); + ecore_exe_queue_free_elem(sc, elem); + } +} + +static inline void ecore_exe_queue_reset_pending(struct bnx2x_softc *sc, + struct ecore_exe_queue_obj *o) +{ + ECORE_SPIN_LOCK_BH(&o->lock); + + __ecore_exe_queue_reset_pending(sc, o); + + ECORE_SPIN_UNLOCK_BH(&o->lock); +} + +/** + * ecore_exe_queue_step - execute one execution chunk atomically + * + * @sc: driver handle + * @o: queue + * @ramrod_flags: flags + * + * (Should be called while holding the exe_queue->lock). 
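+ *
+ * Returns ECORE_PENDING while a previously submitted chunk is still
+ * outstanding, ECORE_SUCCESS when there was nothing to submit, or the
+ * value returned by the owner's execute() callback for the chunk that
+ * has just been built.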
+ */ +static int ecore_exe_queue_step(struct bnx2x_softc *sc, + struct ecore_exe_queue_obj *o, + unsigned long *ramrod_flags) +{ + struct ecore_exeq_elem *elem, spacer; + int cur_len = 0, rc; + + ECORE_MEMSET(&spacer, 0, sizeof(spacer)); + + /* Next step should not be performed until the current is finished, + * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to + * properly clear object internals without sending any command to the FW + * which also implies there won't be any completion to clear the + * 'pending' list. + */ + if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) { + if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { + ECORE_MSG + ("RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list"); + __ecore_exe_queue_reset_pending(sc, o); + } else { + return ECORE_PENDING; + } + } + + /* Run through the pending commands list and create a next + * execution chunk. + */ + while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) { + elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue, + struct ecore_exeq_elem, link); + ECORE_DBG_BREAK_IF(!elem->cmd_len); + + if (cur_len + elem->cmd_len <= o->exe_chunk_len) { + cur_len += elem->cmd_len; + /* Prevent from both lists being empty when moving an + * element. This will allow the call of + * ecore_exe_queue_empty() without locking. + */ + ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp); + mb(); + ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue); + ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp); + ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp); + } else + break; + } + + /* Sanity check */ + if (!cur_len) + return ECORE_SUCCESS; + + rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags); + if (rc < 0) + /* In case of an error return the commands back to the queue + * and reset the pending_comp. + */ + ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue); + else if (!rc) + /* If zero is returned, means there are no outstanding pending + * completions and we may dismiss the pending list. + */ + __ecore_exe_queue_reset_pending(sc, o); + + return rc; +} + +static inline int ecore_exe_queue_empty(struct ecore_exe_queue_obj *o) +{ + int empty = ECORE_LIST_IS_EMPTY(&o->exe_queue); + + /* Don't reorder!!! */ + mb(); + + return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp); +} + +static struct ecore_exeq_elem *ecore_exe_queue_alloc_elem(struct + bnx2x_softc *sc + __rte_unused) +{ + ECORE_MSG("Allocating a new exe_queue element"); + return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC, sc); +} + +/************************ raw_obj functions ***********************************/ +static int ecore_raw_check_pending(struct ecore_raw_obj *o) +{ + /* + * !! converts the value returned by ECORE_TEST_BIT such that it + * is guaranteed not to be truncated regardless of int definition. + * + * Note we cannot simply define the function's return value type + * to match the type returned by ECORE_TEST_BIT, as it varies by + * platform/implementation. + */ + + return ! 
!ECORE_TEST_BIT(o->state, o->pstate); +} + +static void ecore_raw_clear_pending(struct ecore_raw_obj *o) +{ + ECORE_SMP_MB_BEFORE_CLEAR_BIT(); + ECORE_CLEAR_BIT(o->state, o->pstate); + ECORE_SMP_MB_AFTER_CLEAR_BIT(); +} + +static void ecore_raw_set_pending(struct ecore_raw_obj *o) +{ + ECORE_SMP_MB_BEFORE_CLEAR_BIT(); + ECORE_SET_BIT(o->state, o->pstate); + ECORE_SMP_MB_AFTER_CLEAR_BIT(); +} + +/** + * ecore_state_wait - wait until the given bit(state) is cleared + * + * @sc: device handle + * @state: state which is to be cleared + * @state_p: state buffer + * + */ +static int ecore_state_wait(struct bnx2x_softc *sc, int state, + unsigned long *pstate) +{ + /* can take a while if any port is running */ + int cnt = 5000; + + if (CHIP_REV_IS_EMUL(sc)) + cnt *= 20; + + ECORE_MSG("waiting for state to become %d", state); + + ECORE_MIGHT_SLEEP(); + while (cnt--) { + bnx2x_intr_legacy(sc, 1); + if (!ECORE_TEST_BIT(state, pstate)) { +#ifdef ECORE_STOP_ON_ERROR + ECORE_MSG("exit (cnt %d)", 5000 - cnt); +#endif + return ECORE_SUCCESS; + } + + ECORE_WAIT(sc, delay_us); + + if (sc->panic) + return ECORE_IO; + } + + /* timeout! */ + PMD_DRV_LOG(ERR, "timeout waiting for state %d", state); +#ifdef ECORE_STOP_ON_ERROR + ecore_panic(); +#endif + + return ECORE_TIMEOUT; +} + +static int ecore_raw_wait(struct bnx2x_softc *sc, struct ecore_raw_obj *raw) +{ + return ecore_state_wait(sc, raw->state, raw->pstate); +} + +/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ +/* credit handling callbacks */ +static int ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset) +{ + struct ecore_credit_pool_obj *mp = o->macs_pool; + + ECORE_DBG_BREAK_IF(!mp); + + return mp->get_entry(mp, offset); +} + +static int ecore_get_credit_mac(struct ecore_vlan_mac_obj *o) +{ + struct ecore_credit_pool_obj *mp = o->macs_pool; + + ECORE_DBG_BREAK_IF(!mp); + + return mp->get(mp, 1); +} + +static int ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset) +{ + struct ecore_credit_pool_obj *mp = o->macs_pool; + + return mp->put_entry(mp, offset); +} + +static int ecore_put_credit_mac(struct ecore_vlan_mac_obj *o) +{ + struct ecore_credit_pool_obj *mp = o->macs_pool; + + return mp->put(mp, 1); +} + +/** + * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on vlan mac + * head list. + * + * @sc: device handle + * @o: vlan_mac object + * + * @details: Non-blocking implementation; should be called under execution + * queue lock. + */ +static int __ecore_vlan_mac_h_write_trylock(struct bnx2x_softc *sc __rte_unused, + struct ecore_vlan_mac_obj *o) +{ + if (o->head_reader) { + ECORE_MSG("vlan_mac_lock writer - There are readers; Busy"); + return ECORE_BUSY; + } + + ECORE_MSG("vlan_mac_lock writer - Taken"); + return ECORE_SUCCESS; +} + +/** + * __ecore_vlan_mac_h_exec_pending - execute step instead of a previous step + * which wasn't able to run due to a taken lock on vlan mac head list. + * + * @sc: device handle + * @o: vlan_mac object + * + * @details Should be called under execution queue lock; notice it might release + * and reclaim it during its run. 
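+ *
+ * The pending request replayed here is the one recorded by
+ * __ecore_vlan_mac_h_pend(): head_exe_request and saved_ramrod_flags are
+ * cleared first, and the saved flags are then fed back into
+ * ecore_exe_queue_step().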
+ */ +static void __ecore_vlan_mac_h_exec_pending(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o) +{ + int rc; + unsigned long ramrod_flags = o->saved_ramrod_flags; + + ECORE_MSG("vlan_mac_lock execute pending command with ramrod flags %lu", + ramrod_flags); + o->head_exe_request = FALSE; + o->saved_ramrod_flags = 0; + rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags); + if (rc != ECORE_SUCCESS) { + PMD_DRV_LOG(ERR, + "execution of pending commands failed with rc %d", + rc); +#ifdef ECORE_STOP_ON_ERROR + ecore_panic(); +#endif + } +} + +/** + * __ecore_vlan_mac_h_pend - Pend an execution step which couldn't have been + * called due to vlan mac head list lock being taken. + * + * @sc: device handle + * @o: vlan_mac object + * @ramrod_flags: ramrod flags of missed execution + * + * @details Should be called under execution queue lock. + */ +static void __ecore_vlan_mac_h_pend(struct bnx2x_softc *sc __rte_unused, + struct ecore_vlan_mac_obj *o, + unsigned long ramrod_flags) +{ + o->head_exe_request = TRUE; + o->saved_ramrod_flags = ramrod_flags; + ECORE_MSG("Placing pending execution with ramrod flags %lu", + ramrod_flags); +} + +/** + * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock + * + * @sc: device handle + * @o: vlan_mac object + * + * @details Should be called under execution queue lock. Notice if a pending + * execution exists, it would perform it - possibly releasing and + * reclaiming the execution queue lock. + */ +static void __ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o) +{ + /* It's possible a new pending execution was added since this writer + * executed. If so, execute again. [Ad infinitum] + */ + while (o->head_exe_request) { + ECORE_MSG + ("vlan_mac_lock - writer release encountered a pending request"); + __ecore_vlan_mac_h_exec_pending(sc, o); + } +} + +/** + * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock + * + * @sc: device handle + * @o: vlan_mac object + * + * @details Notice if a pending execution exists, it would perform it - + * possibly releasing and reclaiming the execution queue lock. + */ +void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o) +{ + ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); + __ecore_vlan_mac_h_write_unlock(sc, o); + ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); +} + +/** + * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock + * + * @sc: device handle + * @o: vlan_mac object + * + * @details Should be called under the execution queue lock. May sleep. May + * release and reclaim execution queue lock during its run. + */ +static int __ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc __rte_unused, + struct ecore_vlan_mac_obj *o) +{ + /* If we got here, we're holding lock --> no WRITER exists */ + o->head_reader++; + ECORE_MSG("vlan_mac_lock - locked reader - number %d", o->head_reader); + + return ECORE_SUCCESS; +} + +/** + * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock + * + * @sc: device handle + * @o: vlan_mac object + * + * @details May sleep. Claims and releases execution queue lock during its run. 
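+ *
+ * A rough sketch of the expected calling pattern (compare
+ * ecore_get_n_elements() and ecore_vlan_mac_del_all() below):
+ *
+ *	if (ecore_vlan_mac_h_read_lock(sc, o) == ECORE_SUCCESS) {
+ *		... walk o->head ...
+ *		ecore_vlan_mac_h_read_unlock(sc, o);
+ *	}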
+ */ +static int ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o) +{ + int rc; + + ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); + rc = __ecore_vlan_mac_h_read_lock(sc, o); + ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); + + return rc; +} + +/** + * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock + * + * @sc: device handle + * @o: vlan_mac object + * + * @details Should be called under execution queue lock. Notice if a pending + * execution exists, it would be performed if this was the last + * reader. possibly releasing and reclaiming the execution queue lock. + */ +static void __ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o) +{ + if (!o->head_reader) { + PMD_DRV_LOG(ERR, + "Need to release vlan mac reader lock, but lock isn't taken"); +#ifdef ECORE_STOP_ON_ERROR + ecore_panic(); +#endif + } else { + o->head_reader--; + PMD_DRV_LOG(INFO, + "vlan_mac_lock - decreased readers to %d", + o->head_reader); + } + + /* It's possible a new pending execution was added, and that this reader + * was last - if so we need to execute the command. + */ + if (!o->head_reader && o->head_exe_request) { + PMD_DRV_LOG(INFO, + "vlan_mac_lock - reader release encountered a pending request"); + + /* Writer release will do the trick */ + __ecore_vlan_mac_h_write_unlock(sc, o); + } +} + +/** + * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock + * + * @sc: device handle + * @o: vlan_mac object + * + * @details Notice if a pending execution exists, it would be performed if this + * was the last reader. Claims and releases the execution queue lock + * during its run. + */ +void ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o) +{ + ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); + __ecore_vlan_mac_h_read_unlock(sc, o); + ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); +} + +/** + * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock + * + * @sc: device handle + * @o: vlan_mac object + * @n: number of elements to get + * @base: base address for element placement + * @stride: stride between elements (in bytes) + */ +static int ecore_get_n_elements(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, int n, + uint8_t * base, uint8_t stride, uint8_t size) +{ + struct ecore_vlan_mac_registry_elem *pos; + uint8_t *next = base; + int counter = 0, read_lock; + + ECORE_MSG("get_n_elements - taking vlan_mac_lock (reader)"); + read_lock = ecore_vlan_mac_h_read_lock(sc, o); + if (read_lock != ECORE_SUCCESS) + PMD_DRV_LOG(ERR, + "get_n_elements failed to get vlan mac reader lock; Access without lock"); + + /* traverse list */ + ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, + struct ecore_vlan_mac_registry_elem) { + if (counter < n) { + ECORE_MEMCPY(next, &pos->u, size); + counter++; + ECORE_MSG + ("copied element number %d to address %p element was:", + counter, next); + next += stride + size; + } + } + + if (read_lock == ECORE_SUCCESS) { + ECORE_MSG("get_n_elements - releasing vlan_mac_lock (reader)"); + ecore_vlan_mac_h_read_unlock(sc, o); + } + + return counter * ETH_ALEN; +} + +/* check_add() callbacks */ +static int ecore_check_mac_add(struct bnx2x_softc *sc __rte_unused, + struct ecore_vlan_mac_obj *o, + union ecore_classification_ramrod_data *data) +{ + struct ecore_vlan_mac_registry_elem *pos; + + ECORE_MSG("Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command", + data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], + data->mac.mac[3], data->mac.mac[4], 
data->mac.mac[5]); + + if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac)) + return ECORE_INVAL; + + /* Check if a requested MAC already exists */ + ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, + struct ecore_vlan_mac_registry_elem) + if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) && + (data->mac.is_inner_mac == pos->u.mac.is_inner_mac)) + return ECORE_EXISTS; + + return ECORE_SUCCESS; +} + +/* check_del() callbacks */ +static struct ecore_vlan_mac_registry_elem *ecore_check_mac_del(struct bnx2x_softc + *sc + __rte_unused, + struct + ecore_vlan_mac_obj + *o, union + ecore_classification_ramrod_data + *data) +{ + struct ecore_vlan_mac_registry_elem *pos; + + ECORE_MSG("Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command", + data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], + data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]); + + ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, + struct ecore_vlan_mac_registry_elem) + if ((!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) && + (data->mac.is_inner_mac == pos->u.mac.is_inner_mac)) + return pos; + + return NULL; +} + +/* check_move() callback */ +static int ecore_check_move(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *src_o, + struct ecore_vlan_mac_obj *dst_o, + union ecore_classification_ramrod_data *data) +{ + struct ecore_vlan_mac_registry_elem *pos; + int rc; + + /* Check if we can delete the requested configuration from the first + * object. + */ + pos = src_o->check_del(sc, src_o, data); + + /* check if configuration can be added */ + rc = dst_o->check_add(sc, dst_o, data); + + /* If this classification can not be added (is already set) + * or can't be deleted - return an error. + */ + if (rc || !pos) + return FALSE; + + return TRUE; +} + +static int ecore_check_move_always_err(__rte_unused struct bnx2x_softc *sc, + __rte_unused struct ecore_vlan_mac_obj + *src_o, __rte_unused struct ecore_vlan_mac_obj + *dst_o, __rte_unused union + ecore_classification_ramrod_data *data) +{ + return FALSE; +} + +static uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj + *o) +{ + struct ecore_raw_obj *raw = &o->raw; + uint8_t rx_tx_flag = 0; + + if ((raw->obj_type == ECORE_OBJ_TYPE_TX) || + (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) + rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD; + + if ((raw->obj_type == ECORE_OBJ_TYPE_RX) || + (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) + rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD; + + return rx_tx_flag; +} + +static void ecore_set_mac_in_nig(struct bnx2x_softc *sc, + int add, unsigned char *dev_addr, int index) +{ + uint32_t wb_data[2]; + uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM : + NIG_REG_LLH0_FUNC_MEM; + + if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc)) + return; + + if (index > ECORE_LLH_CAM_MAX_PF_LINE) + return; + + ECORE_MSG("Going to %s LLH configuration at entry %d", + (add ? "ADD" : "DELETE"), index); + + if (add) { + /* LLH_FUNC_MEM is a uint64_t WB register */ + reg_offset += 8 * index; + + wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) | + (dev_addr[4] << 8) | dev_addr[5]); + wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]); + + ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2); + } + + REG_WR(sc, (ECORE_PORT_ID(sc) ? 
NIG_REG_LLH1_FUNC_MEM_ENABLE : + NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4 * index, add); +} + +/** + * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod + * + * @sc: device handle + * @o: queue for which we want to configure this rule + * @add: if TRUE the command is an ADD command, DEL otherwise + * @opcode: CLASSIFY_RULE_OPCODE_XXX + * @hdr: pointer to a header to setup + * + */ +static void ecore_vlan_mac_set_cmd_hdr_e2(struct ecore_vlan_mac_obj *o, + int add, int opcode, + struct eth_classify_cmd_header + *hdr) +{ + struct ecore_raw_obj *raw = &o->raw; + + hdr->client_id = raw->cl_id; + hdr->func_id = raw->func_id; + + /* Rx or/and Tx (internal switching) configuration ? */ + hdr->cmd_general_data |= ecore_vlan_mac_get_rx_tx_flag(o); + + if (add) + hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD; + + hdr->cmd_general_data |= + (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT); +} + +/** + * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header + * + * @cid: connection id + * @type: ECORE_FILTER_XXX_PENDING + * @hdr: pointer to header to setup + * @rule_cnt: + * + * currently we always configure one rule and echo field to contain a CID and an + * opcode type. + */ +static void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type, struct eth_classify_header + *hdr, int rule_cnt) +{ + hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) | + (type << ECORE_SWCID_SHIFT)); + hdr->rule_cnt = (uint8_t) rule_cnt; +} + +/* hw_config() callbacks */ +static void ecore_set_one_mac_e2(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, + struct ecore_exeq_elem *elem, int rule_idx, + __rte_unused int cam_offset) +{ + struct ecore_raw_obj *raw = &o->raw; + struct eth_classify_rules_ramrod_data *data = + (struct eth_classify_rules_ramrod_data *)(raw->rdata); + int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd; + union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; + int add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE; + unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; + uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac; + + /* Set LLH CAM entry: currently only iSCSI and ETH macs are + * relevant. In addition, current implementation is tuned for a + * single ETH MAC. + * + * When multiple unicast ETH MACs PF configuration in switch + * independent mode is required (NetQ, multiple netdev MACs, + * etc.), consider better utilisation of 8 per function MAC + * entries in the LLH register. There is also + * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the + * total number of CAM entries to 16. + * + * Currently we won't configure NIG for MACs other than a primary ETH + * MAC and iSCSI L2 MAC. + * + * If this MAC is moving from one Queue to another, no need to change + * NIG configuration. + */ + if (cmd != ECORE_VLAN_MAC_MOVE) { + if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags)) + ecore_set_mac_in_nig(sc, add, mac, + ECORE_LLH_CAM_ISCSI_ETH_LINE); + else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags)) + ecore_set_mac_in_nig(sc, add, mac, + ECORE_LLH_CAM_ETH_LINE); + } + + /* Reset the ramrod data buffer for the first rule */ + if (rule_idx == 0) + ECORE_MEMSET(data, 0, sizeof(*data)); + + /* Setup a command header */ + ecore_vlan_mac_set_cmd_hdr_e2(o, add, CLASSIFY_RULE_OPCODE_MAC, + &rule_entry->mac.header); + + ECORE_MSG("About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d", + (add ? 
"add" : "delete"), mac[0], mac[1], mac[2], mac[3], + mac[4], mac[5], raw->cl_id); + + /* Set a MAC itself */ + ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb, + &rule_entry->mac.mac_mid, + &rule_entry->mac.mac_lsb, mac); + rule_entry->mac.inner_mac = elem->cmd_data.vlan_mac.u.mac.is_inner_mac; + + /* MOVE: Add a rule that will add this MAC to the target Queue */ + if (cmd == ECORE_VLAN_MAC_MOVE) { + rule_entry++; + rule_cnt++; + + /* Setup ramrod data */ + ecore_vlan_mac_set_cmd_hdr_e2(elem->cmd_data. + vlan_mac.target_obj, TRUE, + CLASSIFY_RULE_OPCODE_MAC, + &rule_entry->mac.header); + + /* Set a MAC itself */ + ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb, + &rule_entry->mac.mac_mid, + &rule_entry->mac.mac_lsb, mac); + rule_entry->mac.inner_mac = + elem->cmd_data.vlan_mac.u.mac.is_inner_mac; + } + + /* Set the ramrod data header */ + ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, + rule_cnt); +} + +/** + * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod + * + * @sc: device handle + * @o: queue + * @type: + * @cam_offset: offset in cam memory + * @hdr: pointer to a header to setup + * + * E1H + */ +static void ecore_vlan_mac_set_rdata_hdr_e1x(struct ecore_vlan_mac_obj + *o, int type, int cam_offset, struct mac_configuration_hdr + *hdr) +{ + struct ecore_raw_obj *r = &o->raw; + + hdr->length = 1; + hdr->offset = (uint8_t) cam_offset; + hdr->client_id = ECORE_CPU_TO_LE16(0xff); + hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) | + (type << ECORE_SWCID_SHIFT)); +} + +static void ecore_vlan_mac_set_cfg_entry_e1x(struct ecore_vlan_mac_obj + *o, int add, int opcode, + uint8_t * mac, + uint16_t vlan_id, struct + mac_configuration_entry + *cfg_entry) +{ + struct ecore_raw_obj *r = &o->raw; + uint32_t cl_bit_vec = (1 << r->cl_id); + + cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec); + cfg_entry->pf_id = r->func_id; + cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id); + + if (add) { + ECORE_SET_FLAG(cfg_entry->flags, + MAC_CONFIGURATION_ENTRY_ACTION_TYPE, + T_ETH_MAC_COMMAND_SET); + ECORE_SET_FLAG(cfg_entry->flags, + MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, + opcode); + + /* Set a MAC in a ramrod data */ + ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr, + &cfg_entry->middle_mac_addr, + &cfg_entry->lsb_mac_addr, mac); + } else + ECORE_SET_FLAG(cfg_entry->flags, + MAC_CONFIGURATION_ENTRY_ACTION_TYPE, + T_ETH_MAC_COMMAND_INVALIDATE); +} + +static void ecore_vlan_mac_set_rdata_e1x(struct bnx2x_softc *sc + __rte_unused, + struct ecore_vlan_mac_obj *o, + int type, int cam_offset, + int add, uint8_t * mac, + uint16_t vlan_id, int opcode, + struct mac_configuration_cmd + *config) +{ + struct mac_configuration_entry *cfg_entry = &config->config_table[0]; + + ecore_vlan_mac_set_rdata_hdr_e1x(o, type, cam_offset, &config->hdr); + ecore_vlan_mac_set_cfg_entry_e1x(o, add, opcode, mac, vlan_id, + cfg_entry); + + ECORE_MSG("%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d", + (add ? 
"setting" : "clearing"), + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], + o->raw.cl_id, cam_offset); +} + +/** + * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data + * + * @sc: device handle + * @o: ecore_vlan_mac_obj + * @elem: ecore_exeq_elem + * @rule_idx: rule_idx + * @cam_offset: cam_offset + */ +static void ecore_set_one_mac_e1x(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, + struct ecore_exeq_elem *elem, + __rte_unused int rule_idx, int cam_offset) +{ + struct ecore_raw_obj *raw = &o->raw; + struct mac_configuration_cmd *config = + (struct mac_configuration_cmd *)(raw->rdata); + /* 57711 do not support MOVE command, + * so it's either ADD or DEL + */ + int add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ? + TRUE : FALSE; + + /* Reset the ramrod data buffer */ + ECORE_MEMSET(config, 0, sizeof(*config)); + + ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state, + cam_offset, add, + elem->cmd_data.vlan_mac.u.mac.mac, 0, + ETH_VLAN_FILTER_ANY_VLAN, config); +} + +/** + * ecore_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element + * + * @sc: device handle + * @p: command parameters + * @ppos: pointer to the cookie + * + * reconfigure next MAC/VLAN/VLAN-MAC element from the + * previously configured elements list. + * + * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken + * into an account + * + * pointer to the cookie - that should be given back in the next call to make + * function handle the next element. If *ppos is set to NULL it will restart the + * iterator. If returned *ppos == NULL this means that the last element has been + * handled. + * + */ +static int ecore_vlan_mac_restore(struct bnx2x_softc *sc, + struct ecore_vlan_mac_ramrod_params *p, + struct ecore_vlan_mac_registry_elem **ppos) +{ + struct ecore_vlan_mac_registry_elem *pos; + struct ecore_vlan_mac_obj *o = p->vlan_mac_obj; + + /* If list is empty - there is nothing to do here */ + if (ECORE_LIST_IS_EMPTY(&o->head)) { + *ppos = NULL; + return 0; + } + + /* make a step... */ + if (*ppos == NULL) + *ppos = ECORE_LIST_FIRST_ENTRY(&o->head, struct + ecore_vlan_mac_registry_elem, + link); + else + *ppos = ECORE_LIST_NEXT(*ppos, link, + struct ecore_vlan_mac_registry_elem); + + pos = *ppos; + + /* If it's the last step - return NULL */ + if (ECORE_LIST_IS_LAST(&pos->link, &o->head)) + *ppos = NULL; + + /* Prepare a 'user_req' */ + ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u)); + + /* Set the command */ + p->user_req.cmd = ECORE_VLAN_MAC_ADD; + + /* Set vlan_mac_flags */ + p->user_req.vlan_mac_flags = pos->vlan_mac_flags; + + /* Set a restore bit */ + ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags); + + return ecore_config_vlan_mac(sc, p); +} + +/* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a + * pointer to an element with a specific criteria and NULL if such an element + * hasn't been found. 
+ */ +static struct ecore_exeq_elem *ecore_exeq_get_mac(struct ecore_exe_queue_obj *o, + struct ecore_exeq_elem *elem) +{ + struct ecore_exeq_elem *pos; + struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac; + + /* Check pending for execution commands */ + ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link, + struct ecore_exeq_elem) + if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data, + sizeof(*data)) && + (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) + return pos; + + return NULL; +} + +/** + * ecore_validate_vlan_mac_add - check if an ADD command can be executed + * + * @sc: device handle + * @qo: ecore_qable_obj + * @elem: ecore_exeq_elem + * + * Checks that the requested configuration can be added. If yes and if + * requested, consume CAM credit. + * + * The 'validate' is run after the 'optimize'. + * + */ +static int ecore_validate_vlan_mac_add(struct bnx2x_softc *sc, + union ecore_qable_obj *qo, + struct ecore_exeq_elem *elem) +{ + struct ecore_vlan_mac_obj *o = &qo->vlan_mac; + struct ecore_exe_queue_obj *exeq = &o->exe_queue; + int rc; + + /* Check the registry */ + rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u); + if (rc) { + ECORE_MSG + ("ADD command is not allowed considering current registry state."); + return rc; + } + + /* Check if there is a pending ADD command for this + * MAC/VLAN/VLAN-MAC. Return an error if there is. + */ + if (exeq->get(exeq, elem)) { + ECORE_MSG("There is a pending ADD command already"); + return ECORE_EXISTS; + } + + /* Consume the credit if not requested not to */ + if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, + &elem->cmd_data.vlan_mac.vlan_mac_flags) || + o->get_credit(o))) + return ECORE_INVAL; + + return ECORE_SUCCESS; +} + +/** + * ecore_validate_vlan_mac_del - check if the DEL command can be executed + * + * @sc: device handle + * @qo: quable object to check + * @elem: element that needs to be deleted + * + * Checks that the requested configuration can be deleted. If yes and if + * requested, returns a CAM credit. + * + * The 'validate' is run after the 'optimize'. + */ +static int ecore_validate_vlan_mac_del(struct bnx2x_softc *sc, + union ecore_qable_obj *qo, + struct ecore_exeq_elem *elem) +{ + struct ecore_vlan_mac_obj *o = &qo->vlan_mac; + struct ecore_vlan_mac_registry_elem *pos; + struct ecore_exe_queue_obj *exeq = &o->exe_queue; + struct ecore_exeq_elem query_elem; + + /* If this classification can not be deleted (doesn't exist) + * - return a ECORE_EXIST. + */ + pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u); + if (!pos) { + ECORE_MSG + ("DEL command is not allowed considering current registry state"); + return ECORE_EXISTS; + } + + /* Check if there are pending DEL or MOVE commands for this + * MAC/VLAN/VLAN-MAC. Return an error if so. 
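+	 * The MOVE check below probes the execution queue with a copy of this
+	 * element whose opcode is rewritten to ECORE_VLAN_MAC_MOVE; the DEL
+	 * check probes it with the element itself.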
+ */ + ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem)); + + /* Check for MOVE commands */ + query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE; + if (exeq->get(exeq, &query_elem)) { + PMD_DRV_LOG(ERR, "There is a pending MOVE command already"); + return ECORE_INVAL; + } + + /* Check for DEL commands */ + if (exeq->get(exeq, elem)) { + ECORE_MSG("There is a pending DEL command already"); + return ECORE_EXISTS; + } + + /* Return the credit to the credit pool if not requested not to */ + if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, + &elem->cmd_data.vlan_mac.vlan_mac_flags) || + o->put_credit(o))) { + PMD_DRV_LOG(ERR, "Failed to return a credit"); + return ECORE_INVAL; + } + + return ECORE_SUCCESS; +} + +/** + * ecore_validate_vlan_mac_move - check if the MOVE command can be executed + * + * @sc: device handle + * @qo: quable object to check (source) + * @elem: element that needs to be moved + * + * Checks that the requested configuration can be moved. If yes and if + * requested, returns a CAM credit. + * + * The 'validate' is run after the 'optimize'. + */ +static int ecore_validate_vlan_mac_move(struct bnx2x_softc *sc, + union ecore_qable_obj *qo, + struct ecore_exeq_elem *elem) +{ + struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac; + struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj; + struct ecore_exeq_elem query_elem; + struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue; + struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue; + + /* Check if we can perform this operation based on the current registry + * state. + */ + if (!src_o->check_move(sc, src_o, dest_o, &elem->cmd_data.vlan_mac.u)) { + ECORE_MSG + ("MOVE command is not allowed considering current registry state"); + return ECORE_INVAL; + } + + /* Check if there is an already pending DEL or MOVE command for the + * source object or ADD command for a destination object. Return an + * error if so. + */ + ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem)); + + /* Check DEL on source */ + query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL; + if (src_exeq->get(src_exeq, &query_elem)) { + PMD_DRV_LOG(ERR, + "There is a pending DEL command on the source queue already"); + return ECORE_INVAL; + } + + /* Check MOVE on source */ + if (src_exeq->get(src_exeq, elem)) { + ECORE_MSG("There is a pending MOVE command already"); + return ECORE_EXISTS; + } + + /* Check ADD on destination */ + query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD; + if (dest_exeq->get(dest_exeq, &query_elem)) { + PMD_DRV_LOG(ERR, + "There is a pending ADD command on the destination queue already"); + return ECORE_INVAL; + } + + /* Consume the credit if not requested not to */ + if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST, + &elem->cmd_data.vlan_mac.vlan_mac_flags) || + dest_o->get_credit(dest_o))) + return ECORE_INVAL; + + if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, + &elem->cmd_data.vlan_mac.vlan_mac_flags) || + src_o->put_credit(src_o))) { + /* return the credit taken from dest... 
*/ + dest_o->put_credit(dest_o); + return ECORE_INVAL; + } + + return ECORE_SUCCESS; +} + +static int ecore_validate_vlan_mac(struct bnx2x_softc *sc, + union ecore_qable_obj *qo, + struct ecore_exeq_elem *elem) +{ + switch (elem->cmd_data.vlan_mac.cmd) { + case ECORE_VLAN_MAC_ADD: + return ecore_validate_vlan_mac_add(sc, qo, elem); + case ECORE_VLAN_MAC_DEL: + return ecore_validate_vlan_mac_del(sc, qo, elem); + case ECORE_VLAN_MAC_MOVE: + return ecore_validate_vlan_mac_move(sc, qo, elem); + default: + return ECORE_INVAL; + } +} + +static int ecore_remove_vlan_mac(__rte_unused struct bnx2x_softc *sc, + union ecore_qable_obj *qo, + struct ecore_exeq_elem *elem) +{ + int rc = 0; + + /* If consumption wasn't required, nothing to do */ + if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, + &elem->cmd_data.vlan_mac.vlan_mac_flags)) + return ECORE_SUCCESS; + + switch (elem->cmd_data.vlan_mac.cmd) { + case ECORE_VLAN_MAC_ADD: + case ECORE_VLAN_MAC_MOVE: + rc = qo->vlan_mac.put_credit(&qo->vlan_mac); + break; + case ECORE_VLAN_MAC_DEL: + rc = qo->vlan_mac.get_credit(&qo->vlan_mac); + break; + default: + return ECORE_INVAL; + } + + if (rc != TRUE) + return ECORE_INVAL; + + return ECORE_SUCCESS; +} + +/** + * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes. + * + * @sc: device handle + * @o: ecore_vlan_mac_obj + * + */ +static int ecore_wait_vlan_mac(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o) +{ + int cnt = 5000, rc; + struct ecore_exe_queue_obj *exeq = &o->exe_queue; + struct ecore_raw_obj *raw = &o->raw; + + while (cnt--) { + /* Wait for the current command to complete */ + rc = raw->wait_comp(sc, raw); + if (rc) + return rc; + + /* Wait until there are no pending commands */ + if (!ecore_exe_queue_empty(exeq)) + ECORE_WAIT(sc, 1000); + else + return ECORE_SUCCESS; + } + + return ECORE_TIMEOUT; +} + +static int __ecore_vlan_mac_execute_step(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, + unsigned long *ramrod_flags) +{ + int rc = ECORE_SUCCESS; + + ECORE_SPIN_LOCK_BH(&o->exe_queue.lock); + + ECORE_MSG("vlan_mac_execute_step - trying to take writer lock"); + rc = __ecore_vlan_mac_h_write_trylock(sc, o); + + if (rc != ECORE_SUCCESS) { + __ecore_vlan_mac_h_pend(sc, o, *ramrod_flags); + + /** Calling function should not diffrentiate between this case + * and the case in which there is already a pending ramrod + */ + rc = ECORE_PENDING; + } else { + rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags); + } + ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock); + + return rc; +} + +/** + * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod + * + * @sc: device handle + * @o: ecore_vlan_mac_obj + * @cqe: + * @cont: if TRUE schedule next execution chunk + * + */ +static int ecore_complete_vlan_mac(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, + union event_ring_elem *cqe, + unsigned long *ramrod_flags) +{ + struct ecore_raw_obj *r = &o->raw; + int rc; + + /* Reset pending list */ + ecore_exe_queue_reset_pending(sc, &o->exe_queue); + + /* Clear pending */ + r->clear_pending(r); + + /* If ramrod failed this is most likely a SW bug */ + if (cqe->message.error) + return ECORE_INVAL; + + /* Run the next bulk of pending commands if requested */ + if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) { + rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags); + if (rc < 0) + return rc; + } + + /* If there is more work to do return PENDING */ + if (!ecore_exe_queue_empty(&o->exe_queue)) + return ECORE_PENDING; + + return ECORE_SUCCESS; +} + +/** + * 
ecore_optimize_vlan_mac - optimize ADD and DEL commands. + * + * @sc: device handle + * @o: ecore_qable_obj + * @elem: ecore_exeq_elem + */ +static int ecore_optimize_vlan_mac(struct bnx2x_softc *sc, + union ecore_qable_obj *qo, + struct ecore_exeq_elem *elem) +{ + struct ecore_exeq_elem query, *pos; + struct ecore_vlan_mac_obj *o = &qo->vlan_mac; + struct ecore_exe_queue_obj *exeq = &o->exe_queue; + + ECORE_MEMCPY(&query, elem, sizeof(query)); + + switch (elem->cmd_data.vlan_mac.cmd) { + case ECORE_VLAN_MAC_ADD: + query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL; + break; + case ECORE_VLAN_MAC_DEL: + query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD; + break; + default: + /* Don't handle anything other than ADD or DEL */ + return 0; + } + + /* If we found the appropriate element - delete it */ + pos = exeq->get(exeq, &query); + if (pos) { + + /* Return the credit of the optimized command */ + if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT, + &pos->cmd_data.vlan_mac.vlan_mac_flags)) { + if ((query.cmd_data.vlan_mac.cmd == + ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) { + PMD_DRV_LOG(ERR, + "Failed to return the credit for the optimized ADD command"); + return ECORE_INVAL; + } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */ + PMD_DRV_LOG(ERR, + "Failed to recover the credit from the optimized DEL command"); + return ECORE_INVAL; + } + } + + ECORE_MSG("Optimizing %s command", + (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ? + "ADD" : "DEL"); + + ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue); + ecore_exe_queue_free_elem(sc, pos); + return 1; + } + + return 0; +} + +/** + * ecore_vlan_mac_get_registry_elem - prepare a registry element + * + * @sc: device handle + * @o: + * @elem: + * @restore: + * @re: + * + * prepare a registry element according to the current command request. + */ +static int ecore_vlan_mac_get_registry_elem(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, + struct ecore_exeq_elem *elem, + int restore, struct + ecore_vlan_mac_registry_elem + **re) +{ + enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; + struct ecore_vlan_mac_registry_elem *reg_elem; + + /* Allocate a new registry element if needed. */ + if (!restore && + ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) { + reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc); + if (!reg_elem) + return ECORE_NOMEM; + + /* Get a new CAM offset */ + if (!o->get_cam_offset(o, ®_elem->cam_offset)) { + /* This shall never happen, because we have checked the + * CAM availability in the 'validate'. + */ + ECORE_DBG_BREAK_IF(1); + ECORE_FREE(sc, reg_elem, sizeof(*reg_elem)); + return ECORE_INVAL; + } + + ECORE_MSG("Got cam offset %d", reg_elem->cam_offset); + + /* Set a VLAN-MAC data */ + ECORE_MEMCPY(®_elem->u, &elem->cmd_data.vlan_mac.u, + sizeof(reg_elem->u)); + + /* Copy the flags (needed for DEL and RESTORE flows) */ + reg_elem->vlan_mac_flags = + elem->cmd_data.vlan_mac.vlan_mac_flags; + } else /* DEL, RESTORE */ + reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u); + + *re = reg_elem; + return ECORE_SUCCESS; +} + +/** + * ecore_execute_vlan_mac - execute vlan mac command + * + * @sc: device handle + * @qo: + * @exe_chunk: + * @ramrod_flags: + * + * go and send a ramrod! 
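+ *
+ * Returns ECORE_PENDING once the ramrod has been posted (completion is
+ * later processed through ecore_complete_vlan_mac()), ECORE_SUCCESS for a
+ * RAMROD_DRV_CLR_ONLY run that only updates the driver's registry, or an
+ * error code if building or posting the ramrod data failed.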
+ */ +static int ecore_execute_vlan_mac(struct bnx2x_softc *sc, + union ecore_qable_obj *qo, + ecore_list_t * exe_chunk, + unsigned long *ramrod_flags) +{ + struct ecore_exeq_elem *elem; + struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj; + struct ecore_raw_obj *r = &o->raw; + int rc, idx = 0; + int restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags); + int drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags); + struct ecore_vlan_mac_registry_elem *reg_elem; + enum ecore_vlan_mac_cmd cmd; + + /* If DRIVER_ONLY execution is requested, cleanup a registry + * and exit. Otherwise send a ramrod to FW. + */ + if (!drv_only) { + + /* Set pending */ + r->set_pending(r); + + /* Fill the ramrod data */ + ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, + struct ecore_exeq_elem) { + cmd = elem->cmd_data.vlan_mac.cmd; + /* We will add to the target object in MOVE command, so + * change the object for a CAM search. + */ + if (cmd == ECORE_VLAN_MAC_MOVE) + cam_obj = elem->cmd_data.vlan_mac.target_obj; + else + cam_obj = o; + + rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj, + elem, restore, + ®_elem); + if (rc) + goto error_exit; + + ECORE_DBG_BREAK_IF(!reg_elem); + + /* Push a new entry into the registry */ + if (!restore && + ((cmd == ECORE_VLAN_MAC_ADD) || + (cmd == ECORE_VLAN_MAC_MOVE))) + ECORE_LIST_PUSH_HEAD(®_elem->link, + &cam_obj->head); + + /* Configure a single command in a ramrod data buffer */ + o->set_one_rule(sc, o, elem, idx, reg_elem->cam_offset); + + /* MOVE command consumes 2 entries in the ramrod data */ + if (cmd == ECORE_VLAN_MAC_MOVE) + idx += 2; + else + idx++; + } + + /* + * No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside ecore_sp_post()). 
+ */ + + rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid, + r->rdata_mapping, ETH_CONNECTION_TYPE); + if (rc) + goto error_exit; + } + + /* Now, when we are done with the ramrod - clean up the registry */ + ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) { + cmd = elem->cmd_data.vlan_mac.cmd; + if ((cmd == ECORE_VLAN_MAC_DEL) || (cmd == ECORE_VLAN_MAC_MOVE)) { + reg_elem = o->check_del(sc, o, + &elem->cmd_data.vlan_mac.u); + + ECORE_DBG_BREAK_IF(!reg_elem); + + o->put_cam_offset(o, reg_elem->cam_offset); + ECORE_LIST_REMOVE_ENTRY(®_elem->link, &o->head); + ECORE_FREE(sc, reg_elem, sizeof(*reg_elem)); + } + } + + if (!drv_only) + return ECORE_PENDING; + else + return ECORE_SUCCESS; + +error_exit: + r->clear_pending(r); + + /* Cleanup a registry in case of a failure */ + ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) { + cmd = elem->cmd_data.vlan_mac.cmd; + + if (cmd == ECORE_VLAN_MAC_MOVE) + cam_obj = elem->cmd_data.vlan_mac.target_obj; + else + cam_obj = o; + + /* Delete all newly added above entries */ + if (!restore && + ((cmd == ECORE_VLAN_MAC_ADD) || + (cmd == ECORE_VLAN_MAC_MOVE))) { + reg_elem = o->check_del(sc, cam_obj, + &elem->cmd_data.vlan_mac.u); + if (reg_elem) { + ECORE_LIST_REMOVE_ENTRY(®_elem->link, + &cam_obj->head); + ECORE_FREE(sc, reg_elem, sizeof(*reg_elem)); + } + } + } + + return rc; +} + +static int ecore_vlan_mac_push_new_cmd(struct bnx2x_softc *sc, struct + ecore_vlan_mac_ramrod_params *p) +{ + struct ecore_exeq_elem *elem; + struct ecore_vlan_mac_obj *o = p->vlan_mac_obj; + int restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags); + + /* Allocate the execution queue element */ + elem = ecore_exe_queue_alloc_elem(sc); + if (!elem) + return ECORE_NOMEM; + + /* Set the command 'length' */ + switch (p->user_req.cmd) { + case ECORE_VLAN_MAC_MOVE: + elem->cmd_len = 2; + break; + default: + elem->cmd_len = 1; + } + + /* Fill the object specific info */ + ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req, + sizeof(p->user_req)); + + /* Try to add a new command to the pending list */ + return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore); +} + +/** + * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules. + * + * @sc: device handle + * @p: + * + */ +int ecore_config_vlan_mac(struct bnx2x_softc *sc, + struct ecore_vlan_mac_ramrod_params *p) +{ + int rc = ECORE_SUCCESS; + struct ecore_vlan_mac_obj *o = p->vlan_mac_obj; + unsigned long *ramrod_flags = &p->ramrod_flags; + int cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags); + struct ecore_raw_obj *raw = &o->raw; + + /* + * Add new elements to the execution list for commands that require it. + */ + if (!cont) { + rc = ecore_vlan_mac_push_new_cmd(sc, p); + if (rc) + return rc; + } + + /* If nothing will be executed further in this iteration we want to + * return PENDING if there are pending commands + */ + if (!ecore_exe_queue_empty(&o->exe_queue)) + rc = ECORE_PENDING; + + if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { + ECORE_MSG + ("RAMROD_DRV_CLR_ONLY requested: clearing a pending bit."); + raw->clear_pending(raw); + } + + /* Execute commands if required */ + if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) || + ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) { + rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj, + &p->ramrod_flags); + if (rc < 0) + return rc; + } + + /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set + * then user want to wait until the last command is done. 
+ */ + if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) { + /* Wait maximum for the current exe_queue length iterations plus + * one (for the current pending command). + */ + int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1; + + while (!ecore_exe_queue_empty(&o->exe_queue) && + max_iterations--) { + + /* Wait for the current command to complete */ + rc = raw->wait_comp(sc, raw); + if (rc) + return rc; + + /* Make a next step */ + rc = __ecore_vlan_mac_execute_step(sc, + p->vlan_mac_obj, + &p->ramrod_flags); + if (rc < 0) + return rc; + } + + return ECORE_SUCCESS; + } + + return rc; +} + +/** + * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec + * + * @sc: device handle + * @o: + * @vlan_mac_flags: + * @ramrod_flags: execution flags to be used for this deletion + * + * if the last operation has completed successfully and there are no + * more elements left, positive value if the last operation has completed + * successfully and there are more previously configured elements, negative + * value is current operation has failed. + */ +static int ecore_vlan_mac_del_all(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, + unsigned long *vlan_mac_flags, + unsigned long *ramrod_flags) +{ + struct ecore_vlan_mac_registry_elem *pos = NULL; + int rc = 0, read_lock; + struct ecore_vlan_mac_ramrod_params p; + struct ecore_exe_queue_obj *exeq = &o->exe_queue; + struct ecore_exeq_elem *exeq_pos, *exeq_pos_n; + + /* Clear pending commands first */ + + ECORE_SPIN_LOCK_BH(&exeq->lock); + + ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n, + &exeq->exe_queue, link, + struct ecore_exeq_elem) { + if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags == + *vlan_mac_flags) { + rc = exeq->remove(sc, exeq->owner, exeq_pos); + if (rc) { + PMD_DRV_LOG(ERR, "Failed to remove command"); + ECORE_SPIN_UNLOCK_BH(&exeq->lock); + return rc; + } + ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link, + &exeq->exe_queue); + ecore_exe_queue_free_elem(sc, exeq_pos); + } + } + + ECORE_SPIN_UNLOCK_BH(&exeq->lock); + + /* Prepare a command request */ + ECORE_MEMSET(&p, 0, sizeof(p)); + p.vlan_mac_obj = o; + p.ramrod_flags = *ramrod_flags; + p.user_req.cmd = ECORE_VLAN_MAC_DEL; + + /* Add all but the last VLAN-MAC to the execution queue without actually + * execution anything. 
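+	 * RAMROD_COMP_WAIT, RAMROD_EXEC and RAMROD_CONT are cleared below so
+	 * that each ecore_config_vlan_mac() call in the loop only queues a DEL
+	 * command; the final call, made with the caller's flags plus
+	 * RAMROD_CONT, then executes the whole batch.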
+ */ + ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags); + ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags); + ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags); + + ECORE_MSG("vlan_mac_del_all -- taking vlan_mac_lock (reader)"); + read_lock = ecore_vlan_mac_h_read_lock(sc, o); + if (read_lock != ECORE_SUCCESS) + return read_lock; + + ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link, + struct ecore_vlan_mac_registry_elem) { + if (pos->vlan_mac_flags == *vlan_mac_flags) { + p.user_req.vlan_mac_flags = pos->vlan_mac_flags; + ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u)); + rc = ecore_config_vlan_mac(sc, &p); + if (rc < 0) { + PMD_DRV_LOG(ERR, + "Failed to add a new DEL command"); + ecore_vlan_mac_h_read_unlock(sc, o); + return rc; + } + } + } + + ECORE_MSG("vlan_mac_del_all -- releasing vlan_mac_lock (reader)"); + ecore_vlan_mac_h_read_unlock(sc, o); + + p.ramrod_flags = *ramrod_flags; + ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags); + + return ecore_config_vlan_mac(sc, &p); +} + +static void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id, + uint32_t cid, uint8_t func_id, + void *rdata, + ecore_dma_addr_t rdata_mapping, int state, + unsigned long *pstate, ecore_obj_type type) +{ + raw->func_id = func_id; + raw->cid = cid; + raw->cl_id = cl_id; + raw->rdata = rdata; + raw->rdata_mapping = rdata_mapping; + raw->state = state; + raw->pstate = pstate; + raw->obj_type = type; + raw->check_pending = ecore_raw_check_pending; + raw->clear_pending = ecore_raw_clear_pending; + raw->set_pending = ecore_raw_set_pending; + raw->wait_comp = ecore_raw_wait; +} + +static void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o, + uint8_t cl_id, uint32_t cid, + uint8_t func_id, void *rdata, + ecore_dma_addr_t rdata_mapping, + int state, unsigned long *pstate, + ecore_obj_type type, + struct ecore_credit_pool_obj + *macs_pool, struct ecore_credit_pool_obj + *vlans_pool) +{ + ECORE_LIST_INIT(&o->head); + o->head_reader = 0; + o->head_exe_request = FALSE; + o->saved_ramrod_flags = 0; + + o->macs_pool = macs_pool; + o->vlans_pool = vlans_pool; + + o->delete_all = ecore_vlan_mac_del_all; + o->restore = ecore_vlan_mac_restore; + o->complete = ecore_complete_vlan_mac; + o->wait = ecore_wait_vlan_mac; + + ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping, + state, pstate, type); +} + +void ecore_init_mac_obj(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *mac_obj, + uint8_t cl_id, uint32_t cid, uint8_t func_id, + void *rdata, ecore_dma_addr_t rdata_mapping, int state, + unsigned long *pstate, ecore_obj_type type, + struct ecore_credit_pool_obj *macs_pool) +{ + union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj; + + ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata, + rdata_mapping, state, pstate, type, + macs_pool, NULL); + + /* CAM credit pool handling */ + mac_obj->get_credit = ecore_get_credit_mac; + mac_obj->put_credit = ecore_put_credit_mac; + mac_obj->get_cam_offset = ecore_get_cam_offset_mac; + mac_obj->put_cam_offset = ecore_put_cam_offset_mac; + + if (CHIP_IS_E1x(sc)) { + mac_obj->set_one_rule = ecore_set_one_mac_e1x; + mac_obj->check_del = ecore_check_mac_del; + mac_obj->check_add = ecore_check_mac_add; + mac_obj->check_move = ecore_check_move_always_err; + mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; + + /* Exe Queue */ + ecore_exe_queue_init(sc, + &mac_obj->exe_queue, 1, qable_obj, + ecore_validate_vlan_mac, + ecore_remove_vlan_mac, + ecore_optimize_vlan_mac, + ecore_execute_vlan_mac, + ecore_exeq_get_mac); + } else { + 
mac_obj->set_one_rule = ecore_set_one_mac_e2; + mac_obj->check_del = ecore_check_mac_del; + mac_obj->check_add = ecore_check_mac_add; + mac_obj->check_move = ecore_check_move; + mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; + mac_obj->get_n_elements = ecore_get_n_elements; + + /* Exe Queue */ + ecore_exe_queue_init(sc, + &mac_obj->exe_queue, CLASSIFY_RULES_COUNT, + qable_obj, ecore_validate_vlan_mac, + ecore_remove_vlan_mac, + ecore_optimize_vlan_mac, + ecore_execute_vlan_mac, + ecore_exeq_get_mac); + } +} + +/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ +static void __storm_memset_mac_filters(struct bnx2x_softc *sc, struct + tstorm_eth_mac_filter_config + *mac_filters, uint16_t pf_id) +{ + size_t size = sizeof(struct tstorm_eth_mac_filter_config); + + uint32_t addr = BAR_TSTRORM_INTMEM + + TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id); + + ecore_storm_memset_struct(sc, addr, size, (uint32_t *) mac_filters); +} + +static int ecore_set_rx_mode_e1x(struct bnx2x_softc *sc, + struct ecore_rx_mode_ramrod_params *p) +{ + /* update the sc MAC filter structure */ + uint32_t mask = (1 << p->cl_id); + + struct tstorm_eth_mac_filter_config *mac_filters = + (struct tstorm_eth_mac_filter_config *)p->rdata; + + /* initial setting is drop-all */ + uint8_t drop_all_ucast = 1, drop_all_mcast = 1; + uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; + uint8_t unmatched_unicast = 0; + + /* In e1x there we only take into account rx accept flag since tx switching + * isn't enabled. */ + if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags)) + /* accept matched ucast */ + drop_all_ucast = 0; + + if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags)) + /* accept matched mcast */ + drop_all_mcast = 0; + + if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) { + /* accept all mcast */ + drop_all_ucast = 0; + accp_all_ucast = 1; + } + if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) { + /* accept all mcast */ + drop_all_mcast = 0; + accp_all_mcast = 1; + } + if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags)) + /* accept (all) bcast */ + accp_all_bcast = 1; + if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags)) + /* accept unmatched unicasts */ + unmatched_unicast = 1; + + mac_filters->ucast_drop_all = drop_all_ucast ? + mac_filters->ucast_drop_all | mask : + mac_filters->ucast_drop_all & ~mask; + + mac_filters->mcast_drop_all = drop_all_mcast ? + mac_filters->mcast_drop_all | mask : + mac_filters->mcast_drop_all & ~mask; + + mac_filters->ucast_accept_all = accp_all_ucast ? + mac_filters->ucast_accept_all | mask : + mac_filters->ucast_accept_all & ~mask; + + mac_filters->mcast_accept_all = accp_all_mcast ? + mac_filters->mcast_accept_all | mask : + mac_filters->mcast_accept_all & ~mask; + + mac_filters->bcast_accept_all = accp_all_bcast ? + mac_filters->bcast_accept_all | mask : + mac_filters->bcast_accept_all & ~mask; + + mac_filters->unmatched_unicast = unmatched_unicast ? 
+ mac_filters->unmatched_unicast | mask : + mac_filters->unmatched_unicast & ~mask; + + ECORE_MSG("drop_ucast 0x%xdrop_mcast 0x%x accp_ucast 0x%x" + "accp_mcast 0x%xaccp_bcast 0x%x", + mac_filters->ucast_drop_all, mac_filters->mcast_drop_all, + mac_filters->ucast_accept_all, mac_filters->mcast_accept_all, + mac_filters->bcast_accept_all); + + /* write the MAC filter structure */ + __storm_memset_mac_filters(sc, mac_filters, p->func_id); + + /* The operation is completed */ + ECORE_CLEAR_BIT(p->state, p->pstate); + ECORE_SMP_MB_AFTER_CLEAR_BIT(); + + return ECORE_SUCCESS; +} + +/* Setup ramrod data */ +static void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid, struct eth_classify_header + *hdr, uint8_t rule_cnt) +{ + hdr->echo = ECORE_CPU_TO_LE32(cid); + hdr->rule_cnt = rule_cnt; +} + +static void ecore_rx_mode_set_cmd_state_e2(unsigned long *accept_flags, struct eth_filter_rules_cmd + *cmd, int clear_accept_all) +{ + uint16_t state; + + /* start with 'drop-all' */ + state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL | + ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; + + if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags)) + state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; + + if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags)) + state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; + + if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) { + state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; + state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; + } + + if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) { + state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; + state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; + } + if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags)) + state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; + + if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) { + state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; + state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; + } + if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags)) + state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN; + + /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */ + if (clear_accept_all) { + state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; + state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; + state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; + state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; + } + + cmd->state = ECORE_CPU_TO_LE16(state); +} + +static int ecore_set_rx_mode_e2(struct bnx2x_softc *sc, + struct ecore_rx_mode_ramrod_params *p) +{ + struct eth_filter_rules_ramrod_data *data = p->rdata; + int rc; + uint8_t rule_idx = 0; + + /* Reset the ramrod data buffer */ + ECORE_MEMSET(data, 0, sizeof(*data)); + + /* Setup ramrod data */ + + /* Tx (internal switching) */ + if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) { + data->rules[rule_idx].client_id = p->cl_id; + data->rules[rule_idx].func_id = p->func_id; + + data->rules[rule_idx].cmd_general_data = + ETH_FILTER_RULES_CMD_TX_CMD; + + ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags, + &(data->rules[rule_idx++]), + FALSE); + } + + /* Rx */ + if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) { + data->rules[rule_idx].client_id = p->cl_id; + data->rules[rule_idx].func_id = p->func_id; + + data->rules[rule_idx].cmd_general_data = + ETH_FILTER_RULES_CMD_RX_CMD; + + ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags, + &(data->rules[rule_idx++]), + FALSE); + } + + /* If FCoE Queue configuration has been requested configure the Rx and + * internal switching modes for this queue in separate rules. 
+ * + * FCoE queue shell never be set to ACCEPT_ALL packets of any sort: + * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED. + */ + if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) { + /* Tx (internal switching) */ + if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) { + data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc); + data->rules[rule_idx].func_id = p->func_id; + + data->rules[rule_idx].cmd_general_data = + ETH_FILTER_RULES_CMD_TX_CMD; + + ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags, + &(data->rules + [rule_idx++]), TRUE); + } + + /* Rx */ + if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) { + data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc); + data->rules[rule_idx].func_id = p->func_id; + + data->rules[rule_idx].cmd_general_data = + ETH_FILTER_RULES_CMD_RX_CMD; + + ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags, + &(data->rules + [rule_idx++]), TRUE); + } + } + + /* Set the ramrod header (most importantly - number of rules to + * configure). + */ + ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx); + + ECORE_MSG + ("About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx", + data->header.rule_cnt, p->rx_accept_flags, p->tx_accept_flags); + + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside ecore_sp_post()). + */ + + /* Send a ramrod */ + rc = ecore_sp_post(sc, + RAMROD_CMD_ID_ETH_FILTER_RULES, + p->cid, p->rdata_mapping, ETH_CONNECTION_TYPE); + if (rc) + return rc; + + /* Ramrod completion is pending */ + return ECORE_PENDING; +} + +static int ecore_wait_rx_mode_comp_e2(struct bnx2x_softc *sc, + struct ecore_rx_mode_ramrod_params *p) +{ + return ecore_state_wait(sc, p->state, p->pstate); +} + +static int ecore_empty_rx_mode_wait(__rte_unused struct bnx2x_softc *sc, + __rte_unused struct + ecore_rx_mode_ramrod_params *p) +{ + /* Do nothing */ + return ECORE_SUCCESS; +} + +int ecore_config_rx_mode(struct bnx2x_softc *sc, + struct ecore_rx_mode_ramrod_params *p) +{ + int rc; + + /* Configure the new classification in the chip */ + if (p->rx_mode_obj->config_rx_mode) { + rc = p->rx_mode_obj->config_rx_mode(sc, p); + if (rc < 0) + return rc; + + /* Wait for a ramrod completion if was requested */ + if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) { + rc = p->rx_mode_obj->wait_comp(sc, p); + if (rc) + return rc; + } + } else { + ECORE_MSG("ERROR: config_rx_mode is NULL"); + return -1; + } + + return rc; +} + +void ecore_init_rx_mode_obj(struct bnx2x_softc *sc, struct ecore_rx_mode_obj *o) +{ + if (CHIP_IS_E1x(sc)) { + o->wait_comp = ecore_empty_rx_mode_wait; + o->config_rx_mode = ecore_set_rx_mode_e1x; + } else { + o->wait_comp = ecore_wait_rx_mode_comp_e2; + o->config_rx_mode = ecore_set_rx_mode_e2; + } +} + +/********************* Multicast verbs: SET, CLEAR ****************************/ +static uint8_t ecore_mcast_bin_from_mac(uint8_t * mac) +{ + return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff; +} + +struct ecore_mcast_mac_elem { + ecore_list_entry_t link; + uint8_t mac[ETH_ALEN]; + uint8_t pad[2]; /* For a natural alignment of the following buffer */ +}; + +struct ecore_pending_mcast_cmd { + ecore_list_entry_t link; + int type; /* ECORE_MCAST_CMD_X */ + union { + ecore_list_t macs_head; + uint32_t macs_num; /* Needed for DEL command */ + int next_bin; /* Needed for RESTORE flow with aprox match */ + } 
data; + + int done; /* set to TRUE, when the command has been handled, + * practically used in 57712 handling only, where one pending + * command may be handled in a few operations. As long as for + * other chips every operation handling is completed in a + * single ramrod, there is no need to utilize this field. + */ +}; + +static int ecore_mcast_wait(struct bnx2x_softc *sc, struct ecore_mcast_obj *o) +{ + if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) || + o->raw.wait_comp(sc, &o->raw)) + return ECORE_TIMEOUT; + + return ECORE_SUCCESS; +} + +static int ecore_mcast_enqueue_cmd(struct bnx2x_softc *sc __rte_unused, + struct ecore_mcast_obj *o, + struct ecore_mcast_ramrod_params *p, + enum ecore_mcast_cmd cmd) +{ + int total_sz; + struct ecore_pending_mcast_cmd *new_cmd; + struct ecore_mcast_mac_elem *cur_mac = NULL; + struct ecore_mcast_list_elem *pos; + int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ? + p->mcast_list_len : 0); + + /* If the command is empty ("handle pending commands only"), break */ + if (!p->mcast_list_len) + return ECORE_SUCCESS; + + total_sz = sizeof(*new_cmd) + + macs_list_len * sizeof(struct ecore_mcast_mac_elem); + + /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */ + new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc); + + if (!new_cmd) + return ECORE_NOMEM; + + ECORE_MSG("About to enqueue a new %d command. macs_list_len=%d", + cmd, macs_list_len); + + ECORE_LIST_INIT(&new_cmd->data.macs_head); + + new_cmd->type = cmd; + new_cmd->done = FALSE; + + switch (cmd) { + case ECORE_MCAST_CMD_ADD: + cur_mac = (struct ecore_mcast_mac_elem *) + ((uint8_t *) new_cmd + sizeof(*new_cmd)); + + /* Push the MACs of the current command into the pending command + * MACs list: FIFO + */ + ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link, + struct ecore_mcast_list_elem) { + ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN); + ECORE_LIST_PUSH_TAIL(&cur_mac->link, + &new_cmd->data.macs_head); + cur_mac++; + } + + break; + + case ECORE_MCAST_CMD_DEL: + new_cmd->data.macs_num = p->mcast_list_len; + break; + + case ECORE_MCAST_CMD_RESTORE: + new_cmd->data.next_bin = 0; + break; + + default: + ECORE_FREE(sc, new_cmd, total_sz); + PMD_DRV_LOG(ERR, "Unknown command: %d", cmd); + return ECORE_INVAL; + } + + /* Push the new pending command to the tail of the pending list: FIFO */ + ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head); + + o->set_sched(o); + + return ECORE_PENDING; +} + +/** + * ecore_mcast_get_next_bin - get the next set bin (index) + * + * @o: + * @last: index to start looking from (including) + * + * returns the next found (set) bin or a negative value if none is found. 
+ */ +static int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last) +{ + int i, j, inner_start = last % BIT_VEC64_ELEM_SZ; + + for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) { + if (o->registry.aprox_match.vec[i]) + for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) { + int cur_bit = j + BIT_VEC64_ELEM_SZ * i; + if (BIT_VEC64_TEST_BIT + (o->registry.aprox_match.vec, cur_bit)) { + return cur_bit; + } + } + inner_start = 0; + } + + /* None found */ + return -1; +} + +/** + * ecore_mcast_clear_first_bin - find the first set bin and clear it + * + * @o: + * + * returns the index of the found bin or -1 if none is found + */ +static int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o) +{ + int cur_bit = ecore_mcast_get_next_bin(o, 0); + + if (cur_bit >= 0) + BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit); + + return cur_bit; +} + +static uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o) +{ + struct ecore_raw_obj *raw = &o->raw; + uint8_t rx_tx_flag = 0; + + if ((raw->obj_type == ECORE_OBJ_TYPE_TX) || + (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) + rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD; + + if ((raw->obj_type == ECORE_OBJ_TYPE_RX) || + (raw->obj_type == ECORE_OBJ_TYPE_RX_TX)) + rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD; + + return rx_tx_flag; +} + +static void ecore_mcast_set_one_rule_e2(struct bnx2x_softc *sc __rte_unused, + struct ecore_mcast_obj *o, int idx, + union ecore_mcast_config_data *cfg_data, + enum ecore_mcast_cmd cmd) +{ + struct ecore_raw_obj *r = &o->raw; + struct eth_multicast_rules_ramrod_data *data = + (struct eth_multicast_rules_ramrod_data *)(r->rdata); + uint8_t func_id = r->func_id; + uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o); + int bin; + + if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE)) + rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD; + + data->rules[idx].cmd_general_data |= rx_tx_add_flag; + + /* Get a bin and update a bins' vector */ + switch (cmd) { + case ECORE_MCAST_CMD_ADD: + bin = ecore_mcast_bin_from_mac(cfg_data->mac); + BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin); + break; + + case ECORE_MCAST_CMD_DEL: + /* If there were no more bins to clear + * (ecore_mcast_clear_first_bin() returns -1) then we would + * clear any (0xff) bin. + * See ecore_mcast_validate_e2() for explanation when it may + * happen. + */ + bin = ecore_mcast_clear_first_bin(o); + break; + + case ECORE_MCAST_CMD_RESTORE: + bin = cfg_data->bin; + break; + + default: + PMD_DRV_LOG(ERR, "Unknown command: %d", cmd); + return; + } + + ECORE_MSG("%s bin %d", + ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ? 
+ "Setting" : "Clearing"), bin); + + data->rules[idx].bin_id = (uint8_t) bin; + data->rules[idx].func_id = func_id; + data->rules[idx].engine_id = o->engine_id; +} + +/** + * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry + * + * @sc: device handle + * @o: + * @start_bin: index in the registry to start from (including) + * @rdata_idx: index in the ramrod data to start from + * + * returns last handled bin index or -1 if all bins have been handled + */ +static int ecore_mcast_handle_restore_cmd_e2(struct bnx2x_softc *sc, + struct ecore_mcast_obj *o, + int start_bin, int *rdata_idx) +{ + int cur_bin, cnt = *rdata_idx; + union ecore_mcast_config_data cfg_data = { NULL }; + + /* go through the registry and configure the bins from it */ + for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0; + cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) { + + cfg_data.bin = (uint8_t) cur_bin; + o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_RESTORE); + + cnt++; + + ECORE_MSG("About to configure a bin %d", cur_bin); + + /* Break if we reached the maximum number + * of rules. + */ + if (cnt >= o->max_cmd_len) + break; + } + + *rdata_idx = cnt; + + return cur_bin; +} + +static void ecore_mcast_hdl_pending_add_e2(struct bnx2x_softc *sc, + struct ecore_mcast_obj *o, + struct ecore_pending_mcast_cmd + *cmd_pos, int *line_idx) +{ + struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n; + int cnt = *line_idx; + union ecore_mcast_config_data cfg_data = { NULL }; + + ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n, + &cmd_pos->data.macs_head, link, + struct ecore_mcast_mac_elem) { + + cfg_data.mac = &pmac_pos->mac[0]; + o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type); + + cnt++; + + ECORE_MSG + ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC", + pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], + pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]); + + ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link, + &cmd_pos->data.macs_head); + + /* Break if we reached the maximum number + * of rules. + */ + if (cnt >= o->max_cmd_len) + break; + } + + *line_idx = cnt; + + /* if no more MACs to configure - we are done */ + if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head)) + cmd_pos->done = TRUE; +} + +static void ecore_mcast_hdl_pending_del_e2(struct bnx2x_softc *sc, + struct ecore_mcast_obj *o, + struct ecore_pending_mcast_cmd + *cmd_pos, int *line_idx) +{ + int cnt = *line_idx; + + while (cmd_pos->data.macs_num) { + o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type); + + cnt++; + + cmd_pos->data.macs_num--; + + ECORE_MSG("Deleting MAC. %d left,cnt is %d", + cmd_pos->data.macs_num, cnt); + + /* Break if we reached the maximum + * number of rules. 
+ */ + if (cnt >= o->max_cmd_len) + break; + } + + *line_idx = cnt; + + /* If we cleared all bins - we are done */ + if (!cmd_pos->data.macs_num) + cmd_pos->done = TRUE; +} + +static void ecore_mcast_hdl_pending_restore_e2(struct bnx2x_softc *sc, + struct ecore_mcast_obj *o, struct + ecore_pending_mcast_cmd + *cmd_pos, int *line_idx) +{ + cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin, + line_idx); + + if (cmd_pos->data.next_bin < 0) + /* If o->set_restore returned -1 we are done */ + cmd_pos->done = TRUE; + else + /* Start from the next bin next time */ + cmd_pos->data.next_bin++; +} + +static int ecore_mcast_handle_pending_cmds_e2(struct bnx2x_softc *sc, struct + ecore_mcast_ramrod_params + *p) +{ + struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n; + int cnt = 0; + struct ecore_mcast_obj *o = p->mcast_obj; + + ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n, + &o->pending_cmds_head, link, + struct ecore_pending_mcast_cmd) { + switch (cmd_pos->type) { + case ECORE_MCAST_CMD_ADD: + ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt); + break; + + case ECORE_MCAST_CMD_DEL: + ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt); + break; + + case ECORE_MCAST_CMD_RESTORE: + ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos, + &cnt); + break; + + default: + PMD_DRV_LOG(ERR, "Unknown command: %d", cmd_pos->type); + return ECORE_INVAL; + } + + /* If the command has been completed - remove it from the list + * and free the memory + */ + if (cmd_pos->done) { + ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link, + &o->pending_cmds_head); + ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len); + } + + /* Break if we reached the maximum number of rules */ + if (cnt >= o->max_cmd_len) + break; + } + + return cnt; +} + +static void ecore_mcast_hdl_add(struct bnx2x_softc *sc, + struct ecore_mcast_obj *o, + struct ecore_mcast_ramrod_params *p, + int *line_idx) +{ + struct ecore_mcast_list_elem *mlist_pos; + union ecore_mcast_config_data cfg_data = { NULL }; + int cnt = *line_idx; + + ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link, + struct ecore_mcast_list_elem) { + cfg_data.mac = mlist_pos->mac; + o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD); + + cnt++; + + ECORE_MSG + ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC", + mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], + mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]); + } + + *line_idx = cnt; +} + +static void ecore_mcast_hdl_del(struct bnx2x_softc *sc, + struct ecore_mcast_obj *o, + struct ecore_mcast_ramrod_params *p, + int *line_idx) +{ + int cnt = *line_idx, i; + + for (i = 0; i < p->mcast_list_len; i++) { + o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL); + + cnt++; + + ECORE_MSG("Deleting MAC. %d left", p->mcast_list_len - i - 1); + } + + *line_idx = cnt; +} + +/** + * ecore_mcast_handle_current_cmd - + * + * @sc: device handle + * @p: + * @cmd: + * @start_cnt: first line in the ramrod data that may be used + * + * This function is called iff there is enough place for the current command in + * the ramrod data. + * Returns number of lines filled in the ramrod data in total. 
+ */
+static int ecore_mcast_handle_current_cmd(struct bnx2x_softc *sc, struct
+ ecore_mcast_ramrod_params *p,
+ enum ecore_mcast_cmd cmd,
+ int start_cnt)
+{
+ struct ecore_mcast_obj *o = p->mcast_obj;
+ int cnt = start_cnt;
+
+ ECORE_MSG("p->mcast_list_len=%d", p->mcast_list_len);
+
+ switch (cmd) {
+ case ECORE_MCAST_CMD_ADD:
+ ecore_mcast_hdl_add(sc, o, p, &cnt);
+ break;
+
+ case ECORE_MCAST_CMD_DEL:
+ ecore_mcast_hdl_del(sc, o, p, &cnt);
+ break;
+
+ case ECORE_MCAST_CMD_RESTORE:
+ o->hdl_restore(sc, o, 0, &cnt);
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
+ return ECORE_INVAL;
+ }
+
+ /* The current command has been handled */
+ p->mcast_list_len = 0;
+
+ return cnt;
+}
+
+static int ecore_mcast_validate_e2(__rte_unused struct bnx2x_softc *sc,
+ struct ecore_mcast_ramrod_params *p,
+ enum ecore_mcast_cmd cmd)
+{
+ struct ecore_mcast_obj *o = p->mcast_obj;
+ int reg_sz = o->get_registry_size(o);
+
+ switch (cmd) {
+ /* DEL command deletes all currently configured MACs */
+ case ECORE_MCAST_CMD_DEL:
+ o->set_registry_size(o, 0);
+ /* Fall through */
+
+ /* RESTORE command will restore the entire multicast configuration */
+ case ECORE_MCAST_CMD_RESTORE:
+ /* Here we set the approximate amount of work to do, which in
+ * fact may be less, as some MACs in postponed ADD
+ * command(s) scheduled before this command may fall into
+ * the same bin and the actual number of bins set in the
+ * registry would be less than we estimated here. See
+ * ecore_mcast_set_one_rule_e2() for further details.
+ */
+ p->mcast_list_len = reg_sz;
+ break;
+
+ case ECORE_MCAST_CMD_ADD:
+ case ECORE_MCAST_CMD_CONT:
+ /* Here we assume that all new MACs will fall into new bins.
+ * However we will correct the real registry size after we
+ * handle all pending commands.
+ */
+ o->set_registry_size(o, reg_sz + p->mcast_list_len);
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
+ return ECORE_INVAL;
+ }
+
+ /* Increase the total number of MACs pending to be configured */
+ o->total_pending_num += p->mcast_list_len;
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_mcast_revert_e2(__rte_unused struct bnx2x_softc *sc,
+ struct ecore_mcast_ramrod_params *p,
+ int old_num_bins)
+{
+ struct ecore_mcast_obj *o = p->mcast_obj;
+
+ o->set_registry_size(o, old_num_bins);
+ o->total_pending_num -= p->mcast_list_len;
+}
+
+/**
+ * ecore_mcast_set_rdata_hdr_e2 - sets the header values
+ *
+ * @sc: device handle
+ * @p:
+ * @len: number of rules to handle
+ */
+static void ecore_mcast_set_rdata_hdr_e2(__rte_unused struct bnx2x_softc
+ *sc, struct ecore_mcast_ramrod_params
+ *p, uint8_t len)
+{
+ struct ecore_raw_obj *r = &p->mcast_obj->raw;
+ struct eth_multicast_rules_ramrod_data *data =
+ (struct eth_multicast_rules_ramrod_data *)(r->rdata);
+
+ data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
+ (ECORE_FILTER_MCAST_PENDING <<
+ ECORE_SWCID_SHIFT));
+ data->header.rule_cnt = len;
+}
+
+/**
+ * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins
+ *
+ * @sc: device handle
+ * @o:
+ *
+ * Recalculate the actual number of set bins in the registry using Brian
+ * Kernighan's algorithm: its execution complexity is proportional to the number of set bins.
+ */ +static int ecore_mcast_refresh_registry_e2(struct ecore_mcast_obj *o) +{ + int i, cnt = 0; + uint64_t elem; + + for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) { + elem = o->registry.aprox_match.vec[i]; + for (; elem; cnt++) + elem &= elem - 1; + } + + o->set_registry_size(o, cnt); + + return ECORE_SUCCESS; +} + +static int ecore_mcast_setup_e2(struct bnx2x_softc *sc, + struct ecore_mcast_ramrod_params *p, + enum ecore_mcast_cmd cmd) +{ + struct ecore_raw_obj *raw = &p->mcast_obj->raw; + struct ecore_mcast_obj *o = p->mcast_obj; + struct eth_multicast_rules_ramrod_data *data = + (struct eth_multicast_rules_ramrod_data *)(raw->rdata); + int cnt = 0, rc; + + /* Reset the ramrod data buffer */ + ECORE_MEMSET(data, 0, sizeof(*data)); + + cnt = ecore_mcast_handle_pending_cmds_e2(sc, p); + + /* If there are no more pending commands - clear SCHEDULED state */ + if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head)) + o->clear_sched(o); + + /* The below may be TRUE iff there was enough room in ramrod + * data for all pending commands and for the current + * command. Otherwise the current command would have been added + * to the pending commands and p->mcast_list_len would have been + * zeroed. + */ + if (p->mcast_list_len > 0) + cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt); + + /* We've pulled out some MACs - update the total number of + * outstanding. + */ + o->total_pending_num -= cnt; + + /* send a ramrod */ + ECORE_DBG_BREAK_IF(o->total_pending_num < 0); + ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len); + + ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t) cnt); + + /* Update a registry size if there are no more pending operations. + * + * We don't want to change the value of the registry size if there are + * pending operations because we want it to always be equal to the + * exact or the approximate number (see ecore_mcast_validate_e2()) of + * set bins after the last requested operation in order to properly + * evaluate the size of the next DEL/RESTORE operation. + * + * Note that we update the registry itself during command(s) handling + * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we + * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but + * with a limited amount of update commands (per MAC/bin) and we don't + * know in this scope what the actual state of bins configuration is + * going to be after this ramrod. + */ + if (!o->total_pending_num) + ecore_mcast_refresh_registry_e2(o); + + /* If CLEAR_ONLY was requested - don't send a ramrod and clear + * RAMROD_PENDING status immediately. + */ + if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { + raw->clear_pending(raw); + return ECORE_SUCCESS; + } else { + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside ecore_sp_post()). 
+ */ + + /* Send a ramrod */ + rc = ecore_sp_post(sc, + RAMROD_CMD_ID_ETH_MULTICAST_RULES, + raw->cid, + raw->rdata_mapping, ETH_CONNECTION_TYPE); + if (rc) + return rc; + + /* Ramrod completion is pending */ + return ECORE_PENDING; + } +} + +static int ecore_mcast_validate_e1h(__rte_unused struct bnx2x_softc *sc, + struct ecore_mcast_ramrod_params *p, + enum ecore_mcast_cmd cmd) +{ + /* Mark, that there is a work to do */ + if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE)) + p->mcast_list_len = 1; + + return ECORE_SUCCESS; +} + +static void ecore_mcast_revert_e1h(__rte_unused struct bnx2x_softc *sc, + __rte_unused struct ecore_mcast_ramrod_params + *p, __rte_unused int old_num_bins) +{ + /* Do nothing */ +} + +#define ECORE_57711_SET_MC_FILTER(filter, bit) \ +do { \ + (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \ +} while (0) + +static void ecore_mcast_hdl_add_e1h(struct bnx2x_softc *sc __rte_unused, + struct ecore_mcast_obj *o, + struct ecore_mcast_ramrod_params *p, + uint32_t * mc_filter) +{ + struct ecore_mcast_list_elem *mlist_pos; + int bit; + + ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link, + struct ecore_mcast_list_elem) { + bit = ecore_mcast_bin_from_mac(mlist_pos->mac); + ECORE_57711_SET_MC_FILTER(mc_filter, bit); + + ECORE_MSG + ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d", + mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], + mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5], + bit); + + /* bookkeeping... */ + BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bit); + } +} + +static void ecore_mcast_hdl_restore_e1h(struct bnx2x_softc *sc + __rte_unused, + struct ecore_mcast_obj *o, + uint32_t * mc_filter) +{ + int bit; + + for (bit = ecore_mcast_get_next_bin(o, 0); + bit >= 0; bit = ecore_mcast_get_next_bin(o, bit + 1)) { + ECORE_57711_SET_MC_FILTER(mc_filter, bit); + ECORE_MSG("About to set bin %d", bit); + } +} + +/* On 57711 we write the multicast MACs' approximate match + * table by directly into the TSTORM's internal RAM. So we don't + * really need to handle any tricks to make it work. + */ +static int ecore_mcast_setup_e1h(struct bnx2x_softc *sc, + struct ecore_mcast_ramrod_params *p, + enum ecore_mcast_cmd cmd) +{ + int i; + struct ecore_mcast_obj *o = p->mcast_obj; + struct ecore_raw_obj *r = &o->raw; + + /* If CLEAR_ONLY has been requested - clear the registry + * and clear a pending bit. + */ + if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { + uint32_t mc_filter[ECORE_MC_HASH_SIZE] = { 0 }; + + /* Set the multicast filter bits before writing it into + * the internal memory. 
+ */ + switch (cmd) { + case ECORE_MCAST_CMD_ADD: + ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter); + break; + + case ECORE_MCAST_CMD_DEL: + ECORE_MSG("Invalidating multicast MACs configuration"); + + /* clear the registry */ + ECORE_MEMSET(o->registry.aprox_match.vec, 0, + sizeof(o->registry.aprox_match.vec)); + break; + + case ECORE_MCAST_CMD_RESTORE: + ecore_mcast_hdl_restore_e1h(sc, o, mc_filter); + break; + + default: + PMD_DRV_LOG(ERR, "Unknown command: %d", cmd); + return ECORE_INVAL; + } + + /* Set the mcast filter in the internal memory */ + for (i = 0; i < ECORE_MC_HASH_SIZE; i++) + REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]); + } else + /* clear the registry */ + ECORE_MEMSET(o->registry.aprox_match.vec, 0, + sizeof(o->registry.aprox_match.vec)); + + /* We are done */ + r->clear_pending(r); + + return ECORE_SUCCESS; +} + +static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o) +{ + return o->registry.aprox_match.num_bins_set; +} + +static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o, + int n) +{ + o->registry.aprox_match.num_bins_set = n; +} + +int ecore_config_mcast(struct bnx2x_softc *sc, + struct ecore_mcast_ramrod_params *p, + enum ecore_mcast_cmd cmd) +{ + struct ecore_mcast_obj *o = p->mcast_obj; + struct ecore_raw_obj *r = &o->raw; + int rc = 0, old_reg_size; + + /* This is needed to recover number of currently configured mcast macs + * in case of failure. + */ + old_reg_size = o->get_registry_size(o); + + /* Do some calculations and checks */ + rc = o->validate(sc, p, cmd); + if (rc) + return rc; + + /* Return if there is no work to do */ + if ((!p->mcast_list_len) && (!o->check_sched(o))) + return ECORE_SUCCESS; + + ECORE_MSG + ("o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d", + o->total_pending_num, p->mcast_list_len, o->max_cmd_len); + + /* Enqueue the current command to the pending list if we can't complete + * it in the current iteration + */ + if (r->check_pending(r) || + ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) { + rc = o->enqueue_cmd(sc, p->mcast_obj, p, cmd); + if (rc < 0) + goto error_exit1; + + /* As long as the current command is in a command list we + * don't need to handle it separately. + */ + p->mcast_list_len = 0; + } + + if (!r->check_pending(r)) { + + /* Set 'pending' state */ + r->set_pending(r); + + /* Configure the new classification in the chip */ + rc = o->config_mcast(sc, p, cmd); + if (rc < 0) + goto error_exit2; + + /* Wait for a ramrod completion if was requested */ + if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) + rc = o->wait_comp(sc, o); + } + + return rc; + +error_exit2: + r->clear_pending(r); + +error_exit1: + o->revert(sc, p, old_reg_size); + + return rc; +} + +static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o) +{ + ECORE_SMP_MB_BEFORE_CLEAR_BIT(); + ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate); + ECORE_SMP_MB_AFTER_CLEAR_BIT(); +} + +static void ecore_mcast_set_sched(struct ecore_mcast_obj *o) +{ + ECORE_SMP_MB_BEFORE_CLEAR_BIT(); + ECORE_SET_BIT(o->sched_state, o->raw.pstate); + ECORE_SMP_MB_AFTER_CLEAR_BIT(); +} + +static int ecore_mcast_check_sched(struct ecore_mcast_obj *o) +{ + return ! 
!ECORE_TEST_BIT(o->sched_state, o->raw.pstate); +} + +static int ecore_mcast_check_pending(struct ecore_mcast_obj *o) +{ + return o->raw.check_pending(&o->raw) || o->check_sched(o); +} + +void ecore_init_mcast_obj(struct bnx2x_softc *sc, + struct ecore_mcast_obj *mcast_obj, + uint8_t mcast_cl_id, uint32_t mcast_cid, + uint8_t func_id, uint8_t engine_id, void *rdata, + ecore_dma_addr_t rdata_mapping, int state, + unsigned long *pstate, ecore_obj_type type) +{ + ECORE_MEMSET(mcast_obj, 0, sizeof(*mcast_obj)); + + ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id, + rdata, rdata_mapping, state, pstate, type); + + mcast_obj->engine_id = engine_id; + + ECORE_LIST_INIT(&mcast_obj->pending_cmds_head); + + mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED; + mcast_obj->check_sched = ecore_mcast_check_sched; + mcast_obj->set_sched = ecore_mcast_set_sched; + mcast_obj->clear_sched = ecore_mcast_clear_sched; + + if (CHIP_IS_E1H(sc)) { + mcast_obj->config_mcast = ecore_mcast_setup_e1h; + mcast_obj->enqueue_cmd = NULL; + mcast_obj->hdl_restore = NULL; + mcast_obj->check_pending = ecore_mcast_check_pending; + + /* 57711 doesn't send a ramrod, so it has unlimited credit + * for one command. + */ + mcast_obj->max_cmd_len = -1; + mcast_obj->wait_comp = ecore_mcast_wait; + mcast_obj->set_one_rule = NULL; + mcast_obj->validate = ecore_mcast_validate_e1h; + mcast_obj->revert = ecore_mcast_revert_e1h; + mcast_obj->get_registry_size = + ecore_mcast_get_registry_size_aprox; + mcast_obj->set_registry_size = + ecore_mcast_set_registry_size_aprox; + } else { + mcast_obj->config_mcast = ecore_mcast_setup_e2; + mcast_obj->enqueue_cmd = ecore_mcast_enqueue_cmd; + mcast_obj->hdl_restore = ecore_mcast_handle_restore_cmd_e2; + mcast_obj->check_pending = ecore_mcast_check_pending; + mcast_obj->max_cmd_len = 16; + mcast_obj->wait_comp = ecore_mcast_wait; + mcast_obj->set_one_rule = ecore_mcast_set_one_rule_e2; + mcast_obj->validate = ecore_mcast_validate_e2; + mcast_obj->revert = ecore_mcast_revert_e2; + mcast_obj->get_registry_size = + ecore_mcast_get_registry_size_aprox; + mcast_obj->set_registry_size = + ecore_mcast_set_registry_size_aprox; + } +} + +/*************************** Credit handling **********************************/ + +/** + * atomic_add_ifless - add if the result is less than a given value. + * + * @v: pointer of type ecore_atomic_t + * @a: the amount to add to v... + * @u: ...if (v + a) is less than u. + * + * returns TRUE if (v + a) was less than u, and FALSE otherwise. + * + */ +static int __atomic_add_ifless(ecore_atomic_t * v, int a, int u) +{ + int c, old; + + c = ECORE_ATOMIC_READ(v); + for (;;) { + if (ECORE_UNLIKELY(c + a >= u)) + return FALSE; + + old = ECORE_ATOMIC_CMPXCHG((v), c, c + a); + if (ECORE_LIKELY(old == c)) + break; + c = old; + } + + return TRUE; +} + +/** + * atomic_dec_ifmoe - dec if the result is more or equal than a given value. + * + * @v: pointer of type ecore_atomic_t + * @a: the amount to dec from v... + * @u: ...if (v - a) is more or equal than u. + * + * returns TRUE if (v - a) was more or equal than u, and FALSE + * otherwise. 
+ */ +static int __atomic_dec_ifmoe(ecore_atomic_t * v, int a, int u) +{ + int c, old; + + c = ECORE_ATOMIC_READ(v); + for (;;) { + if (ECORE_UNLIKELY(c - a < u)) + return FALSE; + + old = ECORE_ATOMIC_CMPXCHG((v), c, c - a); + if (ECORE_LIKELY(old == c)) + break; + c = old; + } + + return TRUE; +} + +static int ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt) +{ + int rc; + + ECORE_SMP_MB(); + rc = __atomic_dec_ifmoe(&o->credit, cnt, 0); + ECORE_SMP_MB(); + + return rc; +} + +static int ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt) +{ + int rc; + + ECORE_SMP_MB(); + + /* Don't let to refill if credit + cnt > pool_sz */ + rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1); + + ECORE_SMP_MB(); + + return rc; +} + +static int ecore_credit_pool_check(struct ecore_credit_pool_obj *o) +{ + int cur_credit; + + ECORE_SMP_MB(); + cur_credit = ECORE_ATOMIC_READ(&o->credit); + + return cur_credit; +} + +static int ecore_credit_pool_always_TRUE(__rte_unused struct + ecore_credit_pool_obj *o, + __rte_unused int cnt) +{ + return TRUE; +} + +static int ecore_credit_pool_get_entry(struct ecore_credit_pool_obj *o, + int *offset) +{ + int idx, vec, i; + + *offset = -1; + + /* Find "internal cam-offset" then add to base for this object... */ + for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) { + + /* Skip the current vector if there are no free entries in it */ + if (!o->pool_mirror[vec]) + continue; + + /* If we've got here we are going to find a free entry */ + for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0; + i < BIT_VEC64_ELEM_SZ; idx++, i++) + + if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) { + /* Got one!! */ + BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx); + *offset = o->base_pool_offset + idx; + return TRUE; + } + } + + return FALSE; +} + +static int ecore_credit_pool_put_entry(struct ecore_credit_pool_obj *o, + int offset) +{ + if (offset < o->base_pool_offset) + return FALSE; + + offset -= o->base_pool_offset; + + if (offset >= o->pool_sz) + return FALSE; + + /* Return the entry to the pool */ + BIT_VEC64_SET_BIT(o->pool_mirror, offset); + + return TRUE; +} + +static int ecore_credit_pool_put_entry_always_TRUE(__rte_unused struct + ecore_credit_pool_obj *o, + __rte_unused int offset) +{ + return TRUE; +} + +static int ecore_credit_pool_get_entry_always_TRUE(__rte_unused struct + ecore_credit_pool_obj *o, + __rte_unused int *offset) +{ + *offset = -1; + return TRUE; +} + +/** + * ecore_init_credit_pool - initialize credit pool internals. + * + * @p: + * @base: Base entry in the CAM to use. + * @credit: pool size. + * + * If base is negative no CAM entries handling will be performed. + * If credit is negative pool operations will always succeed (unlimited pool). 
+ *
+ */
+static void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
+ int base, int credit)
+{
+ /* Zero the object first */
+ ECORE_MEMSET(p, 0, sizeof(*p));
+
+ /* Set the table to all 1s */
+ ECORE_MEMSET(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
+
+ /* Init a pool as full */
+ ECORE_ATOMIC_SET(&p->credit, credit);
+
+ /* The total pool size */
+ p->pool_sz = credit;
+
+ p->base_pool_offset = base;
+
+ /* Commit the change */
+ ECORE_SMP_MB();
+
+ p->check = ecore_credit_pool_check;
+
+ /* if pool credit is negative - disable the checks */
+ if (credit >= 0) {
+ p->put = ecore_credit_pool_put;
+ p->get = ecore_credit_pool_get;
+ p->put_entry = ecore_credit_pool_put_entry;
+ p->get_entry = ecore_credit_pool_get_entry;
+ } else {
+ p->put = ecore_credit_pool_always_TRUE;
+ p->get = ecore_credit_pool_always_TRUE;
+ p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
+ p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
+ }
+
+ /* If base is negative - disable entries handling */
+ if (base < 0) {
+ p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
+ p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
+ }
+}
+
+void ecore_init_mac_credit_pool(struct bnx2x_softc *sc,
+ struct ecore_credit_pool_obj *p,
+ uint8_t func_id, uint8_t func_num)
+{
+
+#define ECORE_CAM_SIZE_EMUL 5
+
+ int cam_sz;
+
+ if (CHIP_IS_E1H(sc)) {
+ /* CAM credit is equally divided between all active functions
+ * on the PORT.
+ */
+ if ((func_num > 0)) {
+ if (!CHIP_REV_IS_SLOW(sc))
+ cam_sz = (MAX_MAC_CREDIT_E1H / (2 * func_num));
+ else
+ cam_sz = ECORE_CAM_SIZE_EMUL;
+ ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
+ } else {
+ /* this should never happen! Block MAC operations. */
+ ecore_init_credit_pool(p, 0, 0);
+ }
+
+ } else {
+
+ /*
+ * CAM credit is equally divided between all active functions
+ * on the PATH.
+ */
+ if ((func_num > 0)) {
+ if (!CHIP_REV_IS_SLOW(sc))
+ cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
+ else
+ cam_sz = ECORE_CAM_SIZE_EMUL;
+
+ /* No need for CAM entries handling for 57712 and
+ * newer.
+ */
+ ecore_init_credit_pool(p, -1, cam_sz);
+ } else {
+ /* this should never happen! Block MAC operations. */
+ ecore_init_credit_pool(p, 0, 0);
+ }
+ }
+}
+
+void ecore_init_vlan_credit_pool(struct bnx2x_softc *sc,
+ struct ecore_credit_pool_obj *p,
+ uint8_t func_id, uint8_t func_num)
+{
+ if (CHIP_IS_E1x(sc)) {
+ /* There is no VLAN credit in HW on 57711: only
+ * MAC / MAC-VLAN can be set
+ */
+ ecore_init_credit_pool(p, 0, -1);
+ } else {
+ /* CAM credit is equally divided between all active functions
+ * on the PATH.
+ */
+ if (func_num > 0) {
+ int credit = MAX_VLAN_CREDIT_E2 / func_num;
+ ecore_init_credit_pool(p, func_id * credit, credit);
+ } else
+ /* this should never happen! Block VLAN operations. */
+ ecore_init_credit_pool(p, 0, 0);
+ }
+}
+
+/****************** RSS Configuration ******************/
+
+/**
+ * ecore_setup_rss - configure RSS
+ *
+ * @sc: device handle
+ * @p: rss configuration
+ *
+ * Sends an RSS UPDATE ramrod.
+ */ +static int ecore_setup_rss(struct bnx2x_softc *sc, + struct ecore_config_rss_params *p) +{ + struct ecore_rss_config_obj *o = p->rss_obj; + struct ecore_raw_obj *r = &o->raw; + struct eth_rss_update_ramrod_data *data = + (struct eth_rss_update_ramrod_data *)(r->rdata); + uint8_t rss_mode = 0; + int rc; + + ECORE_MEMSET(data, 0, sizeof(*data)); + + ECORE_MSG("Configuring RSS"); + + /* Set an echo field */ + data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) | + (r->state << ECORE_SWCID_SHIFT)); + + /* RSS mode */ + if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags)) + rss_mode = ETH_RSS_MODE_DISABLED; + else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags)) + rss_mode = ETH_RSS_MODE_REGULAR; + + data->rss_mode = rss_mode; + + ECORE_MSG("rss_mode=%d", rss_mode); + + /* RSS capabilities */ + if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags)) + data->capabilities |= + ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY; + + if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags)) + data->capabilities |= + ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; + + if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags)) + data->capabilities |= + ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY; + + if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags)) + data->capabilities |= + ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; + + if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags)) + data->capabilities |= + ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; + + if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags)) + data->capabilities |= + ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY; + + if (ECORE_TEST_BIT(ECORE_RSS_TUNNELING, &p->rss_flags)) { + data->udp_4tuple_dst_port_mask = + ECORE_CPU_TO_LE16(p->tunnel_mask); + data->udp_4tuple_dst_port_value = + ECORE_CPU_TO_LE16(p->tunnel_value); + } + + /* Hashing mask */ + data->rss_result_mask = p->rss_result_mask; + + /* RSS engine ID */ + data->rss_engine_id = o->engine_id; + + ECORE_MSG("rss_engine_id=%d", data->rss_engine_id); + + /* Indirection table */ + ECORE_MEMCPY(data->indirection_table, p->ind_table, + T_ETH_INDIRECTION_TABLE_SIZE); + + /* Remember the last configuration */ + ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); + + /* RSS keys */ + if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) { + ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0], + sizeof(data->rss_key)); + data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; + } + + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside ecore_sp_post()). 
+ */ + + /* Send a ramrod */ + rc = ecore_sp_post(sc, + RAMROD_CMD_ID_ETH_RSS_UPDATE, + r->cid, r->rdata_mapping, ETH_CONNECTION_TYPE); + + if (rc < 0) + return rc; + + return ECORE_PENDING; +} + +int ecore_config_rss(struct bnx2x_softc *sc, struct ecore_config_rss_params *p) +{ + int rc; + struct ecore_rss_config_obj *o = p->rss_obj; + struct ecore_raw_obj *r = &o->raw; + + /* Do nothing if only driver cleanup was requested */ + if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) + return ECORE_SUCCESS; + + r->set_pending(r); + + rc = o->config_rss(sc, p); + if (rc < 0) { + r->clear_pending(r); + return rc; + } + + if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) + rc = r->wait_comp(sc, r); + + return rc; +} + +void ecore_init_rss_config_obj(struct ecore_rss_config_obj *rss_obj, + uint8_t cl_id, uint32_t cid, uint8_t func_id, + uint8_t engine_id, void *rdata, + ecore_dma_addr_t rdata_mapping, int state, + unsigned long *pstate, ecore_obj_type type) +{ + ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata, + rdata_mapping, state, pstate, type); + + rss_obj->engine_id = engine_id; + rss_obj->config_rss = ecore_setup_rss; +} + +/********************** Queue state object ***********************************/ + +/** + * ecore_queue_state_change - perform Queue state change transition + * + * @sc: device handle + * @params: parameters to perform the transition + * + * returns 0 in case of successfully completed transition, negative error + * code in case of failure, positive (EBUSY) value if there is a completion + * to that is still pending (possible only if RAMROD_COMP_WAIT is + * not set in params->ramrod_flags for asynchronous commands). + * + */ +int ecore_queue_state_change(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params) +{ + struct ecore_queue_sp_obj *o = params->q_obj; + int rc, pending_bit; + unsigned long *pending = &o->pending; + + /* Check that the requested transition is legal */ + rc = o->check_transition(sc, o, params); + if (rc) { + PMD_DRV_LOG(ERR, "check transition returned an error. rc %d", + rc); + return ECORE_INVAL; + } + + /* Set "pending" bit */ + ECORE_MSG("pending bit was=%lx", o->pending); + pending_bit = o->set_pending(o, params); + ECORE_MSG("pending bit now=%lx", o->pending); + + /* Don't send a command if only driver cleanup was requested */ + if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) + o->complete_cmd(sc, o, pending_bit); + else { + /* Send a ramrod */ + rc = o->send_cmd(sc, params); + if (rc) { + o->next_state = ECORE_Q_STATE_MAX; + ECORE_CLEAR_BIT(pending_bit, pending); + ECORE_SMP_MB_AFTER_CLEAR_BIT(); + return rc; + } + + if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) { + rc = o->wait_comp(sc, o, pending_bit); + if (rc) + return rc; + + return ECORE_SUCCESS; + } + } + + return ECORE_RET_PENDING(pending_bit, pending); +} + +static int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj, + struct ecore_queue_state_params *params) +{ + enum ecore_queue_cmd cmd = params->cmd, bit; + + /* ACTIVATE and DEACTIVATE commands are implemented on top of + * UPDATE command. + */ + if ((cmd == ECORE_Q_CMD_ACTIVATE) || (cmd == ECORE_Q_CMD_DEACTIVATE)) + bit = ECORE_Q_CMD_UPDATE; + else + bit = cmd; + + ECORE_SET_BIT(bit, &obj->pending); + return bit; +} + +static int ecore_queue_wait_comp(struct bnx2x_softc *sc, + struct ecore_queue_sp_obj *o, + enum ecore_queue_cmd cmd) +{ + return ecore_state_wait(sc, cmd, &o->pending); +} + +/** + * ecore_queue_comp_cmd - complete the state change command. 
+ * + * @sc: device handle + * @o: + * @cmd: + * + * Checks that the arrived completion is expected. + */ +static int ecore_queue_comp_cmd(struct bnx2x_softc *sc __rte_unused, + struct ecore_queue_sp_obj *o, + enum ecore_queue_cmd cmd) +{ + unsigned long cur_pending = o->pending; + + if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) { + PMD_DRV_LOG(ERR, + "Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d", + cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->state, + cur_pending, o->next_state); + return ECORE_INVAL; + } + + if (o->next_tx_only >= o->max_cos) + /* >= because tx only must always be smaller than cos since the + * primary connection supports COS 0 + */ + PMD_DRV_LOG(ERR, + "illegal value for next tx_only: %d. max cos was %d", + o->next_tx_only, o->max_cos); + + ECORE_MSG("Completing command %d for queue %d, setting state to %d", + cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state); + + if (o->next_tx_only) /* print num tx-only if any exist */ + ECORE_MSG("primary cid %d: num tx-only cons %d", + o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only); + + o->state = o->next_state; + o->num_tx_only = o->next_tx_only; + o->next_state = ECORE_Q_STATE_MAX; + + /* It's important that o->state and o->next_state are + * updated before o->pending. + */ + wmb(); + + ECORE_CLEAR_BIT(cmd, &o->pending); + ECORE_SMP_MB_AFTER_CLEAR_BIT(); + + return ECORE_SUCCESS; +} + +static void ecore_q_fill_setup_data_e2(struct ecore_queue_state_params + *cmd_params, + struct client_init_ramrod_data *data) +{ + struct ecore_queue_setup_params *params = &cmd_params->params.setup; + + /* Rx data */ + + /* IPv6 TPA supported for E2 and above only */ + data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6, + ¶ms->flags) * + CLIENT_INIT_RX_DATA_TPA_EN_IPV6; +} + +static void ecore_q_fill_init_general_data(struct bnx2x_softc *sc __rte_unused, + struct ecore_queue_sp_obj *o, + struct ecore_general_setup_params + *params, struct client_init_general_data + *gen_data, unsigned long *flags) +{ + gen_data->client_id = o->cl_id; + + if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) { + gen_data->statistics_counter_id = params->stat_id; + gen_data->statistics_en_flg = 1; + gen_data->statistics_zero_flg = + ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags); + } else + gen_data->statistics_counter_id = + DISABLE_STATISTIC_COUNTER_ID_VALUE; + + gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags); + gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, flags); + gen_data->sp_client_id = params->spcl_id; + gen_data->mtu = ECORE_CPU_TO_LE16(params->mtu); + gen_data->func_id = o->func_id; + + gen_data->cos = params->cos; + + gen_data->traffic_type = + ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ? 
+ LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; + + ECORE_MSG("flags: active %d, cos %d, stats en %d", + gen_data->activate_flg, gen_data->cos, + gen_data->statistics_en_flg); +} + +static void ecore_q_fill_init_tx_data(struct ecore_txq_setup_params *params, + struct client_init_tx_data *tx_data, + unsigned long *flags) +{ + tx_data->enforce_security_flg = + ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags); + tx_data->default_vlan = ECORE_CPU_TO_LE16(params->default_vlan); + tx_data->default_vlan_flg = ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags); + tx_data->tx_switching_flg = + ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags); + tx_data->anti_spoofing_flg = + ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags); + tx_data->force_default_pri_flg = + ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags); + tx_data->refuse_outband_vlan_flg = + ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags); + tx_data->tunnel_non_lso_pcsum_location = + ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT : + CSUM_ON_BD; + + tx_data->tx_status_block_id = params->fw_sb_id; + tx_data->tx_sb_index_number = params->sb_cq_index; + tx_data->tss_leading_client_id = params->tss_leading_cl_id; + + tx_data->tx_bd_page_base.lo = + ECORE_CPU_TO_LE32(U64_LO(params->dscr_map)); + tx_data->tx_bd_page_base.hi = + ECORE_CPU_TO_LE32(U64_HI(params->dscr_map)); + + /* Don't configure any Tx switching mode during queue SETUP */ + tx_data->state = 0; +} + +static void ecore_q_fill_init_pause_data(struct rxq_pause_params *params, + struct client_init_rx_data *rx_data) +{ + /* flow control data */ + rx_data->cqe_pause_thr_low = ECORE_CPU_TO_LE16(params->rcq_th_lo); + rx_data->cqe_pause_thr_high = ECORE_CPU_TO_LE16(params->rcq_th_hi); + rx_data->bd_pause_thr_low = ECORE_CPU_TO_LE16(params->bd_th_lo); + rx_data->bd_pause_thr_high = ECORE_CPU_TO_LE16(params->bd_th_hi); + rx_data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_th_lo); + rx_data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_th_hi); + rx_data->rx_cos_mask = ECORE_CPU_TO_LE16(params->pri_map); +} + +static void ecore_q_fill_init_rx_data(struct ecore_rxq_setup_params *params, + struct client_init_rx_data *rx_data, + unsigned long *flags) +{ + rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) * + CLIENT_INIT_RX_DATA_TPA_EN_IPV4; + rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) * + CLIENT_INIT_RX_DATA_TPA_MODE; + rx_data->vmqueue_mode_en_flg = 0; + + rx_data->extra_data_over_sgl_en_flg = + ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags); + rx_data->cache_line_alignment_log_size = params->cache_line_log; + rx_data->enable_dynamic_hc = ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags); + rx_data->client_qzone_id = params->cl_qzone_id; + rx_data->max_agg_size = ECORE_CPU_TO_LE16(params->tpa_agg_sz); + + /* Always start in DROP_ALL mode */ + rx_data->state = ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL | + CLIENT_INIT_RX_DATA_MCAST_DROP_ALL); + + /* We don't set drop flags */ + rx_data->drop_ip_cs_err_flg = 0; + rx_data->drop_tcp_cs_err_flg = 0; + rx_data->drop_ttl0_flg = 0; + rx_data->drop_udp_cs_err_flg = 0; + rx_data->inner_vlan_removal_enable_flg = + ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags); + rx_data->outer_vlan_removal_enable_flg = + ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags); + rx_data->status_block_id = params->fw_sb_id; + rx_data->rx_sb_index_number = params->sb_cq_index; + rx_data->max_tpa_queues = params->max_tpa_queues; + rx_data->max_bytes_on_bd = ECORE_CPU_TO_LE16(params->buf_sz); + rx_data->bd_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->dscr_map)); + 
rx_data->bd_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->dscr_map)); + rx_data->cqe_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->rcq_map)); + rx_data->cqe_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->rcq_map)); + rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS, + flags); + + if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) { + rx_data->approx_mcast_engine_id = params->mcast_engine_id; + rx_data->is_approx_mcast = 1; + } + + rx_data->rss_engine_id = params->rss_engine_id; + + /* silent vlan removal */ + rx_data->silent_vlan_removal_flg = + ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags); + rx_data->silent_vlan_value = + ECORE_CPU_TO_LE16(params->silent_removal_value); + rx_data->silent_vlan_mask = + ECORE_CPU_TO_LE16(params->silent_removal_mask); +} + +/* initialize the general, tx and rx parts of a queue object */ +static void ecore_q_fill_setup_data_cmn(struct bnx2x_softc *sc, struct ecore_queue_state_params + *cmd_params, + struct client_init_ramrod_data *data) +{ + ecore_q_fill_init_general_data(sc, cmd_params->q_obj, + &cmd_params->params.setup.gen_params, + &data->general, + &cmd_params->params.setup.flags); + + ecore_q_fill_init_tx_data(&cmd_params->params.setup.txq_params, + &data->tx, &cmd_params->params.setup.flags); + + ecore_q_fill_init_rx_data(&cmd_params->params.setup.rxq_params, + &data->rx, &cmd_params->params.setup.flags); + + ecore_q_fill_init_pause_data(&cmd_params->params.setup.pause_params, + &data->rx); +} + +/* initialize the general and tx parts of a tx-only queue object */ +static void ecore_q_fill_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params + *cmd_params, + struct tx_queue_init_ramrod_data *data) +{ + ecore_q_fill_init_general_data(sc, cmd_params->q_obj, + &cmd_params->params.tx_only.gen_params, + &data->general, + &cmd_params->params.tx_only.flags); + + ecore_q_fill_init_tx_data(&cmd_params->params.tx_only.txq_params, + &data->tx, &cmd_params->params.tx_only.flags); + + ECORE_MSG("cid %d, tx bd page lo %x hi %x", + cmd_params->q_obj->cids[0], + data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi); +} + +/** + * ecore_q_init - init HW/FW queue + * + * @sc: device handle + * @params: + * + * HW/FW initial Queue configuration: + * - HC: Rx and Tx + * - CDU context validation + * + */ +static int ecore_q_init(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params) +{ + struct ecore_queue_sp_obj *o = params->q_obj; + struct ecore_queue_init_params *init = ¶ms->params.init; + uint16_t hc_usec; + uint8_t cos; + + /* Tx HC configuration */ + if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) && + ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) { + hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0; + + ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->tx.fw_sb_id, + init->tx.sb_cq_index, + !ECORE_TEST_BIT + (ECORE_Q_FLG_HC_EN, + &init->tx.flags), hc_usec); + } + + /* Rx HC configuration */ + if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) && + ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) { + hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0; + + ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->rx.fw_sb_id, + init->rx.sb_cq_index, + !ECORE_TEST_BIT + (ECORE_Q_FLG_HC_EN, + &init->rx.flags), hc_usec); + } + + /* Set CDU context validation values */ + for (cos = 0; cos < o->max_cos; cos++) { + ECORE_MSG("setting context validation. 
cid %d, cos %d", + o->cids[cos], cos); + ECORE_MSG("context pointer %p", init->cxts[cos]); + ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]); + } + + /* As no ramrod is sent, complete the command immediately */ + o->complete_cmd(sc, o, ECORE_Q_CMD_INIT); + + ECORE_MMIOWB(); + ECORE_SMP_MB(); + + return ECORE_SUCCESS; +} + +static int ecore_q_send_setup_e1x(struct bnx2x_softc *sc, struct ecore_queue_state_params + *params) +{ + struct ecore_queue_sp_obj *o = params->q_obj; + struct client_init_ramrod_data *rdata = + (struct client_init_ramrod_data *)o->rdata; + ecore_dma_addr_t data_mapping = o->rdata_mapping; + int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; + + /* Clear the ramrod data */ + ECORE_MEMSET(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data */ + ecore_q_fill_setup_data_cmn(sc, params, rdata); + + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside ecore_sp_post()). + */ + + return ecore_sp_post(sc, + ramrod, + o->cids[ECORE_PRIMARY_CID_INDEX], + data_mapping, ETH_CONNECTION_TYPE); +} + +static int ecore_q_send_setup_e2(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params) +{ + struct ecore_queue_sp_obj *o = params->q_obj; + struct client_init_ramrod_data *rdata = + (struct client_init_ramrod_data *)o->rdata; + ecore_dma_addr_t data_mapping = o->rdata_mapping; + int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; + + /* Clear the ramrod data */ + ECORE_MEMSET(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data */ + ecore_q_fill_setup_data_cmn(sc, params, rdata); + ecore_q_fill_setup_data_e2(params, rdata); + + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside ecore_sp_post()). 
+ */ + + return ecore_sp_post(sc, + ramrod, + o->cids[ECORE_PRIMARY_CID_INDEX], + data_mapping, ETH_CONNECTION_TYPE); +} + +static int ecore_q_send_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params + *params) +{ + struct ecore_queue_sp_obj *o = params->q_obj; + struct tx_queue_init_ramrod_data *rdata = + (struct tx_queue_init_ramrod_data *)o->rdata; + ecore_dma_addr_t data_mapping = o->rdata_mapping; + int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP; + struct ecore_queue_setup_tx_only_params *tx_only_params = + ¶ms->params.tx_only; + uint8_t cid_index = tx_only_params->cid_index; + + if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type)) + ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP; + ECORE_MSG("sending forward tx-only ramrod"); + + if (cid_index >= o->max_cos) { + PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range", + o->cl_id, cid_index); + return ECORE_INVAL; + } + + ECORE_MSG("parameters received: cos: %d sp-id: %d", + tx_only_params->gen_params.cos, + tx_only_params->gen_params.spcl_id); + + /* Clear the ramrod data */ + ECORE_MEMSET(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data */ + ecore_q_fill_setup_tx_only(sc, params, rdata); + + ECORE_MSG + ("sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d", + o->cids[cid_index], rdata->general.client_id, + rdata->general.sp_client_id, rdata->general.cos); + + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside ecore_sp_post()). + */ + + return ecore_sp_post(sc, ramrod, o->cids[cid_index], + data_mapping, ETH_CONNECTION_TYPE); +} + +static void ecore_q_fill_update_data(struct ecore_queue_sp_obj *obj, + struct ecore_queue_update_params *params, + struct client_update_ramrod_data *data) +{ + /* Client ID of the client to update */ + data->client_id = obj->cl_id; + + /* Function ID of the client to update */ + data->func_id = obj->func_id; + + /* Default VLAN value */ + data->default_vlan = ECORE_CPU_TO_LE16(params->def_vlan); + + /* Inner VLAN stripping */ + data->inner_vlan_removal_enable_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM, ¶ms->update_flags); + data->inner_vlan_removal_change_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG, + ¶ms->update_flags); + + /* Outer VLAN stripping */ + data->outer_vlan_removal_enable_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM, ¶ms->update_flags); + data->outer_vlan_removal_change_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG, + ¶ms->update_flags); + + /* Drop packets that have source MAC that doesn't belong to this + * Queue. 
+ */ + data->anti_spoofing_enable_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF, ¶ms->update_flags); + data->anti_spoofing_change_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG, + ¶ms->update_flags); + + /* Activate/Deactivate */ + data->activate_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, ¶ms->update_flags); + data->activate_change_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, ¶ms->update_flags); + + /* Enable default VLAN */ + data->default_vlan_enable_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN, ¶ms->update_flags); + data->default_vlan_change_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG, + ¶ms->update_flags); + + /* silent vlan removal */ + data->silent_vlan_change_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG, + ¶ms->update_flags); + data->silent_vlan_removal_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM, + ¶ms->update_flags); + data->silent_vlan_value = + ECORE_CPU_TO_LE16(params->silent_removal_value); + data->silent_vlan_mask = ECORE_CPU_TO_LE16(params->silent_removal_mask); + + /* tx switching */ + data->tx_switching_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING, ¶ms->update_flags); + data->tx_switching_change_flg = + ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG, + ¶ms->update_flags); +} + +static int ecore_q_send_update(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params) +{ + struct ecore_queue_sp_obj *o = params->q_obj; + struct client_update_ramrod_data *rdata = + (struct client_update_ramrod_data *)o->rdata; + ecore_dma_addr_t data_mapping = o->rdata_mapping; + struct ecore_queue_update_params *update_params = + ¶ms->params.update; + uint8_t cid_index = update_params->cid_index; + + if (cid_index >= o->max_cos) { + PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range", + o->cl_id, cid_index); + return ECORE_INVAL; + } + + /* Clear the ramrod data */ + ECORE_MEMSET(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data */ + ecore_q_fill_update_data(o, update_params, rdata); + + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside ecore_sp_post()). + */ + + return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, + o->cids[cid_index], data_mapping, + ETH_CONNECTION_TYPE); +} + +/** + * ecore_q_send_deactivate - send DEACTIVATE command + * + * @sc: device handle + * @params: + * + * implemented using the UPDATE command. + */ +static int ecore_q_send_deactivate(struct bnx2x_softc *sc, struct ecore_queue_state_params + *params) +{ + struct ecore_queue_update_params *update = ¶ms->params.update; + + ECORE_MEMSET(update, 0, sizeof(*update)); + + ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); + + return ecore_q_send_update(sc, params); +} + +/** + * ecore_q_send_activate - send ACTIVATE command + * + * @sc: device handle + * @params: + * + * implemented using the UPDATE command. 
+ */ +static int ecore_q_send_activate(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params) +{ + struct ecore_queue_update_params *update = ¶ms->params.update; + + ECORE_MEMSET(update, 0, sizeof(*update)); + + ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags); + ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); + + return ecore_q_send_update(sc, params); +} + +static int ecore_q_send_update_tpa(__rte_unused struct bnx2x_softc *sc, + __rte_unused struct + ecore_queue_state_params *params) +{ + /* Not implemented yet. */ + return -1; +} + +static int ecore_q_send_halt(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params) +{ + struct ecore_queue_sp_obj *o = params->q_obj; + + /* build eth_halt_ramrod_data.client_id in a big-endian friendly way */ + ecore_dma_addr_t data_mapping = 0; + data_mapping = (ecore_dma_addr_t) o->cl_id; + + return ecore_sp_post(sc, + RAMROD_CMD_ID_ETH_HALT, + o->cids[ECORE_PRIMARY_CID_INDEX], + data_mapping, ETH_CONNECTION_TYPE); +} + +static int ecore_q_send_cfc_del(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params) +{ + struct ecore_queue_sp_obj *o = params->q_obj; + uint8_t cid_idx = params->params.cfc_del.cid_index; + + if (cid_idx >= o->max_cos) { + PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range", + o->cl_id, cid_idx); + return ECORE_INVAL; + } + + return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_CFC_DEL, + o->cids[cid_idx], 0, NONE_CONNECTION_TYPE); +} + +static int ecore_q_send_terminate(struct bnx2x_softc *sc, struct ecore_queue_state_params + *params) +{ + struct ecore_queue_sp_obj *o = params->q_obj; + uint8_t cid_index = params->params.terminate.cid_index; + + if (cid_index >= o->max_cos) { + PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range", + o->cl_id, cid_index); + return ECORE_INVAL; + } + + return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TERMINATE, + o->cids[cid_index], 0, ETH_CONNECTION_TYPE); +} + +static int ecore_q_send_empty(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params) +{ + struct ecore_queue_sp_obj *o = params->q_obj; + + return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_EMPTY, + o->cids[ECORE_PRIMARY_CID_INDEX], 0, + ETH_CONNECTION_TYPE); +} + +static int ecore_queue_send_cmd_cmn(struct bnx2x_softc *sc, struct ecore_queue_state_params + *params) +{ + switch (params->cmd) { + case ECORE_Q_CMD_INIT: + return ecore_q_init(sc, params); + case ECORE_Q_CMD_SETUP_TX_ONLY: + return ecore_q_send_setup_tx_only(sc, params); + case ECORE_Q_CMD_DEACTIVATE: + return ecore_q_send_deactivate(sc, params); + case ECORE_Q_CMD_ACTIVATE: + return ecore_q_send_activate(sc, params); + case ECORE_Q_CMD_UPDATE: + return ecore_q_send_update(sc, params); + case ECORE_Q_CMD_UPDATE_TPA: + return ecore_q_send_update_tpa(sc, params); + case ECORE_Q_CMD_HALT: + return ecore_q_send_halt(sc, params); + case ECORE_Q_CMD_CFC_DEL: + return ecore_q_send_cfc_del(sc, params); + case ECORE_Q_CMD_TERMINATE: + return ecore_q_send_terminate(sc, params); + case ECORE_Q_CMD_EMPTY: + return ecore_q_send_empty(sc, params); + default: + PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd); + return ECORE_INVAL; + } +} + +static int ecore_queue_send_cmd_e1x(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params) +{ + switch (params->cmd) { + case ECORE_Q_CMD_SETUP: + return ecore_q_send_setup_e1x(sc, params); + case ECORE_Q_CMD_INIT: + case ECORE_Q_CMD_SETUP_TX_ONLY: + case ECORE_Q_CMD_DEACTIVATE: + case ECORE_Q_CMD_ACTIVATE: + case ECORE_Q_CMD_UPDATE: + case 
ECORE_Q_CMD_UPDATE_TPA: + case ECORE_Q_CMD_HALT: + case ECORE_Q_CMD_CFC_DEL: + case ECORE_Q_CMD_TERMINATE: + case ECORE_Q_CMD_EMPTY: + return ecore_queue_send_cmd_cmn(sc, params); + default: + PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd); + return ECORE_INVAL; + } +} + +static int ecore_queue_send_cmd_e2(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params) +{ + switch (params->cmd) { + case ECORE_Q_CMD_SETUP: + return ecore_q_send_setup_e2(sc, params); + case ECORE_Q_CMD_INIT: + case ECORE_Q_CMD_SETUP_TX_ONLY: + case ECORE_Q_CMD_DEACTIVATE: + case ECORE_Q_CMD_ACTIVATE: + case ECORE_Q_CMD_UPDATE: + case ECORE_Q_CMD_UPDATE_TPA: + case ECORE_Q_CMD_HALT: + case ECORE_Q_CMD_CFC_DEL: + case ECORE_Q_CMD_TERMINATE: + case ECORE_Q_CMD_EMPTY: + return ecore_queue_send_cmd_cmn(sc, params); + default: + PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd); + return ECORE_INVAL; + } +} + +/** + * ecore_queue_chk_transition - check state machine of a regular Queue + * + * @sc: device handle + * @o: + * @params: + * + * (not Forwarding) + * It both checks if the requested command is legal in a current + * state and, if it's legal, sets a `next_state' in the object + * that will be used in the completion flow to set the `state' + * of the object. + * + * returns 0 if a requested command is a legal transition, + * ECORE_INVAL otherwise. + */ +static int ecore_queue_chk_transition(struct bnx2x_softc *sc __rte_unused, + struct ecore_queue_sp_obj *o, + struct ecore_queue_state_params *params) +{ + enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX; + enum ecore_queue_cmd cmd = params->cmd; + struct ecore_queue_update_params *update_params = + ¶ms->params.update; + uint8_t next_tx_only = o->num_tx_only; + + /* Forget all pending for completion commands if a driver only state + * transition has been requested. + */ + if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { + o->pending = 0; + o->next_state = ECORE_Q_STATE_MAX; + } + + /* Don't allow a next state transition if we are in the middle of + * the previous one. + */ + if (o->pending) { + PMD_DRV_LOG(ERR, "Blocking transition since pending was %lx", + o->pending); + return ECORE_BUSY; + } + + switch (state) { + case ECORE_Q_STATE_RESET: + if (cmd == ECORE_Q_CMD_INIT) + next_state = ECORE_Q_STATE_INITIALIZED; + + break; + case ECORE_Q_STATE_INITIALIZED: + if (cmd == ECORE_Q_CMD_SETUP) { + if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, + ¶ms->params.setup.flags)) + next_state = ECORE_Q_STATE_ACTIVE; + else + next_state = ECORE_Q_STATE_INACTIVE; + } + + break; + case ECORE_Q_STATE_ACTIVE: + if (cmd == ECORE_Q_CMD_DEACTIVATE) + next_state = ECORE_Q_STATE_INACTIVE; + + else if ((cmd == ECORE_Q_CMD_EMPTY) || + (cmd == ECORE_Q_CMD_UPDATE_TPA)) + next_state = ECORE_Q_STATE_ACTIVE; + + else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) { + next_state = ECORE_Q_STATE_MULTI_COS; + next_tx_only = 1; + } + + else if (cmd == ECORE_Q_CMD_HALT) + next_state = ECORE_Q_STATE_STOPPED; + + else if (cmd == ECORE_Q_CMD_UPDATE) { + /* If "active" state change is requested, update the + * state accordingly. 
+ */ + if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, + &update_params->update_flags) && + !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, + &update_params->update_flags)) + next_state = ECORE_Q_STATE_INACTIVE; + else + next_state = ECORE_Q_STATE_ACTIVE; + } + + break; + case ECORE_Q_STATE_MULTI_COS: + if (cmd == ECORE_Q_CMD_TERMINATE) + next_state = ECORE_Q_STATE_MCOS_TERMINATED; + + else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) { + next_state = ECORE_Q_STATE_MULTI_COS; + next_tx_only = o->num_tx_only + 1; + } + + else if ((cmd == ECORE_Q_CMD_EMPTY) || + (cmd == ECORE_Q_CMD_UPDATE_TPA)) + next_state = ECORE_Q_STATE_MULTI_COS; + + else if (cmd == ECORE_Q_CMD_UPDATE) { + /* If "active" state change is requested, update the + * state accordingly. + */ + if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, + &update_params->update_flags) && + !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, + &update_params->update_flags)) + next_state = ECORE_Q_STATE_INACTIVE; + else + next_state = ECORE_Q_STATE_MULTI_COS; + } + + break; + case ECORE_Q_STATE_MCOS_TERMINATED: + if (cmd == ECORE_Q_CMD_CFC_DEL) { + next_tx_only = o->num_tx_only - 1; + if (next_tx_only == 0) + next_state = ECORE_Q_STATE_ACTIVE; + else + next_state = ECORE_Q_STATE_MULTI_COS; + } + + break; + case ECORE_Q_STATE_INACTIVE: + if (cmd == ECORE_Q_CMD_ACTIVATE) + next_state = ECORE_Q_STATE_ACTIVE; + + else if ((cmd == ECORE_Q_CMD_EMPTY) || + (cmd == ECORE_Q_CMD_UPDATE_TPA)) + next_state = ECORE_Q_STATE_INACTIVE; + + else if (cmd == ECORE_Q_CMD_HALT) + next_state = ECORE_Q_STATE_STOPPED; + + else if (cmd == ECORE_Q_CMD_UPDATE) { + /* If "active" state change is requested, update the + * state accordingly. + */ + if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, + &update_params->update_flags) && + ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, + &update_params->update_flags)) { + if (o->num_tx_only == 0) + next_state = ECORE_Q_STATE_ACTIVE; + else /* tx only queues exist for this queue */ + next_state = ECORE_Q_STATE_MULTI_COS; + } else + next_state = ECORE_Q_STATE_INACTIVE; + } + + break; + case ECORE_Q_STATE_STOPPED: + if (cmd == ECORE_Q_CMD_TERMINATE) + next_state = ECORE_Q_STATE_TERMINATED; + + break; + case ECORE_Q_STATE_TERMINATED: + if (cmd == ECORE_Q_CMD_CFC_DEL) + next_state = ECORE_Q_STATE_RESET; + + break; + default: + PMD_DRV_LOG(ERR, "Illegal state: %d", state); + } + + /* Transition is assured */ + if (next_state != ECORE_Q_STATE_MAX) { + ECORE_MSG("Good state transition: %d(%d)->%d", + state, cmd, next_state); + o->next_state = next_state; + o->next_tx_only = next_tx_only; + return ECORE_SUCCESS; + } + + ECORE_MSG("Bad state transition request: %d %d", state, cmd); + + return ECORE_INVAL; +} + +/** + * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue. + * + * @sc: device handle + * @o: + * @params: + * + * It both checks if the requested command is legal in a current + * state and, if it's legal, sets a `next_state' in the object + * that will be used in the completion flow to set the `state' + * of the object. + * + * returns 0 if a requested command is a legal transition, + * ECORE_INVAL otherwise. 
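+ *
+ * (Added note, not part of the original source.) The switch below encodes
+ * the reduced lifecycle of a forwarding queue, which only ever sees:
+ *
+ *	RESET --INIT--> INITIALIZED --SETUP_TX_ONLY--> ACTIVE or INACTIVE
+ *	ACTIVE/INACTIVE --CFC_DEL--> RESET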
+ */ +static int ecore_queue_chk_fwd_transition(struct bnx2x_softc *sc __rte_unused, + struct ecore_queue_sp_obj *o, + struct ecore_queue_state_params + *params) +{ + enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX; + enum ecore_queue_cmd cmd = params->cmd; + + switch (state) { + case ECORE_Q_STATE_RESET: + if (cmd == ECORE_Q_CMD_INIT) + next_state = ECORE_Q_STATE_INITIALIZED; + + break; + case ECORE_Q_STATE_INITIALIZED: + if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) { + if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, + ¶ms->params.tx_only.flags)) + next_state = ECORE_Q_STATE_ACTIVE; + else + next_state = ECORE_Q_STATE_INACTIVE; + } + + break; + case ECORE_Q_STATE_ACTIVE: + case ECORE_Q_STATE_INACTIVE: + if (cmd == ECORE_Q_CMD_CFC_DEL) + next_state = ECORE_Q_STATE_RESET; + + break; + default: + PMD_DRV_LOG(ERR, "Illegal state: %d", state); + } + + /* Transition is assured */ + if (next_state != ECORE_Q_STATE_MAX) { + ECORE_MSG("Good state transition: %d(%d)->%d", + state, cmd, next_state); + o->next_state = next_state; + return ECORE_SUCCESS; + } + + ECORE_MSG("Bad state transition request: %d %d", state, cmd); + return ECORE_INVAL; +} + +void ecore_init_queue_obj(struct bnx2x_softc *sc, + struct ecore_queue_sp_obj *obj, + uint8_t cl_id, uint32_t * cids, uint8_t cid_cnt, + uint8_t func_id, void *rdata, + ecore_dma_addr_t rdata_mapping, unsigned long type) +{ + ECORE_MEMSET(obj, 0, sizeof(*obj)); + + /* We support only ECORE_MULTI_TX_COS Tx CoS at the moment */ + ECORE_BUG_ON(ECORE_MULTI_TX_COS < cid_cnt); + + rte_memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt); + obj->max_cos = cid_cnt; + obj->cl_id = cl_id; + obj->func_id = func_id; + obj->rdata = rdata; + obj->rdata_mapping = rdata_mapping; + obj->type = type; + obj->next_state = ECORE_Q_STATE_MAX; + + if (CHIP_IS_E1x(sc)) + obj->send_cmd = ecore_queue_send_cmd_e1x; + else + obj->send_cmd = ecore_queue_send_cmd_e2; + + if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type)) + obj->check_transition = ecore_queue_chk_fwd_transition; + else + obj->check_transition = ecore_queue_chk_transition; + + obj->complete_cmd = ecore_queue_comp_cmd; + obj->wait_comp = ecore_queue_wait_comp; + obj->set_pending = ecore_queue_set_pending; +} + +/********************** Function state object *********************************/ +enum ecore_func_state ecore_func_get_state(__rte_unused struct bnx2x_softc *sc, + struct ecore_func_sp_obj *o) +{ + /* in the middle of transaction - return INVALID state */ + if (o->pending) + return ECORE_F_STATE_MAX; + + /* unsure the order of reading of o->pending and o->state + * o->pending should be read first + */ + rmb(); + + return o->state; +} + +static int ecore_func_wait_comp(struct bnx2x_softc *sc, + struct ecore_func_sp_obj *o, + enum ecore_func_cmd cmd) +{ + return ecore_state_wait(sc, cmd, &o->pending); +} + +/** + * ecore_func_state_change_comp - complete the state machine transition + * + * @sc: device handle + * @o: + * @cmd: + * + * Called on state change transition. Completes the state + * machine transition only - no HW interaction. 
+ */ +static int +ecore_func_state_change_comp(struct bnx2x_softc *sc __rte_unused, + struct ecore_func_sp_obj *o, + enum ecore_func_cmd cmd) +{ + unsigned long cur_pending = o->pending; + + if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) { + PMD_DRV_LOG(ERR, + "Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d", + cmd, ECORE_FUNC_ID(sc), o->state, cur_pending, + o->next_state); + return ECORE_INVAL; + } + + ECORE_MSG("Completing command %d for func %d, setting state to %d", + cmd, ECORE_FUNC_ID(sc), o->next_state); + + o->state = o->next_state; + o->next_state = ECORE_F_STATE_MAX; + + /* It's important that o->state and o->next_state are + * updated before o->pending. + */ + wmb(); + + ECORE_CLEAR_BIT(cmd, &o->pending); + ECORE_SMP_MB_AFTER_CLEAR_BIT(); + + return ECORE_SUCCESS; +} + +/** + * ecore_func_comp_cmd - complete the state change command + * + * @sc: device handle + * @o: + * @cmd: + * + * Checks that the arrived completion is expected. + */ +static int ecore_func_comp_cmd(struct bnx2x_softc *sc, + struct ecore_func_sp_obj *o, + enum ecore_func_cmd cmd) +{ + /* Complete the state machine part first, check if it's a + * legal completion. + */ + int rc = ecore_func_state_change_comp(sc, o, cmd); + return rc; +} + +/** + * ecore_func_chk_transition - perform function state machine transition + * + * @sc: device handle + * @o: + * @params: + * + * It both checks if the requested command is legal in a current + * state and, if it's legal, sets a `next_state' in the object + * that will be used in the completion flow to set the `state' + * of the object. + * + * returns 0 if a requested command is a legal transition, + * ECORE_INVAL otherwise. + */ +static int ecore_func_chk_transition(struct bnx2x_softc *sc __rte_unused, + struct ecore_func_sp_obj *o, + struct ecore_func_state_params *params) +{ + enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX; + enum ecore_func_cmd cmd = params->cmd; + + /* Forget all pending for completion commands if a driver only state + * transition has been requested. + */ + if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { + o->pending = 0; + o->next_state = ECORE_F_STATE_MAX; + } + + /* Don't allow a next state transition if we are in the middle of + * the previous one. + */ + if (o->pending) + return ECORE_BUSY; + + switch (state) { + case ECORE_F_STATE_RESET: + if (cmd == ECORE_F_CMD_HW_INIT) + next_state = ECORE_F_STATE_INITIALIZED; + + break; + case ECORE_F_STATE_INITIALIZED: + if (cmd == ECORE_F_CMD_START) + next_state = ECORE_F_STATE_STARTED; + + else if (cmd == ECORE_F_CMD_HW_RESET) + next_state = ECORE_F_STATE_RESET; + + break; + case ECORE_F_STATE_STARTED: + if (cmd == ECORE_F_CMD_STOP) + next_state = ECORE_F_STATE_INITIALIZED; + /* afex ramrods can be sent only in started mode, and only + * if not pending for function_stop ramrod completion + * for these events - next state remained STARTED. + */ + else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) && + (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending))) + next_state = ECORE_F_STATE_STARTED; + + else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) && + (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending))) + next_state = ECORE_F_STATE_STARTED; + + /* Switch_update ramrod can be sent in either started or + * tx_stopped state, and it doesn't change the state. 
+ */ + else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) && + (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending))) + next_state = ECORE_F_STATE_STARTED; + + else if (cmd == ECORE_F_CMD_TX_STOP) + next_state = ECORE_F_STATE_TX_STOPPED; + + break; + case ECORE_F_STATE_TX_STOPPED: + if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) && + (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending))) + next_state = ECORE_F_STATE_TX_STOPPED; + + else if (cmd == ECORE_F_CMD_TX_START) + next_state = ECORE_F_STATE_STARTED; + + break; + default: + PMD_DRV_LOG(ERR, "Unknown state: %d", state); + } + + /* Transition is assured */ + if (next_state != ECORE_F_STATE_MAX) { + ECORE_MSG("Good function state transition: %d(%d)->%d", + state, cmd, next_state); + o->next_state = next_state; + return ECORE_SUCCESS; + } + + ECORE_MSG("Bad function state transition request: %d %d", state, cmd); + + return ECORE_INVAL; +} + +/** + * ecore_func_init_func - performs HW init at function stage + * + * @sc: device handle + * @drv: + * + * Init HW when the current phase is + * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only + * HW blocks. + */ +static int ecore_func_init_func(struct bnx2x_softc *sc, + const struct ecore_func_sp_drv_ops *drv) +{ + return drv->init_hw_func(sc); +} + +/** + * ecore_func_init_port - performs HW init at port stage + * + * @sc: device handle + * @drv: + * + * Init HW when the current phase is + * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and + * FUNCTION-only HW blocks. + * + */ +static int ecore_func_init_port(struct bnx2x_softc *sc, + const struct ecore_func_sp_drv_ops *drv) +{ + int rc = drv->init_hw_port(sc); + if (rc) + return rc; + + return ecore_func_init_func(sc, drv); +} + +/** + * ecore_func_init_cmn_chip - performs HW init at chip-common stage + * + * @sc: device handle + * @drv: + * + * Init HW when the current phase is + * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP, + * PORT-only and FUNCTION-only HW blocks. + */ +static int ecore_func_init_cmn_chip(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops + *drv) +{ + int rc = drv->init_hw_cmn_chip(sc); + if (rc) + return rc; + + return ecore_func_init_port(sc, drv); +} + +/** + * ecore_func_init_cmn - performs HW init at common stage + * + * @sc: device handle + * @drv: + * + * Init HW when the current phase is + * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON, + * PORT-only and FUNCTION-only HW blocks. + */ +static int ecore_func_init_cmn(struct bnx2x_softc *sc, + const struct ecore_func_sp_drv_ops *drv) +{ + int rc = drv->init_hw_cmn(sc); + if (rc) + return rc; + + return ecore_func_init_port(sc, drv); +} + +static int ecore_func_hw_init(struct bnx2x_softc *sc, + struct ecore_func_state_params *params) +{ + uint32_t load_code = params->params.hw_init.load_phase; + struct ecore_func_sp_obj *o = params->f_obj; + const struct ecore_func_sp_drv_ops *drv = o->drv; + int rc = 0; + + ECORE_MSG("function %d load_code %x", + ECORE_ABS_FUNC_ID(sc), load_code); + + /* Prepare FW */ + rc = drv->init_fw(sc); + if (rc) { + PMD_DRV_LOG(ERR, "Error loading firmware"); + goto init_err; + } + + /* Handle the beginning of COMMON_XXX pases separately... 
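+ *
+ * (Added note, not part of the original source.) The stage helpers above
+ * nest, so each load phase expands to the following init_hw_* call chain
+ * on the driver ops:
+ *
+ *	LOAD_COMMON_CHIP: init_hw_cmn_chip -> init_hw_port -> init_hw_func
+ *	LOAD_COMMON:      init_hw_cmn      -> init_hw_port -> init_hw_func
+ *	LOAD_PORT:        init_hw_port     -> init_hw_func
+ *	LOAD_FUNCTION:    init_hw_func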
*/ + switch (load_code) { + case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: + rc = ecore_func_init_cmn_chip(sc, drv); + if (rc) + goto init_err; + + break; + case FW_MSG_CODE_DRV_LOAD_COMMON: + rc = ecore_func_init_cmn(sc, drv); + if (rc) + goto init_err; + + break; + case FW_MSG_CODE_DRV_LOAD_PORT: + rc = ecore_func_init_port(sc, drv); + if (rc) + goto init_err; + + break; + case FW_MSG_CODE_DRV_LOAD_FUNCTION: + rc = ecore_func_init_func(sc, drv); + if (rc) + goto init_err; + + break; + default: + PMD_DRV_LOG(ERR, "Unknown load_code (0x%x) from MCP", + load_code); + rc = ECORE_INVAL; + } + +init_err: + /* In case of success, complete the command immediately: no ramrods + * have been sent. + */ + if (!rc) + o->complete_cmd(sc, o, ECORE_F_CMD_HW_INIT); + + return rc; +} + +/** + * ecore_func_reset_func - reset HW at function stage + * + * @sc: device handle + * @drv: + * + * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only + * FUNCTION-only HW blocks. + */ +static void ecore_func_reset_func(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops + *drv) +{ + drv->reset_hw_func(sc); +} + +/** + * ecore_func_reset_port - reser HW at port stage + * + * @sc: device handle + * @drv: + * + * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset + * FUNCTION-only and PORT-only HW blocks. + * + * !!!IMPORTANT!!! + * + * It's important to call reset_port before reset_func() as the last thing + * reset_func does is pf_disable() thus disabling PGLUE_B, which + * makes impossible any DMAE transactions. + */ +static void ecore_func_reset_port(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops + *drv) +{ + drv->reset_hw_port(sc); + ecore_func_reset_func(sc, drv); +} + +/** + * ecore_func_reset_cmn - reser HW at common stage + * + * @sc: device handle + * @drv: + * + * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and + * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON, + * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks. + */ +static void ecore_func_reset_cmn(struct bnx2x_softc *sc, + const struct ecore_func_sp_drv_ops *drv) +{ + ecore_func_reset_port(sc, drv); + drv->reset_hw_cmn(sc); +} + +static int ecore_func_hw_reset(struct bnx2x_softc *sc, + struct ecore_func_state_params *params) +{ + uint32_t reset_phase = params->params.hw_reset.reset_phase; + struct ecore_func_sp_obj *o = params->f_obj; + const struct ecore_func_sp_drv_ops *drv = o->drv; + + ECORE_MSG("function %d reset_phase %x", ECORE_ABS_FUNC_ID(sc), + reset_phase); + + switch (reset_phase) { + case FW_MSG_CODE_DRV_UNLOAD_COMMON: + ecore_func_reset_cmn(sc, drv); + break; + case FW_MSG_CODE_DRV_UNLOAD_PORT: + ecore_func_reset_port(sc, drv); + break; + case FW_MSG_CODE_DRV_UNLOAD_FUNCTION: + ecore_func_reset_func(sc, drv); + break; + default: + PMD_DRV_LOG(ERR, "Unknown reset_phase (0x%x) from MCP", + reset_phase); + break; + } + + /* Complete the command immediately: no ramrods have been sent. 
*/ + o->complete_cmd(sc, o, ECORE_F_CMD_HW_RESET); + + return ECORE_SUCCESS; +} + +static int ecore_func_send_start(struct bnx2x_softc *sc, + struct ecore_func_state_params *params) +{ + struct ecore_func_sp_obj *o = params->f_obj; + struct function_start_data *rdata = + (struct function_start_data *)o->rdata; + ecore_dma_addr_t data_mapping = o->rdata_mapping; + struct ecore_func_start_params *start_params = ¶ms->params.start; + + ECORE_MEMSET(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data with provided parameters */ + rdata->function_mode = (uint8_t) start_params->mf_mode; + rdata->sd_vlan_tag = ECORE_CPU_TO_LE16(start_params->sd_vlan_tag); + rdata->path_id = ECORE_PATH_ID(sc); + rdata->network_cos_mode = start_params->network_cos_mode; + rdata->gre_tunnel_mode = start_params->gre_tunnel_mode; + rdata->gre_tunnel_rss = start_params->gre_tunnel_rss; + + /* + * No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside ecore_sp_post()). + */ + + return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, + data_mapping, NONE_CONNECTION_TYPE); +} + +static int ecore_func_send_switch_update(struct bnx2x_softc *sc, struct ecore_func_state_params + *params) +{ + struct ecore_func_sp_obj *o = params->f_obj; + struct function_update_data *rdata = + (struct function_update_data *)o->rdata; + ecore_dma_addr_t data_mapping = o->rdata_mapping; + struct ecore_func_switch_update_params *switch_update_params = + ¶ms->params.switch_update; + + ECORE_MEMSET(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data with provided parameters */ + rdata->tx_switch_suspend_change_flg = 1; + rdata->tx_switch_suspend = switch_update_params->suspend; + rdata->echo = SWITCH_UPDATE; + + return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0, + data_mapping, NONE_CONNECTION_TYPE); +} + +static int ecore_func_send_afex_update(struct bnx2x_softc *sc, struct ecore_func_state_params + *params) +{ + struct ecore_func_sp_obj *o = params->f_obj; + struct function_update_data *rdata = + (struct function_update_data *)o->afex_rdata; + ecore_dma_addr_t data_mapping = o->afex_rdata_mapping; + struct ecore_func_afex_update_params *afex_update_params = + ¶ms->params.afex_update; + + ECORE_MEMSET(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data with provided parameters */ + rdata->vif_id_change_flg = 1; + rdata->vif_id = ECORE_CPU_TO_LE16(afex_update_params->vif_id); + rdata->afex_default_vlan_change_flg = 1; + rdata->afex_default_vlan = + ECORE_CPU_TO_LE16(afex_update_params->afex_default_vlan); + rdata->allowed_priorities_change_flg = 1; + rdata->allowed_priorities = afex_update_params->allowed_priorities; + rdata->echo = AFEX_UPDATE; + + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside ecore_sp_post()). 
+ */ + ECORE_MSG("afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x", + rdata->vif_id, + rdata->afex_default_vlan, rdata->allowed_priorities); + + return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0, + data_mapping, NONE_CONNECTION_TYPE); +} + +static +inline int ecore_func_send_afex_viflists(struct bnx2x_softc *sc, + struct ecore_func_state_params *params) +{ + struct ecore_func_sp_obj *o = params->f_obj; + struct afex_vif_list_ramrod_data *rdata = + (struct afex_vif_list_ramrod_data *)o->afex_rdata; + struct ecore_func_afex_viflists_params *afex_vif_params = + ¶ms->params.afex_viflists; + uint64_t *p_rdata = (uint64_t *) rdata; + + ECORE_MEMSET(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data with provided parameters */ + rdata->vif_list_index = + ECORE_CPU_TO_LE16(afex_vif_params->vif_list_index); + rdata->func_bit_map = afex_vif_params->func_bit_map; + rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command; + rdata->func_to_clear = afex_vif_params->func_to_clear; + + /* send in echo type of sub command */ + rdata->echo = afex_vif_params->afex_vif_list_command; + + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside ecore_sp_post()). + */ + + ECORE_MSG + ("afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x", + rdata->afex_vif_list_command, rdata->vif_list_index, + rdata->func_bit_map, rdata->func_to_clear); + + /* this ramrod sends data directly and not through DMA mapping */ + return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0, + *p_rdata, NONE_CONNECTION_TYPE); +} + +static int ecore_func_send_stop(struct bnx2x_softc *sc, __rte_unused struct + ecore_func_state_params *params) +{ + return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, + NONE_CONNECTION_TYPE); +} + +static int ecore_func_send_tx_stop(struct bnx2x_softc *sc, __rte_unused struct + ecore_func_state_params *params) +{ + return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, + NONE_CONNECTION_TYPE); +} + +static int ecore_func_send_tx_start(struct bnx2x_softc *sc, struct ecore_func_state_params + *params) +{ + struct ecore_func_sp_obj *o = params->f_obj; + struct flow_control_configuration *rdata = + (struct flow_control_configuration *)o->rdata; + ecore_dma_addr_t data_mapping = o->rdata_mapping; + struct ecore_func_tx_start_params *tx_start_params = + ¶ms->params.tx_start; + uint32_t i; + + ECORE_MEMSET(rdata, 0, sizeof(*rdata)); + + rdata->dcb_enabled = tx_start_params->dcb_enabled; + rdata->dcb_version = tx_start_params->dcb_version; + rdata->dont_add_pri_0 = tx_start_params->dont_add_pri_0; + + for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++) + rdata->traffic_type_to_priority_cos[i] = + tx_start_params->traffic_type_to_priority_cos[i]; + + return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0, + data_mapping, NONE_CONNECTION_TYPE); +} + +static int ecore_func_send_cmd(struct bnx2x_softc *sc, + struct ecore_func_state_params *params) +{ + switch (params->cmd) { + case ECORE_F_CMD_HW_INIT: + return ecore_func_hw_init(sc, params); + case ECORE_F_CMD_START: + return ecore_func_send_start(sc, params); + case ECORE_F_CMD_STOP: + return ecore_func_send_stop(sc, params); + case ECORE_F_CMD_HW_RESET: + return ecore_func_hw_reset(sc, params); + case ECORE_F_CMD_AFEX_UPDATE: + return 
ecore_func_send_afex_update(sc, params); + case ECORE_F_CMD_AFEX_VIFLISTS: + return ecore_func_send_afex_viflists(sc, params); + case ECORE_F_CMD_TX_STOP: + return ecore_func_send_tx_stop(sc, params); + case ECORE_F_CMD_TX_START: + return ecore_func_send_tx_start(sc, params); + case ECORE_F_CMD_SWITCH_UPDATE: + return ecore_func_send_switch_update(sc, params); + default: + PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd); + return ECORE_INVAL; + } +} + +void ecore_init_func_obj(__rte_unused struct bnx2x_softc *sc, + struct ecore_func_sp_obj *obj, + void *rdata, ecore_dma_addr_t rdata_mapping, + void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping, + struct ecore_func_sp_drv_ops *drv_iface) +{ + ECORE_MEMSET(obj, 0, sizeof(*obj)); + + ECORE_MUTEX_INIT(&obj->one_pending_mutex); + + obj->rdata = rdata; + obj->rdata_mapping = rdata_mapping; + obj->afex_rdata = afex_rdata; + obj->afex_rdata_mapping = afex_rdata_mapping; + obj->send_cmd = ecore_func_send_cmd; + obj->check_transition = ecore_func_chk_transition; + obj->complete_cmd = ecore_func_comp_cmd; + obj->wait_comp = ecore_func_wait_comp; + obj->drv = drv_iface; +} + +/** + * ecore_func_state_change - perform Function state change transition + * + * @sc: device handle + * @params: parameters to perform the transaction + * + * returns 0 in case of successfully completed transition, + * negative error code in case of failure, positive + * (EBUSY) value if there is a completion to that is + * still pending (possible only if RAMROD_COMP_WAIT is + * not set in params->ramrod_flags for asynchronous + * commands). + */ +int ecore_func_state_change(struct bnx2x_softc *sc, + struct ecore_func_state_params *params) +{ + struct ecore_func_sp_obj *o = params->f_obj; + int rc, cnt = 300; + enum ecore_func_cmd cmd = params->cmd; + unsigned long *pending = &o->pending; + + ECORE_MUTEX_LOCK(&o->one_pending_mutex); + + /* Check that the requested transition is legal */ + rc = o->check_transition(sc, o, params); + if ((rc == ECORE_BUSY) && + (ECORE_TEST_BIT(RAMROD_RETRY, ¶ms->ramrod_flags))) { + while ((rc == ECORE_BUSY) && (--cnt > 0)) { + ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); + ECORE_MSLEEP(10); + ECORE_MUTEX_LOCK(&o->one_pending_mutex); + rc = o->check_transition(sc, o, params); + } + if (rc == ECORE_BUSY) { + ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); + PMD_DRV_LOG(ERR, + "timeout waiting for previous ramrod completion"); + return rc; + } + } else if (rc) { + ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); + return rc; + } + + /* Set "pending" bit */ + ECORE_SET_BIT(cmd, pending); + + /* Don't send a command if only driver cleanup was requested */ + if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { + ecore_func_state_change_comp(sc, o, cmd); + ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); + } else { + /* Send a ramrod */ + rc = o->send_cmd(sc, params); + + ECORE_MUTEX_UNLOCK(&o->one_pending_mutex); + + if (rc) { + o->next_state = ECORE_F_STATE_MAX; + ECORE_CLEAR_BIT(cmd, pending); + ECORE_SMP_MB_AFTER_CLEAR_BIT(); + return rc; + } + + if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) { + rc = o->wait_comp(sc, o, cmd); + if (rc) + return rc; + + return ECORE_SUCCESS; + } + } + + return ECORE_RET_PENDING(cmd, pending); +} + +/****************************************************************************** + * Description: + * Calculates crc 8 on a word value: polynomial 0-1-2-8 + * Code was translated from Verilog. 
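+ *
+ * (Added note, not part of the original source.) "Polynomial 0-1-2-8"
+ * reads as x^8 + x^2 + x^1 + x^0, i.e. the common CRC-8 polynomial 0x07,
+ * unrolled here into a parallel update over one 32-bit word.  The
+ * companion ecore_calc_crc32() below is the usual reflected (LSB-first)
+ * bitwise CRC; as a quick self-test it is expected (not verified here)
+ * to reproduce the classic CRC-32 check value:
+ *
+ *	uint32_t r = ecore_calc_crc32(0xffffffff,
+ *				      (const uint8_t *)"123456789", 9,
+ *				      0xedb88320);
+ *	assert((r ^ 0xffffffff) == 0xcbf43926);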
+ * Return: + *****************************************************************************/ +uint8_t ecore_calc_crc8(uint32_t data, uint8_t crc) +{ + uint8_t D[32]; + uint8_t NewCRC[8]; + uint8_t C[8]; + uint8_t crc_res; + uint8_t i; + + /* split the data into 31 bits */ + for (i = 0; i < 32; i++) { + D[i] = (uint8_t) (data & 1); + data = data >> 1; + } + + /* split the crc into 8 bits */ + for (i = 0; i < 8; i++) { + C[i] = crc & 1; + crc = crc >> 1; + } + + NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^ + D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^ + C[6] ^ C[7]; + NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^ + D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^ + D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ C[6]; + NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^ + D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^ + C[0] ^ C[1] ^ C[4] ^ C[5]; + NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^ + D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^ + C[1] ^ C[2] ^ C[5] ^ C[6]; + NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^ + D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^ + C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7]; + NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^ + D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^ + C[3] ^ C[4] ^ C[7]; + NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^ + D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^ C[5]; + NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^ + D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^ C[6]; + + crc_res = 0; + for (i = 0; i < 8; i++) { + crc_res |= (NewCRC[i] << i); + } + + return crc_res; +} + +uint32_t +ecore_calc_crc32(uint32_t crc, uint8_t const *p, uint32_t len, uint32_t magic) +{ + int i; + while (len--) { + crc ^= *p++; + for (i = 0; i < 8; i++) + crc = (crc >> 1) ^ ((crc & 1) ? magic : 0); + } + return crc; +} diff --git a/drivers/net/bnx2x/ecore_sp.h b/drivers/net/bnx2x/ecore_sp.h new file mode 100644 index 00000000..9c1f55df --- /dev/null +++ b/drivers/net/bnx2x/ecore_sp.h @@ -0,0 +1,1768 @@ +/*- + * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * + * Eric Davis <edavis@broadcom.com> + * David Christensen <davidch@broadcom.com> + * Gary Zambrano <zambrano@broadcom.com> + * + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.bnx2x_pmd for copyright and licensing details. 
+ */ + +#ifndef ECORE_SP_H +#define ECORE_SP_H + +#include <rte_byteorder.h> + +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN +#ifndef __LITTLE_ENDIAN +#define __LITTLE_ENDIAN RTE_LITTLE_ENDIAN +#endif +#undef __BIG_ENDIAN +#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN +#ifndef __BIG_ENDIAN +#define __BIG_ENDIAN RTE_BIG_ENDIAN +#endif +#undef __LITTLE_ENDIAN +#endif + +#include "ecore_mfw_req.h" +#include "ecore_fw_defs.h" +#include "ecore_hsi.h" +#include "ecore_reg.h" + +struct bnx2x_softc; +typedef phys_addr_t ecore_dma_addr_t; /* expected to be 64 bit wide */ +typedef volatile int ecore_atomic_t; + + +#define ETH_ALEN ETHER_ADDR_LEN /* 6 */ + +#define ECORE_SWCID_SHIFT 17 +#define ECORE_SWCID_MASK ((0x1 << ECORE_SWCID_SHIFT) - 1) + +#define ECORE_MC_HASH_SIZE 8 +#define ECORE_MC_HASH_OFFSET(sc, i) \ + (BAR_TSTRORM_INTMEM + \ + TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(FUNC_ID(sc)) + i*4) + +#define ECORE_MAX_MULTICAST 64 +#define ECORE_MAX_EMUL_MULTI 1 + +#define IRO sc->iro_array + +typedef rte_spinlock_t ECORE_MUTEX; +#define ECORE_MUTEX_INIT(_mutex) rte_spinlock_init(_mutex) +#define ECORE_MUTEX_LOCK(_mutex) rte_spinlock_lock(_mutex) +#define ECORE_MUTEX_UNLOCK(_mutex) rte_spinlock_unlock(_mutex) + +typedef rte_spinlock_t ECORE_MUTEX_SPIN; +#define ECORE_SPIN_LOCK_INIT(_spin, _sc) rte_spinlock_init(_spin) +#define ECORE_SPIN_LOCK_BH(_spin) rte_spinlock_lock(_spin) /* bh = bottom-half */ +#define ECORE_SPIN_UNLOCK_BH(_spin) rte_spinlock_unlock(_spin) /* bh = bottom-half */ + +#define ECORE_SMP_MB_AFTER_CLEAR_BIT() mb() +#define ECORE_SMP_MB_BEFORE_CLEAR_BIT() mb() +#define ECORE_SMP_MB() mb() +#define ECORE_SMP_RMB() rmb() +#define ECORE_SMP_WMB() wmb() +#define ECORE_MMIOWB() wmb() + +#define ECORE_SET_BIT_NA(bit, var) (*var |= (1 << bit)) +#define ECORE_CLEAR_BIT_NA(bit, var) (*var &= ~(1 << bit)) + +#define ECORE_TEST_BIT(bit, var) bnx2x_test_bit(bit, var) +#define ECORE_SET_BIT(bit, var) bnx2x_set_bit(bit, var) +#define ECORE_CLEAR_BIT(bit, var) bnx2x_clear_bit(bit, var) +#define ECORE_TEST_AND_CLEAR_BIT(bit, var) bnx2x_test_and_clear_bit(bit, var) + +#define atomic_load_acq_int (int)* +#define atomic_store_rel_int(a, v) (*a = v) +#define atomic_cmpset_acq_int(a, o, n) ((*a = (o & (n)) | (n)) ^ o) + +#define atomic_load_acq_long (long)* +#define atomic_store_rel_long(a, v) (*a = v) +#define atomic_set_acq_long(a, v) (*a |= v) +#define atomic_clear_acq_long(a, v) (*a &= ~v) +#define atomic_cmpset_acq_long(a, o, n) ((*a = (o & (n)) | (n)) ^ o) +#define atomic_subtract_acq_long(a, v) (*a -= v) +#define atomic_add_acq_long(a, v) (*a += v) + +#define ECORE_ATOMIC_READ(a) atomic_load_acq_int((volatile int *)a) +#define ECORE_ATOMIC_SET(a, v) atomic_store_rel_int((volatile int *)a, v) +#define ECORE_ATOMIC_CMPXCHG(a, o, n) bnx2x_cmpxchg((volatile int *)a, o, n) + +#define ECORE_RET_PENDING(pending_bit, pending) \ + (ECORE_TEST_BIT(pending_bit, pending) ? 
ECORE_PENDING : ECORE_SUCCESS) + +#define ECORE_SET_FLAG(value, mask, flag) \ + do { \ + (value) &= ~(mask); \ + (value) |= ((flag) << (mask##_SHIFT)); \ + } while (0) + +#define ECORE_GET_FLAG(value, mask) \ + (((value) &= (mask)) >> (mask##_SHIFT)) + +#define ECORE_MIGHT_SLEEP() + +#define ECORE_FCOE_CID(sc) ((sc)->fp[FCOE_IDX(sc)].cl_id) + +#define ECORE_MEMCMP(_a, _b, _s) memcmp(_a, _b, _s) +#define ECORE_MEMCPY(_a, _b, _s) (void)rte_memcpy(_a, _b, _s) +#define ECORE_MEMSET(_a, _c, _s) memset(_a, _c, _s) + +#define ECORE_CPU_TO_LE16(x) htole16(x) +#define ECORE_CPU_TO_LE32(x) htole32(x) + +#define ECORE_WAIT(_s, _t) DELAY(1000) +#define ECORE_MSLEEP(_t) DELAY((_t) * 1000) + +#define ECORE_LIKELY(x) likely(x) +#define ECORE_UNLIKELY(x) unlikely(x) + +#define ECORE_ZALLOC(_size, _flags, _sc) \ + rte_zmalloc("", _size, RTE_CACHE_LINE_SIZE) + +#define ECORE_CALLOC(_len, _size, _flags, _sc) \ + rte_calloc("", _len, _size, RTE_CACHE_LINE_SIZE) + +#define ECORE_FREE(_s, _buf, _size) \ + rte_free(_buf) + +#define SC_ILT(sc) ((sc)->ilt) +#define ILOG2(x) bnx2x_ilog2(x) + +#define ECORE_ILT_ZALLOC(x, y, size, str) \ + do { \ + x = rte_malloc("", sizeof(struct bnx2x_dma), RTE_CACHE_LINE_SIZE); \ + if (x) { \ + if (bnx2x_dma_alloc((struct bnx2x_softc *)sc, \ + size, (struct bnx2x_dma *)x, \ + str, RTE_CACHE_LINE_SIZE) != 0) { \ + rte_free(x); \ + x = NULL; \ + *y = 0; \ + } else { \ + *y = ((struct bnx2x_dma *)x)->paddr; \ + } \ + } \ + } while (0) + +#define ECORE_ILT_FREE(x, y, size) \ + do { \ + if (x) { \ + rte_free(x); \ + x = NULL; \ + y = 0; \ + } \ + } while (0) + +#define ECORE_IS_VALID_ETHER_ADDR(_mac) TRUE + +#define ECORE_IS_MF_SD_MODE IS_MF_SD_MODE +#define ECORE_IS_MF_SI_MODE IS_MF_SI_MODE +#define ECORE_IS_MF_AFEX_MODE IS_MF_AFEX_MODE + +#define ECORE_SET_CTX_VALIDATION bnx2x_set_ctx_validation + +#define ECORE_UPDATE_COALESCE_SB_INDEX bnx2x_update_coalesce_sb_index + +#define ECORE_ALIGN(x, a) ((((x) + (a) - 1) / (a)) * (a)) + +#define ECORE_REG_WR_DMAE_LEN REG_WR_DMAE_LEN + +#define ECORE_PATH_ID SC_PATH +#define ECORE_PORT_ID SC_PORT +#define ECORE_FUNC_ID SC_FUNC +#define ECORE_ABS_FUNC_ID SC_ABS_FUNC + +#define CRCPOLY_LE 0xedb88320 +uint32_t ecore_calc_crc32(uint32_t crc, uint8_t const *p, + uint32_t len, uint32_t magic); + +uint8_t ecore_calc_crc8(uint32_t data, uint8_t crc); + + +static inline uint32_t +ECORE_CRC32_LE(uint32_t seed, uint8_t *mac, uint32_t len) +{ + return ecore_calc_crc32(seed, mac, len, CRCPOLY_LE); +} + +#define ecore_sp_post(_sc, _a, _b, _c, _d) \ + bnx2x_sp_post(_sc, _a, _b, U64_HI(_c), U64_LO(_c), _d) + +#define ECORE_DBG_BREAK_IF(exp) \ + do { \ + if (unlikely(exp)) { \ + rte_panic("ECORE"); \ + } \ + } while (0) + +#define ECORE_BUG() \ + do { \ + rte_panic("BUG (%s:%d)", __FILE__, __LINE__); \ + } while(0); + +#define ECORE_BUG_ON(exp) \ + do { \ + if (likely(exp)) { \ + rte_panic("BUG_ON (%s:%d)", __FILE__, __LINE__); \ + } \ + } while (0) + + +#define ECORE_MSG(m, ...) 
\ + PMD_DRV_LOG(DEBUG, m, ##__VA_ARGS__) + +typedef struct _ecore_list_entry_t +{ + struct _ecore_list_entry_t *next, *prev; +} ecore_list_entry_t; + +typedef struct ecore_list_t +{ + ecore_list_entry_t *head, *tail; + unsigned long cnt; +} ecore_list_t; + +/* initialize the list */ +#define ECORE_LIST_INIT(_list) \ + do { \ + (_list)->head = NULL; \ + (_list)->tail = NULL; \ + (_list)->cnt = 0; \ + } while (0) + +/* return TRUE if the element is the last on the list */ +#define ECORE_LIST_IS_LAST(_elem, _list) \ + (_elem == (_list)->tail) + +/* return TRUE if the list is empty */ +#define ECORE_LIST_IS_EMPTY(_list) \ + ((_list)->cnt == 0) + +/* return the first element */ +#define ECORE_LIST_FIRST_ENTRY(_list, cast, _link) \ + (cast *)((_list)->head) + +/* return the next element */ +#define ECORE_LIST_NEXT(_elem, _link, cast) \ + (cast *)((&((_elem)->_link))->next) + +/* push an element on the head of the list */ +#define ECORE_LIST_PUSH_HEAD(_elem, _list) \ + do { \ + (_elem)->prev = (ecore_list_entry_t *)0; \ + (_elem)->next = (_list)->head; \ + if ((_list)->tail == (ecore_list_entry_t *)0) { \ + (_list)->tail = (_elem); \ + } else { \ + (_list)->head->prev = (_elem); \ + } \ + (_list)->head = (_elem); \ + (_list)->cnt++; \ + } while (0) + +/* push an element on the tail of the list */ +#define ECORE_LIST_PUSH_TAIL(_elem, _list) \ + do { \ + (_elem)->next = (ecore_list_entry_t *)0; \ + (_elem)->prev = (_list)->tail; \ + if ((_list)->tail) { \ + (_list)->tail->next = (_elem); \ + } else { \ + (_list)->head = (_elem); \ + } \ + (_list)->tail = (_elem); \ + (_list)->cnt++; \ + } while (0) + +/* push list1 on the head of list2 and return with list1 as empty */ +#define ECORE_LIST_SPLICE_INIT(_list1, _list2) \ + do { \ + (_list1)->tail->next = (_list2)->head; \ + if ((_list2)->head) { \ + (_list2)->head->prev = (_list1)->tail; \ + } else { \ + (_list2)->tail = (_list1)->tail; \ + } \ + (_list2)->head = (_list1)->head; \ + (_list2)->cnt += (_list1)->cnt; \ + (_list1)->head = NULL; \ + (_list1)->tail = NULL; \ + (_list1)->cnt = 0; \ + } while (0) + +/* remove an element from the list */ +#define ECORE_LIST_REMOVE_ENTRY(_elem, _list) \ + do { \ + if ((_list)->head == (_elem)) { \ + if ((_list)->head) { \ + (_list)->head = (_list)->head->next; \ + if ((_list)->head) { \ + (_list)->head->prev = (ecore_list_entry_t *)0; \ + } else { \ + (_list)->tail = (ecore_list_entry_t *)0; \ + } \ + (_list)->cnt--; \ + } \ + } else if ((_list)->tail == (_elem)) { \ + if ((_list)->tail) { \ + (_list)->tail = (_list)->tail->prev; \ + if ((_list)->tail) { \ + (_list)->tail->next = (ecore_list_entry_t *)0; \ + } else { \ + (_list)->head = (ecore_list_entry_t *)0; \ + } \ + (_list)->cnt--; \ + } \ + } else { \ + (_elem)->prev->next = (_elem)->next; \ + (_elem)->next->prev = (_elem)->prev; \ + (_list)->cnt--; \ + } \ + } while (0) + +/* walk the list */ +#define ECORE_LIST_FOR_EACH_ENTRY(pos, _list, _link, cast) \ + for (pos = ECORE_LIST_FIRST_ENTRY(_list, cast, _link); \ + pos; \ + pos = ECORE_LIST_NEXT(pos, _link, cast)) + +/* walk the list (safely) */ +#define ECORE_LIST_FOR_EACH_ENTRY_SAFE(pos, n, _list, _link, cast) \ + for (pos = ECORE_LIST_FIRST_ENTRY(_list, cast, _lint), \ + n = (pos) ? ECORE_LIST_NEXT(pos, _link, cast) : NULL; \ + pos != NULL; \ + pos = (cast *)n, \ + n = (pos) ? 
ECORE_LIST_NEXT(pos, _link, cast) : NULL) + + +/* Manipulate a bit vector defined as an array of uint64_t */ + +/* Number of bits in one sge_mask array element */ +#define BIT_VEC64_ELEM_SZ 64 +#define BIT_VEC64_ELEM_SHIFT 6 +#define BIT_VEC64_ELEM_MASK ((uint64_t)BIT_VEC64_ELEM_SZ - 1) + +#define __BIT_VEC64_SET_BIT(el, bit) \ + do { \ + el = ((el) | ((uint64_t)0x1 << (bit))); \ + } while (0) + +#define __BIT_VEC64_CLEAR_BIT(el, bit) \ + do { \ + el = ((el) & (~((uint64_t)0x1 << (bit)))); \ + } while (0) + +#define BIT_VEC64_SET_BIT(vec64, idx) \ + __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ + (idx) & BIT_VEC64_ELEM_MASK) + +#define BIT_VEC64_CLEAR_BIT(vec64, idx) \ + __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ + (idx) & BIT_VEC64_ELEM_MASK) + +#define BIT_VEC64_TEST_BIT(vec64, idx) \ + (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \ + ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1) + +/* + * Creates a bitmask of all ones in less significant bits. + * idx - index of the most significant bit in the created mask + */ +#define BIT_VEC64_ONES_MASK(idx) \ + (((uint64_t)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1) +#define BIT_VEC64_ELEM_ONE_MASK ((uint64_t)(~0)) + +/* fill in a MAC address the way the FW likes it */ +static inline void +ecore_set_fw_mac_addr(uint16_t *fw_hi, + uint16_t *fw_mid, + uint16_t *fw_lo, + uint8_t *mac) +{ + ((uint8_t *)fw_hi)[0] = mac[1]; + ((uint8_t *)fw_hi)[1] = mac[0]; + ((uint8_t *)fw_mid)[0] = mac[3]; + ((uint8_t *)fw_mid)[1] = mac[2]; + ((uint8_t *)fw_lo)[0] = mac[5]; + ((uint8_t *)fw_lo)[1] = mac[4]; +} + + +enum ecore_status_t { + ECORE_EXISTS = -6, + ECORE_IO = -5, + ECORE_TIMEOUT = -4, + ECORE_INVAL = -3, + ECORE_BUSY = -2, + ECORE_NOMEM = -1, + ECORE_SUCCESS = 0, + /* PENDING is not an error and should be positive */ + ECORE_PENDING = 1, +}; + +enum { + SWITCH_UPDATE, + AFEX_UPDATE, +}; + + + + +struct bnx2x_softc; +struct eth_context; + +/* Bits representing general command's configuration */ +enum { + RAMROD_TX, + RAMROD_RX, + /* Wait until all pending commands complete */ + RAMROD_COMP_WAIT, + /* Don't send a ramrod, only update a registry */ + RAMROD_DRV_CLR_ONLY, + /* Configure HW according to the current object state */ + RAMROD_RESTORE, + /* Execute the next command now */ + RAMROD_EXEC, + /* Don't add a new command and continue execution of posponed + * commands. If not set a new command will be added to the + * pending commands list. + */ + RAMROD_CONT, + /* If there is another pending ramrod, wait until it finishes and + * re-try to submit this one. This flag can be set only in sleepable + * context, and should not be set from the context that completes the + * ramrods as deadlock will occur. 
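+ *
+ * (Added note, not part of the original source.) ecore_func_state_change()
+ * in ecore_sp.c shows the intended use: with this flag set, an ECORE_BUSY
+ * answer from the transition check is retried up to 300 times with a
+ * 10 ms sleep in between instead of being returned to the caller, e.g.:
+ *
+ *	ECORE_SET_BIT(RAMROD_RETRY, &func_params.ramrod_flags);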
+ */ + RAMROD_RETRY, +}; + +typedef enum { + ECORE_OBJ_TYPE_RX, + ECORE_OBJ_TYPE_TX, + ECORE_OBJ_TYPE_RX_TX, +} ecore_obj_type; + +/* Public slow path states */ +enum { + ECORE_FILTER_MAC_PENDING, + ECORE_FILTER_VLAN_PENDING, + ECORE_FILTER_VLAN_MAC_PENDING, + ECORE_FILTER_RX_MODE_PENDING, + ECORE_FILTER_RX_MODE_SCHED, + ECORE_FILTER_ISCSI_ETH_START_SCHED, + ECORE_FILTER_ISCSI_ETH_STOP_SCHED, + ECORE_FILTER_FCOE_ETH_START_SCHED, + ECORE_FILTER_FCOE_ETH_STOP_SCHED, + ECORE_FILTER_MCAST_PENDING, + ECORE_FILTER_MCAST_SCHED, + ECORE_FILTER_RSS_CONF_PENDING, + ECORE_AFEX_FCOE_Q_UPDATE_PENDING, + ECORE_AFEX_PENDING_VIFSET_MCP_ACK +}; + +struct ecore_raw_obj { + uint8_t func_id; + + /* Queue params */ + uint8_t cl_id; + uint32_t cid; + + /* Ramrod data buffer params */ + void *rdata; + ecore_dma_addr_t rdata_mapping; + + /* Ramrod state params */ + int state; /* "ramrod is pending" state bit */ + unsigned long *pstate; /* pointer to state buffer */ + + ecore_obj_type obj_type; + + int (*wait_comp)(struct bnx2x_softc *sc, + struct ecore_raw_obj *o); + + int (*check_pending)(struct ecore_raw_obj *o); + void (*clear_pending)(struct ecore_raw_obj *o); + void (*set_pending)(struct ecore_raw_obj *o); +}; + +/************************* VLAN-MAC commands related parameters ***************/ +struct ecore_mac_ramrod_data { + uint8_t mac[ETH_ALEN]; + uint8_t is_inner_mac; +}; + +struct ecore_vlan_ramrod_data { + uint16_t vlan; +}; + +struct ecore_vlan_mac_ramrod_data { + uint8_t mac[ETH_ALEN]; + uint8_t is_inner_mac; + uint16_t vlan; +}; + +union ecore_classification_ramrod_data { + struct ecore_mac_ramrod_data mac; + struct ecore_vlan_ramrod_data vlan; + struct ecore_vlan_mac_ramrod_data vlan_mac; +}; + +/* VLAN_MAC commands */ +enum ecore_vlan_mac_cmd { + ECORE_VLAN_MAC_ADD, + ECORE_VLAN_MAC_DEL, + ECORE_VLAN_MAC_MOVE, +}; + +struct ecore_vlan_mac_data { + /* Requested command: ECORE_VLAN_MAC_XX */ + enum ecore_vlan_mac_cmd cmd; + /* used to contain the data related vlan_mac_flags bits from + * ramrod parameters. + */ + unsigned long vlan_mac_flags; + + /* Needed for MOVE command */ + struct ecore_vlan_mac_obj *target_obj; + + union ecore_classification_ramrod_data u; +}; + +/*************************** Exe Queue obj ************************************/ +union ecore_exe_queue_cmd_data { + struct ecore_vlan_mac_data vlan_mac; + + struct { + } mcast; +}; + +struct ecore_exeq_elem { + ecore_list_entry_t link; + + /* Length of this element in the exe_chunk. */ + int cmd_len; + + union ecore_exe_queue_cmd_data cmd_data; +}; + +union ecore_qable_obj; + +union ecore_exeq_comp_elem { + union event_ring_elem *elem; +}; + +struct ecore_exe_queue_obj; + +typedef int (*exe_q_validate)(struct bnx2x_softc *sc, + union ecore_qable_obj *o, + struct ecore_exeq_elem *elem); + +typedef int (*exe_q_remove)(struct bnx2x_softc *sc, + union ecore_qable_obj *o, + struct ecore_exeq_elem *elem); + +/* Return positive if entry was optimized, 0 - if not, negative + * in case of an error. + */ +typedef int (*exe_q_optimize)(struct bnx2x_softc *sc, + union ecore_qable_obj *o, + struct ecore_exeq_elem *elem); +typedef int (*exe_q_execute)(struct bnx2x_softc *sc, + union ecore_qable_obj *o, + ecore_list_t *exe_chunk, + unsigned long *ramrod_flags); +typedef struct ecore_exeq_elem * + (*exe_q_get)(struct ecore_exe_queue_obj *o, + struct ecore_exeq_elem *elem); + +struct ecore_exe_queue_obj { + /* Commands pending for an execution. */ + ecore_list_t exe_queue; + + /* Commands pending for an completion. 
*/ + ecore_list_t pending_comp; + + ECORE_MUTEX_SPIN lock; + + /* Maximum length of commands' list for one execution */ + int exe_chunk_len; + + union ecore_qable_obj *owner; + + /****** Virtual functions ******/ + /** + * Called before commands execution for commands that are really + * going to be executed (after 'optimize'). + * + * Must run under exe_queue->lock + */ + exe_q_validate validate; + + /** + * Called before removing pending commands, cleaning allocated + * resources (e.g., credits from validate) + */ + exe_q_remove remove; + + /** + * This will try to cancel the current pending commands list + * considering the new command. + * + * Returns the number of optimized commands or a negative error code + * + * Must run under exe_queue->lock + */ + exe_q_optimize optimize; + + /** + * Run the next commands chunk (owner specific). + */ + exe_q_execute execute; + + /** + * Return the exe_queue element containing the specific command + * if any. Otherwise return NULL. + */ + exe_q_get get; +}; +/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ +/* + * Element in the VLAN_MAC registry list having all current configured + * rules. + */ +struct ecore_vlan_mac_registry_elem { + ecore_list_entry_t link; + + /* Used to store the cam offset used for the mac/vlan/vlan-mac. + * Relevant for 57711 only. VLANs and MACs share the + * same CAM for these chips. + */ + int cam_offset; + + /* Needed for DEL and RESTORE flows */ + unsigned long vlan_mac_flags; + + union ecore_classification_ramrod_data u; +}; + +/* Bits representing VLAN_MAC commands specific flags */ +enum { + ECORE_UC_LIST_MAC, + ECORE_ETH_MAC, + ECORE_ISCSI_ETH_MAC, + ECORE_NETQ_ETH_MAC, + ECORE_DONT_CONSUME_CAM_CREDIT, + ECORE_DONT_CONSUME_CAM_CREDIT_DEST, +}; + +struct ecore_vlan_mac_ramrod_params { + /* Object to run the command from */ + struct ecore_vlan_mac_obj *vlan_mac_obj; + + /* General command flags: COMP_WAIT, etc. */ + unsigned long ramrod_flags; + + /* Command specific configuration request */ + struct ecore_vlan_mac_data user_req; +}; + +struct ecore_vlan_mac_obj { + struct ecore_raw_obj raw; + + /* Bookkeeping list: will prevent the addition of already existing + * entries. + */ + ecore_list_t head; + /* Implement a simple reader/writer lock on the head list. + * all these fields should only be accessed under the exe_queue lock + */ + uint8_t head_reader; /* Num. of readers accessing head list */ + int head_exe_request; /* Pending execution request. */ + unsigned long saved_ramrod_flags; /* Ramrods of pending execution */ + + /* Execution queue interface instance */ + struct ecore_exe_queue_obj exe_queue; + + /* MACs credit pool */ + struct ecore_credit_pool_obj *macs_pool; + + /* VLANs credit pool */ + struct ecore_credit_pool_obj *vlans_pool; + + /* RAMROD command to be used */ + int ramrod_cmd; + + /* copy first n elements onto preallocated buffer + * + * @param n number of elements to get + * @param buf buffer preallocated by caller into which elements + * will be copied. Note elements are 4-byte aligned + * so buffer size must be able to accommodate the + * aligned elements. + * + * @return number of copied bytes + */ + + int (*get_n_elements)(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, int n, uint8_t *base, + uint8_t stride, uint8_t size); + + /** + * Checks if ADD-ramrod with the given params may be performed. 
+ * + * @return zero if the element may be added + */ + + int (*check_add)(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, + union ecore_classification_ramrod_data *data); + + /** + * Checks if DEL-ramrod with the given params may be performed. + * + * @return TRUE if the element may be deleted + */ + struct ecore_vlan_mac_registry_elem * + (*check_del)(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, + union ecore_classification_ramrod_data *data); + + /** + * Checks if DEL-ramrod with the given params may be performed. + * + * @return TRUE if the element may be deleted + */ + int (*check_move)(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *src_o, + struct ecore_vlan_mac_obj *dst_o, + union ecore_classification_ramrod_data *data); + + /** + * Update the relevant credit object(s) (consume/return + * correspondingly). + */ + int (*get_credit)(struct ecore_vlan_mac_obj *o); + int (*put_credit)(struct ecore_vlan_mac_obj *o); + int (*get_cam_offset)(struct ecore_vlan_mac_obj *o, int *offset); + int (*put_cam_offset)(struct ecore_vlan_mac_obj *o, int offset); + + /** + * Configures one rule in the ramrod data buffer. + */ + void (*set_one_rule)(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, + struct ecore_exeq_elem *elem, int rule_idx, + int cam_offset); + + /** + * Delete all configured elements having the given + * vlan_mac_flags specification. Assumes no pending for + * execution commands. Will schedule all all currently + * configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags + * specification for deletion and will use the given + * ramrod_flags for the last DEL operation. + * + * @param sc + * @param o + * @param ramrod_flags RAMROD_XX flags + * + * @return 0 if the last operation has completed successfully + * and there are no more elements left, positive value + * if there are pending for completion commands, + * negative value in case of failure. + */ + int (*delete_all)(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o, + unsigned long *vlan_mac_flags, + unsigned long *ramrod_flags); + + /** + * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously + * configured elements list. + * + * @param sc + * @param p Command parameters (RAMROD_COMP_WAIT bit in + * ramrod_flags is only taken into an account) + * @param ppos a pointer to the cookie that should be given back in the + * next call to make function handle the next element. If + * *ppos is set to NULL it will restart the iterator. + * If returned *ppos == NULL this means that the last + * element has been handled. + * + * @return int + */ + int (*restore)(struct bnx2x_softc *sc, + struct ecore_vlan_mac_ramrod_params *p, + struct ecore_vlan_mac_registry_elem **ppos); + + /** + * Should be called on a completion arrival. + * + * @param sc + * @param o + * @param cqe Completion element we are handling + * @param ramrod_flags if RAMROD_CONT is set the next bulk of + * pending commands will be executed. + * RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE + * may also be set if needed. + * + * @return 0 if there are neither pending nor waiting for + * completion commands. Positive value if there are + * pending for execution or for completion commands. + * Negative value in case of an error (including an + * error in the cqe). + */ + int (*complete)(struct bnx2x_softc *sc, struct ecore_vlan_mac_obj *o, + union event_ring_elem *cqe, + unsigned long *ramrod_flags); + + /** + * Wait for completion of all commands. Don't schedule new ones, + * just wait. 
It assumes that the completion code will schedule + * for new commands. + */ + int (*wait)(struct bnx2x_softc *sc, struct ecore_vlan_mac_obj *o); +}; + +enum { + ECORE_LLH_CAM_ISCSI_ETH_LINE = 0, + ECORE_LLH_CAM_ETH_LINE, + ECORE_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2 +}; + +/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ + +/* RX_MODE ramrod special flags: set in rx_mode_flags field in + * a ecore_rx_mode_ramrod_params. + */ +enum { + ECORE_RX_MODE_FCOE_ETH, + ECORE_RX_MODE_ISCSI_ETH, +}; + +enum { + ECORE_ACCEPT_UNICAST, + ECORE_ACCEPT_MULTICAST, + ECORE_ACCEPT_ALL_UNICAST, + ECORE_ACCEPT_ALL_MULTICAST, + ECORE_ACCEPT_BROADCAST, + ECORE_ACCEPT_UNMATCHED, + ECORE_ACCEPT_ANY_VLAN +}; + +struct ecore_rx_mode_ramrod_params { + struct ecore_rx_mode_obj *rx_mode_obj; + unsigned long *pstate; + int state; + uint8_t cl_id; + uint32_t cid; + uint8_t func_id; + unsigned long ramrod_flags; + unsigned long rx_mode_flags; + + /* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to + * a tstorm_eth_mac_filter_config (e1x). + */ + void *rdata; + ecore_dma_addr_t rdata_mapping; + + /* Rx mode settings */ + unsigned long rx_accept_flags; + + /* internal switching settings */ + unsigned long tx_accept_flags; +}; + +struct ecore_rx_mode_obj { + int (*config_rx_mode)(struct bnx2x_softc *sc, + struct ecore_rx_mode_ramrod_params *p); + + int (*wait_comp)(struct bnx2x_softc *sc, + struct ecore_rx_mode_ramrod_params *p); +}; + +/********************** Set multicast group ***********************************/ + +struct ecore_mcast_list_elem { + ecore_list_entry_t link; + uint8_t *mac; +}; + +union ecore_mcast_config_data { + uint8_t *mac; + uint8_t bin; /* used in a RESTORE flow */ +}; + +struct ecore_mcast_ramrod_params { + struct ecore_mcast_obj *mcast_obj; + + /* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */ + unsigned long ramrod_flags; + + ecore_list_t mcast_list; /* list of struct ecore_mcast_list_elem */ + int mcast_list_len; +}; + +enum ecore_mcast_cmd { + ECORE_MCAST_CMD_ADD, + ECORE_MCAST_CMD_CONT, + ECORE_MCAST_CMD_DEL, + ECORE_MCAST_CMD_RESTORE, +}; + +struct ecore_mcast_obj { + struct ecore_raw_obj raw; + + union { + struct { + #define ECORE_MCAST_BINS_NUM 256 + #define ECORE_MCAST_VEC_SZ (ECORE_MCAST_BINS_NUM / 64) + uint64_t vec[ECORE_MCAST_VEC_SZ]; + + /** Number of BINs to clear. Should be updated + * immediately when a command arrives in order to + * properly create DEL commands. + */ + int num_bins_set; + } aprox_match; + + struct { + ecore_list_t macs; + int num_macs_set; + } exact_match; + } registry; + + /* Pending commands */ + ecore_list_t pending_cmds_head; + + /* A state that is set in raw.pstate, when there are pending commands */ + int sched_state; + + /* Maximal number of mcast MACs configured in one command */ + int max_cmd_len; + + /* Total number of currently pending MACs to configure: both + * in the pending commands list and in the current command. + */ + int total_pending_num; + + uint8_t engine_id; + + /** + * @param cmd command to execute (ECORE_MCAST_CMD_X, see above) + */ + int (*config_mcast)(struct bnx2x_softc *sc, + struct ecore_mcast_ramrod_params *p, + enum ecore_mcast_cmd cmd); + + /** + * Fills the ramrod data during the RESTORE flow. + * + * @param sc + * @param o + * @param start_idx Registry index to start from + * @param rdata_idx Index in the ramrod data to start from + * + * @return -1 if we handled the whole registry or index of the last + * handled registry element. 
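+ *
+ * Hedged sketch of the expected call pattern, derived from the
+ * description above rather than copied from the driver: the RESTORE
+ * flow keeps invoking the handler until it reports that the whole
+ * registry has been written into the ramrod data:
+ *
+ *    int cur = 0, rdata_idx = 0;
+ *
+ *    cur = o->hdl_restore(sc, o, cur, &rdata_idx);
+ *    if (cur < 0)
+ *        ... whole registry handled, ramrod data is complete ...
+ *    else
+ *        ... resume after element 'cur' in the next chunk ...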
+ */ + int (*hdl_restore)(struct bnx2x_softc *sc, struct ecore_mcast_obj *o, + int start_bin, int *rdata_idx); + + int (*enqueue_cmd)(struct bnx2x_softc *sc, struct ecore_mcast_obj *o, + struct ecore_mcast_ramrod_params *p, + enum ecore_mcast_cmd cmd); + + void (*set_one_rule)(struct bnx2x_softc *sc, + struct ecore_mcast_obj *o, int idx, + union ecore_mcast_config_data *cfg_data, + enum ecore_mcast_cmd cmd); + + /** Checks if there are more mcast MACs to be set or a previous + * command is still pending. + */ + int (*check_pending)(struct ecore_mcast_obj *o); + + /** + * Set/Clear/Check SCHEDULED state of the object + */ + void (*set_sched)(struct ecore_mcast_obj *o); + void (*clear_sched)(struct ecore_mcast_obj *o); + int (*check_sched)(struct ecore_mcast_obj *o); + + /* Wait until all pending commands complete */ + int (*wait_comp)(struct bnx2x_softc *sc, struct ecore_mcast_obj *o); + + /** + * Handle the internal object counters needed for proper + * commands handling. Checks that the provided parameters are + * feasible. + */ + int (*validate)(struct bnx2x_softc *sc, + struct ecore_mcast_ramrod_params *p, + enum ecore_mcast_cmd cmd); + + /** + * Restore the values of internal counters in case of a failure. + */ + void (*revert)(struct bnx2x_softc *sc, + struct ecore_mcast_ramrod_params *p, + int old_num_bins); + + int (*get_registry_size)(struct ecore_mcast_obj *o); + void (*set_registry_size)(struct ecore_mcast_obj *o, int n); +}; + +/*************************** Credit handling **********************************/ +struct ecore_credit_pool_obj { + + /* Current amount of credit in the pool */ + ecore_atomic_t credit; + + /* Maximum allowed credit. put() will check against it. */ + int pool_sz; + + /* Allocate a pool table statically. + * + * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272) + * + * The set bit in the table will mean that the entry is available. + */ +#define ECORE_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64) + uint64_t pool_mirror[ECORE_POOL_VEC_SIZE]; + + /* Base pool offset (initialized differently */ + int base_pool_offset; + + /** + * Get the next free pool entry. + * + * @return TRUE if there was a free entry in the pool + */ + int (*get_entry)(struct ecore_credit_pool_obj *o, int *entry); + + /** + * Return the entry back to the pool. + * + * @return TRUE if entry is legal and has been successfully + * returned to the pool. + */ + int (*put_entry)(struct ecore_credit_pool_obj *o, int entry); + + /** + * Get the requested amount of credit from the pool. + * + * @param cnt Amount of requested credit + * @return TRUE if the operation is successful + */ + int (*get)(struct ecore_credit_pool_obj *o, int cnt); + + /** + * Returns the credit to the pool. + * + * @param cnt Amount of credit to return + * @return TRUE if the operation is successful + */ + int (*put)(struct ecore_credit_pool_obj *o, int cnt); + + /** + * Reads the current amount of credit. 
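+ *
+ * A small, hypothetical usage sketch for the pool as a whole, based
+ * only on the operations declared in this structure (macs_pool being
+ * the MAC credit pool referenced by the VLAN-MAC object above):
+ *
+ *    if (!macs_pool->get(macs_pool, 1))
+ *        ... no MAC credit left, fail the ADD request ...
+ *    ... issue the classification ramrod ...
+ *    if (... the ramrod failed ...)
+ *        macs_pool->put(macs_pool, 1);    ... return the credit ...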
+ */ + int (*check)(struct ecore_credit_pool_obj *o); +}; + +/*************************** RSS configuration ********************************/ +enum { + /* RSS_MODE bits are mutually exclusive */ + ECORE_RSS_MODE_DISABLED, + ECORE_RSS_MODE_REGULAR, + + ECORE_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */ + + ECORE_RSS_IPV4, + ECORE_RSS_IPV4_TCP, + ECORE_RSS_IPV4_UDP, + ECORE_RSS_IPV6, + ECORE_RSS_IPV6_TCP, + ECORE_RSS_IPV6_UDP, + + ECORE_RSS_TUNNELING, +}; + +struct ecore_config_rss_params { + struct ecore_rss_config_obj *rss_obj; + + /* may have RAMROD_COMP_WAIT set only */ + unsigned long ramrod_flags; + + /* ECORE_RSS_X bits */ + unsigned long rss_flags; + + /* Number hash bits to take into an account */ + uint8_t rss_result_mask; + + /* Indirection table */ + uint8_t ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; + + /* RSS hash values */ + uint32_t rss_key[10]; + + /* valid only iff ECORE_RSS_UPDATE_TOE is set */ + uint16_t toe_rss_bitmap; + + /* valid iff ECORE_RSS_TUNNELING is set */ + uint16_t tunnel_value; + uint16_t tunnel_mask; +}; + +struct ecore_rss_config_obj { + struct ecore_raw_obj raw; + + /* RSS engine to use */ + uint8_t engine_id; + + /* Last configured indirection table */ + uint8_t ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; + + /* flags for enabling 4-tupple hash on UDP */ + uint8_t udp_rss_v4; + uint8_t udp_rss_v6; + + int (*config_rss)(struct bnx2x_softc *sc, + struct ecore_config_rss_params *p); +}; + +/*********************** Queue state update ***********************************/ + +/* UPDATE command options */ +enum { + ECORE_Q_UPDATE_IN_VLAN_REM, + ECORE_Q_UPDATE_IN_VLAN_REM_CHNG, + ECORE_Q_UPDATE_OUT_VLAN_REM, + ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG, + ECORE_Q_UPDATE_ANTI_SPOOF, + ECORE_Q_UPDATE_ANTI_SPOOF_CHNG, + ECORE_Q_UPDATE_ACTIVATE, + ECORE_Q_UPDATE_ACTIVATE_CHNG, + ECORE_Q_UPDATE_DEF_VLAN_EN, + ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG, + ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG, + ECORE_Q_UPDATE_SILENT_VLAN_REM, + ECORE_Q_UPDATE_TX_SWITCHING_CHNG, + ECORE_Q_UPDATE_TX_SWITCHING, +}; + +/* Allowed Queue states */ +enum ecore_q_state { + ECORE_Q_STATE_RESET, + ECORE_Q_STATE_INITIALIZED, + ECORE_Q_STATE_ACTIVE, + ECORE_Q_STATE_MULTI_COS, + ECORE_Q_STATE_MCOS_TERMINATED, + ECORE_Q_STATE_INACTIVE, + ECORE_Q_STATE_STOPPED, + ECORE_Q_STATE_TERMINATED, + ECORE_Q_STATE_FLRED, + ECORE_Q_STATE_MAX, +}; + +/* Allowed Queue states */ +enum ecore_q_logical_state { + ECORE_Q_LOGICAL_STATE_ACTIVE, + ECORE_Q_LOGICAL_STATE_STOPPED, +}; + +/* Allowed commands */ +enum ecore_queue_cmd { + ECORE_Q_CMD_INIT, + ECORE_Q_CMD_SETUP, + ECORE_Q_CMD_SETUP_TX_ONLY, + ECORE_Q_CMD_DEACTIVATE, + ECORE_Q_CMD_ACTIVATE, + ECORE_Q_CMD_UPDATE, + ECORE_Q_CMD_UPDATE_TPA, + ECORE_Q_CMD_HALT, + ECORE_Q_CMD_CFC_DEL, + ECORE_Q_CMD_TERMINATE, + ECORE_Q_CMD_EMPTY, + ECORE_Q_CMD_MAX, +}; + +/* queue SETUP + INIT flags */ +enum { + ECORE_Q_FLG_TPA, + ECORE_Q_FLG_TPA_IPV6, + ECORE_Q_FLG_TPA_GRO, + ECORE_Q_FLG_STATS, + ECORE_Q_FLG_ZERO_STATS, + ECORE_Q_FLG_ACTIVE, + ECORE_Q_FLG_OV, + ECORE_Q_FLG_VLAN, + ECORE_Q_FLG_COS, + ECORE_Q_FLG_HC, + ECORE_Q_FLG_HC_EN, + ECORE_Q_FLG_DHC, + ECORE_Q_FLG_OOO, + ECORE_Q_FLG_FCOE, + ECORE_Q_FLG_LEADING_RSS, + ECORE_Q_FLG_MCAST, + ECORE_Q_FLG_DEF_VLAN, + ECORE_Q_FLG_TX_SWITCH, + ECORE_Q_FLG_TX_SEC, + ECORE_Q_FLG_ANTI_SPOOF, + ECORE_Q_FLG_SILENT_VLAN_REM, + ECORE_Q_FLG_FORCE_DEFAULT_PRI, + ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, + ECORE_Q_FLG_PCSUM_ON_PKT, + ECORE_Q_FLG_TUN_INC_INNER_IP_ID +}; + +/* Queue type options: queue type may be a combination of below. 
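+ *
+ * (For instance, a regular L2 queue is expected to advertise both RX
+ * and TX capability in the queue object's 'type' bit mask, using the
+ * enum values below as bit positions -- a sketch, not taken from the
+ * driver itself:
+ *
+ *    unsigned long q_type = 0;
+ *
+ *    q_type |= (1 << ECORE_Q_TYPE_HAS_RX);
+ *    q_type |= (1 << ECORE_Q_TYPE_HAS_TX);
+ *
+ * whereas a forwarding/tx-only connection would be based on
+ * ECORE_Q_TYPE_FWD.)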
*/ +enum ecore_q_type { + ECORE_Q_TYPE_FWD, + ECORE_Q_TYPE_HAS_RX, + ECORE_Q_TYPE_HAS_TX, +}; + +#define ECORE_PRIMARY_CID_INDEX 0 +#define ECORE_MULTI_TX_COS_E1X 3 /* QM only */ +#define ECORE_MULTI_TX_COS_E2_E3A0 2 +#define ECORE_MULTI_TX_COS_E3B0 3 +#define ECORE_MULTI_TX_COS 3 /* Maximum possible */ +#define MAC_PAD (ECORE_ALIGN(ETH_ALEN, sizeof(uint32_t)) - ETH_ALEN) + +struct ecore_queue_init_params { + struct { + unsigned long flags; + uint16_t hc_rate; + uint8_t fw_sb_id; + uint8_t sb_cq_index; + } tx; + + struct { + unsigned long flags; + uint16_t hc_rate; + uint8_t fw_sb_id; + uint8_t sb_cq_index; + } rx; + + /* CID context in the host memory */ + struct eth_context *cxts[ECORE_MULTI_TX_COS]; + + /* maximum number of cos supported by hardware */ + uint8_t max_cos; +}; + +struct ecore_queue_terminate_params { + /* index within the tx_only cids of this queue object */ + uint8_t cid_index; +}; + +struct ecore_queue_cfc_del_params { + /* index within the tx_only cids of this queue object */ + uint8_t cid_index; +}; + +struct ecore_queue_update_params { + unsigned long update_flags; /* ECORE_Q_UPDATE_XX bits */ + uint16_t def_vlan; + uint16_t silent_removal_value; + uint16_t silent_removal_mask; +/* index within the tx_only cids of this queue object */ + uint8_t cid_index; +}; + +struct rxq_pause_params { + uint16_t bd_th_lo; + uint16_t bd_th_hi; + uint16_t rcq_th_lo; + uint16_t rcq_th_hi; + uint16_t sge_th_lo; /* valid iff ECORE_Q_FLG_TPA */ + uint16_t sge_th_hi; /* valid iff ECORE_Q_FLG_TPA */ + uint16_t pri_map; +}; + +/* general */ +struct ecore_general_setup_params { + /* valid iff ECORE_Q_FLG_STATS */ + uint8_t stat_id; + + uint8_t spcl_id; + uint16_t mtu; + uint8_t cos; +}; + +struct ecore_rxq_setup_params { + /* dma */ + ecore_dma_addr_t dscr_map; + ecore_dma_addr_t rcq_map; + ecore_dma_addr_t rcq_np_map; + + uint16_t drop_flags; + uint16_t buf_sz; + uint8_t fw_sb_id; + uint8_t cl_qzone_id; + + /* valid iff ECORE_Q_FLG_TPA */ + uint16_t tpa_agg_sz; + uint8_t max_tpa_queues; + uint8_t rss_engine_id; + + /* valid iff ECORE_Q_FLG_MCAST */ + uint8_t mcast_engine_id; + + uint8_t cache_line_log; + + uint8_t sb_cq_index; + + /* valid iff BXN2X_Q_FLG_SILENT_VLAN_REM */ + uint16_t silent_removal_value; + uint16_t silent_removal_mask; +}; + +struct ecore_txq_setup_params { + /* dma */ + ecore_dma_addr_t dscr_map; + + uint8_t fw_sb_id; + uint8_t sb_cq_index; + uint8_t cos; /* valid iff ECORE_Q_FLG_COS */ + uint16_t traffic_type; + /* equals to the leading rss client id, used for TX classification*/ + uint8_t tss_leading_cl_id; + + /* valid iff ECORE_Q_FLG_DEF_VLAN */ + uint16_t default_vlan; +}; + +struct ecore_queue_setup_params { + struct ecore_general_setup_params gen_params; + struct ecore_txq_setup_params txq_params; + struct ecore_rxq_setup_params rxq_params; + struct rxq_pause_params pause_params; + unsigned long flags; +}; + +struct ecore_queue_setup_tx_only_params { + struct ecore_general_setup_params gen_params; + struct ecore_txq_setup_params txq_params; + unsigned long flags; + /* index within the tx_only cids of this queue object */ + uint8_t cid_index; +}; + +struct ecore_queue_state_params { + struct ecore_queue_sp_obj *q_obj; + + /* Current command */ + enum ecore_queue_cmd cmd; + + /* may have RAMROD_COMP_WAIT set only */ + unsigned long ramrod_flags; + + /* Params according to the current command */ + union { + struct ecore_queue_update_params update; + struct ecore_queue_setup_params setup; + struct ecore_queue_init_params init; + struct 
ecore_queue_setup_tx_only_params tx_only; + struct ecore_queue_terminate_params terminate; + struct ecore_queue_cfc_del_params cfc_del; + } params; +}; + +struct ecore_viflist_params { + uint8_t echo_res; + uint8_t func_bit_map_res; +}; + +struct ecore_queue_sp_obj { + uint32_t cids[ECORE_MULTI_TX_COS]; + uint8_t cl_id; + uint8_t func_id; + + /* number of traffic classes supported by queue. + * The primary connection of the queue supports the first traffic + * class. Any further traffic class is supported by a tx-only + * connection. + * + * Therefore max_cos is also a number of valid entries in the cids + * array. + */ + uint8_t max_cos; + uint8_t num_tx_only, next_tx_only; + + enum ecore_q_state state, next_state; + + /* bits from enum ecore_q_type */ + unsigned long type; + + /* ECORE_Q_CMD_XX bits. This object implements "one + * pending" paradigm but for debug and tracing purposes it's + * more convenient to have different bits for different + * commands. + */ + unsigned long pending; + + /* Buffer to use as a ramrod data and its mapping */ + void *rdata; + ecore_dma_addr_t rdata_mapping; + + /** + * Performs one state change according to the given parameters. + * + * @return 0 in case of success and negative value otherwise. + */ + int (*send_cmd)(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params); + + /** + * Sets the pending bit according to the requested transition. + */ + int (*set_pending)(struct ecore_queue_sp_obj *o, + struct ecore_queue_state_params *params); + + /** + * Checks that the requested state transition is legal. + */ + int (*check_transition)(struct bnx2x_softc *sc, + struct ecore_queue_sp_obj *o, + struct ecore_queue_state_params *params); + + /** + * Completes the pending command. + */ + int (*complete_cmd)(struct bnx2x_softc *sc, + struct ecore_queue_sp_obj *o, + enum ecore_queue_cmd); + + int (*wait_comp)(struct bnx2x_softc *sc, + struct ecore_queue_sp_obj *o, + enum ecore_queue_cmd cmd); +}; + +/********************** Function state update *********************************/ +/* Allowed Function states */ +enum ecore_func_state { + ECORE_F_STATE_RESET, + ECORE_F_STATE_INITIALIZED, + ECORE_F_STATE_STARTED, + ECORE_F_STATE_TX_STOPPED, + ECORE_F_STATE_MAX, +}; + +/* Allowed Function commands */ +enum ecore_func_cmd { + ECORE_F_CMD_HW_INIT, + ECORE_F_CMD_START, + ECORE_F_CMD_STOP, + ECORE_F_CMD_HW_RESET, + ECORE_F_CMD_AFEX_UPDATE, + ECORE_F_CMD_AFEX_VIFLISTS, + ECORE_F_CMD_TX_STOP, + ECORE_F_CMD_TX_START, + ECORE_F_CMD_SWITCH_UPDATE, + ECORE_F_CMD_MAX, +}; + +struct ecore_func_hw_init_params { + /* A load phase returned by MCP. + * + * May be: + * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP + * FW_MSG_CODE_DRV_LOAD_COMMON + * FW_MSG_CODE_DRV_LOAD_PORT + * FW_MSG_CODE_DRV_LOAD_FUNCTION + */ + uint32_t load_phase; +}; + +struct ecore_func_hw_reset_params { + /* A load phase returned by MCP. 
+ * + * May be: + * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP + * FW_MSG_CODE_DRV_LOAD_COMMON + * FW_MSG_CODE_DRV_LOAD_PORT + * FW_MSG_CODE_DRV_LOAD_FUNCTION + */ + uint32_t reset_phase; +}; + +struct ecore_func_start_params { + /* Multi Function mode: + * - Single Function + * - Switch Dependent + * - Switch Independent + */ + uint16_t mf_mode; + + /* Switch Dependent mode outer VLAN tag */ + uint16_t sd_vlan_tag; + + /* Function cos mode */ + uint8_t network_cos_mode; + + /* NVGRE classification enablement */ + uint8_t nvgre_clss_en; + + /* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */ + uint8_t gre_tunnel_mode; + + /* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */ + uint8_t gre_tunnel_rss; + +}; + +struct ecore_func_switch_update_params { + uint8_t suspend; +}; + +struct ecore_func_afex_update_params { + uint16_t vif_id; + uint16_t afex_default_vlan; + uint8_t allowed_priorities; +}; + +struct ecore_func_afex_viflists_params { + uint16_t vif_list_index; + uint8_t func_bit_map; + uint8_t afex_vif_list_command; + uint8_t func_to_clear; +}; +struct ecore_func_tx_start_params { + struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES]; + uint8_t dcb_enabled; + uint8_t dcb_version; + uint8_t dont_add_pri_0; +}; + +struct ecore_func_state_params { + struct ecore_func_sp_obj *f_obj; + + /* Current command */ + enum ecore_func_cmd cmd; + + /* may have RAMROD_COMP_WAIT set only */ + unsigned long ramrod_flags; + + /* Params according to the current command */ + union { + struct ecore_func_hw_init_params hw_init; + struct ecore_func_hw_reset_params hw_reset; + struct ecore_func_start_params start; + struct ecore_func_switch_update_params switch_update; + struct ecore_func_afex_update_params afex_update; + struct ecore_func_afex_viflists_params afex_viflists; + struct ecore_func_tx_start_params tx_start; + } params; +}; + +struct ecore_func_sp_drv_ops { + /* Init tool + runtime initialization: + * - Common Chip + * - Common (per Path) + * - Port + * - Function phases + */ + int (*init_hw_cmn_chip)(struct bnx2x_softc *sc); + int (*init_hw_cmn)(struct bnx2x_softc *sc); + int (*init_hw_port)(struct bnx2x_softc *sc); + int (*init_hw_func)(struct bnx2x_softc *sc); + + /* Reset Function HW: Common, Port, Function phases. */ + void (*reset_hw_cmn)(struct bnx2x_softc *sc); + void (*reset_hw_port)(struct bnx2x_softc *sc); + void (*reset_hw_func)(struct bnx2x_softc *sc); + + /* Prepare/Release FW resources */ + int (*init_fw)(struct bnx2x_softc *sc); + void (*release_fw)(struct bnx2x_softc *sc); +}; + +struct ecore_func_sp_obj { + enum ecore_func_state state, next_state; + + /* ECORE_FUNC_CMD_XX bits. This object implements "one + * pending" paradigm but for debug and tracing purposes it's + * more convenient to have different bits for different + * commands. + */ + unsigned long pending; + + /* Buffer to use as a ramrod data and its mapping */ + void *rdata; + ecore_dma_addr_t rdata_mapping; + + /* Buffer to use as a afex ramrod data and its mapping. + * This can't be same rdata as above because afex ramrod requests + * can arrive to the object in parallel to other ramrod requests. + */ + void *afex_rdata; + ecore_dma_addr_t afex_rdata_mapping; + + /* this mutex validates that when pending flag is taken, the next + * ramrod to be sent will be the one set the pending bit + */ + ECORE_MUTEX one_pending_mutex; + + /* Driver interface */ + struct ecore_func_sp_drv_ops *drv; + + /** + * Performs one state change according to the given parameters. 
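+ *
+ * A hedged illustration of how a request is expected to be shaped;
+ * callers normally go through ecore_func_state_change() declared
+ * further below instead of invoking this hook directly, and 'f_obj'
+ * here is just a placeholder for the function object kept in the
+ * driver's softc:
+ *
+ *    struct ecore_func_state_params params = { 0 };
+ *
+ *    params.f_obj = f_obj;
+ *    params.cmd = ECORE_F_CMD_TX_STOP;
+ *    ... set RAMROD_COMP_WAIT in params.ramrod_flags to block until
+ *        the ramrod completes ...
+ *    rc = ecore_func_state_change(sc, &params);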
+ * + * @return 0 in case of success and negative value otherwise. + */ + int (*send_cmd)(struct bnx2x_softc *sc, + struct ecore_func_state_params *params); + + /** + * Checks that the requested state transition is legal. + */ + int (*check_transition)(struct bnx2x_softc *sc, + struct ecore_func_sp_obj *o, + struct ecore_func_state_params *params); + + /** + * Completes the pending command. + */ + int (*complete_cmd)(struct bnx2x_softc *sc, + struct ecore_func_sp_obj *o, + enum ecore_func_cmd cmd); + + int (*wait_comp)(struct bnx2x_softc *sc, struct ecore_func_sp_obj *o, + enum ecore_func_cmd cmd); +}; + +/********************** Interfaces ********************************************/ +/* Queueable objects set */ +union ecore_qable_obj { + struct ecore_vlan_mac_obj vlan_mac; +}; +/************** Function state update *********/ +void ecore_init_func_obj(struct bnx2x_softc *sc, + struct ecore_func_sp_obj *obj, + void *rdata, ecore_dma_addr_t rdata_mapping, + void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping, + struct ecore_func_sp_drv_ops *drv_iface); + +int ecore_func_state_change(struct bnx2x_softc *sc, + struct ecore_func_state_params *params); + +enum ecore_func_state ecore_func_get_state(struct bnx2x_softc *sc, + struct ecore_func_sp_obj *o); +/******************* Queue State **************/ +void ecore_init_queue_obj(struct bnx2x_softc *sc, + struct ecore_queue_sp_obj *obj, uint8_t cl_id, uint32_t *cids, + uint8_t cid_cnt, uint8_t func_id, void *rdata, + ecore_dma_addr_t rdata_mapping, unsigned long type); + +int ecore_queue_state_change(struct bnx2x_softc *sc, + struct ecore_queue_state_params *params); + +/********************* VLAN-MAC ****************/ +void ecore_init_mac_obj(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *mac_obj, + uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata, + ecore_dma_addr_t rdata_mapping, int state, + unsigned long *pstate, ecore_obj_type type, + struct ecore_credit_pool_obj *macs_pool); + +void ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o); +int ecore_vlan_mac_h_write_lock(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o); +void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc, + struct ecore_vlan_mac_obj *o); +int ecore_config_vlan_mac(struct bnx2x_softc *sc, + struct ecore_vlan_mac_ramrod_params *p); + +int ecore_vlan_mac_move(struct bnx2x_softc *sc, + struct ecore_vlan_mac_ramrod_params *p, + struct ecore_vlan_mac_obj *dest_o); + +/********************* RX MODE ****************/ + +void ecore_init_rx_mode_obj(struct bnx2x_softc *sc, + struct ecore_rx_mode_obj *o); + +/** + * ecore_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters. + * + * @p: Command parameters + * + * Return: 0 - if operation was successful and there is no pending completions, + * positive number - if there are pending completions, + * negative - if there were errors + */ +int ecore_config_rx_mode(struct bnx2x_softc *sc, + struct ecore_rx_mode_ramrod_params *p); + +/****************** MULTICASTS ****************/ + +void ecore_init_mcast_obj(struct bnx2x_softc *sc, + struct ecore_mcast_obj *mcast_obj, + uint8_t mcast_cl_id, uint32_t mcast_cid, uint8_t func_id, + uint8_t engine_id, void *rdata, ecore_dma_addr_t rdata_mapping, + int state, unsigned long *pstate, + ecore_obj_type type); + +/** + * ecore_config_mcast - Configure multicast MACs list. 
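+ *
+ * Usage sketch (hedged -- the mcast_obj handle and the way the MAC
+ * list is built are placeholders; only the types and the call itself
+ * come from this header):
+ *
+ *    struct ecore_mcast_ramrod_params rparam = { 0 };
+ *    int rc;
+ *
+ *    rparam.mcast_obj = mcast_obj;
+ *    ... link one struct ecore_mcast_list_elem per MAC address onto
+ *        rparam.mcast_list and set rparam.mcast_list_len ...
+ *    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
+ *    rc < 0 means an error, rc > 0 means the command was enqueued and
+ *    its completion is still pending.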
+ * + * @cmd: command to execute: BNX2X_MCAST_CMD_X + * + * May configure a new list + * provided in p->mcast_list (ECORE_MCAST_CMD_ADD), clean up + * (ECORE_MCAST_CMD_DEL) or restore (ECORE_MCAST_CMD_RESTORE) a current + * configuration, continue to execute the pending commands + * (ECORE_MCAST_CMD_CONT). + * + * If previous command is still pending or if number of MACs to + * configure is more that maximum number of MACs in one command, + * the current command will be enqueued to the tail of the + * pending commands list. + * + * Return: 0 is operation was successfull and there are no pending completions, + * negative if there were errors, positive if there are pending + * completions. + */ +int ecore_config_mcast(struct bnx2x_softc *sc, + struct ecore_mcast_ramrod_params *p, + enum ecore_mcast_cmd cmd); + +/****************** CREDIT POOL ****************/ +void ecore_init_mac_credit_pool(struct bnx2x_softc *sc, + struct ecore_credit_pool_obj *p, uint8_t func_id, + uint8_t func_num); +void ecore_init_vlan_credit_pool(struct bnx2x_softc *sc, + struct ecore_credit_pool_obj *p, uint8_t func_id, + uint8_t func_num); + +/****************** RSS CONFIGURATION ****************/ +void ecore_init_rss_config_obj(struct ecore_rss_config_obj *rss_obj, + uint8_t cl_id, uint32_t cid, uint8_t func_id, uint8_t engine_id, + void *rdata, ecore_dma_addr_t rdata_mapping, + int state, unsigned long *pstate, + ecore_obj_type type); + +/** + * ecore_config_rss - Updates RSS configuration according to provided parameters + * + * Return: 0 in case of success + */ +int ecore_config_rss(struct bnx2x_softc *sc, + struct ecore_config_rss_params *p); + + +#endif /* ECORE_SP_H */ diff --git a/drivers/net/bnx2x/elink.c b/drivers/net/bnx2x/elink.c new file mode 100644 index 00000000..b9149b89 --- /dev/null +++ b/drivers/net/bnx2x/elink.c @@ -0,0 +1,13357 @@ +/* + * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * + * Eric Davis <edavis@broadcom.com> + * David Christensen <davidch@broadcom.com> + * Gary Zambrano <zambrano@broadcom.com> + * + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.bnx2x_pmd for copyright and licensing details. 
+ */ + +#include "bnx2x.h" +#include "elink.h" +#include "ecore_mfw_req.h" +#include "ecore_fw_defs.h" +#include "ecore_hsi.h" +#include "ecore_reg.h" + +static elink_status_t elink_link_reset(struct elink_params *params, + struct elink_vars *vars, + uint8_t reset_ext_phy); +static elink_status_t elink_check_half_open_conn(struct elink_params *params, + struct elink_vars *vars, + uint8_t notify); +static elink_status_t elink_sfp_module_detection(struct elink_phy *phy, + struct elink_params *params); + +#define MDIO_REG_BANK_CL73_IEEEB0 0x0 +#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0 +#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200 +#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN 0x1000 +#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST 0x8000 + +#define MDIO_REG_BANK_CL73_IEEEB1 0x10 +#define MDIO_CL73_IEEEB1_AN_ADV1 0x00 +#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE 0x0400 +#define MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC 0x0800 +#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH 0x0C00 +#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK 0x0C00 +#define MDIO_CL73_IEEEB1_AN_ADV2 0x01 +#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M 0x0000 +#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX 0x0020 +#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 0x0040 +#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR 0x0080 +#define MDIO_CL73_IEEEB1_AN_LP_ADV1 0x03 +#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE 0x0400 +#define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC 0x0800 +#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH 0x0C00 +#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK 0x0C00 +#define MDIO_CL73_IEEEB1_AN_LP_ADV2 0x04 + +#define MDIO_REG_BANK_RX0 0x80b0 +#define MDIO_RX0_RX_STATUS 0x10 +#define MDIO_RX0_RX_STATUS_SIGDET 0x8000 +#define MDIO_RX0_RX_STATUS_RX_SEQ_DONE 0x1000 +#define MDIO_RX0_RX_EQ_BOOST 0x1c +#define MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7 +#define MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL 0x10 + +#define MDIO_REG_BANK_RX1 0x80c0 +#define MDIO_RX1_RX_EQ_BOOST 0x1c +#define MDIO_RX1_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7 +#define MDIO_RX1_RX_EQ_BOOST_OFFSET_CTRL 0x10 + +#define MDIO_REG_BANK_RX2 0x80d0 +#define MDIO_RX2_RX_EQ_BOOST 0x1c +#define MDIO_RX2_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7 +#define MDIO_RX2_RX_EQ_BOOST_OFFSET_CTRL 0x10 + +#define MDIO_REG_BANK_RX3 0x80e0 +#define MDIO_RX3_RX_EQ_BOOST 0x1c +#define MDIO_RX3_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7 +#define MDIO_RX3_RX_EQ_BOOST_OFFSET_CTRL 0x10 + +#define MDIO_REG_BANK_RX_ALL 0x80f0 +#define MDIO_RX_ALL_RX_EQ_BOOST 0x1c +#define MDIO_RX_ALL_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7 +#define MDIO_RX_ALL_RX_EQ_BOOST_OFFSET_CTRL 0x10 + +#define MDIO_REG_BANK_TX0 0x8060 +#define MDIO_TX0_TX_DRIVER 0x17 +#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000 +#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12 +#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00 +#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8 +#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0 +#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4 +#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e +#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1 +#define MDIO_TX0_TX_DRIVER_ICBUF1T 1 + +#define MDIO_REG_BANK_TX1 0x8070 +#define MDIO_TX1_TX_DRIVER 0x17 +#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000 +#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12 +#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00 +#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8 +#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0 +#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4 +#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e +#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1 +#define 
MDIO_TX0_TX_DRIVER_ICBUF1T 1 + +#define MDIO_REG_BANK_TX2 0x8080 +#define MDIO_TX2_TX_DRIVER 0x17 +#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000 +#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12 +#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00 +#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8 +#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0 +#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4 +#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e +#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1 +#define MDIO_TX0_TX_DRIVER_ICBUF1T 1 + +#define MDIO_REG_BANK_TX3 0x8090 +#define MDIO_TX3_TX_DRIVER 0x17 +#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000 +#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12 +#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00 +#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8 +#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0 +#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4 +#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e +#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1 +#define MDIO_TX0_TX_DRIVER_ICBUF1T 1 + +#define MDIO_REG_BANK_XGXS_BLOCK0 0x8000 +#define MDIO_BLOCK0_XGXS_CONTROL 0x10 + +#define MDIO_REG_BANK_XGXS_BLOCK1 0x8010 +#define MDIO_BLOCK1_LANE_CTRL0 0x15 +#define MDIO_BLOCK1_LANE_CTRL1 0x16 +#define MDIO_BLOCK1_LANE_CTRL2 0x17 +#define MDIO_BLOCK1_LANE_PRBS 0x19 + +#define MDIO_REG_BANK_XGXS_BLOCK2 0x8100 +#define MDIO_XGXS_BLOCK2_RX_LN_SWAP 0x10 +#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE 0x8000 +#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE 0x4000 +#define MDIO_XGXS_BLOCK2_TX_LN_SWAP 0x11 +#define MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE 0x8000 +#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G 0x14 +#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS 0x0001 +#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS 0x0010 +#define MDIO_XGXS_BLOCK2_TEST_MODE_LANE 0x15 + +#define MDIO_REG_BANK_GP_STATUS 0x8120 +#define MDIO_GP_STATUS_TOP_AN_STATUS1 0x1B +#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE 0x0001 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE 0x0002 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS 0x0004 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS 0x0008 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE 0x0010 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_LP_NP_BAM_ABLE 0x0020 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE 0x0040 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE 0x0080 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK 0x3f00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M 0x0000 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M 0x0100 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G 0x0200 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G 0x0300 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G 0x0400 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G 0x0500 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG 0x0600 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 0x0700 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG 0x0800 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G 0x0900 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G 0x0A00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G 0x0B00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G 0x0C00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX 0x0D00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 0x0E00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR 0x0F00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI 
0x1B00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS 0x1E00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI 0x1F00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2 0x3900 + +#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130 +#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS 0x10 +#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK 0x8000 +#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL 0x11 +#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN 0x1 +#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK 0x13 +#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT (0xb71<<1) + +#define MDIO_REG_BANK_SERDES_DIGITAL 0x8300 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1 0x10 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE 0x0001 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_TBI_IF 0x0002 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN 0x0004 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT 0x0008 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET 0x0010 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE 0x0020 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2 0x11 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN 0x0001 +#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 0x0040 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1 0x14 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SGMII 0x0001 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_LINK 0x0002 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX 0x0004 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK 0x0018 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 3 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_2_5G 0x0018 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G 0x0010 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M 0x0008 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M 0x0000 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2 0x15 +#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED 0x0002 +#define MDIO_SERDES_DIGITAL_MISC1 0x18 +#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK 0xE000 +#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M 0x0000 +#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_100M 0x2000 +#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_125M 0x4000 +#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M 0x6000 +#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_187_5M 0x8000 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL 0x0010 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK 0x000f +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_2_5G 0x0000 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_5G 0x0001 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_6G 0x0002 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_HIG 0x0003 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4 0x0004 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12G 0x0005 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12_5G 0x0006 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G 0x0007 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_15G 0x0008 +#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_16G 0x0009 + +#define MDIO_REG_BANK_OVER_1G 0x8320 +#define MDIO_OVER_1G_DIGCTL_3_4 0x14 +#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_MASK 0xffe0 +#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_SHIFT 5 +#define MDIO_OVER_1G_UP1 0x19 +#define MDIO_OVER_1G_UP1_2_5G 0x0001 +#define MDIO_OVER_1G_UP1_5G 0x0002 +#define MDIO_OVER_1G_UP1_6G 0x0004 +#define MDIO_OVER_1G_UP1_10G 0x0010 +#define MDIO_OVER_1G_UP1_10GH 0x0008 +#define MDIO_OVER_1G_UP1_12G 0x0020 +#define 
MDIO_OVER_1G_UP1_12_5G 0x0040 +#define MDIO_OVER_1G_UP1_13G 0x0080 +#define MDIO_OVER_1G_UP1_15G 0x0100 +#define MDIO_OVER_1G_UP1_16G 0x0200 +#define MDIO_OVER_1G_UP2 0x1A +#define MDIO_OVER_1G_UP2_IPREDRIVER_MASK 0x0007 +#define MDIO_OVER_1G_UP2_IDRIVER_MASK 0x0038 +#define MDIO_OVER_1G_UP2_PREEMPHASIS_MASK 0x03C0 +#define MDIO_OVER_1G_UP3 0x1B +#define MDIO_OVER_1G_UP3_HIGIG2 0x0001 +#define MDIO_OVER_1G_LP_UP1 0x1C +#define MDIO_OVER_1G_LP_UP2 0x1D +#define MDIO_OVER_1G_LP_UP2_MR_ADV_OVER_1G_MASK 0x03ff +#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK 0x0780 +#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT 7 +#define MDIO_OVER_1G_LP_UP3 0x1E + +#define MDIO_REG_BANK_REMOTE_PHY 0x8330 +#define MDIO_REMOTE_PHY_MISC_RX_STATUS 0x10 +#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG 0x0010 +#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG 0x0600 + +#define MDIO_REG_BANK_BAM_NEXT_PAGE 0x8350 +#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL 0x10 +#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE 0x0001 +#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN 0x0002 + +#define MDIO_REG_BANK_CL73_USERB0 0x8370 +#define MDIO_CL73_USERB0_CL73_UCTRL 0x10 +#define MDIO_CL73_USERB0_CL73_UCTRL_USTAT1_MUXSEL 0x0002 +#define MDIO_CL73_USERB0_CL73_USTAT1 0x11 +#define MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK 0x0100 +#define MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37 0x0400 +#define MDIO_CL73_USERB0_CL73_BAM_CTRL1 0x12 +#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN 0x8000 +#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN 0x4000 +#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN 0x2000 +#define MDIO_CL73_USERB0_CL73_BAM_CTRL3 0x14 +#define MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR 0x0001 + +#define MDIO_REG_BANK_AER_BLOCK 0xFFD0 +#define MDIO_AER_BLOCK_AER_REG 0x1E + +#define MDIO_REG_BANK_COMBO_IEEE0 0xFFE0 +#define MDIO_COMBO_IEEE0_MII_CONTROL 0x10 +#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK 0x2040 +#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_10 0x0000 +#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100 0x2000 +#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000 0x0040 +#define MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX 0x0100 +#define MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN 0x0200 +#define MDIO_COMBO_IEEO_MII_CONTROL_AN_EN 0x1000 +#define MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK 0x4000 +#define MDIO_COMBO_IEEO_MII_CONTROL_RESET 0x8000 +#define MDIO_COMBO_IEEE0_MII_STATUS 0x11 +#define MDIO_COMBO_IEEE0_MII_STATUS_LINK_PASS 0x0004 +#define MDIO_COMBO_IEEE0_MII_STATUS_AUTONEG_COMPLETE 0x0020 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV 0x14 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX 0x0020 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_HALF_DUPLEX 0x0040 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK 0x0180 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE 0x0000 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC 0x0080 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC 0x0100 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH 0x0180 +#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_NEXT_PAGE 0x8000 +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1 0x15 +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_NEXT_PAGE 0x8000 +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_ACK 0x4000 +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_MASK 0x0180 +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_NONE 0x0000 +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_BOTH 0x0180 
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_HALF_DUP_CAP 0x0040 +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_FULL_DUP_CAP 0x0020 +/*WhenthelinkpartnerisinSGMIImode(bit0=1),then +bit15=link,bit12=duplex,bits11:10=speed,bit14=acknowledge. +Theotherbitsarereservedandshouldbezero*/ +#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_SGMII_MODE 0x0001 + +#define MDIO_PMA_DEVAD 0x1 +/*ieee*/ +#define MDIO_PMA_REG_CTRL 0x0 +#define MDIO_PMA_REG_STATUS 0x1 +#define MDIO_PMA_REG_10G_CTRL2 0x7 +#define MDIO_PMA_REG_TX_DISABLE 0x0009 +#define MDIO_PMA_REG_RX_SD 0xa +/*bnx2x*/ +#define MDIO_PMA_REG_BNX2X_CTRL 0x0096 +#define MDIO_PMA_REG_FEC_CTRL 0x00ab +#define MDIO_PMA_LASI_RXCTRL 0x9000 +#define MDIO_PMA_LASI_TXCTRL 0x9001 +#define MDIO_PMA_LASI_CTRL 0x9002 +#define MDIO_PMA_LASI_RXSTAT 0x9003 +#define MDIO_PMA_LASI_TXSTAT 0x9004 +#define MDIO_PMA_LASI_STAT 0x9005 +#define MDIO_PMA_REG_PHY_IDENTIFIER 0xc800 +#define MDIO_PMA_REG_DIGITAL_CTRL 0xc808 +#define MDIO_PMA_REG_DIGITAL_STATUS 0xc809 +#define MDIO_PMA_REG_TX_POWER_DOWN 0xca02 +#define MDIO_PMA_REG_CMU_PLL_BYPASS 0xca09 +#define MDIO_PMA_REG_MISC_CTRL 0xca0a +#define MDIO_PMA_REG_GEN_CTRL 0xca10 +#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188 +#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a +#define MDIO_PMA_REG_M8051_MSGIN_REG 0xca12 +#define MDIO_PMA_REG_M8051_MSGOUT_REG 0xca13 +#define MDIO_PMA_REG_ROM_VER1 0xca19 +#define MDIO_PMA_REG_ROM_VER2 0xca1a +#define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b +#define MDIO_PMA_REG_PLL_BANDWIDTH 0xca1d +#define MDIO_PMA_REG_PLL_CTRL 0xca1e +#define MDIO_PMA_REG_MISC_CTRL0 0xca23 +#define MDIO_PMA_REG_LRM_MODE 0xca3f +#define MDIO_PMA_REG_CDR_BANDWIDTH 0xca46 +#define MDIO_PMA_REG_MISC_CTRL1 0xca85 + +#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL 0x8000 +#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK 0x000c +#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE 0x0000 +#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE 0x0004 +#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IN_PROGRESS 0x0008 +#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_FAILED 0x000c +#define MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT 0x8002 +#define MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR 0x8003 +#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF 0xc820 +#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK 0xff +#define MDIO_PMA_REG_8726_TX_CTRL1 0xca01 +#define MDIO_PMA_REG_8726_TX_CTRL2 0xca05 + +#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR 0x8005 +#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF 0x8007 +#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff +#define MDIO_PMA_REG_8727_MISC_CTRL 0x8309 +#define MDIO_PMA_REG_8727_TX_CTRL1 0xca02 +#define MDIO_PMA_REG_8727_TX_CTRL2 0xca05 +#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808 +#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e +#define MDIO_PMA_REG_8727_PCS_GP 0xc842 +#define MDIO_PMA_REG_8727_OPT_CFG_REG 0xc8e4 + +#define MDIO_AN_REG_8727_MISC_CTRL 0x8309 +#define MDIO_PMA_REG_8073_CHIP_REV 0xc801 +#define MDIO_PMA_REG_8073_SPEED_LINK_STATUS 0xc820 +#define MDIO_PMA_REG_8073_XAUI_WA 0xc841 +#define MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL 0xcd08 + +#define MDIO_PMA_REG_7101_RESET 0xc000 +#define MDIO_PMA_REG_7107_LED_CNTL 0xc007 +#define MDIO_PMA_REG_7107_LINK_LED_CNTL 0xc009 +#define MDIO_PMA_REG_7101_VER1 0xc026 +#define MDIO_PMA_REG_7101_VER2 0xc027 + +#define MDIO_PMA_REG_8481_PMD_SIGNAL 0xa811 +#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c +#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f +#define MDIO_PMA_REG_8481_LED3_MASK 0xa832 +#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834 +#define 
MDIO_PMA_REG_8481_LED5_MASK 0xa838 +#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835 +#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b +#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK 0x800 +#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT 11 + +#define MDIO_WIS_DEVAD 0x2 +/*bnx2x*/ +#define MDIO_WIS_REG_LASI_CNTL 0x9002 +#define MDIO_WIS_REG_LASI_STATUS 0x9005 + +#define MDIO_PCS_DEVAD 0x3 +#define MDIO_PCS_REG_STATUS 0x0020 +#define MDIO_PCS_REG_LASI_STATUS 0x9005 +#define MDIO_PCS_REG_7101_DSP_ACCESS 0xD000 +#define MDIO_PCS_REG_7101_SPI_MUX 0xD008 +#define MDIO_PCS_REG_7101_SPI_CTRL_ADDR 0xE12A +#define MDIO_PCS_REG_7101_SPI_RESET_BIT (5) +#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR 0xE02A +#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_WRITE_ENABLE_CMD (6) +#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_BULK_ERASE_CMD (0xC7) +#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_PAGE_PROGRAM_CMD (2) +#define MDIO_PCS_REG_7101_SPI_BYTES_TO_TRANSFER_ADDR 0xE028 + +#define MDIO_XS_DEVAD 0x4 +#define MDIO_XS_REG_STATUS 0x0001 +#define MDIO_XS_PLL_SEQUENCER 0x8000 +#define MDIO_XS_SFX7101_XGXS_TEST1 0xc00a + +#define MDIO_XS_8706_REG_BANK_RX0 0x80bc +#define MDIO_XS_8706_REG_BANK_RX1 0x80cc +#define MDIO_XS_8706_REG_BANK_RX2 0x80dc +#define MDIO_XS_8706_REG_BANK_RX3 0x80ec +#define MDIO_XS_8706_REG_BANK_RXA 0x80fc + +#define MDIO_XS_REG_8073_RX_CTRL_PCIE 0x80FA + +#define MDIO_AN_DEVAD 0x7 +/*ieee*/ +#define MDIO_AN_REG_CTRL 0x0000 +#define MDIO_AN_REG_STATUS 0x0001 +#define MDIO_AN_REG_STATUS_AN_COMPLETE 0x0020 +#define MDIO_AN_REG_ADV_PAUSE 0x0010 +#define MDIO_AN_REG_ADV_PAUSE_PAUSE 0x0400 +#define MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC 0x0800 +#define MDIO_AN_REG_ADV_PAUSE_BOTH 0x0C00 +#define MDIO_AN_REG_ADV_PAUSE_MASK 0x0C00 +#define MDIO_AN_REG_ADV 0x0011 +#define MDIO_AN_REG_ADV2 0x0012 +#define MDIO_AN_REG_LP_AUTO_NEG 0x0013 +#define MDIO_AN_REG_LP_AUTO_NEG2 0x0014 +#define MDIO_AN_REG_MASTER_STATUS 0x0021 +#define MDIO_AN_REG_EEE_ADV 0x003c +#define MDIO_AN_REG_LP_EEE_ADV 0x003d +/*bnx2x*/ +#define MDIO_AN_REG_LINK_STATUS 0x8304 +#define MDIO_AN_REG_CL37_CL73 0x8370 +#define MDIO_AN_REG_CL37_AN 0xffe0 +#define MDIO_AN_REG_CL37_FC_LD 0xffe4 +#define MDIO_AN_REG_CL37_FC_LP 0xffe5 +#define MDIO_AN_REG_1000T_STATUS 0xffea + +#define MDIO_AN_REG_8073_2_5G 0x8329 +#define MDIO_AN_REG_8073_BAM 0x8350 + +#define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL 0x0020 +#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0 +#define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G 0x40 +#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1 +#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4 +#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6 +#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9 +#define MDIO_AN_REG_8481_1G_100T_EXT_CTRL 0xfff0 +#define MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF 0x0008 +#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5 +#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7 +#define MDIO_AN_REG_8481_AUX_CTRL 0xfff8 +#define MDIO_AN_REG_8481_LEGACY_SHADOW 0xfffc + +/* BNX2X84823 only */ +#define MDIO_CTL_DEVAD 0x1e +#define MDIO_CTL_REG_84823_MEDIA 0x401a +#define MDIO_CTL_REG_84823_MEDIA_MAC_MASK 0x0018 + /* These pins configure the BNX2X84823 interface to MAC after reset. */ +#define MDIO_CTL_REG_84823_CTRL_MAC_XFI 0x0008 +#define MDIO_CTL_REG_84823_MEDIA_MAC_XAUI_M 0x0010 + /* These pins configure the BNX2X84823 interface to Line after reset. 
*/ +#define MDIO_CTL_REG_84823_MEDIA_LINE_MASK 0x0060 +#define MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L 0x0020 +#define MDIO_CTL_REG_84823_MEDIA_LINE_XFI 0x0040 + /* When this pin is active high during reset, 10GBASE-T core is power + * down, When it is active low the 10GBASE-T is power up + */ +#define MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN 0x0080 +#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK 0x0100 +#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000 +#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100 +#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000 +#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005 +#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080 +#define MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH 0xa82b +#define MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ 0x2f +#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3 +#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec +#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080 + +/* BNX2X84833 only */ +#define MDIO_84833_TOP_CFG_FW_REV 0x400f +#define MDIO_84833_TOP_CFG_FW_EEE 0x10b1 +#define MDIO_84833_TOP_CFG_FW_NO_EEE 0x1f81 +#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a +#define MDIO_84833_SUPER_ISOLATE 0x8000 +/* These are mailbox register set used by 84833. */ +#define MDIO_84833_TOP_CFG_SCRATCH_REG0 0x4005 +#define MDIO_84833_TOP_CFG_SCRATCH_REG1 0x4006 +#define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007 +#define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008 +#define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009 +#define MDIO_84833_TOP_CFG_SCRATCH_REG26 0x4037 +#define MDIO_84833_TOP_CFG_SCRATCH_REG27 0x4038 +#define MDIO_84833_TOP_CFG_SCRATCH_REG28 0x4039 +#define MDIO_84833_TOP_CFG_SCRATCH_REG29 0x403a +#define MDIO_84833_TOP_CFG_SCRATCH_REG30 0x403b +#define MDIO_84833_TOP_CFG_SCRATCH_REG31 0x403c +#define MDIO_84833_CMD_HDLR_COMMAND MDIO_84833_TOP_CFG_SCRATCH_REG0 +#define MDIO_84833_CMD_HDLR_STATUS MDIO_84833_TOP_CFG_SCRATCH_REG26 +#define MDIO_84833_CMD_HDLR_DATA1 MDIO_84833_TOP_CFG_SCRATCH_REG27 +#define MDIO_84833_CMD_HDLR_DATA2 MDIO_84833_TOP_CFG_SCRATCH_REG28 +#define MDIO_84833_CMD_HDLR_DATA3 MDIO_84833_TOP_CFG_SCRATCH_REG29 +#define MDIO_84833_CMD_HDLR_DATA4 MDIO_84833_TOP_CFG_SCRATCH_REG30 +#define MDIO_84833_CMD_HDLR_DATA5 MDIO_84833_TOP_CFG_SCRATCH_REG31 + +/* Mailbox command set used by 84833. */ +#define PHY84833_CMD_SET_PAIR_SWAP 0x8001 +#define PHY84833_CMD_GET_EEE_MODE 0x8008 +#define PHY84833_CMD_SET_EEE_MODE 0x8009 +#define PHY84833_CMD_GET_CURRENT_TEMP 0x8031 +/* Mailbox status set used by 84833. 
*/ +#define PHY84833_STATUS_CMD_RECEIVED 0x0001 +#define PHY84833_STATUS_CMD_IN_PROGRESS 0x0002 +#define PHY84833_STATUS_CMD_COMPLETE_PASS 0x0004 +#define PHY84833_STATUS_CMD_COMPLETE_ERROR 0x0008 +#define PHY84833_STATUS_CMD_OPEN_FOR_CMDS 0x0010 +#define PHY84833_STATUS_CMD_SYSTEM_BOOT 0x0020 +#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040 +#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080 +#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5 + +/* Warpcore clause 45 addressing */ +#define MDIO_WC_DEVAD 0x3 +#define MDIO_WC_REG_IEEE0BLK_MIICNTL 0x0 +#define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7 +#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10 +#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11 +#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12 +#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000 +#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000 +#define MDIO_WC_REG_PCS_STATUS2 0x0021 +#define MDIO_WC_REG_PMD_KR_CONTROL 0x0096 +#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000 +#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e +#define MDIO_WC_REG_XGXSBLK1_DESKEW 0x8010 +#define MDIO_WC_REG_XGXSBLK1_LANECTRL0 0x8015 +#define MDIO_WC_REG_XGXSBLK1_LANECTRL1 0x8016 +#define MDIO_WC_REG_XGXSBLK1_LANECTRL2 0x8017 +#define MDIO_WC_REG_XGXSBLK1_LANECTRL3 0x8018 +#define MDIO_WC_REG_XGXSBLK1_LANETEST0 0x801a +#define MDIO_WC_REG_TX0_ANA_CTRL0 0x8061 +#define MDIO_WC_REG_TX1_ANA_CTRL0 0x8071 +#define MDIO_WC_REG_TX2_ANA_CTRL0 0x8081 +#define MDIO_WC_REG_TX3_ANA_CTRL0 0x8091 +#define MDIO_WC_REG_TX0_TX_DRIVER 0x8067 +#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET 0x04 +#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_MASK 0x00f0 +#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET 0x08 +#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00 +#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET 0x0c +#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_MASK 0x7000 +#define MDIO_WC_REG_TX1_TX_DRIVER 0x8077 +#define MDIO_WC_REG_TX2_TX_DRIVER 0x8087 +#define MDIO_WC_REG_TX3_TX_DRIVER 0x8097 +#define MDIO_WC_REG_RX0_ANARXCONTROL1G 0x80b9 +#define MDIO_WC_REG_RX2_ANARXCONTROL1G 0x80d9 +#define MDIO_WC_REG_RX0_PCI_CTRL 0x80ba +#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca +#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da +#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea +#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104 +#define MDIO_WC_REG_XGXS_STATUS3 0x8129 +#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130 +#define MDIO_WC_REG_PAR_DET_10G_CTRL 0x8131 +#define MDIO_WC_REG_XGXS_STATUS4 0x813c +#define MDIO_WC_REG_XGXS_X2_CONTROL2 0x8141 +#define MDIO_WC_REG_XGXS_X2_CONTROL3 0x8142 +#define MDIO_WC_REG_XGXS_RX_LN_SWAP1 0x816B +#define MDIO_WC_REG_XGXS_TX_LN_SWAP1 0x8169 +#define MDIO_WC_REG_GP2_STATUS_GP_2_0 0x81d0 +#define MDIO_WC_REG_GP2_STATUS_GP_2_1 0x81d1 +#define MDIO_WC_REG_GP2_STATUS_GP_2_2 0x81d2 +#define MDIO_WC_REG_GP2_STATUS_GP_2_3 0x81d3 +#define MDIO_WC_REG_GP2_STATUS_GP_2_4 0x81d4 +#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL 0x1000 +#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CMPL 0x0100 +#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP 0x0010 +#define MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_AN_CAP 0x1 +#define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP 0x81EE +#define MDIO_WC_REG_UC_INFO_B1_VERSION 0x81F0 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE 0x81F2 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE0_OFFSET 0x0 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT 0x0 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_OPT_LR 0x1 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC 0x2 +#define 
MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_XLAUI 0x3 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_LONG_CH_6G 0x4 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE1_OFFSET 0x4 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE2_OFFSET 0x8 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE3_OFFSET 0xc +#define MDIO_WC_REG_UC_INFO_B1_CRC 0x81FE +#define MDIO_WC_REG_DSC1B0_UC_CTRL 0x820e +#define MDIO_WC_REG_DSC1B0_UC_CTRL_RDY4CMD (1<<7) +#define MDIO_WC_REG_DSC_SMC 0x8213 +#define MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0 0x821e +#define MDIO_WC_REG_TX_FIR_TAP 0x82e2 +#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET 0x00 +#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_MASK 0x000f +#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET 0x04 +#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_MASK 0x03f0 +#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET 0x0a +#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK 0x7c00 +#define MDIO_WC_REG_TX_FIR_TAP_ENABLE 0x8000 +#define MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP 0x82e2 +#define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL 0x82e3 +#define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL 0x82e6 +#define MDIO_WC_REG_CL72_USERB0_CL72_BR_DEF_CTRL 0x82e7 +#define MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL 0x82e8 +#define MDIO_WC_REG_CL72_USERB0_CL72_MISC4_CONTROL 0x82ec +#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1 0x8300 +#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2 0x8301 +#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3 0x8302 +#define MDIO_WC_REG_SERDESDIGITAL_STATUS1000X1 0x8304 +#define MDIO_WC_REG_SERDESDIGITAL_MISC1 0x8308 +#define MDIO_WC_REG_SERDESDIGITAL_MISC2 0x8309 +#define MDIO_WC_REG_DIGITAL3_UP1 0x8329 +#define MDIO_WC_REG_DIGITAL3_LP_UP1 0x832c +#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c +#define MDIO_WC_REG_DIGITAL4_MISC5 0x833e +#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345 +#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349 +#define MDIO_WC_REG_DIGITAL5_LINK_STATUS 0x834d +#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e +#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350 +#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368 +#define MDIO_WC_REG_CL73_USERB0_CTRL 0x8370 +#define MDIO_WC_REG_CL73_USERB0_USTAT 0x8371 +#define MDIO_WC_REG_CL73_BAM_CTRL1 0x8372 +#define MDIO_WC_REG_CL73_BAM_CTRL2 0x8373 +#define MDIO_WC_REG_CL73_BAM_CTRL3 0x8374 +#define MDIO_WC_REG_CL73_BAM_CODE_FIELD 0x837b +#define MDIO_WC_REG_EEE_COMBO_CONTROL0 0x8390 +#define MDIO_WC_REG_TX66_CONTROL 0x83b0 +#define MDIO_WC_REG_RX66_CONTROL 0x83c0 +#define MDIO_WC_REG_RX66_SCW0 0x83c2 +#define MDIO_WC_REG_RX66_SCW1 0x83c3 +#define MDIO_WC_REG_RX66_SCW2 0x83c4 +#define MDIO_WC_REG_RX66_SCW3 0x83c5 +#define MDIO_WC_REG_RX66_SCW0_MASK 0x83c6 +#define MDIO_WC_REG_RX66_SCW1_MASK 0x83c7 +#define MDIO_WC_REG_RX66_SCW2_MASK 0x83c8 +#define MDIO_WC_REG_RX66_SCW3_MASK 0x83c9 +#define MDIO_WC_REG_FX100_CTRL1 0x8400 +#define MDIO_WC_REG_FX100_CTRL3 0x8402 +#define MDIO_WC_REG_CL82_USERB1_TX_CTRL5 0x8436 +#define MDIO_WC_REG_CL82_USERB1_TX_CTRL6 0x8437 +#define MDIO_WC_REG_CL82_USERB1_TX_CTRL7 0x8438 +#define MDIO_WC_REG_CL82_USERB1_TX_CTRL9 0x8439 +#define MDIO_WC_REG_CL82_USERB1_RX_CTRL10 0x843a +#define MDIO_WC_REG_CL82_USERB1_RX_CTRL11 0x843b +#define MDIO_WC_REG_ETA_CL73_OUI1 0x8453 +#define MDIO_WC_REG_ETA_CL73_OUI2 0x8454 +#define MDIO_WC_REG_ETA_CL73_OUI3 0x8455 +#define MDIO_WC_REG_ETA_CL73_LD_BAM_CODE 0x8456 +#define MDIO_WC_REG_ETA_CL73_LD_UD_CODE 0x8457 +#define MDIO_WC_REG_MICROBLK_CMD 0xffc2 +#define MDIO_WC_REG_MICROBLK_DL_STATUS 0xffc5 +#define MDIO_WC_REG_MICROBLK_CMD3 0xffcc + +#define MDIO_WC_REG_AERBLK_AER 0xffde +#define 
MDIO_WC_REG_COMBO_IEEE0_MIICTRL 0xffe0 +#define MDIO_WC_REG_COMBO_IEEE0_MIIISTAT 0xffe1 + +#define MDIO_WC0_XGXS_BLK2_LANE_RESET 0x810A +#define MDIO_WC0_XGXS_BLK2_LANE_RESET_RX_BITSHIFT 0 +#define MDIO_WC0_XGXS_BLK2_LANE_RESET_TX_BITSHIFT 4 + +#define MDIO_WC0_XGXS_BLK6_XGXS_X2_CONTROL2 0x8141 + +#define DIGITAL5_ACTUAL_SPEED_TX_MASK 0x003f + +/* 54618se */ +#define MDIO_REG_GPHY_MII_STATUS 0x1 +#define MDIO_REG_GPHY_PHYID_LSB 0x3 +#define MDIO_REG_GPHY_CL45_ADDR_REG 0xd +#define MDIO_REG_GPHY_CL45_REG_WRITE 0x4000 +#define MDIO_REG_GPHY_CL45_REG_READ 0xc000 +#define MDIO_REG_GPHY_CL45_DATA_REG 0xe +#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e +#define MDIO_REG_GPHY_EXP_ACCESS_GATE 0x15 +#define MDIO_REG_GPHY_EXP_ACCESS 0x17 +#define MDIO_REG_GPHY_EXP_ACCESS_TOP 0xd00 +#define MDIO_REG_GPHY_EXP_TOP_2K_BUF 0x40 +#define MDIO_REG_GPHY_AUX_STATUS 0x19 +#define MDIO_REG_INTR_STATUS 0x1a +#define MDIO_REG_INTR_MASK 0x1b +#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1) +#define MDIO_REG_GPHY_SHADOW 0x1c +#define MDIO_REG_GPHY_SHADOW_LED_SEL1 (0x0d << 10) +#define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10) +#define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15) +#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10) +#define MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD (0x1 << 8) + +typedef elink_status_t(*read_sfp_module_eeprom_func_p) (struct elink_phy * phy, + struct elink_params * + params, + uint8_t dev_addr, + uint16_t addr, + uint8_t byte_cnt, + uint8_t * o_buf, + uint8_t); +/********************************************************/ +#define ELINK_ETH_HLEN 14 +/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ +#define ELINK_ETH_OVREHEAD (ELINK_ETH_HLEN + 8 + 8) +#define ELINK_ETH_MIN_PACKET_SIZE 60 +#define ELINK_ETH_MAX_PACKET_SIZE 1500 +#define ELINK_ETH_MAX_JUMBO_PACKET_SIZE 9600 +#define ELINK_MDIO_ACCESS_TIMEOUT 1000 +#define WC_LANE_MAX 4 +#define I2C_SWITCH_WIDTH 2 +#define I2C_BSC0 0 +#define I2C_BSC1 1 +#define I2C_WA_RETRY_CNT 3 +#define I2C_WA_PWR_ITER (I2C_WA_RETRY_CNT - 1) +#define MCPR_IMC_COMMAND_READ_OP 1 +#define MCPR_IMC_COMMAND_WRITE_OP 2 + +/* LED Blink rate that will achieve ~15.9Hz */ +#define LED_BLINK_RATE_VAL_E3 354 +#define LED_BLINK_RATE_VAL_E1X_E2 480 +/***********************************************************/ +/* Shortcut definitions */ +/***********************************************************/ + +#define ELINK_NIG_LATCH_BC_ENABLE_MI_INT 0 + +#define ELINK_NIG_STATUS_EMAC0_MI_INT \ + NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT +#define ELINK_NIG_STATUS_XGXS0_LINK10G \ + NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G +#define ELINK_NIG_STATUS_XGXS0_LINK_STATUS \ + NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS +#define ELINK_NIG_STATUS_XGXS0_LINK_STATUS_SIZE \ + NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE +#define ELINK_NIG_STATUS_SERDES0_LINK_STATUS \ + NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS +#define ELINK_NIG_MASK_MI_INT \ + NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT +#define ELINK_NIG_MASK_XGXS0_LINK10G \ + NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G +#define ELINK_NIG_MASK_XGXS0_LINK_STATUS \ + NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS +#define ELINK_NIG_MASK_SERDES0_LINK_STATUS \ + NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS + +#define ELINK_MDIO_AN_CL73_OR_37_COMPLETE \ + (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | \ + MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE) + +#define ELINK_XGXS_RESET_BITS \ + 
(MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB) + +#define ELINK_SERDES_RESET_BITS \ + (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD) + +#define ELINK_AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37 +#define ELINK_AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73 +#define ELINK_AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM +#define ELINK_AUTONEG_PARALLEL \ + SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION +#define ELINK_AUTONEG_SGMII_FIBER_AUTODET \ + SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT +#define ELINK_AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY + +#define ELINK_GP_STATUS_PAUSE_RSOLUTION_TXSIDE \ + MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE +#define ELINK_GP_STATUS_PAUSE_RSOLUTION_RXSIDE \ + MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE +#define ELINK_GP_STATUS_SPEED_MASK \ + MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK +#define ELINK_GP_STATUS_10M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M +#define ELINK_GP_STATUS_100M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M +#define ELINK_GP_STATUS_1G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G +#define ELINK_GP_STATUS_2_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G +#define ELINK_GP_STATUS_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G +#define ELINK_GP_STATUS_6G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G +#define ELINK_GP_STATUS_10G_HIG \ + MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG +#define ELINK_GP_STATUS_10G_CX4 \ + MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 +#define ELINK_GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX +#define ELINK_GP_STATUS_10G_KX4 \ + MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 +#define ELINK_GP_STATUS_10G_KR MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR +#define ELINK_GP_STATUS_10G_XFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI +#define ELINK_GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS +#define ELINK_GP_STATUS_10G_SFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI +#define ELINK_GP_STATUS_20G_KR2 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2 +#define ELINK_LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD +#define ELINK_LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD +#define ELINK_LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD +#define ELINK_LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4 +#define ELINK_LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD +#define ELINK_LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD +#define ELINK_LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD +#define ELINK_LINK_1000XFD LINK_STATUS_SPEED_AND_DUPLEX_1000XFD +#define ELINK_LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD +#define ELINK_LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD +#define ELINK_LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD +#define ELINK_LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD +#define ELINK_LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD +#define ELINK_LINK_20GTFD LINK_STATUS_SPEED_AND_DUPLEX_20GTFD +#define ELINK_LINK_20GXFD LINK_STATUS_SPEED_AND_DUPLEX_20GXFD + +#define ELINK_LINK_UPDATE_MASK \ + (LINK_STATUS_SPEED_AND_DUPLEX_MASK | \ + 
LINK_STATUS_LINK_UP | \ + LINK_STATUS_PHYSICAL_LINK_FLAG | \ + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | \ + LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | \ + LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | \ + LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK | \ + LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE | \ + LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE) + +#define ELINK_SFP_EEPROM_CON_TYPE_ADDR 0x2 +#define ELINK_SFP_EEPROM_CON_TYPE_VAL_LC 0x7 +#define ELINK_SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 +#define ELINK_SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22 + +#define ELINK_SFP_EEPROM_COMP_CODE_ADDR 0x3 +#define ELINK_SFP_EEPROM_COMP_CODE_SR_MASK (1<<4) +#define ELINK_SFP_EEPROM_COMP_CODE_LR_MASK (1<<5) +#define ELINK_SFP_EEPROM_COMP_CODE_LRM_MASK (1<<6) + +#define ELINK_SFP_EEPROM_FC_TX_TECH_ADDR 0x8 +#define ELINK_SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4 +#define ELINK_SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8 + +#define ELINK_SFP_EEPROM_OPTIONS_ADDR 0x40 +#define ELINK_SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1 +#define ELINK_SFP_EEPROM_OPTIONS_SIZE 2 + +#define ELINK_EDC_MODE_LINEAR 0x0022 +#define ELINK_EDC_MODE_LIMITING 0x0044 +#define ELINK_EDC_MODE_PASSIVE_DAC 0x0055 +#define ELINK_EDC_MODE_ACTIVE_DAC 0x0066 + +/* ETS defines*/ +#define DCBX_INVALID_COS (0xFF) + +#define ELINK_ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000) +#define ELINK_ETS_BW_LIMIT_CREDIT_WEIGHT (0x5000) +#define ELINK_ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS (1360) +#define ELINK_ETS_E3B0_NIG_MIN_W_VAL_20GBPS (2720) +#define ELINK_ETS_E3B0_PBF_MIN_W_VAL (10000) + +#define ELINK_MAX_PACKET_SIZE (9700) +#define MAX_KR_LINK_RETRY 4 + +/**********************************************************/ +/* INTERFACE */ +/**********************************************************/ + +#define CL22_WR_OVER_CL45(_sc, _phy, _bank, _addr, _val) \ + elink_cl45_write(_sc, _phy, \ + (_phy)->def_md_devad, \ + (_bank + (_addr & 0xf)), \ + _val) + +#define CL22_RD_OVER_CL45(_sc, _phy, _bank, _addr, _val) \ + elink_cl45_read(_sc, _phy, \ + (_phy)->def_md_devad, \ + (_bank + (_addr & 0xf)), \ + _val) + +static uint32_t elink_bits_en(struct bnx2x_softc *sc, uint32_t reg, uint32_t bits) +{ + uint32_t val = REG_RD(sc, reg); + + val |= bits; + REG_WR(sc, reg, val); + return val; +} + +static uint32_t elink_bits_dis(struct bnx2x_softc *sc, uint32_t reg, + uint32_t bits) +{ + uint32_t val = REG_RD(sc, reg); + + val &= ~bits; + REG_WR(sc, reg, val); + return val; +} + +/* + * elink_check_lfa - This function checks if link reinitialization is required, + * or link flap can be avoided. + * + * @params: link parameters + * Returns 0 if Link Flap Avoidance conditions are met otherwise, the failed + * condition code. + */ +static int elink_check_lfa(struct elink_params *params) +{ + uint32_t link_status, cfg_idx, lfa_mask, cfg_size; + uint32_t cur_speed_cap_mask, cur_req_fc_auto_adv, additional_config; + uint32_t saved_val, req_val, eee_status; + struct bnx2x_softc *sc = params->sc; + + additional_config = + REG_RD(sc, params->lfa_base + + offsetof(struct shmem_lfa, additional_config)); + + /* NOTE: must be first condition checked - + * to verify DCC bit is cleared in any case! 
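+	 * When the bit is found set, it is cleared below and LFA is skipped
+	 * for this load only, so that the next link load can avoid the flap.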
+ */ + if (additional_config & NO_LFA_DUE_TO_DCC_MASK) { + PMD_DRV_LOG(DEBUG, "No LFA due to DCC flap after clp exit"); + REG_WR(sc, params->lfa_base + + offsetof(struct shmem_lfa, additional_config), + additional_config & ~NO_LFA_DUE_TO_DCC_MASK); + return LFA_DCC_LFA_DISABLED; + } + + /* Verify that link is up */ + link_status = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + port_mb[params->port].link_status)); + if (!(link_status & LINK_STATUS_LINK_UP)) + return LFA_LINK_DOWN; + + /* if loaded after BOOT from SAN, don't flap the link in any case and + * rely on link set by preboot driver + */ + if (params->feature_config_flags & ELINK_FEATURE_CONFIG_BOOT_FROM_SAN) + return 0; + + /* Verify that loopback mode is not set */ + if (params->loopback_mode) + return LFA_LOOPBACK_ENABLED; + + /* Verify that MFW supports LFA */ + if (!params->lfa_base) + return LFA_MFW_IS_TOO_OLD; + + if (params->num_phys == 3) { + cfg_size = 2; + lfa_mask = 0xffffffff; + } else { + cfg_size = 1; + lfa_mask = 0xffff; + } + + /* Compare Duplex */ + saved_val = REG_RD(sc, params->lfa_base + + offsetof(struct shmem_lfa, req_duplex)); + req_val = params->req_duplex[0] | (params->req_duplex[1] << 16); + if ((saved_val & lfa_mask) != (req_val & lfa_mask)) { + PMD_DRV_LOG(INFO, "Duplex mismatch %x vs. %x", + (saved_val & lfa_mask), (req_val & lfa_mask)); + return LFA_DUPLEX_MISMATCH; + } + /* Compare Flow Control */ + saved_val = REG_RD(sc, params->lfa_base + + offsetof(struct shmem_lfa, req_flow_ctrl)); + req_val = params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16); + if ((saved_val & lfa_mask) != (req_val & lfa_mask)) { + PMD_DRV_LOG(DEBUG, "Flow control mismatch %x vs. %x", + (saved_val & lfa_mask), (req_val & lfa_mask)); + return LFA_FLOW_CTRL_MISMATCH; + } + /* Compare Link Speed */ + saved_val = REG_RD(sc, params->lfa_base + + offsetof(struct shmem_lfa, req_line_speed)); + req_val = params->req_line_speed[0] | (params->req_line_speed[1] << 16); + if ((saved_val & lfa_mask) != (req_val & lfa_mask)) { + PMD_DRV_LOG(DEBUG, "Link speed mismatch %x vs. %x", + (saved_val & lfa_mask), (req_val & lfa_mask)); + return LFA_LINK_SPEED_MISMATCH; + } + + for (cfg_idx = 0; cfg_idx < cfg_size; cfg_idx++) { + cur_speed_cap_mask = REG_RD(sc, params->lfa_base + + offsetof(struct shmem_lfa, + speed_cap_mask[cfg_idx])); + + if (cur_speed_cap_mask != params->speed_cap_mask[cfg_idx]) { + PMD_DRV_LOG(DEBUG, "Speed Cap mismatch %x vs. %x", + cur_speed_cap_mask, + params->speed_cap_mask[cfg_idx]); + return LFA_SPEED_CAP_MISMATCH; + } + } + + cur_req_fc_auto_adv = + REG_RD(sc, params->lfa_base + + offsetof(struct shmem_lfa, additional_config)) & + REQ_FC_AUTO_ADV_MASK; + + if ((uint16_t) cur_req_fc_auto_adv != params->req_fc_auto_adv) { + PMD_DRV_LOG(DEBUG, "Flow Ctrl AN mismatch %x vs. %x", + cur_req_fc_auto_adv, params->req_fc_auto_adv); + return LFA_FLOW_CTRL_MISMATCH; + } + + eee_status = REG_RD(sc, params->shmem2_base + + offsetof(struct shmem2_region, + eee_status[params->port])); + + if (((eee_status & SHMEM_EEE_LPI_REQUESTED_BIT) ^ + (params->eee_mode & ELINK_EEE_MODE_ENABLE_LPI)) || + ((eee_status & SHMEM_EEE_REQUESTED_BIT) ^ + (params->eee_mode & ELINK_EEE_MODE_ADV_LPI))) { + PMD_DRV_LOG(DEBUG, "EEE mismatch %x vs. 
%x", params->eee_mode, + eee_status); + return LFA_EEE_MISMATCH; + } + + /* LFA conditions are met */ + return 0; +} + +/******************************************************************/ +/* EPIO/GPIO section */ +/******************************************************************/ +static void elink_get_epio(struct bnx2x_softc *sc, uint32_t epio_pin, + uint32_t * en) +{ + uint32_t epio_mask, gp_oenable; + *en = 0; + /* Sanity check */ + if (epio_pin > 31) { + PMD_DRV_LOG(DEBUG, "Invalid EPIO pin %d to get", epio_pin); + return; + } + + epio_mask = 1 << epio_pin; + /* Set this EPIO to output */ + gp_oenable = REG_RD(sc, MCP_REG_MCPR_GP_OENABLE); + REG_WR(sc, MCP_REG_MCPR_GP_OENABLE, gp_oenable & ~epio_mask); + + *en = (REG_RD(sc, MCP_REG_MCPR_GP_INPUTS) & epio_mask) >> epio_pin; +} + +static void elink_set_epio(struct bnx2x_softc *sc, uint32_t epio_pin, uint32_t en) +{ + uint32_t epio_mask, gp_output, gp_oenable; + + /* Sanity check */ + if (epio_pin > 31) { + PMD_DRV_LOG(DEBUG, "Invalid EPIO pin %d to set", epio_pin); + return; + } + PMD_DRV_LOG(DEBUG, "Setting EPIO pin %d to %d", epio_pin, en); + epio_mask = 1 << epio_pin; + /* Set this EPIO to output */ + gp_output = REG_RD(sc, MCP_REG_MCPR_GP_OUTPUTS); + if (en) + gp_output |= epio_mask; + else + gp_output &= ~epio_mask; + + REG_WR(sc, MCP_REG_MCPR_GP_OUTPUTS, gp_output); + + /* Set the value for this EPIO */ + gp_oenable = REG_RD(sc, MCP_REG_MCPR_GP_OENABLE); + REG_WR(sc, MCP_REG_MCPR_GP_OENABLE, gp_oenable | epio_mask); +} + +static void elink_set_cfg_pin(struct bnx2x_softc *sc, uint32_t pin_cfg, + uint32_t val) +{ + if (pin_cfg == PIN_CFG_NA) + return; + if (pin_cfg >= PIN_CFG_EPIO0) { + elink_set_epio(sc, pin_cfg - PIN_CFG_EPIO0, val); + } else { + uint8_t gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3; + uint8_t gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2; + elink_cb_gpio_write(sc, gpio_num, (uint8_t) val, gpio_port); + } +} + +static uint32_t elink_get_cfg_pin(struct bnx2x_softc *sc, uint32_t pin_cfg, + uint32_t * val) +{ + if (pin_cfg == PIN_CFG_NA) + return ELINK_STATUS_ERROR; + if (pin_cfg >= PIN_CFG_EPIO0) { + elink_get_epio(sc, pin_cfg - PIN_CFG_EPIO0, val); + } else { + uint8_t gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3; + uint8_t gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2; + *val = elink_cb_gpio_read(sc, gpio_num, gpio_port); + } + return ELINK_STATUS_OK; + +} + +/******************************************************************/ +/* PFC section */ +/******************************************************************/ +static void elink_update_pfc_xmac(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t xmac_base; + uint32_t pause_val, pfc0_val, pfc1_val; + + /* XMAC base adrr */ + xmac_base = (params->port) ? 
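+	/* When PFC is enabled, the pause/PFC registers are written twice below:
+	 * first with FORCE_PFC_XON set and then again with it cleared, which
+	 * forces an initial XON state before normal PFC operation.
+	 */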
GRCBASE_XMAC1 : GRCBASE_XMAC0; + + /* Initialize pause and pfc registers */ + pause_val = 0x18000; + pfc0_val = 0xFFFF8000; + pfc1_val = 0x2; + + /* No PFC support */ + if (!(params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED)) { + + /* RX flow control - Process pause frame in receive direction + */ + if (vars->flow_ctrl & ELINK_FLOW_CTRL_RX) + pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN; + + /* TX flow control - Send pause packet when buffer is full */ + if (vars->flow_ctrl & ELINK_FLOW_CTRL_TX) + pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN; + } else { /* PFC support */ + pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN | + XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN | + XMAC_PFC_CTRL_HI_REG_RX_PFC_EN | + XMAC_PFC_CTRL_HI_REG_TX_PFC_EN | + XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON; + /* Write pause and PFC registers */ + REG_WR(sc, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val); + REG_WR(sc, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val); + REG_WR(sc, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val); + pfc1_val &= ~XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON; + + } + + /* Write pause and PFC registers */ + REG_WR(sc, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val); + REG_WR(sc, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val); + REG_WR(sc, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val); + + /* Set MAC address for source TX Pause/PFC frames */ + REG_WR(sc, xmac_base + XMAC_REG_CTRL_SA_LO, + ((params->mac_addr[2] << 24) | + (params->mac_addr[3] << 16) | + (params->mac_addr[4] << 8) | (params->mac_addr[5]))); + REG_WR(sc, xmac_base + XMAC_REG_CTRL_SA_HI, + ((params->mac_addr[0] << 8) | (params->mac_addr[1]))); + + DELAY(30); +} + +/******************************************************************/ +/* MAC/PBF section */ +/******************************************************************/ +static void elink_set_mdio_clk(struct bnx2x_softc *sc, uint32_t emac_base) +{ + uint32_t new_mode, cur_mode; + uint32_t clc_cnt; + /* Set clause 45 mode, slow down the MDIO clock to 2.5MHz + * (a value of 49==0x31) and make sure that the AUTO poll is off + */ + cur_mode = REG_RD(sc, emac_base + EMAC_REG_EMAC_MDIO_MODE); + + if (USES_WARPCORE(sc)) + clc_cnt = 74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT; + else + clc_cnt = 49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT; + + if (((cur_mode & EMAC_MDIO_MODE_CLOCK_CNT) == clc_cnt) && + (cur_mode & (EMAC_MDIO_MODE_CLAUSE_45))) + return; + + new_mode = cur_mode & + ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT); + new_mode |= clc_cnt; + new_mode |= (EMAC_MDIO_MODE_CLAUSE_45); + + PMD_DRV_LOG(DEBUG, "Changing emac_mode from 0x%x to 0x%x", + cur_mode, new_mode); + REG_WR(sc, emac_base + EMAC_REG_EMAC_MDIO_MODE, new_mode); + DELAY(40); +} + +static void elink_set_mdio_emac_per_phy(struct bnx2x_softc *sc, + struct elink_params *params) +{ + uint8_t phy_index; + /* Set mdio clock per phy */ + for (phy_index = ELINK_INT_PHY; phy_index < params->num_phys; + phy_index++) + elink_set_mdio_clk(sc, params->phy[phy_index].mdio_ctrl); +} + +static uint8_t elink_is_4_port_mode(struct bnx2x_softc *sc) +{ + uint32_t port4mode_ovwr_val; + /* Check 4-port override enabled */ + port4mode_ovwr_val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR); + if (port4mode_ovwr_val & (1 << 0)) { + /* Return 4-port mode override value */ + return (port4mode_ovwr_val & (1 << 1)) == (1 << 1); + } + /* Return 4-port mode from input pin */ + return (uint8_t) REG_RD(sc, MISC_REG_PORT4MODE_EN); +} + +static void elink_emac_init(struct elink_params *params) +{ + /* reset and unreset the emac core */ + struct bnx2x_softc *sc = params->sc; + uint8_t 
port = params->port; + uint32_t emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; + uint32_t val; + uint16_t timeout; + + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); + DELAY(5); + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); + + /* init emac - use read-modify-write */ + /* self clear reset */ + val = REG_RD(sc, emac_base + EMAC_REG_EMAC_MODE); + elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_MODE, + (val | EMAC_MODE_RESET)); + + timeout = 200; + do { + val = REG_RD(sc, emac_base + EMAC_REG_EMAC_MODE); + PMD_DRV_LOG(DEBUG, "EMAC reset reg is %u", val); + if (!timeout) { + PMD_DRV_LOG(DEBUG, "EMAC timeout!"); + return; + } + timeout--; + } while (val & EMAC_MODE_RESET); + + elink_set_mdio_emac_per_phy(sc, params); + /* Set mac address */ + val = ((params->mac_addr[0] << 8) | params->mac_addr[1]); + elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_MAC_MATCH, val); + + val = ((params->mac_addr[2] << 24) | + (params->mac_addr[3] << 16) | + (params->mac_addr[4] << 8) | params->mac_addr[5]); + elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_MAC_MATCH + 4, val); +} + +static void elink_set_xumac_nig(struct elink_params *params, + uint16_t tx_pause_en, uint8_t enable) +{ + struct bnx2x_softc *sc = params->sc; + + REG_WR(sc, params->port ? NIG_REG_P1_MAC_IN_EN : NIG_REG_P0_MAC_IN_EN, + enable); + REG_WR(sc, params->port ? NIG_REG_P1_MAC_OUT_EN : NIG_REG_P0_MAC_OUT_EN, + enable); + REG_WR(sc, params->port ? NIG_REG_P1_MAC_PAUSE_OUT_EN : + NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en); +} + +static void elink_set_umac_rxtx(struct elink_params *params, uint8_t en) +{ + uint32_t umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0; + uint32_t val; + struct bnx2x_softc *sc = params->sc; + if (!(REG_RD(sc, MISC_REG_RESET_REG_2) & + (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port))) + return; + val = REG_RD(sc, umac_base + UMAC_REG_COMMAND_CONFIG); + if (en) + val |= (UMAC_COMMAND_CONFIG_REG_TX_ENA | + UMAC_COMMAND_CONFIG_REG_RX_ENA); + else + val &= ~(UMAC_COMMAND_CONFIG_REG_TX_ENA | + UMAC_COMMAND_CONFIG_REG_RX_ENA); + /* Disable RX and TX */ + REG_WR(sc, umac_base + UMAC_REG_COMMAND_CONFIG, val); +} + +static void elink_umac_enable(struct elink_params *params, + struct elink_vars *vars, uint8_t lb) +{ + uint32_t val; + uint32_t umac_base = params->port ? 
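+	/* UMAC bring-up below: pulse the per-port UMAC reset, open the NIG
+	 * gate, program COMMAND_CONFIG (speed select in bits 3:2, pause
+	 * handling, half duplex) while SW_RESET is held, set the MAC address,
+	 * enable RX/TX and finally release SW_RESET.
+	 */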
GRCBASE_UMAC1 : GRCBASE_UMAC0; + struct bnx2x_softc *sc = params->sc; + /* Reset UMAC */ + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); + DELAY(1000 * 1); + + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); + + PMD_DRV_LOG(DEBUG, "enabling UMAC"); + + /* This register opens the gate for the UMAC despite its name */ + REG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + params->port * 4, 1); + + val = UMAC_COMMAND_CONFIG_REG_PROMIS_EN | + UMAC_COMMAND_CONFIG_REG_PAD_EN | + UMAC_COMMAND_CONFIG_REG_SW_RESET | + UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK; + switch (vars->line_speed) { + case ELINK_SPEED_10: + val |= (0 << 2); + break; + case ELINK_SPEED_100: + val |= (1 << 2); + break; + case ELINK_SPEED_1000: + val |= (2 << 2); + break; + case ELINK_SPEED_2500: + val |= (3 << 2); + break; + default: + PMD_DRV_LOG(DEBUG, "Invalid speed for UMAC %d", + vars->line_speed); + break; + } + if (!(vars->flow_ctrl & ELINK_FLOW_CTRL_TX)) + val |= UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE; + + if (!(vars->flow_ctrl & ELINK_FLOW_CTRL_RX)) + val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE; + + if (vars->duplex == DUPLEX_HALF) + val |= UMAC_COMMAND_CONFIG_REG_HD_ENA; + + REG_WR(sc, umac_base + UMAC_REG_COMMAND_CONFIG, val); + DELAY(50); + + /* Configure UMAC for EEE */ + if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) { + PMD_DRV_LOG(DEBUG, "configured UMAC for EEE"); + REG_WR(sc, umac_base + UMAC_REG_UMAC_EEE_CTRL, + UMAC_UMAC_EEE_CTRL_REG_EEE_EN); + REG_WR(sc, umac_base + UMAC_REG_EEE_WAKE_TIMER, 0x11); + } else { + REG_WR(sc, umac_base + UMAC_REG_UMAC_EEE_CTRL, 0x0); + } + + /* Set MAC address for source TX Pause/PFC frames (under SW reset) */ + REG_WR(sc, umac_base + UMAC_REG_MAC_ADDR0, + ((params->mac_addr[2] << 24) | + (params->mac_addr[3] << 16) | + (params->mac_addr[4] << 8) | (params->mac_addr[5]))); + REG_WR(sc, umac_base + UMAC_REG_MAC_ADDR1, + ((params->mac_addr[0] << 8) | (params->mac_addr[1]))); + + /* Enable RX and TX */ + val &= ~UMAC_COMMAND_CONFIG_REG_PAD_EN; + val |= UMAC_COMMAND_CONFIG_REG_TX_ENA | UMAC_COMMAND_CONFIG_REG_RX_ENA; + REG_WR(sc, umac_base + UMAC_REG_COMMAND_CONFIG, val); + DELAY(50); + + /* Remove SW Reset */ + val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET; + + /* Check loopback mode */ + if (lb) + val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA; + REG_WR(sc, umac_base + UMAC_REG_COMMAND_CONFIG, val); + + /* Maximum Frame Length (RW). Defines a 14-Bit maximum frame + * length used by the MAC receive logic to check frames. 
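+	 * 0x2710 is 10000 decimal, which covers the 9700 byte jumbo frame
+	 * size (ELINK_MAX_PACKET_SIZE) allowed by this driver.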
+ */ + REG_WR(sc, umac_base + UMAC_REG_MAXFR, 0x2710); + elink_set_xumac_nig(params, + ((vars->flow_ctrl & ELINK_FLOW_CTRL_TX) != 0), 1); + vars->mac_type = ELINK_MAC_TYPE_UMAC; + +} + +/* Define the XMAC mode */ +static void elink_xmac_init(struct elink_params *params, uint32_t max_speed) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t is_port4mode = elink_is_4_port_mode(sc); + + /* In 4-port mode, need to set the mode only once, so if XMAC is + * already out of reset, it means the mode has already been set, + * and it must not* reset the XMAC again, since it controls both + * ports of the path + */ + + if (((CHIP_NUM(sc) == CHIP_NUM_57840_4_10) || + (CHIP_NUM(sc) == CHIP_NUM_57840_2_20) || + (CHIP_NUM(sc) == CHIP_NUM_57840_OBS)) && + is_port4mode && + (REG_RD(sc, MISC_REG_RESET_REG_2) & + MISC_REGISTERS_RESET_REG_2_XMAC)) { + PMD_DRV_LOG(DEBUG, "XMAC already out of reset in 4-port mode"); + return; + } + + /* Hard reset */ + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + MISC_REGISTERS_RESET_REG_2_XMAC); + DELAY(1000 * 1); + + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + MISC_REGISTERS_RESET_REG_2_XMAC); + if (is_port4mode) { + PMD_DRV_LOG(DEBUG, "Init XMAC to 2 ports x 10G per path"); + + /* Set the number of ports on the system side to up to 2 */ + REG_WR(sc, MISC_REG_XMAC_CORE_PORT_MODE, 1); + + /* Set the number of ports on the Warp Core to 10G */ + REG_WR(sc, MISC_REG_XMAC_PHY_PORT_MODE, 3); + } else { + /* Set the number of ports on the system side to 1 */ + REG_WR(sc, MISC_REG_XMAC_CORE_PORT_MODE, 0); + if (max_speed == ELINK_SPEED_10000) { + PMD_DRV_LOG(DEBUG, + "Init XMAC to 10G x 1 port per path"); + /* Set the number of ports on the Warp Core to 10G */ + REG_WR(sc, MISC_REG_XMAC_PHY_PORT_MODE, 3); + } else { + PMD_DRV_LOG(DEBUG, + "Init XMAC to 20G x 2 ports per path"); + /* Set the number of ports on the Warp Core to 20G */ + REG_WR(sc, MISC_REG_XMAC_PHY_PORT_MODE, 1); + } + } + /* Soft reset */ + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); + DELAY(1000 * 1); + + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); + +} + +static void elink_set_xmac_rxtx(struct elink_params *params, uint8_t en) +{ + uint8_t port = params->port; + struct bnx2x_softc *sc = params->sc; + uint32_t pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; + uint32_t val; + + if (REG_RD(sc, MISC_REG_RESET_REG_2) & MISC_REGISTERS_RESET_REG_2_XMAC) { + /* Send an indication to change the state in the NIG back to XON + * Clearing this bit enables the next set of this bit to get + * rising edge + */ + pfc_ctrl = REG_RD(sc, xmac_base + XMAC_REG_PFC_CTRL_HI); + REG_WR(sc, xmac_base + XMAC_REG_PFC_CTRL_HI, + (pfc_ctrl & ~(1 << 1))); + REG_WR(sc, xmac_base + XMAC_REG_PFC_CTRL_HI, + (pfc_ctrl | (1 << 1))); + PMD_DRV_LOG(DEBUG, "Disable XMAC on port %x", port); + val = REG_RD(sc, xmac_base + XMAC_REG_CTRL); + if (en) + val |= (XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN); + else + val &= ~(XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN); + REG_WR(sc, xmac_base + XMAC_REG_CTRL, val); + } +} + +static elink_status_t elink_xmac_enable(struct elink_params *params, + struct elink_vars *vars, uint8_t lb) +{ + uint32_t val, xmac_base; + struct bnx2x_softc *sc = params->sc; + PMD_DRV_LOG(DEBUG, "enabling XMAC"); + + xmac_base = (params->port) ? 
GRCBASE_XMAC1 : GRCBASE_XMAC0; + + elink_xmac_init(params, vars->line_speed); + + /* This register determines on which events the MAC will assert + * error on the i/f to the NIG along w/ EOP. + */ + + /* This register tells the NIG whether to send traffic to UMAC + * or XMAC + */ + REG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + params->port * 4, 0); + + /* When XMAC is in XLGMII mode, disable sending idles for fault + * detection. + */ + if (!(params->phy[ELINK_INT_PHY].flags & ELINK_FLAGS_TX_ERROR_CHECK)) { + REG_WR(sc, xmac_base + XMAC_REG_RX_LSS_CTRL, + (XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE | + XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE)); + REG_WR(sc, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0); + REG_WR(sc, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, + XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS | + XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS); + } + /* Set Max packet size */ + REG_WR(sc, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710); + + /* CRC append for Tx packets */ + REG_WR(sc, xmac_base + XMAC_REG_TX_CTRL, 0xC800); + + /* update PFC */ + elink_update_pfc_xmac(params, vars); + + if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) { + PMD_DRV_LOG(DEBUG, "Setting XMAC for EEE"); + REG_WR(sc, xmac_base + XMAC_REG_EEE_TIMERS_HI, 0x1380008); + REG_WR(sc, xmac_base + XMAC_REG_EEE_CTRL, 0x1); + } else { + REG_WR(sc, xmac_base + XMAC_REG_EEE_CTRL, 0x0); + } + + /* Enable TX and RX */ + val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN; + + /* Set MAC in XLGMII mode for dual-mode */ + if ((vars->line_speed == ELINK_SPEED_20000) && + (params->phy[ELINK_INT_PHY].supported & + ELINK_SUPPORTED_20000baseKR2_Full)) + val |= XMAC_CTRL_REG_XLGMII_ALIGN_ENB; + + /* Check loopback mode */ + if (lb) + val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK; + REG_WR(sc, xmac_base + XMAC_REG_CTRL, val); + elink_set_xumac_nig(params, + ((vars->flow_ctrl & ELINK_FLOW_CTRL_TX) != 0), 1); + + vars->mac_type = ELINK_MAC_TYPE_XMAC; + + return ELINK_STATUS_OK; +} + +static elink_status_t elink_emac_enable(struct elink_params *params, + struct elink_vars *vars, uint8_t lb) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port = params->port; + uint32_t emac_base = port ? 
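+	/* The sequence below takes the BigMAC out of the datapath (held in
+	 * reset, NIG in/out disabled) and routes the NIG through the EMAC
+	 * instead, then programs flow control/PFC, VLAN/MTU handling and
+	 * optional loopback.
+	 */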
GRCBASE_EMAC1 : GRCBASE_EMAC0; + uint32_t val; + + PMD_DRV_LOG(DEBUG, "enabling EMAC"); + + /* Disable BMAC */ + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + + /* enable emac and not bmac */ + REG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + port * 4, 1); + +#ifdef ELINK_INCLUDE_EMUL + /* for paladium */ + if (CHIP_REV_IS_EMUL(sc)) { + /* Use lane 1 (of lanes 0-3) */ + REG_WR(sc, NIG_REG_XGXS_LANE_SEL_P0 + port * 4, 1); + REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 1); + } + /* for fpga */ + else +#endif +#ifdef ELINK_INCLUDE_FPGA + if (CHIP_REV_IS_FPGA(sc)) { + /* Use lane 1 (of lanes 0-3) */ + PMD_DRV_LOG(DEBUG, "elink_emac_enable: Setting FPGA"); + + REG_WR(sc, NIG_REG_XGXS_LANE_SEL_P0 + port * 4, 1); + REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 0); + } else +#endif + /* ASIC */ + if (vars->phy_flags & PHY_XGXS_FLAG) { + uint32_t ser_lane = ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); + + PMD_DRV_LOG(DEBUG, "XGXS"); + /* select the master lanes (out of 0-3) */ + REG_WR(sc, NIG_REG_XGXS_LANE_SEL_P0 + port * 4, ser_lane); + /* select XGXS */ + REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 1); + + } else { /* SerDes */ + PMD_DRV_LOG(DEBUG, "SerDes"); + /* select SerDes */ + REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 0); + } + + elink_bits_en(sc, emac_base + EMAC_REG_EMAC_RX_MODE, + EMAC_RX_MODE_RESET); + elink_bits_en(sc, emac_base + EMAC_REG_EMAC_TX_MODE, + EMAC_TX_MODE_RESET); + +#if defined(ELINK_INCLUDE_EMUL) || defined(ELINK_INCLUDE_FPGA) + if (CHIP_REV_IS_SLOW(sc)) { + /* config GMII mode */ + val = REG_RD(sc, emac_base + EMAC_REG_EMAC_MODE); + elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_MODE, + (val | EMAC_MODE_PORT_GMII)); + } else { /* ASIC */ +#endif + /* pause enable/disable */ + elink_bits_dis(sc, emac_base + EMAC_REG_EMAC_RX_MODE, + EMAC_RX_MODE_FLOW_EN); + + elink_bits_dis(sc, emac_base + EMAC_REG_EMAC_TX_MODE, + (EMAC_TX_MODE_EXT_PAUSE_EN | + EMAC_TX_MODE_FLOW_EN)); + if (!(params->feature_config_flags & + ELINK_FEATURE_CONFIG_PFC_ENABLED)) { + if (vars->flow_ctrl & ELINK_FLOW_CTRL_RX) + elink_bits_en(sc, emac_base + + EMAC_REG_EMAC_RX_MODE, + EMAC_RX_MODE_FLOW_EN); + + if (vars->flow_ctrl & ELINK_FLOW_CTRL_TX) + elink_bits_en(sc, emac_base + + EMAC_REG_EMAC_TX_MODE, + (EMAC_TX_MODE_EXT_PAUSE_EN | + EMAC_TX_MODE_FLOW_EN)); + } else + elink_bits_en(sc, emac_base + EMAC_REG_EMAC_TX_MODE, + EMAC_TX_MODE_FLOW_EN); +#if defined(ELINK_INCLUDE_EMUL) || defined(ELINK_INCLUDE_FPGA) + } +#endif + + /* KEEP_VLAN_TAG, promiscuous */ + val = REG_RD(sc, emac_base + EMAC_REG_EMAC_RX_MODE); + val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; + + /* Setting this bit causes MAC control frames (except for pause + * frames) to be passed on for processing. This setting has no + * affect on the operation of the pause frames. This bit effects + * all packets regardless of RX Parser packet sorting logic. + * Turn the PFC off to make sure we are in Xon state before + * enabling it. 
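+	 * That is why EMAC_REG_RX_PFC_MODE is cleared first below and only
+	 * then re-enabled (RX/TX plus the priority map) when PFC is configured.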
+ */ + elink_cb_reg_write(sc, emac_base + EMAC_REG_RX_PFC_MODE, 0); + if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED) { + PMD_DRV_LOG(DEBUG, "PFC is enabled"); + /* Enable PFC again */ + elink_cb_reg_write(sc, emac_base + EMAC_REG_RX_PFC_MODE, + EMAC_REG_RX_PFC_MODE_RX_EN | + EMAC_REG_RX_PFC_MODE_TX_EN | + EMAC_REG_RX_PFC_MODE_PRIORITIES); + + elink_cb_reg_write(sc, emac_base + EMAC_REG_RX_PFC_PARAM, + ((0x0101 << + EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT) | + (0x00ff << + EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT))); + val |= EMAC_RX_MODE_KEEP_MAC_CONTROL; + } + elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_RX_MODE, val); + + /* Set Loopback */ + val = REG_RD(sc, emac_base + EMAC_REG_EMAC_MODE); + if (lb) + val |= 0x810; + else + val &= ~0x810; + elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_MODE, val); + + /* Enable emac */ + REG_WR(sc, NIG_REG_NIG_EMAC0_EN + port * 4, 1); + + /* Enable emac for jumbo packets */ + elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_RX_MTU_SIZE, + (EMAC_RX_MTU_SIZE_JUMBO_ENA | + (ELINK_ETH_MAX_JUMBO_PACKET_SIZE + + ELINK_ETH_OVREHEAD))); + + /* Strip CRC */ + REG_WR(sc, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port * 4, 0x1); + + /* Disable the NIG in/out to the bmac */ + REG_WR(sc, NIG_REG_BMAC0_IN_EN + port * 4, 0x0); + REG_WR(sc, NIG_REG_BMAC0_PAUSE_OUT_EN + port * 4, 0x0); + REG_WR(sc, NIG_REG_BMAC0_OUT_EN + port * 4, 0x0); + + /* Enable the NIG in/out to the emac */ + REG_WR(sc, NIG_REG_EMAC0_IN_EN + port * 4, 0x1); + val = 0; + if ((params->feature_config_flags & + ELINK_FEATURE_CONFIG_PFC_ENABLED) || + (vars->flow_ctrl & ELINK_FLOW_CTRL_TX)) + val = 1; + + REG_WR(sc, NIG_REG_EMAC0_PAUSE_OUT_EN + port * 4, val); + REG_WR(sc, NIG_REG_EGRESS_EMAC0_OUT_EN + port * 4, 0x1); + +#ifdef ELINK_INCLUDE_EMUL + if (CHIP_REV_IS_EMUL(sc)) { + /* Take the BigMac out of reset */ + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + + /* Enable access for bmac registers */ + REG_WR(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4, 0x1); + } else +#endif + REG_WR(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4, 0x0); + + vars->mac_type = ELINK_MAC_TYPE_EMAC; + return ELINK_STATUS_OK; +} + +static void elink_update_pfc_bmac1(struct elink_params *params, + struct elink_vars *vars) +{ + uint32_t wb_data[2]; + struct bnx2x_softc *sc = params->sc; + uint32_t bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + + uint32_t val = 0x14; + if ((!(params->feature_config_flags & + ELINK_FEATURE_CONFIG_PFC_ENABLED)) && + (vars->flow_ctrl & ELINK_FLOW_CTRL_RX)) + /* Enable BigMAC to react on received Pause packets */ + val |= (1 << 5); + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2); + + /* TX control */ + val = 0xc0; + if (!(params->feature_config_flags & + ELINK_FEATURE_CONFIG_PFC_ENABLED) && + (vars->flow_ctrl & ELINK_FLOW_CTRL_TX)) + val |= 0x800000; + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_data, 2); +} + +static void elink_update_pfc_bmac2(struct elink_params *params, + struct elink_vars *vars, uint8_t is_lb) +{ + /* Set rx control: Strip CRC and enable BigMAC to relay + * control packets to the system as well + */ + uint32_t wb_data[2]; + struct bnx2x_softc *sc = params->sc; + uint32_t bmac_addr = params->port ? 
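+	/* BigMAC registers are accessed as 64-bit values over DMAE, hence the
+	 * two dword wb_data buffer; the upper dword is written as zero here.
+	 */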
NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + uint32_t val = 0x14; + + if ((!(params->feature_config_flags & + ELINK_FEATURE_CONFIG_PFC_ENABLED)) && + (vars->flow_ctrl & ELINK_FLOW_CTRL_RX)) + /* Enable BigMAC to react on received Pause packets */ + val |= (1 << 5); + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2); + DELAY(30); + + /* Tx control */ + val = 0xc0; + if (!(params->feature_config_flags & + ELINK_FEATURE_CONFIG_PFC_ENABLED) && + (vars->flow_ctrl & ELINK_FLOW_CTRL_TX)) + val |= 0x800000; + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2); + + if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED) { + PMD_DRV_LOG(DEBUG, "PFC is enabled"); + /* Enable PFC RX & TX & STATS and set 8 COS */ + wb_data[0] = 0x0; + wb_data[0] |= (1 << 0); /* RX */ + wb_data[0] |= (1 << 1); /* TX */ + wb_data[0] |= (1 << 2); /* Force initial Xon */ + wb_data[0] |= (1 << 3); /* 8 cos */ + wb_data[0] |= (1 << 5); /* STATS */ + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, + wb_data, 2); + /* Clear the force Xon */ + wb_data[0] &= ~(1 << 2); + } else { + PMD_DRV_LOG(DEBUG, "PFC is disabled"); + /* Disable PFC RX & TX & STATS and set 8 COS */ + wb_data[0] = 0x8; + wb_data[1] = 0; + } + + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2); + + /* Set Time (based unit is 512 bit time) between automatic + * re-sending of PP packets amd enable automatic re-send of + * Per-Priroity Packet as long as pp_gen is asserted and + * pp_disable is low. + */ + val = 0x8000; + if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED) + val |= (1 << 16); /* enable automatic re-send */ + + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL, + wb_data, 2); + + /* mac control */ + val = 0x3; /* Enable RX and TX */ + if (is_lb) { + val |= 0x4; /* Local loopback */ + PMD_DRV_LOG(DEBUG, "enable bmac loopback"); + } + /* When PFC enabled, Pass pause frames towards the NIG. */ + if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED) + val |= ((1 << 6) | (1 << 5)); + + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); +} + +/****************************************************************************** +* Description: +* This function is needed because NIG ARB_CREDIT_WEIGHT_X are +* not continues and ARB_CREDIT_WEIGHT_0 + offset is suitable. +******************************************************************************/ +static elink_status_t elink_pfc_nig_rx_priority_mask(struct bnx2x_softc *sc, + uint8_t cos_entry, + uint32_t priority_mask, + uint8_t port) +{ + uint32_t nig_reg_rx_priority_mask_add = 0; + + switch (cos_entry) { + case 0: + nig_reg_rx_priority_mask_add = (port) ? + NIG_REG_P1_RX_COS0_PRIORITY_MASK : + NIG_REG_P0_RX_COS0_PRIORITY_MASK; + break; + case 1: + nig_reg_rx_priority_mask_add = (port) ? + NIG_REG_P1_RX_COS1_PRIORITY_MASK : + NIG_REG_P0_RX_COS1_PRIORITY_MASK; + break; + case 2: + nig_reg_rx_priority_mask_add = (port) ? 
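+		/* COS3..5 (cases below) exist on port 0 only, hence their
+		 * error return when called for port 1.
+		 */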
+ NIG_REG_P1_RX_COS2_PRIORITY_MASK : + NIG_REG_P0_RX_COS2_PRIORITY_MASK; + break; + case 3: + if (port) + return ELINK_STATUS_ERROR; + nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK; + break; + case 4: + if (port) + return ELINK_STATUS_ERROR; + nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK; + break; + case 5: + if (port) + return ELINK_STATUS_ERROR; + nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK; + break; + } + + REG_WR(sc, nig_reg_rx_priority_mask_add, priority_mask); + + return ELINK_STATUS_OK; +} + +static void elink_update_mng(struct elink_params *params, uint32_t link_status) +{ + struct bnx2x_softc *sc = params->sc; + + REG_WR(sc, params->shmem_base + + offsetof(struct shmem_region, + port_mb[params->port].link_status), link_status); +} + +static void elink_update_link_attr(struct elink_params *params, + uint32_t link_attr) +{ + struct bnx2x_softc *sc = params->sc; + + if (SHMEM2_HAS(sc, link_attr_sync)) + REG_WR(sc, params->shmem2_base + + offsetof(struct shmem2_region, + link_attr_sync[params->port]), link_attr); +} + +static void elink_update_pfc_nig(struct elink_params *params, + struct elink_nig_brb_pfc_port_params + *nig_params) +{ + uint32_t xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = + 0; + uint32_t llfc_enable = 0, xcm_out_en = 0, hwpfc_enable = 0; + uint32_t pkt_priority_to_cos = 0; + struct bnx2x_softc *sc = params->sc; + uint8_t port = params->port; + + int set_pfc = params->feature_config_flags & + ELINK_FEATURE_CONFIG_PFC_ENABLED; + PMD_DRV_LOG(DEBUG, "updating pfc nig parameters"); + + /* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set + * MAC control frames (that are not pause packets) + * will be forwarded to the XCM. + */ + xcm_mask = REG_RD(sc, port ? NIG_REG_LLH1_XCM_MASK : + NIG_REG_LLH0_XCM_MASK); + /* NIG params will override non PFC params, since it's possible to + * do transition from PFC to SAFC + */ + if (set_pfc) { + pause_enable = 0; + llfc_out_en = 0; + llfc_enable = 0; + if (CHIP_IS_E3(sc)) + ppp_enable = 0; + else + ppp_enable = 1; + xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN : + NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN); + xcm_out_en = 0; + hwpfc_enable = 1; + } else { + if (nig_params) { + llfc_out_en = nig_params->llfc_out_en; + llfc_enable = nig_params->llfc_enable; + pause_enable = nig_params->pause_enable; + } else /* Default non PFC mode - PAUSE */ + pause_enable = 1; + + xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN : + NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN); + xcm_out_en = 1; + } + + if (CHIP_IS_E3(sc)) + REG_WR(sc, port ? NIG_REG_BRB1_PAUSE_IN_EN : + NIG_REG_BRB0_PAUSE_IN_EN, pause_enable); + REG_WR(sc, port ? NIG_REG_LLFC_OUT_EN_1 : + NIG_REG_LLFC_OUT_EN_0, llfc_out_en); + REG_WR(sc, port ? NIG_REG_LLFC_ENABLE_1 : + NIG_REG_LLFC_ENABLE_0, llfc_enable); + REG_WR(sc, port ? NIG_REG_PAUSE_ENABLE_1 : + NIG_REG_PAUSE_ENABLE_0, pause_enable); + + REG_WR(sc, port ? NIG_REG_PPP_ENABLE_1 : + NIG_REG_PPP_ENABLE_0, ppp_enable); + + REG_WR(sc, port ? NIG_REG_LLH1_XCM_MASK : + NIG_REG_LLH0_XCM_MASK, xcm_mask); + + REG_WR(sc, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 : + NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7); + + /* Output enable for RX_XCM # IF */ + REG_WR(sc, port ? NIG_REG_XCM1_OUT_EN : + NIG_REG_XCM0_OUT_EN, xcm_out_en); + + /* HW PFC TX enable */ + REG_WR(sc, port ? 
NIG_REG_P1_HWPFC_ENABLE : + NIG_REG_P0_HWPFC_ENABLE, hwpfc_enable); + + if (nig_params) { + uint8_t i = 0; + pkt_priority_to_cos = nig_params->pkt_priority_to_cos; + + for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++) + elink_pfc_nig_rx_priority_mask(sc, i, + nig_params-> + rx_cos_priority_mask[i], + port); + + REG_WR(sc, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 : + NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0, + nig_params->llfc_high_priority_classes); + + REG_WR(sc, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 : + NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0, + nig_params->llfc_low_priority_classes); + } + REG_WR(sc, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS : + NIG_REG_P0_PKT_PRIORITY_TO_COS, pkt_priority_to_cos); +} + +elink_status_t elink_update_pfc(struct elink_params *params, + struct elink_vars *vars, + struct elink_nig_brb_pfc_port_params + *pfc_params) +{ + /* The PFC and pause are orthogonal to one another, meaning when + * PFC is enabled, the pause are disabled, and when PFC is + * disabled, pause are set according to the pause result. + */ + uint32_t val; + struct bnx2x_softc *sc = params->sc; + elink_status_t elink_status = ELINK_STATUS_OK; + uint8_t bmac_loopback = (params->loopback_mode == ELINK_LOOPBACK_BMAC); + + if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED) + vars->link_status |= LINK_STATUS_PFC_ENABLED; + else + vars->link_status &= ~LINK_STATUS_PFC_ENABLED; + + elink_update_mng(params, vars->link_status); + + /* Update NIG params */ + elink_update_pfc_nig(params, pfc_params); + + if (!vars->link_up) + return elink_status; + + PMD_DRV_LOG(DEBUG, "About to update PFC in BMAC"); + + if (CHIP_IS_E3(sc)) { + if (vars->mac_type == ELINK_MAC_TYPE_XMAC) + elink_update_pfc_xmac(params, vars); + } else { + val = REG_RD(sc, MISC_REG_RESET_REG_2); + if ((val & + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) + == 0) { + PMD_DRV_LOG(DEBUG, "About to update PFC in EMAC"); + elink_emac_enable(params, vars, 0); + return elink_status; + } + if (CHIP_IS_E2(sc)) + elink_update_pfc_bmac2(params, vars, bmac_loopback); + else + elink_update_pfc_bmac1(params, vars); + + val = 0; + if ((params->feature_config_flags & + ELINK_FEATURE_CONFIG_PFC_ENABLED) || + (vars->flow_ctrl & ELINK_FLOW_CTRL_TX)) + val = 1; + REG_WR(sc, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port * 4, val); + } + return elink_status; +} + +static elink_status_t elink_bmac1_enable(struct elink_params *params, + struct elink_vars *vars, uint8_t is_lb) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port = params->port; + uint32_t bmac_addr = port ? 
NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + uint32_t wb_data[2]; + uint32_t val; + + PMD_DRV_LOG(DEBUG, "Enabling BigMAC1"); + + /* XGXS control */ + wb_data[0] = 0x3c; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL, + wb_data, 2); + + /* TX MAC SA */ + wb_data[0] = ((params->mac_addr[2] << 24) | + (params->mac_addr[3] << 16) | + (params->mac_addr[4] << 8) | params->mac_addr[5]); + wb_data[1] = ((params->mac_addr[0] << 8) | params->mac_addr[1]); + REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2); + + /* MAC control */ + val = 0x3; + if (is_lb) { + val |= 0x4; + PMD_DRV_LOG(DEBUG, "enable bmac loopback"); + } + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2); + + /* Set rx mtu */ + wb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2); + + elink_update_pfc_bmac1(params, vars); + + /* Set tx mtu */ + wb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2); + + /* Set cnt max size */ + wb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2); + + /* Configure SAFC */ + wb_data[0] = 0x1000200; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, + wb_data, 2); +#ifdef ELINK_INCLUDE_EMUL + /* Fix for emulation */ + if (CHIP_REV_IS_EMUL(sc)) { + wb_data[0] = 0xf000; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD, + wb_data, 2); + } +#endif + + return ELINK_STATUS_OK; +} + +static elink_status_t elink_bmac2_enable(struct elink_params *params, + struct elink_vars *vars, uint8_t is_lb) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port = params->port; + uint32_t bmac_addr = port ? 
NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + uint32_t wb_data[2]; + + PMD_DRV_LOG(DEBUG, "Enabling BigMAC2"); + + wb_data[0] = 0; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); + DELAY(30); + + /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */ + wb_data[0] = 0x3c; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL, + wb_data, 2); + + DELAY(30); + + /* TX MAC SA */ + wb_data[0] = ((params->mac_addr[2] << 24) | + (params->mac_addr[3] << 16) | + (params->mac_addr[4] << 8) | params->mac_addr[5]); + wb_data[1] = ((params->mac_addr[0] << 8) | params->mac_addr[1]); + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR, + wb_data, 2); + + DELAY(30); + + /* Configure SAFC */ + wb_data[0] = 0x1000200; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS, + wb_data, 2); + DELAY(30); + + /* Set RX MTU */ + wb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2); + DELAY(30); + + /* Set TX MTU */ + wb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2); + DELAY(30); + /* Set cnt max size */ + wb_data[0] = ELINK_ETH_MAX_JUMBO_PACKET_SIZE + ELINK_ETH_OVREHEAD - 2; + wb_data[1] = 0; + REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2); + DELAY(30); + elink_update_pfc_bmac2(params, vars, is_lb); + + return ELINK_STATUS_OK; +} + +static elink_status_t elink_bmac_enable(struct elink_params *params, + struct elink_vars *vars, + uint8_t is_lb, uint8_t reset_bmac) +{ + elink_status_t rc = ELINK_STATUS_OK; + uint8_t port = params->port; + struct bnx2x_softc *sc = params->sc; + uint32_t val; + /* Reset and unreset the BigMac */ + if (reset_bmac) { + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + DELAY(1000 * 1); + } + + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + + /* Enable access for bmac registers */ + REG_WR(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4, 0x1); + + /* Enable BMAC according to BMAC type */ + if (CHIP_IS_E2(sc)) + rc = elink_bmac2_enable(params, vars, is_lb); + else + rc = elink_bmac1_enable(params, vars, is_lb); + REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 0x1); + REG_WR(sc, NIG_REG_XGXS_LANE_SEL_P0 + port * 4, 0x0); + REG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + port * 4, 0x0); + val = 0; + if ((params->feature_config_flags & + ELINK_FEATURE_CONFIG_PFC_ENABLED) || + (vars->flow_ctrl & ELINK_FLOW_CTRL_TX)) + val = 1; + REG_WR(sc, NIG_REG_BMAC0_PAUSE_OUT_EN + port * 4, val); + REG_WR(sc, NIG_REG_EGRESS_EMAC0_OUT_EN + port * 4, 0x0); + REG_WR(sc, NIG_REG_EMAC0_IN_EN + port * 4, 0x0); + REG_WR(sc, NIG_REG_EMAC0_PAUSE_OUT_EN + port * 4, 0x0); + REG_WR(sc, NIG_REG_BMAC0_IN_EN + port * 4, 0x1); + REG_WR(sc, NIG_REG_BMAC0_OUT_EN + port * 4, 0x1); + + vars->mac_type = ELINK_MAC_TYPE_BMAC; + return rc; +} + +static void elink_set_bmac_rx(struct bnx2x_softc *sc, uint8_t port, uint8_t en) +{ + uint32_t bmac_addr = port ? 
NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + uint32_t wb_data[2]; + uint32_t nig_bmac_enable = + REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); + + if (CHIP_IS_E2(sc)) + bmac_addr += BIGMAC2_REGISTER_BMAC_CONTROL; + else + bmac_addr += BIGMAC_REGISTER_BMAC_CONTROL; + /* Only if the bmac is out of reset */ + if (REG_RD(sc, MISC_REG_RESET_REG_2) & + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) && nig_bmac_enable) { + /* Clear Rx Enable bit in BMAC_CONTROL register */ + REG_RD_DMAE(sc, bmac_addr, wb_data, 2); + if (en) + wb_data[0] |= ELINK_BMAC_CONTROL_RX_ENABLE; + else + wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE; + REG_WR_DMAE(sc, bmac_addr, wb_data, 2); + DELAY(1000 * 1); + } +} + +static elink_status_t elink_pbf_update(struct elink_params *params, + uint32_t flow_ctrl, uint32_t line_speed) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port = params->port; + uint32_t init_crd, crd; + uint32_t count = 1000; + + /* Disable port */ + REG_WR(sc, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port * 4, 0x1); + + /* Wait for init credit */ + init_crd = REG_RD(sc, PBF_REG_P0_INIT_CRD + port * 4); + crd = REG_RD(sc, PBF_REG_P0_CREDIT + port * 8); + PMD_DRV_LOG(DEBUG, "init_crd 0x%x crd 0x%x", init_crd, crd); + + while ((init_crd != crd) && count) { + DELAY(1000 * 5); + crd = REG_RD(sc, PBF_REG_P0_CREDIT + port * 8); + count--; + } + crd = REG_RD(sc, PBF_REG_P0_CREDIT + port * 8); + if (init_crd != crd) { + PMD_DRV_LOG(DEBUG, "BUG! init_crd 0x%x != crd 0x%x", + init_crd, crd); + return ELINK_STATUS_ERROR; + } + + if (flow_ctrl & ELINK_FLOW_CTRL_RX || + line_speed == ELINK_SPEED_10 || + line_speed == ELINK_SPEED_100 || + line_speed == ELINK_SPEED_1000 || line_speed == ELINK_SPEED_2500) { + REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port * 4, 1); + /* Update threshold */ + REG_WR(sc, PBF_REG_P0_ARB_THRSH + port * 4, 0); + /* Update init credit */ + init_crd = 778; /* (800-18-4) */ + + } else { + uint32_t thresh = (ELINK_ETH_MAX_JUMBO_PACKET_SIZE + + ELINK_ETH_OVREHEAD) / 16; + REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port * 4, 0); + /* Update threshold */ + REG_WR(sc, PBF_REG_P0_ARB_THRSH + port * 4, thresh); + /* Update init credit */ + switch (line_speed) { + case ELINK_SPEED_10000: + init_crd = thresh + 553 - 22; + break; + default: + PMD_DRV_LOG(DEBUG, "Invalid line_speed 0x%x", + line_speed); + return ELINK_STATUS_ERROR; + } + } + REG_WR(sc, PBF_REG_P0_INIT_CRD + port * 4, init_crd); + PMD_DRV_LOG(DEBUG, "PBF updated to speed %d credit %d", + line_speed, init_crd); + + /* Probe the credit changes */ + REG_WR(sc, PBF_REG_INIT_P0 + port * 4, 0x1); + DELAY(1000 * 5); + REG_WR(sc, PBF_REG_INIT_P0 + port * 4, 0x0); + + /* Enable port */ + REG_WR(sc, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port * 4, 0x0); + return ELINK_STATUS_OK; +} + +/** + * elink_get_emac_base - retrive emac base address + * + * @bp: driver handle + * @mdc_mdio_access: access type + * @port: port id + * + * This function selects the MDC/MDIO access (through emac0 or + * emac1) depend on the mdc_mdio_access, port, port swapped. Each + * phy has a default access mode, which could also be overridden + * by nvram configuration. 
This parameter, whether this is the + * default phy configuration, or the nvram overrun + * configuration, is passed here as mdc_mdio_access and selects + * the emac_base for the CL45 read/writes operations + */ +static uint32_t elink_get_emac_base(struct bnx2x_softc *sc, + uint32_t mdc_mdio_access, uint8_t port) +{ + uint32_t emac_base = 0; + switch (mdc_mdio_access) { + case SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE: + break; + case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0: + if (REG_RD(sc, NIG_REG_PORT_SWAP)) + emac_base = GRCBASE_EMAC1; + else + emac_base = GRCBASE_EMAC0; + break; + case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1: + if (REG_RD(sc, NIG_REG_PORT_SWAP)) + emac_base = GRCBASE_EMAC0; + else + emac_base = GRCBASE_EMAC1; + break; + case SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH: + emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0; + break; + case SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED: + emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1; + break; + default: + break; + } + return emac_base; + +} + +/******************************************************************/ +/* CL22 access functions */ +/******************************************************************/ +static elink_status_t elink_cl22_write(struct bnx2x_softc *sc, + struct elink_phy *phy, + uint16_t reg, uint16_t val) +{ + uint32_t tmp, mode; + uint8_t i; + elink_status_t rc = ELINK_STATUS_OK; + /* Switch to CL22 */ + mode = REG_RD(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); + REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, + mode & ~EMAC_MDIO_MODE_CLAUSE_45); + + /* Address */ + tmp = ((phy->addr << 21) | (reg << 16) | val | + EMAC_MDIO_COMM_COMMAND_WRITE_22 | EMAC_MDIO_COMM_START_BUSY); + REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); + + for (i = 0; i < 50; i++) { + DELAY(10); + + tmp = REG_RD(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); + if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { + DELAY(5); + break; + } + } + if (tmp & EMAC_MDIO_COMM_START_BUSY) { + PMD_DRV_LOG(DEBUG, "write phy register failed"); + rc = ELINK_STATUS_TIMEOUT; + } + REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode); + return rc; +} + +static elink_status_t elink_cl22_read(struct bnx2x_softc *sc, + struct elink_phy *phy, + uint16_t reg, uint16_t * ret_val) +{ + uint32_t val, mode; + uint16_t i; + elink_status_t rc = ELINK_STATUS_OK; + + /* Switch to CL22 */ + mode = REG_RD(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); + REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, + mode & ~EMAC_MDIO_MODE_CLAUSE_45); + + /* Address */ + val = ((phy->addr << 21) | (reg << 16) | + EMAC_MDIO_COMM_COMMAND_READ_22 | EMAC_MDIO_COMM_START_BUSY); + REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); + + for (i = 0; i < 50; i++) { + DELAY(10); + + val = REG_RD(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); + if (!(val & EMAC_MDIO_COMM_START_BUSY)) { + *ret_val = (uint16_t) (val & EMAC_MDIO_COMM_DATA); + DELAY(5); + break; + } + } + if (val & EMAC_MDIO_COMM_START_BUSY) { + PMD_DRV_LOG(DEBUG, "read phy register failed"); + + *ret_val = 0; + rc = ELINK_STATUS_TIMEOUT; + } + REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode); + return rc; +} + +/******************************************************************/ +/* CL45 access functions */ +/******************************************************************/ +static elink_status_t elink_cl45_read(struct bnx2x_softc *sc, + struct elink_phy *phy, uint8_t devad, + uint16_t reg, uint16_t * ret_val) +{ + uint32_t val; + uint16_t i; + elink_status_t rc = ELINK_STATUS_OK; + if (phy->flags & 
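+	/* Clause 45 access is two-phased: an ADDRESS cycle latches the
+	 * register number, then a separate READ cycle returns the data; each
+	 * phase polls EMAC_MDIO_COMM_START_BUSY for up to 50 x 10 usec.
+	 */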
ELINK_FLAGS_MDC_MDIO_WA_G) { + elink_set_mdio_clk(sc, phy->mdio_ctrl); + } + + if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA_B0) + elink_bits_en(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, + EMAC_MDIO_STATUS_10MB); + /* Address */ + val = ((phy->addr << 21) | (devad << 16) | reg | + EMAC_MDIO_COMM_COMMAND_ADDRESS | EMAC_MDIO_COMM_START_BUSY); + REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); + + for (i = 0; i < 50; i++) { + DELAY(10); + + val = REG_RD(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); + if (!(val & EMAC_MDIO_COMM_START_BUSY)) { + DELAY(5); + break; + } + } + if (val & EMAC_MDIO_COMM_START_BUSY) { + PMD_DRV_LOG(DEBUG, "read phy register failed"); + elink_cb_event_log(sc, ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT); // "MDC/MDIO access timeout" + + *ret_val = 0; + rc = ELINK_STATUS_TIMEOUT; + } else { + /* Data */ + val = ((phy->addr << 21) | (devad << 16) | + EMAC_MDIO_COMM_COMMAND_READ_45 | + EMAC_MDIO_COMM_START_BUSY); + REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); + + for (i = 0; i < 50; i++) { + DELAY(10); + + val = REG_RD(sc, phy->mdio_ctrl + + EMAC_REG_EMAC_MDIO_COMM); + if (!(val & EMAC_MDIO_COMM_START_BUSY)) { + *ret_val = + (uint16_t) (val & EMAC_MDIO_COMM_DATA); + break; + } + } + if (val & EMAC_MDIO_COMM_START_BUSY) { + PMD_DRV_LOG(DEBUG, "read phy register failed"); + elink_cb_event_log(sc, ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT); // "MDC/MDIO access timeout" + + *ret_val = 0; + rc = ELINK_STATUS_TIMEOUT; + } + } + /* Work around for E3 A0 */ + if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA) { + phy->flags ^= ELINK_FLAGS_DUMMY_READ; + if (phy->flags & ELINK_FLAGS_DUMMY_READ) { + uint16_t temp_val; + elink_cl45_read(sc, phy, devad, 0xf, &temp_val); + } + } + + if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA_B0) + elink_bits_dis(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, + EMAC_MDIO_STATUS_10MB); + return rc; +} + +static elink_status_t elink_cl45_write(struct bnx2x_softc *sc, + struct elink_phy *phy, uint8_t devad, + uint16_t reg, uint16_t val) +{ + uint32_t tmp; + uint8_t i; + elink_status_t rc = ELINK_STATUS_OK; + if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA_G) { + elink_set_mdio_clk(sc, phy->mdio_ctrl); + } + + if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA_B0) + elink_bits_en(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, + EMAC_MDIO_STATUS_10MB); + + /* Address */ + tmp = ((phy->addr << 21) | (devad << 16) | reg | + EMAC_MDIO_COMM_COMMAND_ADDRESS | EMAC_MDIO_COMM_START_BUSY); + REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); + + for (i = 0; i < 50; i++) { + DELAY(10); + + tmp = REG_RD(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); + if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { + DELAY(5); + break; + } + } + if (tmp & EMAC_MDIO_COMM_START_BUSY) { + PMD_DRV_LOG(DEBUG, "write phy register failed"); + elink_cb_event_log(sc, ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT); // "MDC/MDIO access timeout" + + rc = ELINK_STATUS_TIMEOUT; + } else { + /* Data */ + tmp = ((phy->addr << 21) | (devad << 16) | val | + EMAC_MDIO_COMM_COMMAND_WRITE_45 | + EMAC_MDIO_COMM_START_BUSY); + REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); + + for (i = 0; i < 50; i++) { + DELAY(10); + + tmp = REG_RD(sc, phy->mdio_ctrl + + EMAC_REG_EMAC_MDIO_COMM); + if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { + DELAY(5); + break; + } + } + if (tmp & EMAC_MDIO_COMM_START_BUSY) { + PMD_DRV_LOG(DEBUG, "write phy register failed"); + elink_cb_event_log(sc, ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT); // "MDC/MDIO access timeout" + + rc = ELINK_STATUS_TIMEOUT; + } + } + /* Work around for E3 A0 */ + if (phy->flags & 
ELINK_FLAGS_MDC_MDIO_WA) { + phy->flags ^= ELINK_FLAGS_DUMMY_READ; + if (phy->flags & ELINK_FLAGS_DUMMY_READ) { + uint16_t temp_val; + elink_cl45_read(sc, phy, devad, 0xf, &temp_val); + } + } + if (phy->flags & ELINK_FLAGS_MDC_MDIO_WA_B0) + elink_bits_dis(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, + EMAC_MDIO_STATUS_10MB); + return rc; +} + +/******************************************************************/ +/* EEE section */ +/******************************************************************/ +static uint8_t elink_eee_has_cap(struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + + if (REG_RD(sc, params->shmem2_base) <= + offsetof(struct shmem2_region, eee_status[params->port])) + return 0; + + return 1; +} + +static elink_status_t elink_eee_nvram_to_time(uint32_t nvram_mode, + uint32_t * idle_timer) +{ + switch (nvram_mode) { + case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED: + *idle_timer = ELINK_EEE_MODE_NVRAM_BALANCED_TIME; + break; + case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE: + *idle_timer = ELINK_EEE_MODE_NVRAM_AGGRESSIVE_TIME; + break; + case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY: + *idle_timer = ELINK_EEE_MODE_NVRAM_LATENCY_TIME; + break; + default: + *idle_timer = 0; + break; + } + + return ELINK_STATUS_OK; +} + +static elink_status_t elink_eee_time_to_nvram(uint32_t idle_timer, + uint32_t * nvram_mode) +{ + switch (idle_timer) { + case ELINK_EEE_MODE_NVRAM_BALANCED_TIME: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED; + break; + case ELINK_EEE_MODE_NVRAM_AGGRESSIVE_TIME: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE; + break; + case ELINK_EEE_MODE_NVRAM_LATENCY_TIME: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY; + break; + default: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED; + break; + } + + return ELINK_STATUS_OK; +} + +static uint32_t elink_eee_calc_timer(struct elink_params *params) +{ + uint32_t eee_mode, eee_idle; + struct bnx2x_softc *sc = params->sc; + + if (params->eee_mode & ELINK_EEE_MODE_OVERRIDE_NVRAM) { + if (params->eee_mode & ELINK_EEE_MODE_OUTPUT_TIME) { + /* time value in eee_mode --> used directly */ + eee_idle = params->eee_mode & ELINK_EEE_MODE_TIMER_MASK; + } else { + /* hsi value in eee_mode --> time */ + if (elink_eee_nvram_to_time(params->eee_mode & + ELINK_EEE_MODE_NVRAM_MASK, + &eee_idle)) + return 0; + } + } else { + /* hsi values in nvram --> time */ + eee_mode = ((REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_feature_config + [params-> + port].eee_power_mode)) & + PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> + PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); + + if (elink_eee_nvram_to_time(eee_mode, &eee_idle)) + return 0; + } + + return eee_idle; +} + +static elink_status_t elink_eee_set_timers(struct elink_params *params, + struct elink_vars *vars) +{ + uint32_t eee_idle = 0, eee_mode; + struct bnx2x_softc *sc = params->sc; + + eee_idle = elink_eee_calc_timer(params); + + if (eee_idle) { + REG_WR(sc, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2), + eee_idle); + } else if ((params->eee_mode & ELINK_EEE_MODE_ENABLE_LPI) && + (params->eee_mode & ELINK_EEE_MODE_OVERRIDE_NVRAM) && + (params->eee_mode & ELINK_EEE_MODE_OUTPUT_TIME)) { + PMD_DRV_LOG(DEBUG, "Error: Tx LPI is enabled with timer 0"); + return ELINK_STATUS_ERROR; + } + + vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT); + if (params->eee_mode & ELINK_EEE_MODE_OUTPUT_TIME) { + /* eee_idle in 1u --> eee_status in 16u */ + eee_idle >>= 4; + vars->eee_status |= (eee_idle & 
SHMEM_EEE_TIMER_MASK) | + SHMEM_EEE_TIME_OUTPUT_BIT; + } else { + if (elink_eee_time_to_nvram(eee_idle, &eee_mode)) + return ELINK_STATUS_ERROR; + vars->eee_status |= eee_mode; + } + + return ELINK_STATUS_OK; +} + +static elink_status_t elink_eee_initial_config(struct elink_params *params, + struct elink_vars *vars, + uint8_t mode) +{ + vars->eee_status |= ((uint32_t) mode) << SHMEM_EEE_SUPPORTED_SHIFT; + + /* Propogate params' bits --> vars (for migration exposure) */ + if (params->eee_mode & ELINK_EEE_MODE_ENABLE_LPI) + vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT; + else + vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT; + + if (params->eee_mode & ELINK_EEE_MODE_ADV_LPI) + vars->eee_status |= SHMEM_EEE_REQUESTED_BIT; + else + vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT; + + return elink_eee_set_timers(params, vars); +} + +static elink_status_t elink_eee_disable(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + + /* Make Certain LPI is disabled */ + REG_WR(sc, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0); + + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x0); + + vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK; + + return ELINK_STATUS_OK; +} + +static elink_status_t elink_eee_advertise(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars, + uint8_t modes) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val = 0; + + /* Mask events preventing LPI generation */ + REG_WR(sc, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20); + + if (modes & SHMEM_EEE_10G_ADV) { + PMD_DRV_LOG(DEBUG, "Advertise 10GBase-T EEE"); + val |= 0x8; + } + if (modes & SHMEM_EEE_1G_ADV) { + PMD_DRV_LOG(DEBUG, "Advertise 1GBase-T EEE"); + val |= 0x4; + } + + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, val); + + vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK; + vars->eee_status |= (modes << SHMEM_EEE_ADV_STATUS_SHIFT); + + return ELINK_STATUS_OK; +} + +static void elink_update_mng_eee(struct elink_params *params, + uint32_t eee_status) +{ + struct bnx2x_softc *sc = params->sc; + + if (elink_eee_has_cap(params)) + REG_WR(sc, params->shmem2_base + + offsetof(struct shmem2_region, + eee_status[params->port]), eee_status); +} + +static void elink_eee_an_resolve(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t adv = 0, lp = 0; + uint32_t lp_adv = 0; + uint8_t neg = 0; + + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, &adv); + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LP_EEE_ADV, &lp); + + if (lp & 0x2) { + lp_adv |= SHMEM_EEE_100M_ADV; + if (adv & 0x2) { + if (vars->line_speed == ELINK_SPEED_100) + neg = 1; + PMD_DRV_LOG(DEBUG, "EEE negotiated - 100M"); + } + } + if (lp & 0x14) { + lp_adv |= SHMEM_EEE_1G_ADV; + if (adv & 0x14) { + if (vars->line_speed == ELINK_SPEED_1000) + neg = 1; + PMD_DRV_LOG(DEBUG, "EEE negotiated - 1G"); + } + } + if (lp & 0x68) { + lp_adv |= SHMEM_EEE_10G_ADV; + if (adv & 0x68) { + if (vars->line_speed == ELINK_SPEED_10000) + neg = 1; + PMD_DRV_LOG(DEBUG, "EEE negotiated - 10G"); + } + } + + vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK; + vars->eee_status |= (lp_adv << SHMEM_EEE_LP_ADV_STATUS_SHIFT); + + if (neg) { + PMD_DRV_LOG(DEBUG, "EEE is active"); + vars->eee_status |= SHMEM_EEE_ACTIVE_BIT; + } +} + +/******************************************************************/ +/* BSC access functions from E3 */ 
+/******************************************************************/ +static void elink_bsc_module_sel(struct elink_params *params) +{ + int idx; + uint32_t board_cfg, sfp_ctrl; + uint32_t i2c_pins[I2C_SWITCH_WIDTH], i2c_val[I2C_SWITCH_WIDTH]; + struct bnx2x_softc *sc = params->sc; + uint8_t port = params->port; + /* Read I2C output PINs */ + board_cfg = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.shared_hw_config.board)); + i2c_pins[I2C_BSC0] = board_cfg & SHARED_HW_CFG_E3_I2C_MUX0_MASK; + i2c_pins[I2C_BSC1] = (board_cfg & SHARED_HW_CFG_E3_I2C_MUX1_MASK) >> + SHARED_HW_CFG_E3_I2C_MUX1_SHIFT; + + /* Read I2C output value */ + sfp_ctrl = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port]. + e3_cmn_pin_cfg)); + i2c_val[I2C_BSC0] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX0_MASK) > 0; + i2c_val[I2C_BSC1] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX1_MASK) > 0; + PMD_DRV_LOG(DEBUG, "Setting BSC switch"); + for (idx = 0; idx < I2C_SWITCH_WIDTH; idx++) + elink_set_cfg_pin(sc, i2c_pins[idx], i2c_val[idx]); +} + +static elink_status_t elink_bsc_read(struct elink_params *params, + struct bnx2x_softc *sc, + uint8_t sl_devid, + uint16_t sl_addr, + uint8_t lc_addr, + uint8_t xfer_cnt, uint32_t * data_array) +{ + uint32_t val, i; + elink_status_t rc = ELINK_STATUS_OK; + + if (xfer_cnt > 16) { + PMD_DRV_LOG(DEBUG, "invalid xfer_cnt %d. Max is 16 bytes", + xfer_cnt); + return ELINK_STATUS_ERROR; + } + if (params) + elink_bsc_module_sel(params); + + xfer_cnt = 16 - lc_addr; + + /* Enable the engine */ + val = REG_RD(sc, MCP_REG_MCPR_IMC_COMMAND); + val |= MCPR_IMC_COMMAND_ENABLE; + REG_WR(sc, MCP_REG_MCPR_IMC_COMMAND, val); + + /* Program slave device ID */ + val = (sl_devid << 16) | sl_addr; + REG_WR(sc, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val); + + /* Start xfer with 0 byte to update the address pointer ??? 
*/ + val = (MCPR_IMC_COMMAND_ENABLE) | + (MCPR_IMC_COMMAND_WRITE_OP << + MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | + (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0); + REG_WR(sc, MCP_REG_MCPR_IMC_COMMAND, val); + + /* Poll for completion */ + i = 0; + val = REG_RD(sc, MCP_REG_MCPR_IMC_COMMAND); + while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { + DELAY(10); + val = REG_RD(sc, MCP_REG_MCPR_IMC_COMMAND); + if (i++ > 1000) { + PMD_DRV_LOG(DEBUG, "wr 0 byte timed out after %d try", + i); + rc = ELINK_STATUS_TIMEOUT; + break; + } + } + if (rc == ELINK_STATUS_TIMEOUT) + return rc; + + /* Start xfer with read op */ + val = (MCPR_IMC_COMMAND_ENABLE) | + (MCPR_IMC_COMMAND_READ_OP << + MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | + (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | + (xfer_cnt); + REG_WR(sc, MCP_REG_MCPR_IMC_COMMAND, val); + + /* Poll for completion */ + i = 0; + val = REG_RD(sc, MCP_REG_MCPR_IMC_COMMAND); + while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { + DELAY(10); + val = REG_RD(sc, MCP_REG_MCPR_IMC_COMMAND); + if (i++ > 1000) { + PMD_DRV_LOG(DEBUG, "rd op timed out after %d try", i); + rc = ELINK_STATUS_TIMEOUT; + break; + } + } + if (rc == ELINK_STATUS_TIMEOUT) + return rc; + + for (i = (lc_addr >> 2); i < 4; i++) { + data_array[i] = REG_RD(sc, (MCP_REG_MCPR_IMC_DATAREG0 + i * 4)); +#ifdef __BIG_ENDIAN + data_array[i] = ((data_array[i] & 0x000000ff) << 24) | + ((data_array[i] & 0x0000ff00) << 8) | + ((data_array[i] & 0x00ff0000) >> 8) | + ((data_array[i] & 0xff000000) >> 24); +#endif + } + return rc; +} + +static void elink_cl45_read_or_write(struct bnx2x_softc *sc, + struct elink_phy *phy, uint8_t devad, + uint16_t reg, uint16_t or_val) +{ + uint16_t val; + elink_cl45_read(sc, phy, devad, reg, &val); + elink_cl45_write(sc, phy, devad, reg, val | or_val); +} + +static void elink_cl45_read_and_write(struct bnx2x_softc *sc, + struct elink_phy *phy, + uint8_t devad, uint16_t reg, + uint16_t and_val) +{ + uint16_t val; + elink_cl45_read(sc, phy, devad, reg, &val); + elink_cl45_write(sc, phy, devad, reg, val & and_val); +} + +static uint8_t elink_get_warpcore_lane(struct elink_params *params) +{ + uint8_t lane = 0; + struct bnx2x_softc *sc = params->sc; + uint32_t path_swap, path_swap_ovr; + uint8_t path, port; + + path = SC_PATH(sc); + port = params->port; + + if (elink_is_4_port_mode(sc)) { + uint32_t port_swap, port_swap_ovr; + + /* Figure out path swap value */ + path_swap_ovr = REG_RD(sc, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR); + if (path_swap_ovr & 0x1) + path_swap = (path_swap_ovr & 0x2); + else + path_swap = REG_RD(sc, MISC_REG_FOUR_PORT_PATH_SWAP); + + if (path_swap) + path = path ^ 1; + + /* Figure out port swap value */ + port_swap_ovr = REG_RD(sc, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR); + if (port_swap_ovr & 0x1) + port_swap = (port_swap_ovr & 0x2); + else + port_swap = REG_RD(sc, MISC_REG_FOUR_PORT_PORT_SWAP); + + if (port_swap) + port = port ^ 1; + + lane = (port << 1) + path; + } else { /* Two port mode - no port swap */ + + /* Figure out path swap value */ + path_swap_ovr = REG_RD(sc, MISC_REG_TWO_PORT_PATH_SWAP_OVWR); + if (path_swap_ovr & 0x1) { + path_swap = (path_swap_ovr & 0x2); + } else { + path_swap = REG_RD(sc, MISC_REG_TWO_PORT_PATH_SWAP); + } + if (path_swap) + path = path ^ 1; + + lane = path << 1; + } + return lane; +} + +static void elink_set_aer_mmd(struct elink_params *params, + struct elink_phy *phy) +{ + uint32_t ser_lane; + uint16_t offset, aer_val; + struct bnx2x_softc *sc = params->sc; + ser_lane = 
((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); + + offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ? + (phy->addr + ser_lane) : 0; + + if (USES_WARPCORE(sc)) { + aer_val = elink_get_warpcore_lane(params); + /* In Dual-lane mode, two lanes are joined together, + * so in order to configure them, the AER broadcast method is + * used here. + * 0x200 is the broadcast address for lanes 0,1 + * 0x201 is the broadcast address for lanes 2,3 + */ + if (phy->flags & ELINK_FLAGS_WC_DUAL_MODE) + aer_val = (aer_val >> 1) | 0x200; + } else if (CHIP_IS_E2(sc)) + aer_val = 0x3800 + offset - 1; + else + aer_val = 0x3800 + offset; + + CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, aer_val); + +} + +/******************************************************************/ +/* Internal phy section */ +/******************************************************************/ + +static void elink_set_serdes_access(struct bnx2x_softc *sc, uint8_t port) +{ + uint32_t emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0; + + /* Set Clause 22 */ + REG_WR(sc, NIG_REG_SERDES0_CTRL_MD_ST + port * 0x10, 1); + REG_WR(sc, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000); + DELAY(500); + REG_WR(sc, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f); + DELAY(500); + /* Set Clause 45 */ + REG_WR(sc, NIG_REG_SERDES0_CTRL_MD_ST + port * 0x10, 0); +} + +static void elink_serdes_deassert(struct bnx2x_softc *sc, uint8_t port) +{ + uint32_t val; + + PMD_DRV_LOG(DEBUG, "elink_serdes_deassert"); + + val = ELINK_SERDES_RESET_BITS << (port * 16); + + /* Reset and unreset the SerDes/XGXS */ + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); + DELAY(500); + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); + + elink_set_serdes_access(sc, port); + + REG_WR(sc, NIG_REG_SERDES0_CTRL_MD_DEVAD + port * 0x10, + ELINK_DEFAULT_PHY_DEV_ADDR); +} + +static void elink_xgxs_specific_func(struct elink_phy *phy, + struct elink_params *params, + uint32_t action) +{ + struct bnx2x_softc *sc = params->sc; + switch (action) { + case ELINK_PHY_INIT: + /* Set correct devad */ + REG_WR(sc, NIG_REG_XGXS0_CTRL_MD_ST + params->port * 0x18, 0); + REG_WR(sc, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port * 0x18, + phy->def_md_devad); + break; + } +} + +static void elink_xgxs_deassert(struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port; + uint32_t val; + PMD_DRV_LOG(DEBUG, "elink_xgxs_deassert"); + port = params->port; + + val = ELINK_XGXS_RESET_BITS << (port * 16); + + /* Reset and unreset the SerDes/XGXS */ + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); + DELAY(500); + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); + elink_xgxs_specific_func(¶ms->phy[ELINK_INT_PHY], params, + ELINK_PHY_INIT); +} + +static void elink_calc_ieee_aneg_adv(struct elink_phy *phy, + struct elink_params *params, + uint16_t * ieee_fc) +{ + *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; + /* Resolve pause mode and advertisement Please refer to Table + * 28B-3 of the 802.3ab-1999 spec + */ + + switch (phy->req_flow_ctrl) { + case ELINK_FLOW_CTRL_AUTO: + switch (params->req_fc_auto_adv) { + case ELINK_FLOW_CTRL_BOTH: + *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; + break; + case ELINK_FLOW_CTRL_RX: + case ELINK_FLOW_CTRL_TX: + *ieee_fc |= + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; + break; + default: + break; + } + break; + case ELINK_FLOW_CTRL_TX: + *ieee_fc |= 
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; + break; + + case ELINK_FLOW_CTRL_RX: + case ELINK_FLOW_CTRL_BOTH: + *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; + break; + + case ELINK_FLOW_CTRL_NONE: + default: + *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; + break; + } + PMD_DRV_LOG(DEBUG, "ieee_fc = 0x%x", *ieee_fc); +} + +static void set_phy_vars(struct elink_params *params, struct elink_vars *vars) +{ + uint8_t actual_phy_idx, phy_index, link_cfg_idx; + uint8_t phy_config_swapped = params->multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED; + for (phy_index = ELINK_INT_PHY; phy_index < params->num_phys; + phy_index++) { + link_cfg_idx = ELINK_LINK_CONFIG_IDX(phy_index); + actual_phy_idx = phy_index; + if (phy_config_swapped) { + if (phy_index == ELINK_EXT_PHY1) + actual_phy_idx = ELINK_EXT_PHY2; + else if (phy_index == ELINK_EXT_PHY2) + actual_phy_idx = ELINK_EXT_PHY1; + } + params->phy[actual_phy_idx].req_flow_ctrl = + params->req_flow_ctrl[link_cfg_idx]; + + params->phy[actual_phy_idx].req_line_speed = + params->req_line_speed[link_cfg_idx]; + + params->phy[actual_phy_idx].speed_cap_mask = + params->speed_cap_mask[link_cfg_idx]; + + params->phy[actual_phy_idx].req_duplex = + params->req_duplex[link_cfg_idx]; + + if (params->req_line_speed[link_cfg_idx] == + ELINK_SPEED_AUTO_NEG) + vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED; + + PMD_DRV_LOG(DEBUG, "req_flow_ctrl %x, req_line_speed %x," + " speed_cap_mask %x", + params->phy[actual_phy_idx].req_flow_ctrl, + params->phy[actual_phy_idx].req_line_speed, + params->phy[actual_phy_idx].speed_cap_mask); + } +} + +static void elink_ext_phy_set_pause(struct elink_params *params, + struct elink_phy *phy, + struct elink_vars *vars) +{ + uint16_t val; + struct bnx2x_softc *sc = params->sc; + /* Read modify write pause advertizing */ + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val); + + val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH; + + /* Please refer to Table 28B-3 of 802.3ab-1999 spec. 
*/ + elink_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); + if ((vars->ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { + val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; + } + if ((vars->ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { + val |= MDIO_AN_REG_ADV_PAUSE_PAUSE; + } + PMD_DRV_LOG(DEBUG, "Ext phy AN advertize 0x%x", val); + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val); +} + +static void elink_pause_resolve(struct elink_vars *vars, uint32_t pause_result) +{ /* LD LP */ + switch (pause_result) { /* ASYM P ASYM P */ + case 0xb: /* 1 0 1 1 */ + vars->flow_ctrl = ELINK_FLOW_CTRL_TX; + break; + + case 0xe: /* 1 1 1 0 */ + vars->flow_ctrl = ELINK_FLOW_CTRL_RX; + break; + + case 0x5: /* 0 1 0 1 */ + case 0x7: /* 0 1 1 1 */ + case 0xd: /* 1 1 0 1 */ + case 0xf: /* 1 1 1 1 */ + vars->flow_ctrl = ELINK_FLOW_CTRL_BOTH; + break; + + default: + break; + } + if (pause_result & (1 << 0)) + vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE; + if (pause_result & (1 << 1)) + vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE; + +} + +static void elink_ext_phy_update_adv_fc(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + uint16_t ld_pause; /* local */ + uint16_t lp_pause; /* link partner */ + uint16_t pause_result; + struct bnx2x_softc *sc = params->sc; + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE) { + elink_cl22_read(sc, phy, 0x4, &ld_pause); + elink_cl22_read(sc, phy, 0x5, &lp_pause); + } else if (CHIP_IS_E3(sc) && ELINK_SINGLE_MEDIA_DIRECT(params)) { + uint8_t lane = elink_get_warpcore_lane(params); + uint16_t gp_status, gp_mask; + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_4, + &gp_status); + gp_mask = (MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL | + MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP) << + lane; + if ((gp_status & gp_mask) == gp_mask) { + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_ADV_PAUSE, &ld_pause); + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); + } else { + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_CL37_FC_LD, &ld_pause); + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_CL37_FC_LP, &lp_pause); + ld_pause = ((ld_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) + << 3); + lp_pause = ((lp_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) + << 3); + } + } else { + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_ADV_PAUSE, &ld_pause); + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); + } + pause_result = (ld_pause & MDIO_AN_REG_ADV_PAUSE_MASK) >> 8; + pause_result |= (lp_pause & MDIO_AN_REG_ADV_PAUSE_MASK) >> 10; + PMD_DRV_LOG(DEBUG, "Ext PHY pause result 0x%x", pause_result); + elink_pause_resolve(vars, pause_result); + +} + +static uint8_t elink_ext_phy_resolve_fc(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + uint8_t ret = 0; + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + if (phy->req_flow_ctrl != ELINK_FLOW_CTRL_AUTO) { + /* Update the advertised flow-controled of LD/LP in AN */ + if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG) + elink_ext_phy_update_adv_fc(phy, params, vars); + /* But set the flow-control result as the requested one */ + vars->flow_ctrl = phy->req_flow_ctrl; + } else if (phy->req_line_speed != ELINK_SPEED_AUTO_NEG) + vars->flow_ctrl = params->req_fc_auto_adv; + else if 
(vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { + ret = 1; + elink_ext_phy_update_adv_fc(phy, params, vars); + } + return ret; +} + +/******************************************************************/ +/* Warpcore section */ +/******************************************************************/ +/* The init_internal_warpcore should mirror the xgxs, + * i.e. reset the lane (if needed), set aer for the + * init configuration, and set/clear SGMII flag. Internal + * phy init is done purely in phy_init stage. + */ +#define WC_TX_DRIVER(post2, idriver, ipre) \ + ((post2 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | \ + (idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | \ + (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)) + +#define WC_TX_FIR(post, main, pre) \ + ((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | \ + (main << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | \ + (pre << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET)) + +static void elink_warpcore_enable_AN_KR2(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t i; + static struct elink_reg_set reg_set[] = { + /* Step 1 - Program the TX/RX alignment markers */ + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0xa157}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xcbe2}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0x7537}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0xa157}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xcbe2}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0x7537}, + /* Step 2 - Configure the NP registers */ + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000a}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6400}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0620}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0157}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x6464}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x3150}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x3150}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0157}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0620} + }; + PMD_DRV_LOG(DEBUG, "Enabling 20G-KR2"); + + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL49_USERB0_CTRL, (3 << 6)); + + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg, + reg_set[i].val); + + /* Start KR2 work-around timer which handles BNX2X8073 link-parner */ + vars->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE; + elink_update_link_attr(params, vars->link_attr_sync); +} + +static void elink_disable_kr2(struct elink_params *params, + struct elink_vars *vars, struct elink_phy *phy) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t i; + static struct elink_reg_set reg_set[] = { + /* Step 1 - Program the TX/RX alignment markers */ + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000}, + {MDIO_WC_DEVAD, 
MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000} + }; + PMD_DRV_LOG(DEBUG, "Disabling 20G-KR2"); + + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg, + reg_set[i].val); + vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; + elink_update_link_attr(params, vars->link_attr_sync); + + vars->check_kr2_recovery_cnt = ELINK_CHECK_KR2_RECOVERY_CNT; +} + +static void elink_warpcore_set_lpi_passthrough(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + + PMD_DRV_LOG(DEBUG, "Configure WC for LPI pass through"); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_EEE_COMBO_CONTROL0, 0x7c); + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC5, 0xc000); +} + +static void elink_warpcore_restart_AN_KR(struct elink_phy *phy, + struct elink_params *params) +{ + /* Restart autoneg on the leading lane only */ + struct bnx2x_softc *sc = params->sc; + uint16_t lane = elink_get_warpcore_lane(params); + CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, lane); + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200); + + /* Restore AER */ + elink_set_aer_mmd(params, phy); +} + +static void elink_warpcore_enable_AN_KR(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + uint16_t lane, i, cl72_ctrl, an_adv = 0; + struct bnx2x_softc *sc = params->sc; + static struct elink_reg_set reg_set[] = { + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, + {MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190}, + /* Disable Autoneg: re-enable it after adv is done. 
*/ + {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0}, + {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0}, + }; + PMD_DRV_LOG(DEBUG, "Enable Auto Negotiation for KR"); + /* Set to default registers that may be overriden by 10G force */ + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg, + reg_set[i].val); + + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl); + cl72_ctrl &= 0x08ff; + cl72_ctrl |= 0x3800; + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl); + + /* Check adding advertisement for 1G KX */ + if (((vars->line_speed == ELINK_SPEED_AUTO_NEG) && + (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || + (vars->line_speed == ELINK_SPEED_1000)) { + uint16_t addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2; + an_adv |= (1 << 5); + + /* Enable CL37 1G Parallel Detect */ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, addr, 0x1); + PMD_DRV_LOG(DEBUG, "Advertize 1G"); + } + if (((vars->line_speed == ELINK_SPEED_AUTO_NEG) && + (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) || + (vars->line_speed == ELINK_SPEED_10000)) { + /* Check adding advertisement for 10G KR */ + an_adv |= (1 << 7); + /* Enable 10G Parallel Detect */ + CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_PAR_DET_10G_CTRL, 1); + elink_set_aer_mmd(params, phy); + PMD_DRV_LOG(DEBUG, "Advertize 10G"); + } + + /* Set Transmit PMD settings */ + lane = elink_get_warpcore_lane(params); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10 * lane, + WC_TX_DRIVER(0x02, 0x06, 0x09)); + /* Configure the next lane if dual mode */ + if (phy->flags & ELINK_FLAGS_WC_DUAL_MODE) + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10 * (lane + 1), + WC_TX_DRIVER(0x02, 0x06, 0x09)); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL, 0x03f0); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL, 0x03f0); + + /* Advertised speeds */ + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, an_adv); + + /* Advertised and set FEC (Forward Error Correction) */ + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2, + (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY | + MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ)); + + /* Enable CL37 BAM */ + if (REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port]. 
+ default_cfg)) & + PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) { + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, + 1); + PMD_DRV_LOG(DEBUG, "Enable CL37 BAM on KR"); + } + + /* Advertise pause */ + elink_ext_phy_set_pause(params, phy, vars); + vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC7, 0x100); + + /* Over 1G - AN local device user page 1 */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL3_UP1, 0x1f); + + if (((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) && + (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) || + (phy->req_line_speed == ELINK_SPEED_20000)) { + + CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, lane); + + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX1_PCI_CTRL + + (0x10 * lane), (1 << 11)); + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXS_X2_CONTROL3, 0x7); + elink_set_aer_mmd(params, phy); + + elink_warpcore_enable_AN_KR2(phy, params, vars); + } else { + elink_disable_kr2(params, vars, phy); + } + + /* Enable Autoneg: only on the main lane */ + elink_warpcore_restart_AN_KR(phy, params); +} + +static void elink_warpcore_set_10G_KR(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val16, i, lane; + static struct elink_reg_set reg_set[] = { + /* Disable Autoneg */ + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, + 0x3f00}, + {MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0}, + {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1}, + {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa}, + /* Leave cl72 training enable, needed for KR */ + {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2} + }; + + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg, + reg_set[i].val); + + lane = elink_get_warpcore_lane(params); + /* Global registers */ + CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + /* Disable CL36 PCS Tx */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16); + val16 &= ~(0x0011 << lane); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16); + + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16); + val16 |= (0x0303 << (lane << 1)); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16); + /* Restore AER */ + elink_set_aer_mmd(params, phy); + /* Set speed via PMA/PMD register */ + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040); + + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, + MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB); + + /* Enable encoded forced speed */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30); + + /* Turn TX scramble payload only the 64/66 scrambler */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_TX66_CONTROL, 0x9); + + /* Turn RX scramble payload only the 64/66 scrambler */ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, 0xF9); + + /* Set and clear loopback to cause a reset to 64/66 decoder */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0); + +} + 
+static void elink_warpcore_set_10G_XFI(struct elink_phy *phy, + struct elink_params *params, + uint8_t is_xfi) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t misc1_val, tap_val, tx_driver_val, lane, val; + uint32_t cfg_tap_val, tx_drv_brdct, tx_equal; + + /* Hold rxSeqStart */ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000); + + /* Hold tx_fifo_reset */ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x1); + + /* Disable CL73 AN */ + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); + + /* Disable 100FX Enable and Auto-Detect */ + elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL1, 0xFFFA); + + /* Disable 100FX Idle detect */ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL3, 0x0080); + + /* Set Block address to Remote PHY & Clear forced_speed[5] */ + elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, 0xFF7F); + + /* Turn off auto-detect & fiber mode */ + elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, + 0xFFEE); + + /* Set filter_force_link, disable_false_link and parallel_detect */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &val); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, + ((val | 0x0006) & 0xFFFE)); + + /* Set XFI / SFI */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC1, &misc1_val); + + misc1_val &= ~(0x1f); + + if (is_xfi) { + misc1_val |= 0x5; + tap_val = WC_TX_FIR(0x08, 0x37, 0x00); + tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03); + } else { + cfg_tap_val = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params-> + port].sfi_tap_values)); + + tx_equal = cfg_tap_val & PORT_HW_CFG_TX_EQUALIZATION_MASK; + + tx_drv_brdct = (cfg_tap_val & + PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >> + PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT; + + misc1_val |= 0x9; + + /* TAP values are controlled by nvram, if value there isn't 0 */ + if (tx_equal) + tap_val = (uint16_t) tx_equal; + else + tap_val = WC_TX_FIR(0x0f, 0x2b, 0x02); + + if (tx_drv_brdct) + tx_driver_val = + WC_TX_DRIVER(0x03, (uint16_t) tx_drv_brdct, 0x06); + else + tx_driver_val = WC_TX_DRIVER(0x03, 0x02, 0x06); + } + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val); + + /* Set Transmit PMD settings */ + lane = elink_get_warpcore_lane(params); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX_FIR_TAP, + tap_val | MDIO_WC_REG_TX_FIR_TAP_ENABLE); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10 * lane, + tx_driver_val); + + /* Enable fiber mode, enable and invert sig_det */ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0xd); + + /* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, 0x8080); + + elink_warpcore_set_lpi_passthrough(phy, params); + + /* 10G XFI Full Duplex */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100); + + /* Release tx_fifo_reset */ + elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, + 0xFFFE); + /* Release rxSeqStart */ + elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x7FFF); +} + +static void 
elink_warpcore_set_20G_force_KR2(struct elink_phy *phy, + struct elink_params *params) +{ + uint16_t val; + struct bnx2x_softc *sc = params->sc; + /* Set global registers, so set AER lane to 0 */ + CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + + /* Disable sequencer */ + elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, ~(1 << 13)); + + elink_set_aer_mmd(params, phy); + + elink_cl45_read_and_write(sc, phy, MDIO_PMA_DEVAD, + MDIO_WC_REG_PMD_KR_CONTROL, ~(1 << 1)); + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); + /* Turn off CL73 */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL73_USERB0_CTRL, &val); + val &= ~(1 << 5); + val |= (1 << 6); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL73_USERB0_CTRL, val); + + /* Set 20G KR2 force speed */ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x1f); + + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, (1 << 7)); + + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &val); + val &= ~(3 << 14); + val |= (1 << 15); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, val); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0x835A); + + /* Enable sequencer (over lane 0) */ + CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, (1 << 13)); + + elink_set_aer_mmd(params, phy); +} + +static void elink_warpcore_set_20G_DXGXS(struct bnx2x_softc *sc, + struct elink_phy *phy, uint16_t lane) +{ + /* Rx0 anaRxControl1G */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX0_ANARXCONTROL1G, 0x90); + + /* Rx2 anaRxControl1G */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX2_ANARXCONTROL1G, 0x90); + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_RX66_SCW0, 0xE070); + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_RX66_SCW1, 0xC0D0); + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_RX66_SCW2, 0xA0B0); + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_RX66_SCW3, 0x8090); + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW0_MASK, 0xF0F0); + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW1_MASK, 0xF0F0); + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW2_MASK, 0xF0F0); + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW3_MASK, 0xF0F0); + + /* Serdes Digital Misc1 */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6008); + + /* Serdes Digital4 Misc3 */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, 0x8088); + + /* Set Transmit PMD settings */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX_FIR_TAP, + (WC_TX_FIR(0x12, 0x2d, 0x00) | + MDIO_WC_REG_TX_FIR_TAP_ENABLE)); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10 * lane, + WC_TX_DRIVER(0x02, 0x02, 0x02)); +} + +static void elink_warpcore_set_sgmii_speed(struct elink_phy *phy, + struct elink_params *params, + uint8_t fiber_mode, + uint8_t always_autoneg) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val16, digctrl_kx1, digctrl_kx2; + + /* Clear XFI clock comp in non-10G single lane mode. 
*/ + elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, ~(3 << 13)); + + elink_warpcore_set_lpi_passthrough(phy, params); + + if (always_autoneg || phy->req_line_speed == ELINK_SPEED_AUTO_NEG) { + /* SGMII Autoneg */ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, + 0x1000); + PMD_DRV_LOG(DEBUG, "set SGMII AUTONEG"); + } else { + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); + val16 &= 0xcebf; + switch (phy->req_line_speed) { + case ELINK_SPEED_10: + break; + case ELINK_SPEED_100: + val16 |= 0x2000; + break; + case ELINK_SPEED_1000: + val16 |= 0x0040; + break; + default: + PMD_DRV_LOG(DEBUG, + "Speed not supported: 0x%x", + phy->req_line_speed); + return; + } + + if (phy->req_duplex == DUPLEX_FULL) + val16 |= 0x0100; + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16); + + PMD_DRV_LOG(DEBUG, "set SGMII force speed %d", + phy->req_line_speed); + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); + PMD_DRV_LOG(DEBUG, " (readback) %x", val16); + } + + /* SGMII Slave mode and disable signal detect */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &digctrl_kx1); + if (fiber_mode) + digctrl_kx1 = 1; + else + digctrl_kx1 &= 0xff4a; + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, digctrl_kx1); + + /* Turn off parallel detect */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &digctrl_kx2); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, + (digctrl_kx2 & ~(1 << 2))); + + /* Re-enable parallel detect */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, + (digctrl_kx2 | (1 << 2))); + + /* Enable autodet */ + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, + (digctrl_kx1 | 0x10)); +} + +static void elink_warpcore_reset_lane(struct bnx2x_softc *sc, + struct elink_phy *phy, uint8_t reset) +{ + uint16_t val; + /* Take lane out of reset after configuration is finished */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC6, &val); + if (reset) + val |= 0xC000; + else + val &= 0x3FFF; + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC6, val); + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC6, &val); +} + +/* Clear SFI/XFI link settings registers */ +static void elink_warpcore_clear_regs(struct elink_phy *phy, + struct elink_params *params, + uint16_t lane) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t i; + static struct elink_reg_set wc_regs[] = { + {MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL1, 0x014a}, + {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL3, 0x0800}, + {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL4_MISC3, 0x8008}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, + 0x0195}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, + 0x0007}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, + 0x0002}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_TX_FIR_TAP, 0x0000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040}, + {MDIO_WC_DEVAD, MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140} + }; + /* Set XFI clock comp as default. 
*/ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, (3 << 13)); + + for (i = 0; i < ARRAY_SIZE(wc_regs); i++) + elink_cl45_write(sc, phy, wc_regs[i].devad, wc_regs[i].reg, + wc_regs[i].val); + + lane = elink_get_warpcore_lane(params); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10 * lane, 0x0990); + +} + +static elink_status_t elink_get_mod_abs_int_cfg(struct bnx2x_softc *sc, + uint32_t shmem_base, + uint8_t port, + uint8_t * gpio_num, + uint8_t * gpio_port) +{ + uint32_t cfg_pin; + *gpio_num = 0; + *gpio_port = 0; + if (CHIP_IS_E3(sc)) { + cfg_pin = (REG_RD(sc, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port]. + e3_sfp_ctrl)) & + PORT_HW_CFG_E3_MOD_ABS_MASK) >> + PORT_HW_CFG_E3_MOD_ABS_SHIFT; + + /* Should not happen. This function called upon interrupt + * triggered by GPIO ( since EPIO can only generate interrupts + * to MCP). + * So if this function was called and none of the GPIOs was set, + * it means the shit hit the fan. + */ + if ((cfg_pin < PIN_CFG_GPIO0_P0) || + (cfg_pin > PIN_CFG_GPIO3_P1)) { + PMD_DRV_LOG(DEBUG, + "No cfg pin %x for module detect indication", + cfg_pin); + return ELINK_STATUS_ERROR; + } + + *gpio_num = (cfg_pin - PIN_CFG_GPIO0_P0) & 0x3; + *gpio_port = (cfg_pin - PIN_CFG_GPIO0_P0) >> 2; + } else { + *gpio_num = MISC_REGISTERS_GPIO_3; + *gpio_port = port; + } + + return ELINK_STATUS_OK; +} + +static int elink_is_sfp_module_plugged(struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t gpio_num, gpio_port; + uint32_t gpio_val; + if (elink_get_mod_abs_int_cfg(sc, + params->shmem_base, params->port, + &gpio_num, &gpio_port) != ELINK_STATUS_OK) + return 0; + gpio_val = elink_cb_gpio_read(sc, gpio_num, gpio_port); + + /* Call the handling function in case module is detected */ + if (gpio_val == 0) + return 1; + else + return 0; +} + +static int elink_warpcore_get_sigdet(struct elink_phy *phy, + struct elink_params *params) +{ + uint16_t gp2_status_reg0, lane; + struct bnx2x_softc *sc = params->sc; + + lane = elink_get_warpcore_lane(params); + + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_0, + &gp2_status_reg0); + + return (gp2_status_reg0 >> (8 + lane)) & 0x1; +} + +static void elink_warpcore_config_runtime(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t serdes_net_if; + uint16_t gp_status1 = 0, lnkup = 0, lnkup_kr = 0; + + vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1; + + if (!vars->turn_to_run_wc_rt) + return; + + if (vars->rx_tx_asic_rst) { + uint16_t lane = elink_get_warpcore_lane(params); + serdes_net_if = (REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config + [params->port]. + default_cfg)) & + PORT_HW_CFG_NET_SERDES_IF_MASK); + + switch (serdes_net_if) { + case PORT_HW_CFG_NET_SERDES_IF_KR: + /* Do we get link yet? */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, 0x81d1, + &gp_status1); + lnkup = (gp_status1 >> (8 + lane)) & 0x1; /* 1G */ + /*10G KR */ + lnkup_kr = (gp_status1 >> (12 + lane)) & 0x1; + + if (lnkup_kr || lnkup) { + vars->rx_tx_asic_rst = 0; + } else { + /* Reset the lane to see if link comes up. 
*/ + elink_warpcore_reset_lane(sc, phy, 1); + elink_warpcore_reset_lane(sc, phy, 0); + + /* Restart Autoneg */ + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, + 0x1200); + + vars->rx_tx_asic_rst--; + PMD_DRV_LOG(DEBUG, "0x%x retry left", + vars->rx_tx_asic_rst); + } + break; + + default: + break; + } + + } + /*params->rx_tx_asic_rst */ +} + +static void elink_warpcore_config_sfi(struct elink_phy *phy, + struct elink_params *params) +{ + uint16_t lane = elink_get_warpcore_lane(params); + + elink_warpcore_clear_regs(phy, params, lane); + if ((params->req_line_speed[ELINK_LINK_CONFIG_IDX(ELINK_INT_PHY)] == + ELINK_SPEED_10000) && + (phy->media_type != ELINK_ETH_PHY_SFP_1G_FIBER)) { + PMD_DRV_LOG(DEBUG, "Setting 10G SFI"); + elink_warpcore_set_10G_XFI(phy, params, 0); + } else { + PMD_DRV_LOG(DEBUG, "Setting 1G Fiber"); + elink_warpcore_set_sgmii_speed(phy, params, 1, 0); + } +} + +static void elink_sfp_e3_set_transmitter(struct elink_params *params, + struct elink_phy *phy, uint8_t tx_en) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t cfg_pin; + uint8_t port = params->port; + + cfg_pin = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_sfp_ctrl)) & + PORT_HW_CFG_E3_TX_LASER_MASK; + /* Set the !tx_en since this pin is DISABLE_TX_LASER */ + PMD_DRV_LOG(DEBUG, "Setting WC TX to %d", tx_en); + + /* For 20G, the expected pin to be used is 3 pins after the current */ + elink_set_cfg_pin(sc, cfg_pin, tx_en ^ 1); + if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G) + elink_set_cfg_pin(sc, cfg_pin + 3, tx_en ^ 1); +} + +static void elink_warpcore_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t serdes_net_if; + uint8_t fiber_mode; + uint16_t lane = elink_get_warpcore_lane(params); + serdes_net_if = (REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port]. 
+ default_cfg)) & + PORT_HW_CFG_NET_SERDES_IF_MASK); + PMD_DRV_LOG(DEBUG, + "Begin Warpcore init, link_speed %d, " + "serdes_net_if = 0x%x", vars->line_speed, serdes_net_if); + elink_set_aer_mmd(params, phy); + elink_warpcore_reset_lane(sc, phy, 1); + vars->phy_flags |= PHY_XGXS_FLAG; + if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) || + (phy->req_line_speed && + ((phy->req_line_speed == ELINK_SPEED_100) || + (phy->req_line_speed == ELINK_SPEED_10)))) { + vars->phy_flags |= PHY_SGMII_FLAG; + PMD_DRV_LOG(DEBUG, "Setting SGMII mode"); + elink_warpcore_clear_regs(phy, params, lane); + elink_warpcore_set_sgmii_speed(phy, params, 0, 1); + } else { + switch (serdes_net_if) { + case PORT_HW_CFG_NET_SERDES_IF_KR: + /* Enable KR Auto Neg */ + if (params->loopback_mode != ELINK_LOOPBACK_EXT) + elink_warpcore_enable_AN_KR(phy, params, vars); + else { + PMD_DRV_LOG(DEBUG, "Setting KR 10G-Force"); + elink_warpcore_set_10G_KR(phy, params); + } + break; + + case PORT_HW_CFG_NET_SERDES_IF_XFI: + elink_warpcore_clear_regs(phy, params, lane); + if (vars->line_speed == ELINK_SPEED_10000) { + PMD_DRV_LOG(DEBUG, "Setting 10G XFI"); + elink_warpcore_set_10G_XFI(phy, params, 1); + } else { + if (ELINK_SINGLE_MEDIA_DIRECT(params)) { + PMD_DRV_LOG(DEBUG, "1G Fiber"); + fiber_mode = 1; + } else { + PMD_DRV_LOG(DEBUG, "10/100/1G SGMII"); + fiber_mode = 0; + } + elink_warpcore_set_sgmii_speed(phy, + params, + fiber_mode, 0); + } + + break; + + case PORT_HW_CFG_NET_SERDES_IF_SFI: + /* Issue Module detection if module is plugged, or + * enabled transmitter to avoid current leakage in case + * no module is connected + */ + if ((params->loopback_mode == ELINK_LOOPBACK_NONE) || + (params->loopback_mode == ELINK_LOOPBACK_EXT)) { + if (elink_is_sfp_module_plugged(params)) + elink_sfp_module_detection(phy, params); + else + elink_sfp_e3_set_transmitter(params, + phy, 1); + } + + elink_warpcore_config_sfi(phy, params); + break; + + case PORT_HW_CFG_NET_SERDES_IF_DXGXS: + if (vars->line_speed != ELINK_SPEED_20000) { + PMD_DRV_LOG(DEBUG, "Speed not supported yet"); + return; + } + PMD_DRV_LOG(DEBUG, "Setting 20G DXGXS"); + elink_warpcore_set_20G_DXGXS(sc, phy, lane); + /* Issue Module detection */ + + elink_sfp_module_detection(phy, params); + break; + case PORT_HW_CFG_NET_SERDES_IF_KR2: + if (!params->loopback_mode) { + elink_warpcore_enable_AN_KR(phy, params, vars); + } else { + PMD_DRV_LOG(DEBUG, "Setting KR 20G-Force"); + elink_warpcore_set_20G_force_KR2(phy, params); + } + break; + default: + PMD_DRV_LOG(DEBUG, + "Unsupported Serdes Net Interface 0x%x", + serdes_net_if); + return; + } + } + + /* Take lane out of reset after configuration is finished */ + elink_warpcore_reset_lane(sc, phy, 0); + PMD_DRV_LOG(DEBUG, "Exit config init"); +} + +static void elink_warpcore_link_reset(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val16, lane; + elink_sfp_e3_set_transmitter(params, phy, 0); + elink_set_mdio_emac_per_phy(sc, params); + elink_set_aer_mmd(params, phy); + /* Global register */ + elink_warpcore_reset_lane(sc, phy, 1); + + /* Clear loopback settings (if any) */ + /* 10G & 20G */ + elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0xBFFF); + + elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0xfffe); + + /* Update those 1-copy registers */ + CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + /* Enable 1G MDIO (1-copy) */ + elink_cl45_read_and_write(sc, 
phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, ~0x10); + + elink_cl45_read_and_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL2, 0xff00); + lane = elink_get_warpcore_lane(params); + /* Disable CL36 PCS Tx */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16); + val16 |= (0x11 << lane); + if (phy->flags & ELINK_FLAGS_WC_DUAL_MODE) + val16 |= (0x22 << lane); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16); + + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16); + val16 &= ~(0x0303 << (lane << 1)); + val16 |= (0x0101 << (lane << 1)); + if (phy->flags & ELINK_FLAGS_WC_DUAL_MODE) { + val16 &= ~(0x0c0c << (lane << 1)); + val16 |= (0x0404 << (lane << 1)); + } + + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16); + /* Restore AER */ + elink_set_aer_mmd(params, phy); + +} + +static void elink_set_warpcore_loopback(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val16; + uint32_t lane; + PMD_DRV_LOG(DEBUG, "Setting Warpcore loopback type %x, speed %d", + params->loopback_mode, phy->req_line_speed); + + if (phy->req_line_speed < ELINK_SPEED_10000 || + phy->supported & ELINK_SUPPORTED_20000baseKR2_Full) { + /* 10/100/1000/20G-KR2 */ + + /* Update those 1-copy registers */ + CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + /* Enable 1G MDIO (1-copy) */ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, + 0x10); + /* Set 1G loopback based on lane (1-copy) */ + lane = elink_get_warpcore_lane(params); + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16); + val16 |= (1 << lane); + if (phy->flags & ELINK_FLAGS_WC_DUAL_MODE) + val16 |= (2 << lane); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL2, val16); + + /* Switch back to 4-copy registers */ + elink_set_aer_mmd(params, phy); + } else { + /* 10G / 20G-DXGXS */ + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, + 0x4000); + elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1); + } +} + +static void elink_sync_link(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t link_10g_plus; + if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) + vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG; + vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP); + if (vars->link_up) { + PMD_DRV_LOG(DEBUG, "phy link up"); + + vars->phy_link_up = 1; + vars->duplex = DUPLEX_FULL; + switch (vars->link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) { + case ELINK_LINK_10THD: + vars->duplex = DUPLEX_HALF; + /* Fall thru */ + case ELINK_LINK_10TFD: + vars->line_speed = ELINK_SPEED_10; + break; + + case ELINK_LINK_100TXHD: + vars->duplex = DUPLEX_HALF; + /* Fall thru */ + case ELINK_LINK_100T4: + case ELINK_LINK_100TXFD: + vars->line_speed = ELINK_SPEED_100; + break; + + case ELINK_LINK_1000THD: + vars->duplex = DUPLEX_HALF; + /* Fall thru */ + case ELINK_LINK_1000TFD: + vars->line_speed = ELINK_SPEED_1000; + break; + + case ELINK_LINK_2500THD: + vars->duplex = DUPLEX_HALF; + /* Fall thru */ + case ELINK_LINK_2500TFD: + vars->line_speed = ELINK_SPEED_2500; + break; + + case ELINK_LINK_10GTFD: + vars->line_speed = ELINK_SPEED_10000; + break; + case ELINK_LINK_20GTFD: + vars->line_speed = ELINK_SPEED_20000; + break; + default: + 
break; + } + vars->flow_ctrl = 0; + if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED) + vars->flow_ctrl |= ELINK_FLOW_CTRL_TX; + + if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED) + vars->flow_ctrl |= ELINK_FLOW_CTRL_RX; + + if (!vars->flow_ctrl) + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + + if (vars->line_speed && + ((vars->line_speed == ELINK_SPEED_10) || + (vars->line_speed == ELINK_SPEED_100))) { + vars->phy_flags |= PHY_SGMII_FLAG; + } else { + vars->phy_flags &= ~PHY_SGMII_FLAG; + } + if (vars->line_speed && + USES_WARPCORE(sc) && (vars->line_speed == ELINK_SPEED_1000)) + vars->phy_flags |= PHY_SGMII_FLAG; + /* Anything 10 and over uses the bmac */ + link_10g_plus = (vars->line_speed >= ELINK_SPEED_10000); + + if (link_10g_plus) { + if (USES_WARPCORE(sc)) + vars->mac_type = ELINK_MAC_TYPE_XMAC; + else + vars->mac_type = ELINK_MAC_TYPE_BMAC; + } else { + if (USES_WARPCORE(sc)) + vars->mac_type = ELINK_MAC_TYPE_UMAC; + else + vars->mac_type = ELINK_MAC_TYPE_EMAC; + } + } else { /* Link down */ + PMD_DRV_LOG(DEBUG, "phy link down"); + + vars->phy_link_up = 0; + + vars->line_speed = 0; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + + /* Indicate no mac active */ + vars->mac_type = ELINK_MAC_TYPE_NONE; + if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) + vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; + if (vars->link_status & LINK_STATUS_SFP_TX_FAULT) + vars->phy_flags |= PHY_SFP_TX_FAULT_FLAG; + } +} + +void elink_link_status_update(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port = params->port; + uint32_t sync_offset, media_types; + /* Update PHY configuration */ + set_phy_vars(params, vars); + + vars->link_status = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + port_mb[port].link_status)); + + /* Force link UP in non LOOPBACK_EXT loopback mode(s) */ + if (params->loopback_mode != ELINK_LOOPBACK_NONE && + params->loopback_mode != ELINK_LOOPBACK_EXT) + vars->link_status |= LINK_STATUS_LINK_UP; + + if (elink_eee_has_cap(params)) + vars->eee_status = REG_RD(sc, params->shmem2_base + + offsetof(struct shmem2_region, + eee_status[params->port])); + + vars->phy_flags = PHY_XGXS_FLAG; + elink_sync_link(params, vars); + /* Sync media type */ + sync_offset = params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].media_type); + media_types = REG_RD(sc, sync_offset); + + params->phy[ELINK_INT_PHY].media_type = + (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) >> + PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT; + params->phy[ELINK_EXT_PHY1].media_type = + (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK) >> + PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT; + params->phy[ELINK_EXT_PHY2].media_type = + (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK) >> + PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT; + PMD_DRV_LOG(DEBUG, "media_types = 0x%x", media_types); + + /* Sync AEU offset */ + sync_offset = params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].aeu_int_mask); + + vars->aeu_int_mask = REG_RD(sc, sync_offset); + + /* Sync PFC status */ + if (vars->link_status & LINK_STATUS_PFC_ENABLED) + params->feature_config_flags |= + ELINK_FEATURE_CONFIG_PFC_ENABLED; + else + params->feature_config_flags &= + ~ELINK_FEATURE_CONFIG_PFC_ENABLED; + + if (SHMEM2_HAS(sc, link_attr_sync)) + vars->link_attr_sync = SHMEM2_RD(sc, + link_attr_sync[params->port]); + + PMD_DRV_LOG(DEBUG, "link_status 0x%x phy_link_up %x int_mask 0x%x", + vars->link_status, 
vars->phy_link_up, vars->aeu_int_mask); + PMD_DRV_LOG(DEBUG, "line_speed %x duplex %x flow_ctrl 0x%x", + vars->line_speed, vars->duplex, vars->flow_ctrl); +} + +static void elink_set_master_ln(struct elink_params *params, + struct elink_phy *phy) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t new_master_ln, ser_lane; + ser_lane = ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); + + /* Set the master_ln for AN */ + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_TEST_MODE_LANE, &new_master_ln); + + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_TEST_MODE_LANE, + (new_master_ln | ser_lane)); +} + +static elink_status_t elink_reset_unicore(struct elink_params *params, + struct elink_phy *phy, + uint8_t set_serdes) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t mii_control; + uint16_t i; + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control); + + /* Reset the unicore */ + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + (mii_control | MDIO_COMBO_IEEO_MII_CONTROL_RESET)); + if (set_serdes) + elink_set_serdes_access(sc, params->port); + + /* Wait for the reset to self clear */ + for (i = 0; i < ELINK_MDIO_ACCESS_TIMEOUT; i++) { + DELAY(5); + + /* The reset erased the previous bank value */ + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control); + + if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) { + DELAY(5); + return ELINK_STATUS_OK; + } + } + + elink_cb_event_log(sc, ELINK_LOG_ID_PHY_UNINITIALIZED, params->port); // "Warning: PHY was not initialized," + // " Port %d", + + PMD_DRV_LOG(DEBUG, "BUG! 
XGXS is still in reset!"); + return ELINK_STATUS_ERROR; + +} + +static void elink_set_swap_lanes(struct elink_params *params, + struct elink_phy *phy) +{ + struct bnx2x_softc *sc = params->sc; + /* Each two bits represents a lane number: + * No swap is 0123 => 0x1b no need to enable the swap + */ + uint16_t rx_lane_swap, tx_lane_swap; + + rx_lane_swap = ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT); + tx_lane_swap = ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT); + + if (rx_lane_swap != 0x1b) { + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_RX_LN_SWAP, + (rx_lane_swap | + MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE | + MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE)); + } else { + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0); + } + + if (tx_lane_swap != 0x1b) { + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_TX_LN_SWAP, + (tx_lane_swap | + MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE)); + } else { + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0); + } +} + +static void elink_set_parallel_detection(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t control2; + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, &control2); + if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) + control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; + else + control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; + PMD_DRV_LOG(DEBUG, "phy->speed_cap_mask = 0x%x, control2 = 0x%x", + phy->speed_cap_mask, control2); + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, control2); + + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) && + (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { + PMD_DRV_LOG(DEBUG, "XGXS"); + + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT); + + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, + &control2); + + control2 |= + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN; + + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, + control2); + + /* Disable parallel detection of HiG */ + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_UNICORE_MODE_10G, + MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS | + MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS); + } +} + +static void elink_set_autoneg(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars, uint8_t enable_cl73) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t reg_val; + + /* CL37 Autoneg */ + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, ®_val); + + /* CL37 Autoneg Enabled */ + if (vars->line_speed == ELINK_SPEED_AUTO_NEG) + reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN; + else /* CL37 Autoneg Disabled */ + reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | + MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN); + + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); + + /* Enable/Disable Autodetection */ + + CL22_RD_OVER_CL45(sc, phy, + 
MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, ®_val); + reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN | + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT); + reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE; + if (vars->line_speed == ELINK_SPEED_AUTO_NEG) + reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; + else + reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; + + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val); + + /* Enable TetonII and BAM autoneg */ + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_BAM_NEXT_PAGE, + MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, ®_val); + if (vars->line_speed == ELINK_SPEED_AUTO_NEG) { + /* Enable BAM aneg Mode and TetonII aneg Mode */ + reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | + MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); + } else { + /* TetonII and BAM Autoneg Disabled */ + reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | + MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); + } + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_BAM_NEXT_PAGE, + MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, reg_val); + + if (enable_cl73) { + /* Enable Cl73 FSM status bits */ + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_USERB0, + MDIO_CL73_USERB0_CL73_UCTRL, 0xe); + + /* Enable BAM Station Manager */ + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_USERB0, + MDIO_CL73_USERB0_CL73_BAM_CTRL1, + MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN | + MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN + | + MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN); + + /* Advertise CL73 link speeds */ + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV2, ®_val); + if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) + reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4; + if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) + reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX; + + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV2, reg_val); + + /* CL73 Autoneg Enabled */ + reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN; + + } else /* CL73 Autoneg Disabled */ + reg_val = 0; + + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val); +} + +/* Program SerDes, forced speed */ +static void elink_program_serdes(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t reg_val; + + /* Program duplex, disable autoneg and sgmii */ + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, ®_val); + reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX | + MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | + MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK); + if (phy->req_duplex == DUPLEX_FULL) + reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); + + /* Program speed + * - needed only if the speed is greater than 1G (2.5G or 10G) + */ + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_MISC1, ®_val); + /* Clearing the speed value before setting the right speed */ + PMD_DRV_LOG(DEBUG, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x", reg_val); + + reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK | + MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL); + + if (!((vars->line_speed == ELINK_SPEED_1000) || + 
(vars->line_speed == ELINK_SPEED_100) || + (vars->line_speed == ELINK_SPEED_10))) { + + reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M | + MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL); + if (vars->line_speed == ELINK_SPEED_10000) + reg_val |= + MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4; + } + + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_MISC1, reg_val); + +} + +static void elink_set_brcm_cl37_advertisement(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val = 0; + + /* Set extended capabilities */ + if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) + val |= MDIO_OVER_1G_UP1_2_5G; + if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) + val |= MDIO_OVER_1G_UP1_10G; + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_OVER_1G, MDIO_OVER_1G_UP1, val); + + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_OVER_1G, MDIO_OVER_1G_UP3, 0x400); +} + +static void elink_set_ieee_aneg_advertisement(struct elink_phy *phy, + struct elink_params *params, + uint16_t ieee_fc) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val; + /* For AN, we are always publishing full duplex */ + + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc); + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV1, &val); + val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH; + val |= ((ieee_fc << 3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK); + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV1, val); +} + +static void elink_restart_autoneg(struct elink_phy *phy, + struct elink_params *params, + uint8_t enable_cl73) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t mii_control; + + PMD_DRV_LOG(DEBUG, "elink_restart_autoneg"); + /* Enable and restart BAM/CL37 aneg */ + + if (enable_cl73) { + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, + &mii_control); + + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, + (mii_control | + MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN | + MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN)); + } else { + + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control); + PMD_DRV_LOG(DEBUG, + "elink_restart_autoneg mii_control before = 0x%x", + mii_control); + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + (mii_control | + MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | + MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN)); + } +} + +static void elink_initialize_sgmii_process(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t control1; + + /* In SGMII mode, the unicore is always slave */ + + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &control1); + control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT; + /* Set sgmii mode (and not fiber) */ + control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE | + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET | + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE); + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, control1); + + /* If forced speed */ + if (!(vars->line_speed == ELINK_SPEED_AUTO_NEG)) { + /* Set speed, disable autoneg */ + uint16_t mii_control; + + CL22_RD_OVER_CL45(sc, phy, + 
MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control); + mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | + MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK | + MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX); + + switch (vars->line_speed) { + case ELINK_SPEED_100: + mii_control |= + MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100; + break; + case ELINK_SPEED_1000: + mii_control |= + MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000; + break; + case ELINK_SPEED_10: + /* There is nothing to set for 10M */ + break; + default: + /* Invalid speed for SGMII */ + PMD_DRV_LOG(DEBUG, "Invalid line_speed 0x%x", + vars->line_speed); + break; + } + + /* Setting the full duplex */ + if (phy->req_duplex == DUPLEX_FULL) + mii_control |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, mii_control); + + } else { /* AN mode */ + /* Enable and restart AN */ + elink_restart_autoneg(phy, params, 0); + } +} + +/* Link management + */ +static elink_status_t elink_direct_parallel_detect_used(struct elink_phy *phy, + struct elink_params + *params) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t pd_10g, status2_1000x; + if (phy->req_line_speed != ELINK_SPEED_AUTO_NEG) + return ELINK_STATUS_OK; + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_STATUS2, &status2_1000x); + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_STATUS2, &status2_1000x); + if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) { + PMD_DRV_LOG(DEBUG, "1G parallel detect link on port %d", + params->port); + return 1; + } + + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS, &pd_10g); + + if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) { + PMD_DRV_LOG(DEBUG, "10G parallel detect link on port %d", + params->port); + return 1; + } + return ELINK_STATUS_OK; +} + +static void elink_update_adv_fc(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars, uint32_t gp_status) +{ + uint16_t ld_pause; /* local driver */ + uint16_t lp_pause; /* link partner */ + uint16_t pause_result; + struct bnx2x_softc *sc = params->sc; + if ((gp_status & + (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | + MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) == + (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | + MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) { + + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV1, &ld_pause); + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_LP_ADV1, &lp_pause); + pause_result = (ld_pause & + MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) >> 8; + pause_result |= (lp_pause & + MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK) >> 10; + PMD_DRV_LOG(DEBUG, "pause_result CL73 0x%x", pause_result); + } else { + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_AUTO_NEG_ADV, &ld_pause); + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1, + &lp_pause); + pause_result = (ld_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) >> 5; + pause_result |= (lp_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) >> 7; + PMD_DRV_LOG(DEBUG, "pause_result CL37 0x%x", pause_result); + } + elink_pause_resolve(vars, pause_result); + +} + +static void elink_flow_ctrl_resolve(struct elink_phy *phy, + struct elink_params *params, + 
struct elink_vars *vars, uint32_t gp_status) +{ + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + + /* Resolve from gp_status in case of AN complete and not sgmii */ + if (phy->req_flow_ctrl != ELINK_FLOW_CTRL_AUTO) { + /* Update the advertised flow-controled of LD/LP in AN */ + if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG) + elink_update_adv_fc(phy, params, vars, gp_status); + /* But set the flow-control result as the requested one */ + vars->flow_ctrl = phy->req_flow_ctrl; + } else if (phy->req_line_speed != ELINK_SPEED_AUTO_NEG) + vars->flow_ctrl = params->req_fc_auto_adv; + else if ((gp_status & ELINK_MDIO_AN_CL73_OR_37_COMPLETE) && + (!(vars->phy_flags & PHY_SGMII_FLAG))) { + if (elink_direct_parallel_detect_used(phy, params)) { + vars->flow_ctrl = params->req_fc_auto_adv; + return; + } + elink_update_adv_fc(phy, params, vars, gp_status); + } + PMD_DRV_LOG(DEBUG, "flow_ctrl 0x%x", vars->flow_ctrl); +} + +static void elink_check_fallback_to_cl37(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t rx_status, ustat_val, cl37_fsm_received; + PMD_DRV_LOG(DEBUG, "elink_check_fallback_to_cl37"); + /* Step 1: Make sure signal is detected */ + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_RX0, MDIO_RX0_RX_STATUS, &rx_status); + if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) != + (MDIO_RX0_RX_STATUS_SIGDET)) { + PMD_DRV_LOG(DEBUG, "Signal is not detected. Restoring CL73." + "rx_status(0x80b0) = 0x%x", rx_status); + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN); + return; + } + /* Step 2: Check CL73 state machine */ + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_USERB0, + MDIO_CL73_USERB0_CL73_USTAT1, &ustat_val); + if ((ustat_val & + (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK | + MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) != + (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK | + MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) { + PMD_DRV_LOG(DEBUG, "CL73 state-machine is not stable. " + "ustat_val(0x8371) = 0x%x", ustat_val); + return; + } + /* Step 3: Check CL37 Message Pages received to indicate LP + * supports only CL37 + */ + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_REMOTE_PHY, + MDIO_REMOTE_PHY_MISC_RX_STATUS, &cl37_fsm_received); + if ((cl37_fsm_received & + (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | + MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) != + (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | + MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) { + PMD_DRV_LOG(DEBUG, "No CL37 FSM were received. " + "misc_rx_status(0x8330) = 0x%x", cl37_fsm_received); + return; + } + /* The combined cl37/cl73 fsm state information indicating that + * we are connected to a device which does not support cl73, but + * does support cl37 BAM. 
In this case we disable cl73 and + * restart cl37 auto-neg + */ + + /* Disable CL73 */ + CL22_WR_OVER_CL45(sc, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 0); + /* Restart CL37 autoneg */ + elink_restart_autoneg(phy, params, 0); + PMD_DRV_LOG(DEBUG, "Disabling CL73, and restarting CL37 autoneg"); +} + +static void elink_xgxs_an_resolve(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars, uint32_t gp_status) +{ + if (gp_status & ELINK_MDIO_AN_CL73_OR_37_COMPLETE) + vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; + + if (elink_direct_parallel_detect_used(phy, params)) + vars->link_status |= LINK_STATUS_PARALLEL_DETECTION_USED; +} + +static elink_status_t elink_get_link_speed_duplex(struct elink_phy *phy, + struct elink_params *params __rte_unused, + struct elink_vars *vars, + uint16_t is_link_up, + uint16_t speed_mask, + uint16_t is_duplex) +{ + if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG) + vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED; + if (is_link_up) { + PMD_DRV_LOG(DEBUG, "phy link up"); + + vars->phy_link_up = 1; + vars->link_status |= LINK_STATUS_LINK_UP; + + switch (speed_mask) { + case ELINK_GP_STATUS_10M: + vars->line_speed = ELINK_SPEED_10; + if (is_duplex == DUPLEX_FULL) + vars->link_status |= ELINK_LINK_10TFD; + else + vars->link_status |= ELINK_LINK_10THD; + break; + + case ELINK_GP_STATUS_100M: + vars->line_speed = ELINK_SPEED_100; + if (is_duplex == DUPLEX_FULL) + vars->link_status |= ELINK_LINK_100TXFD; + else + vars->link_status |= ELINK_LINK_100TXHD; + break; + + case ELINK_GP_STATUS_1G: + case ELINK_GP_STATUS_1G_KX: + vars->line_speed = ELINK_SPEED_1000; + if (is_duplex == DUPLEX_FULL) + vars->link_status |= ELINK_LINK_1000TFD; + else + vars->link_status |= ELINK_LINK_1000THD; + break; + + case ELINK_GP_STATUS_2_5G: + vars->line_speed = ELINK_SPEED_2500; + if (is_duplex == DUPLEX_FULL) + vars->link_status |= ELINK_LINK_2500TFD; + else + vars->link_status |= ELINK_LINK_2500THD; + break; + + case ELINK_GP_STATUS_5G: + case ELINK_GP_STATUS_6G: + PMD_DRV_LOG(DEBUG, + "link speed unsupported gp_status 0x%x", + speed_mask); + return ELINK_STATUS_ERROR; + + case ELINK_GP_STATUS_10G_KX4: + case ELINK_GP_STATUS_10G_HIG: + case ELINK_GP_STATUS_10G_CX4: + case ELINK_GP_STATUS_10G_KR: + case ELINK_GP_STATUS_10G_SFI: + case ELINK_GP_STATUS_10G_XFI: + vars->line_speed = ELINK_SPEED_10000; + vars->link_status |= ELINK_LINK_10GTFD; + break; + case ELINK_GP_STATUS_20G_DXGXS: + case ELINK_GP_STATUS_20G_KR2: + vars->line_speed = ELINK_SPEED_20000; + vars->link_status |= ELINK_LINK_20GTFD; + break; + default: + PMD_DRV_LOG(DEBUG, + "link speed unsupported gp_status 0x%x", + speed_mask); + return ELINK_STATUS_ERROR; + } + } else { /* link_down */ + PMD_DRV_LOG(DEBUG, "phy link down"); + + vars->phy_link_up = 0; + + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + vars->mac_type = ELINK_MAC_TYPE_NONE; + } + PMD_DRV_LOG(DEBUG, " phy_link_up %x line_speed %d", + vars->phy_link_up, vars->line_speed); + return ELINK_STATUS_OK; +} + +static elink_status_t elink_link_settings_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + + uint16_t gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask; + elink_status_t rc = ELINK_STATUS_OK; + + /* Read gp_status */ + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_GP_STATUS, + MDIO_GP_STATUS_TOP_AN_STATUS1, &gp_status); + if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS) + 
duplex = DUPLEX_FULL; + if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) + link_up = 1; + speed_mask = gp_status & ELINK_GP_STATUS_SPEED_MASK; + PMD_DRV_LOG(DEBUG, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x", + gp_status, link_up, speed_mask); + rc = elink_get_link_speed_duplex(phy, params, vars, link_up, speed_mask, + duplex); + if (rc == ELINK_STATUS_ERROR) + return rc; + + if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) { + if (ELINK_SINGLE_MEDIA_DIRECT(params)) { + vars->duplex = duplex; + elink_flow_ctrl_resolve(phy, params, vars, gp_status); + if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG) + elink_xgxs_an_resolve(phy, params, vars, + gp_status); + } + } else { /* Link_down */ + if ((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) && + ELINK_SINGLE_MEDIA_DIRECT(params)) { + /* Check signal is detected */ + elink_check_fallback_to_cl37(phy, params); + } + } + + /* Read LP advertised speeds */ + if (ELINK_SINGLE_MEDIA_DIRECT(params) && + (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)) { + uint16_t val; + + CL22_RD_OVER_CL45(sc, phy, MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_LP_ADV2, &val); + + if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; + if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 | + MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + + CL22_RD_OVER_CL45(sc, phy, MDIO_REG_BANK_OVER_1G, + MDIO_OVER_1G_LP_UP1, &val); + + if (val & MDIO_OVER_1G_UP1_2_5G) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE; + if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + } + + PMD_DRV_LOG(DEBUG, "duplex %x flow_ctrl 0x%x link_status 0x%x", + vars->duplex, vars->flow_ctrl, vars->link_status); + return rc; +} + +static elink_status_t elink_warpcore_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t lane; + uint16_t gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL; + elink_status_t rc = ELINK_STATUS_OK; + lane = elink_get_warpcore_lane(params); + /* Read gp_status */ + if ((params->loopback_mode) && (phy->flags & ELINK_FLAGS_WC_DUAL_MODE)) { + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up); + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up); + link_up &= 0x1; + } else if ((phy->req_line_speed > ELINK_SPEED_10000) && + (phy->supported & ELINK_SUPPORTED_20000baseMLD2_Full)) { + uint16_t temp_link_up; + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, 1, &temp_link_up); + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, 1, &link_up); + PMD_DRV_LOG(DEBUG, "PCS RX link status = 0x%x-->0x%x", + temp_link_up, link_up); + link_up &= (1 << 2); + if (link_up) + elink_ext_phy_resolve_fc(phy, params, vars); + } else { + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_1, &gp_status1); + PMD_DRV_LOG(DEBUG, "0x81d1 = 0x%x", gp_status1); + /* Check for either KR, 1G, or AN up. 
*/ + link_up = ((gp_status1 >> 8) | + (gp_status1 >> 12) | (gp_status1)) & (1 << lane); + if (phy->supported & ELINK_SUPPORTED_20000baseKR2_Full) { + uint16_t an_link; + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_STATUS, &an_link); + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_STATUS, &an_link); + link_up |= (an_link & (1 << 2)); + } + if (link_up && ELINK_SINGLE_MEDIA_DIRECT(params)) { + uint16_t pd, gp_status4; + if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG) { + /* Check Autoneg complete */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_4, + &gp_status4); + if (gp_status4 & ((1 << 12) << lane)) + vars->link_status |= + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; + + /* Check parallel detect used */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_PAR_DET_10G_STATUS, + &pd); + if (pd & (1 << 15)) + vars->link_status |= + LINK_STATUS_PARALLEL_DETECTION_USED; + } + elink_ext_phy_resolve_fc(phy, params, vars); + vars->duplex = duplex; + } + } + + if ((vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) && + ELINK_SINGLE_MEDIA_DIRECT(params)) { + uint16_t val; + + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG2, &val); + + if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; + if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 | + MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL3_LP_UP1, &val); + + if (val & MDIO_OVER_1G_UP1_2_5G) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE; + if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + + } + + if (lane < 2) { + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_2, &gp_speed); + } else { + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_3, &gp_speed); + } + PMD_DRV_LOG(DEBUG, "lane %d gp_speed 0x%x", lane, gp_speed); + + if ((lane & 1) == 0) + gp_speed <<= 8; + gp_speed &= 0x3f00; + link_up = ! 
!link_up; + + /* Reset the TX FIFO to fix SGMII issue */ + rc = elink_get_link_speed_duplex(phy, params, vars, link_up, gp_speed, + duplex); + + /* In case of KR link down, start up the recovering procedure */ + if ((!link_up) && (phy->media_type == ELINK_ETH_PHY_KR) && + (!(phy->flags & ELINK_FLAGS_WC_DUAL_MODE))) + vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; + + PMD_DRV_LOG(DEBUG, "duplex %x flow_ctrl 0x%x link_status 0x%x", + vars->duplex, vars->flow_ctrl, vars->link_status); + return rc; +} + +static void elink_set_gmii_tx_driver(struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + struct elink_phy *phy = ¶ms->phy[ELINK_INT_PHY]; + uint16_t lp_up2; + uint16_t tx_driver; + uint16_t bank; + + /* Read precomp */ + CL22_RD_OVER_CL45(sc, phy, + MDIO_REG_BANK_OVER_1G, MDIO_OVER_1G_LP_UP2, &lp_up2); + + /* Bits [10:7] at lp_up2, positioned at [15:12] */ + lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >> + MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) << + MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT); + + if (lp_up2 == 0) + return; + + for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3; + bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) { + CL22_RD_OVER_CL45(sc, phy, + bank, MDIO_TX0_TX_DRIVER, &tx_driver); + + /* Replace tx_driver bits [15:12] */ + if (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) { + tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK; + tx_driver |= lp_up2; + CL22_WR_OVER_CL45(sc, phy, + bank, MDIO_TX0_TX_DRIVER, tx_driver); + } + } +} + +static elink_status_t elink_emac_program(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port = params->port; + uint16_t mode = 0; + + PMD_DRV_LOG(DEBUG, "setting link speed & duplex"); + elink_bits_dis(sc, GRCBASE_EMAC0 + port * 0x400 + + EMAC_REG_EMAC_MODE, + (EMAC_MODE_25G_MODE | + EMAC_MODE_PORT_MII_10M | EMAC_MODE_HALF_DUPLEX)); + switch (vars->line_speed) { + case ELINK_SPEED_10: + mode |= EMAC_MODE_PORT_MII_10M; + break; + + case ELINK_SPEED_100: + mode |= EMAC_MODE_PORT_MII; + break; + + case ELINK_SPEED_1000: + mode |= EMAC_MODE_PORT_GMII; + break; + + case ELINK_SPEED_2500: + mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII); + break; + + default: + /* 10G not valid for EMAC */ + PMD_DRV_LOG(DEBUG, "Invalid line_speed 0x%x", vars->line_speed); + return ELINK_STATUS_ERROR; + } + + if (vars->duplex == DUPLEX_HALF) + mode |= EMAC_MODE_HALF_DUPLEX; + elink_bits_en(sc, + GRCBASE_EMAC0 + port * 0x400 + EMAC_REG_EMAC_MODE, mode); + + elink_set_led(params, vars, ELINK_LED_MODE_OPER, vars->line_speed); + return ELINK_STATUS_OK; +} + +static void elink_set_preemphasis(struct elink_phy *phy, + struct elink_params *params) +{ + + uint16_t bank, i = 0; + struct bnx2x_softc *sc = params->sc; + + for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3; + bank += (MDIO_REG_BANK_RX1 - MDIO_REG_BANK_RX0), i++) { + CL22_WR_OVER_CL45(sc, phy, + bank, + MDIO_RX0_RX_EQ_BOOST, phy->rx_preemphasis[i]); + } + + for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3; + bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) { + CL22_WR_OVER_CL45(sc, phy, + bank, + MDIO_TX0_TX_DRIVER, phy->tx_preemphasis[i]); + } +} + +static void elink_xgxs_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + uint8_t enable_cl73 = (ELINK_SINGLE_MEDIA_DIRECT(params) || + (params->loopback_mode == ELINK_LOOPBACK_XGXS)); + + if (!(vars->phy_flags & PHY_SGMII_FLAG)) { + if (ELINK_SINGLE_MEDIA_DIRECT(params) && + 
(params->feature_config_flags & + ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) + elink_set_preemphasis(phy, params); + + /* Forced speed requested? */ + if (vars->line_speed != ELINK_SPEED_AUTO_NEG || + (ELINK_SINGLE_MEDIA_DIRECT(params) && + params->loopback_mode == ELINK_LOOPBACK_EXT)) { + PMD_DRV_LOG(DEBUG, "not SGMII, no AN"); + + /* Disable autoneg */ + elink_set_autoneg(phy, params, vars, 0); + + /* Program speed and duplex */ + elink_program_serdes(phy, params, vars); + + } else { /* AN_mode */ + PMD_DRV_LOG(DEBUG, "not SGMII, AN"); + + /* AN enabled */ + elink_set_brcm_cl37_advertisement(phy, params); + + /* Program duplex & pause advertisement (for aneg) */ + elink_set_ieee_aneg_advertisement(phy, params, + vars->ieee_fc); + + /* Enable autoneg */ + elink_set_autoneg(phy, params, vars, enable_cl73); + + /* Enable and restart AN */ + elink_restart_autoneg(phy, params, enable_cl73); + } + + } else { /* SGMII mode */ + PMD_DRV_LOG(DEBUG, "SGMII"); + + elink_initialize_sgmii_process(phy, params, vars); + } +} + +static elink_status_t elink_prepare_xgxs(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + elink_status_t rc; + vars->phy_flags |= PHY_XGXS_FLAG; + if ((phy->req_line_speed && + ((phy->req_line_speed == ELINK_SPEED_100) || + (phy->req_line_speed == ELINK_SPEED_10))) || + (!phy->req_line_speed && + (phy->speed_cap_mask >= + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) && + (phy->speed_cap_mask < + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || + (phy->type == PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD)) + vars->phy_flags |= PHY_SGMII_FLAG; + else + vars->phy_flags &= ~PHY_SGMII_FLAG; + + elink_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); + elink_set_aer_mmd(params, phy); + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) + elink_set_master_ln(params, phy); + + rc = elink_reset_unicore(params, phy, 0); + /* Reset the SerDes and wait for reset bit return low */ + if (rc != ELINK_STATUS_OK) + return rc; + + elink_set_aer_mmd(params, phy); + /* Setting the masterLn_def again after the reset */ + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) { + elink_set_master_ln(params, phy); + elink_set_swap_lanes(params, phy); + } + + return rc; +} + +static uint16_t elink_wait_reset_complete(struct bnx2x_softc *sc, + struct elink_phy *phy, + struct elink_params *params) +{ + uint16_t cnt, ctrl; + /* Wait for soft reset to get cleared up to 1 sec */ + for (cnt = 0; cnt < 1000; cnt++) { + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE) + elink_cl22_read(sc, phy, MDIO_PMA_REG_CTRL, &ctrl); + else + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_CTRL, &ctrl); + if (!(ctrl & (1 << 15))) + break; + DELAY(1000 * 1); + } + + if (cnt == 1000) + elink_cb_event_log(sc, ELINK_LOG_ID_PHY_UNINITIALIZED, params->port); // "Warning: PHY was not initialized," + // " Port %d", + + PMD_DRV_LOG(DEBUG, "control reg 0x%x (after %d ms)", ctrl, cnt); + return cnt; +} + +static void elink_link_int_enable(struct elink_params *params) +{ + uint8_t port = params->port; + uint32_t mask; + struct bnx2x_softc *sc = params->sc; + + /* Setting the status to report on link up for either XGXS or SerDes */ + if (CHIP_IS_E3(sc)) { + mask = ELINK_NIG_MASK_XGXS0_LINK_STATUS; + if (!(ELINK_SINGLE_MEDIA_DIRECT(params))) + mask |= ELINK_NIG_MASK_MI_INT; + } else if (params->switch_cfg == ELINK_SWITCH_CFG_10G) { + mask = (ELINK_NIG_MASK_XGXS0_LINK10G | + ELINK_NIG_MASK_XGXS0_LINK_STATUS); + PMD_DRV_LOG(DEBUG, "enabled XGXS interrupt"); + if 
(!(ELINK_SINGLE_MEDIA_DIRECT(params)) && + params->phy[ELINK_INT_PHY].type != + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) { + mask |= ELINK_NIG_MASK_MI_INT; + PMD_DRV_LOG(DEBUG, "enabled external phy int"); + } + + } else { /* SerDes */ + mask = ELINK_NIG_MASK_SERDES0_LINK_STATUS; + PMD_DRV_LOG(DEBUG, "enabled SerDes interrupt"); + if (!(ELINK_SINGLE_MEDIA_DIRECT(params)) && + params->phy[ELINK_INT_PHY].type != + PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN) { + mask |= ELINK_NIG_MASK_MI_INT; + PMD_DRV_LOG(DEBUG, "enabled external phy int"); + } + } + elink_bits_en(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, mask); + + PMD_DRV_LOG(DEBUG, "port %x, is_xgxs %x, int_status 0x%x", port, + (params->switch_cfg == ELINK_SWITCH_CFG_10G), + REG_RD(sc, NIG_REG_STATUS_INTERRUPT_PORT0 + port * 4)); + PMD_DRV_LOG(DEBUG, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x", + REG_RD(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4), + REG_RD(sc, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port * 0x18), + REG_RD(sc, + NIG_REG_SERDES0_STATUS_LINK_STATUS + port * 0x3c)); + PMD_DRV_LOG(DEBUG, " 10G %x, XGXS_LINK %x", + REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK10G + port * 0x68), + REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK_STATUS + port * 0x68)); +} + +static void elink_rearm_latch_signal(struct bnx2x_softc *sc, uint8_t port, + uint8_t exp_mi_int) +{ + uint32_t latch_status = 0; + + /* Disable the MI INT ( external phy int ) by writing 1 to the + * status register. Link down indication is high-active-signal, + * so in this case we need to write the status to clear the XOR + */ + /* Read Latched signals */ + latch_status = REG_RD(sc, NIG_REG_LATCH_STATUS_0 + port * 8); + PMD_DRV_LOG(DEBUG, "latch_status = 0x%x", latch_status); + /* Handle only those with latched-signal=up. */ + if (exp_mi_int) + elink_bits_en(sc, + NIG_REG_STATUS_INTERRUPT_PORT0 + + port * 4, ELINK_NIG_STATUS_EMAC0_MI_INT); + else + elink_bits_dis(sc, + NIG_REG_STATUS_INTERRUPT_PORT0 + + port * 4, ELINK_NIG_STATUS_EMAC0_MI_INT); + + if (latch_status & 1) { + + /* For all latched-signal=up : Re-Arm Latch signals */ + REG_WR(sc, NIG_REG_LATCH_STATUS_0 + port * 8, + (latch_status & 0xfffe) | (latch_status & 1)); + } + /* For all latched-signal=up,Write original_signal to status */ +} + +static void elink_link_int_ack(struct elink_params *params, + struct elink_vars *vars, uint8_t is_10g_plus) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port = params->port; + uint32_t mask; + /* First reset all status we assume only one line will be + * change at a time + */ + elink_bits_dis(sc, NIG_REG_STATUS_INTERRUPT_PORT0 + port * 4, + (ELINK_NIG_STATUS_XGXS0_LINK10G | + ELINK_NIG_STATUS_XGXS0_LINK_STATUS | + ELINK_NIG_STATUS_SERDES0_LINK_STATUS)); + if (vars->phy_link_up) { + if (USES_WARPCORE(sc)) + mask = ELINK_NIG_STATUS_XGXS0_LINK_STATUS; + else { + if (is_10g_plus) + mask = ELINK_NIG_STATUS_XGXS0_LINK10G; + else if (params->switch_cfg == ELINK_SWITCH_CFG_10G) { + /* Disable the link interrupt by writing 1 to + * the relevant lane in the status register + */ + uint32_t ser_lane = + ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); + mask = ((1 << ser_lane) << + ELINK_NIG_STATUS_XGXS0_LINK_STATUS_SIZE); + } else + mask = ELINK_NIG_STATUS_SERDES0_LINK_STATUS; + } + PMD_DRV_LOG(DEBUG, "Ack link up interrupt with mask 0x%x", + mask); + elink_bits_en(sc, + NIG_REG_STATUS_INTERRUPT_PORT0 + port * 4, mask); + } +} + +static elink_status_t elink_format_ver(uint32_t num, uint8_t * str, + uint16_t * len) +{ + uint8_t *str_ptr = str; + 
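+ /* Walk the 32-bit value one hex nibble at a time, most-significant nibble first */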
uint32_t mask = 0xf0000000; + uint8_t shift = 8 * 4; + uint8_t digit; + uint8_t remove_leading_zeros = 1; + if (*len < 10) { + /* Need more than 10chars for this format */ + *str_ptr = '\0'; + (*len)--; + return ELINK_STATUS_ERROR; + } + while (shift > 0) { + + shift -= 4; + digit = ((num & mask) >> shift); + if (digit == 0 && remove_leading_zeros) { + mask = mask >> 4; + continue; + } else if (digit < 0xa) + *str_ptr = digit + '0'; + else + *str_ptr = digit - 0xa + 'a'; + remove_leading_zeros = 0; + str_ptr++; + (*len)--; + mask = mask >> 4; + if (shift == 4 * 4) { + *str_ptr = '.'; + str_ptr++; + (*len)--; + remove_leading_zeros = 1; + } + } + return ELINK_STATUS_OK; +} + +static elink_status_t elink_null_format_ver(__rte_unused uint32_t spirom_ver, + uint8_t * str, uint16_t * len) +{ + str[0] = '\0'; + (*len)--; + return ELINK_STATUS_OK; +} + +static void elink_set_xgxs_loopback(struct elink_phy *phy, + struct elink_params *params) +{ + uint8_t port = params->port; + struct bnx2x_softc *sc = params->sc; + + if (phy->req_line_speed != ELINK_SPEED_1000) { + uint32_t md_devad = 0; + + PMD_DRV_LOG(DEBUG, "XGXS 10G loopback enable"); + + if (!CHIP_IS_E3(sc)) { + /* Change the uni_phy_addr in the nig */ + md_devad = REG_RD(sc, (NIG_REG_XGXS0_CTRL_MD_DEVAD + + port * 0x18)); + + REG_WR(sc, NIG_REG_XGXS0_CTRL_MD_DEVAD + port * 0x18, + 0x5); + } + + elink_cl45_write(sc, phy, + 5, + (MDIO_REG_BANK_AER_BLOCK + + (MDIO_AER_BLOCK_AER_REG & 0xf)), 0x2800); + + elink_cl45_write(sc, phy, + 5, + (MDIO_REG_BANK_CL73_IEEEB0 + + (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)), + 0x6041); + DELAY(1000 * 200); + /* Set aer mmd back */ + elink_set_aer_mmd(params, phy); + + if (!CHIP_IS_E3(sc)) { + /* And md_devad */ + REG_WR(sc, NIG_REG_XGXS0_CTRL_MD_DEVAD + port * 0x18, + md_devad); + } + } else { + uint16_t mii_ctrl; + PMD_DRV_LOG(DEBUG, "XGXS 1G loopback enable"); + elink_cl45_read(sc, phy, 5, + (MDIO_REG_BANK_COMBO_IEEE0 + + (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)), + &mii_ctrl); + elink_cl45_write(sc, phy, 5, + (MDIO_REG_BANK_COMBO_IEEE0 + + (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)), + mii_ctrl | + MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK); + } +} + +elink_status_t elink_set_led(struct elink_params *params, + struct elink_vars *vars, uint8_t mode, + uint32_t speed) +{ + uint8_t port = params->port; + uint16_t hw_led_mode = params->hw_led_mode; + elink_status_t rc = ELINK_STATUS_OK; + uint8_t phy_idx; + uint32_t tmp; + uint32_t emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; + struct bnx2x_softc *sc = params->sc; + PMD_DRV_LOG(DEBUG, "elink_set_led: port %x, mode %d", port, mode); + PMD_DRV_LOG(DEBUG, "speed 0x%x, hw_led_mode 0x%x", speed, hw_led_mode); + /* In case */ + for (phy_idx = ELINK_EXT_PHY1; phy_idx < ELINK_MAX_PHYS; phy_idx++) { + if (params->phy[phy_idx].set_link_led) { + params->phy[phy_idx].set_link_led(¶ms->phy[phy_idx], + params, mode); + } + } +#ifdef ELINK_INCLUDE_EMUL + if (params->feature_config_flags & + ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC) + return rc; +#endif + + switch (mode) { + case ELINK_LED_MODE_FRONT_PANEL_OFF: + case ELINK_LED_MODE_OFF: + REG_WR(sc, NIG_REG_LED_10G_P0 + port * 4, 0); + REG_WR(sc, NIG_REG_LED_MODE_P0 + port * 4, + SHARED_HW_CFG_LED_MAC1); + + tmp = elink_cb_reg_read(sc, emac_base + EMAC_REG_EMAC_LED); + if (params->phy[ELINK_EXT_PHY1].type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE) + tmp &= ~(EMAC_LED_1000MB_OVERRIDE | + EMAC_LED_100MB_OVERRIDE | + EMAC_LED_10MB_OVERRIDE); + else + tmp |= EMAC_LED_OVERRIDE; + + elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_LED, tmp); + break; + + case ELINK_LED_MODE_OPER: + /* For all other phys, OPER mode is same as ON, so in case + * link is down, do nothing + */ + if (!vars->link_up) + break; + case ELINK_LED_MODE_ON: + if (((params->phy[ELINK_EXT_PHY1].type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727) || + (params->phy[ELINK_EXT_PHY1].type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722)) && + CHIP_IS_E2(sc) && params->num_phys == 2) { + /* This is a work-around for E2+8727 Configurations */ + if (mode == ELINK_LED_MODE_ON || + speed == ELINK_SPEED_10000) { + REG_WR(sc, NIG_REG_LED_MODE_P0 + port * 4, 0); + REG_WR(sc, NIG_REG_LED_10G_P0 + port * 4, 1); + + tmp = + elink_cb_reg_read(sc, + emac_base + + EMAC_REG_EMAC_LED); + elink_cb_reg_write(sc, + emac_base + + EMAC_REG_EMAC_LED, + (tmp | EMAC_LED_OVERRIDE)); + /* Return here without enabling traffic + * LED blink and setting rate in ON mode. + * In oper mode, enabling LED blink + * and setting rate is needed. + */ + if (mode == ELINK_LED_MODE_ON) + return rc; + } + } else if (ELINK_SINGLE_MEDIA_DIRECT(params)) { + /* This is a work-around for HW issue found when link + * is up in CL73 + */ + if ((!CHIP_IS_E3(sc)) || + (CHIP_IS_E3(sc) && mode == ELINK_LED_MODE_ON)) + REG_WR(sc, NIG_REG_LED_10G_P0 + port * 4, 1); + + if (CHIP_IS_E1x(sc) || + CHIP_IS_E2(sc) || (mode == ELINK_LED_MODE_ON)) + REG_WR(sc, NIG_REG_LED_MODE_P0 + port * 4, 0); + else + REG_WR(sc, NIG_REG_LED_MODE_P0 + port * 4, + hw_led_mode); + } else if ((params->phy[ELINK_EXT_PHY1].type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE) && + (mode == ELINK_LED_MODE_ON)) { + REG_WR(sc, NIG_REG_LED_MODE_P0 + port * 4, 0); + tmp = + elink_cb_reg_read(sc, + emac_base + EMAC_REG_EMAC_LED); + elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_LED, + tmp | EMAC_LED_OVERRIDE | + EMAC_LED_1000MB_OVERRIDE); + /* Break here; otherwise, it'll disable the + * intended override. + */ + break; + } else { + uint32_t nig_led_mode = ((params->hw_led_mode << + SHARED_HW_CFG_LED_MODE_SHIFT) + == + SHARED_HW_CFG_LED_EXTPHY2) + ? 
(SHARED_HW_CFG_LED_PHY1 >> + SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode; + REG_WR(sc, NIG_REG_LED_MODE_P0 + port * 4, + nig_led_mode); + } + + REG_WR(sc, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port * 4, + 0); + /* Set blinking rate to ~15.9Hz */ + if (CHIP_IS_E3(sc)) + REG_WR(sc, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port * 4, + LED_BLINK_RATE_VAL_E3); + else + REG_WR(sc, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port * 4, + LED_BLINK_RATE_VAL_E1X_E2); + REG_WR(sc, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + port * 4, 1); + tmp = elink_cb_reg_read(sc, emac_base + EMAC_REG_EMAC_LED); + elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_LED, + (tmp & (~EMAC_LED_OVERRIDE))); + + break; + + default: + rc = ELINK_STATUS_ERROR; + PMD_DRV_LOG(DEBUG, "elink_set_led: Invalid led mode %d", mode); + break; + } + return rc; + +} + +static elink_status_t elink_link_initialize(struct elink_params *params, + struct elink_vars *vars) +{ + elink_status_t rc = ELINK_STATUS_OK; + uint8_t phy_index, non_ext_phy; + struct bnx2x_softc *sc = params->sc; + /* In case of external phy existence, the line speed would be the + * line speed linked up by the external phy. In case it is direct + * only, then the line_speed during initialization will be + * equal to the req_line_speed + */ + vars->line_speed = params->phy[ELINK_INT_PHY].req_line_speed; + + /* Initialize the internal phy in case this is a direct board + * (no external phys), or this board has external phy which requires + * to first. + */ + if (!USES_WARPCORE(sc)) + elink_prepare_xgxs(¶ms->phy[ELINK_INT_PHY], params, vars); + /* init ext phy and enable link state int */ + non_ext_phy = (ELINK_SINGLE_MEDIA_DIRECT(params) || + (params->loopback_mode == ELINK_LOOPBACK_XGXS)); + + if (non_ext_phy || + (params->phy[ELINK_EXT_PHY1].flags & ELINK_FLAGS_INIT_XGXS_FIRST) || + (params->loopback_mode == ELINK_LOOPBACK_EXT_PHY)) { + struct elink_phy *phy = ¶ms->phy[ELINK_INT_PHY]; + if (vars->line_speed == ELINK_SPEED_AUTO_NEG && + (CHIP_IS_E1x(sc) || CHIP_IS_E2(sc))) + elink_set_parallel_detection(phy, params); + if (params->phy[ELINK_INT_PHY].config_init) + params->phy[ELINK_INT_PHY].config_init(phy, + params, vars); + } + + /* Re-read this value in case it was changed inside config_init due to + * limitations of optic module + */ + vars->line_speed = params->phy[ELINK_INT_PHY].req_line_speed; + + /* Init external phy */ + if (non_ext_phy) { + if (params->phy[ELINK_INT_PHY].supported & + ELINK_SUPPORTED_FIBRE) + vars->link_status |= LINK_STATUS_SERDES_LINK; + } else { + for (phy_index = ELINK_EXT_PHY1; phy_index < params->num_phys; + phy_index++) { + /* No need to initialize second phy in case of first + * phy only selection. In case of second phy, we do + * need to initialize the first phy, since they are + * connected. 
+ */ + if (params->phy[phy_index].supported & + ELINK_SUPPORTED_FIBRE) + vars->link_status |= LINK_STATUS_SERDES_LINK; + + if (phy_index == ELINK_EXT_PHY2 && + (elink_phy_selection(params) == + PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) { + PMD_DRV_LOG(DEBUG, + "Not initializing second phy"); + continue; + } + params->phy[phy_index].config_init(¶ms-> + phy[phy_index], + params, vars); + } + } + /* Reset the interrupt indication after phy was initialized */ + elink_bits_dis(sc, NIG_REG_STATUS_INTERRUPT_PORT0 + + params->port * 4, + (ELINK_NIG_STATUS_XGXS0_LINK10G | + ELINK_NIG_STATUS_XGXS0_LINK_STATUS | + ELINK_NIG_STATUS_SERDES0_LINK_STATUS | + ELINK_NIG_MASK_MI_INT)); + return rc; +} + +static void elink_int_link_reset(__rte_unused struct elink_phy *phy, + struct elink_params *params) +{ + /* Reset the SerDes/XGXS */ + REG_WR(params->sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, + (0x1ff << (params->port * 16))); +} + +static void elink_common_ext_link_reset(__rte_unused struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t gpio_port; + /* HW reset */ + if (CHIP_IS_E2(sc)) + gpio_port = SC_PATH(sc); + else + gpio_port = params->port; + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, gpio_port); + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_LOW, gpio_port); + PMD_DRV_LOG(DEBUG, "reset external PHY"); +} + +static elink_status_t elink_update_link_down(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port = params->port; + + PMD_DRV_LOG(DEBUG, "Port %x: Link is down", port); + elink_set_led(params, vars, ELINK_LED_MODE_OFF, 0); + vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG; + /* Indicate no mac active */ + vars->mac_type = ELINK_MAC_TYPE_NONE; + + /* Update shared memory */ + vars->link_status &= ~ELINK_LINK_UPDATE_MASK; + vars->line_speed = 0; + elink_update_mng(params, vars->link_status); + + /* Activate nig drain */ + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + port * 4, 1); + + /* Disable emac */ + if (!CHIP_IS_E3(sc)) + REG_WR(sc, NIG_REG_NIG_EMAC0_EN + port * 4, 0); + + DELAY(1000 * 10); + /* Reset BigMac/Xmac */ + if (CHIP_IS_E1x(sc) || CHIP_IS_E2(sc)) + elink_set_bmac_rx(sc, params->port, 0); + + if (CHIP_IS_E3(sc)) { + /* Prevent LPI Generation by chip */ + REG_WR(sc, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), + 0); + REG_WR(sc, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2), + 0); + vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK | + SHMEM_EEE_ACTIVE_BIT); + + elink_update_mng_eee(params, vars->eee_status); + elink_set_xmac_rxtx(params, 0); + elink_set_umac_rxtx(params, 0); + } + + return ELINK_STATUS_OK; +} + +static elink_status_t elink_update_link_up(struct elink_params *params, + struct elink_vars *vars, + uint8_t link_10g) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t phy_idx, port = params->port; + elink_status_t rc = ELINK_STATUS_OK; + + vars->link_status |= (LINK_STATUS_LINK_UP | + LINK_STATUS_PHYSICAL_LINK_FLAG); + vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG; + + if (vars->flow_ctrl & ELINK_FLOW_CTRL_TX) + vars->link_status |= LINK_STATUS_TX_FLOW_CONTROL_ENABLED; + + if (vars->flow_ctrl & ELINK_FLOW_CTRL_RX) + vars->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED; + if (USES_WARPCORE(sc)) { + if (link_10g) { + if (elink_xmac_enable(params, vars, 0) == + ELINK_STATUS_NO_LINK) { + PMD_DRV_LOG(DEBUG, "Found errors on XMAC"); + vars->link_up = 0; + vars->phy_flags |= 
PHY_HALF_OPEN_CONN_FLAG; + vars->link_status &= ~LINK_STATUS_LINK_UP; + } + } else + elink_umac_enable(params, vars, 0); + elink_set_led(params, vars, + ELINK_LED_MODE_OPER, vars->line_speed); + + if ((vars->eee_status & SHMEM_EEE_ACTIVE_BIT) && + (vars->eee_status & SHMEM_EEE_LPI_REQUESTED_BIT)) { + PMD_DRV_LOG(DEBUG, "Enabling LPI assertion"); + REG_WR(sc, MISC_REG_CPMU_LP_FW_ENABLE_P0 + + (params->port << 2), 1); + REG_WR(sc, MISC_REG_CPMU_LP_DR_ENABLE, 1); + REG_WR(sc, MISC_REG_CPMU_LP_MASK_ENT_P0 + + (params->port << 2), 0xfc20); + } + } + if ((CHIP_IS_E1x(sc) || CHIP_IS_E2(sc))) { + if (link_10g) { + if (elink_bmac_enable(params, vars, 0, 1) == + ELINK_STATUS_NO_LINK) { + PMD_DRV_LOG(DEBUG, "Found errors on BMAC"); + vars->link_up = 0; + vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; + vars->link_status &= ~LINK_STATUS_LINK_UP; + } + + elink_set_led(params, vars, + ELINK_LED_MODE_OPER, ELINK_SPEED_10000); + } else { + rc = elink_emac_program(params, vars); + elink_emac_enable(params, vars, 0); + + /* AN complete? */ + if ((vars->link_status & + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) + && (!(vars->phy_flags & PHY_SGMII_FLAG)) && + ELINK_SINGLE_MEDIA_DIRECT(params)) + elink_set_gmii_tx_driver(params); + } + } + + /* PBF - link up */ + if (CHIP_IS_E1x(sc)) + rc |= elink_pbf_update(params, vars->flow_ctrl, + vars->line_speed); + + /* Disable drain */ + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + port * 4, 0); + + /* Update shared memory */ + elink_update_mng(params, vars->link_status); + elink_update_mng_eee(params, vars->eee_status); + /* Check remote fault */ + for (phy_idx = ELINK_INT_PHY; phy_idx < ELINK_MAX_PHYS; phy_idx++) { + if (params->phy[phy_idx].flags & ELINK_FLAGS_TX_ERROR_CHECK) { + elink_check_half_open_conn(params, vars, 0); + break; + } + } + DELAY(1000 * 20); + return rc; +} + +/* The elink_link_update function should be called upon link + * interrupt. + * Link is considered up as follows: + * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs + * to be up + * - SINGLE_MEDIA - The link between the 577xx and the external + * phy (XGXS) need to up as well as the external link of the + * phy (PHY_EXT1) + * - DUAL_MEDIA - The link between the 577xx and the first + * external phy needs to be up, and at least one of the 2 + * external phy link must be up. 
+ */ +elink_status_t elink_link_update(struct elink_params * params, + struct elink_vars * vars) +{ + struct bnx2x_softc *sc = params->sc; + struct elink_vars phy_vars[ELINK_MAX_PHYS]; + uint8_t port = params->port; + uint8_t link_10g_plus, phy_index; + uint8_t ext_phy_link_up = 0, cur_link_up; + elink_status_t rc = ELINK_STATUS_OK; + __rte_unused uint8_t is_mi_int = 0; + uint16_t ext_phy_line_speed = 0, prev_line_speed = vars->line_speed; + uint8_t active_external_phy = ELINK_INT_PHY; + vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; + vars->link_status &= ~ELINK_LINK_UPDATE_MASK; + for (phy_index = ELINK_INT_PHY; phy_index < params->num_phys; + phy_index++) { + phy_vars[phy_index].flow_ctrl = 0; + phy_vars[phy_index].link_status = ETH_LINK_DOWN; + phy_vars[phy_index].line_speed = 0; + phy_vars[phy_index].duplex = DUPLEX_FULL; + phy_vars[phy_index].phy_link_up = 0; + phy_vars[phy_index].link_up = 0; + phy_vars[phy_index].fault_detected = 0; + /* different consideration, since vars holds inner state */ + phy_vars[phy_index].eee_status = vars->eee_status; + } + + if (USES_WARPCORE(sc)) + elink_set_aer_mmd(params, ¶ms->phy[ELINK_INT_PHY]); + + PMD_DRV_LOG(DEBUG, "port %x, XGXS?%x, int_status 0x%x", + port, (vars->phy_flags & PHY_XGXS_FLAG), + REG_RD(sc, NIG_REG_STATUS_INTERRUPT_PORT0 + port * 4)); + + is_mi_int = (uint8_t) (REG_RD(sc, NIG_REG_EMAC0_STATUS_MISC_MI_INT + + port * 0x18) > 0); + PMD_DRV_LOG(DEBUG, "int_mask 0x%x MI_INT %x, SERDES_LINK %x", + REG_RD(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4), + is_mi_int, + REG_RD(sc, + NIG_REG_SERDES0_STATUS_LINK_STATUS + port * 0x3c)); + + PMD_DRV_LOG(DEBUG, " 10G %x, XGXS_LINK %x", + REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK10G + port * 0x68), + REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK_STATUS + port * 0x68)); + + /* Disable emac */ + if (!CHIP_IS_E3(sc)) + REG_WR(sc, NIG_REG_NIG_EMAC0_EN + port * 4, 0); + + /* Step 1: + * Check external link change only for external phys, and apply + * priority selection between them in case the link on both phys + * is up. Note that instead of the common vars, a temporary + * vars argument is used since each phy may have different link/ + * speed/duplex result + */ + for (phy_index = ELINK_EXT_PHY1; phy_index < params->num_phys; + phy_index++) { + struct elink_phy *phy = ¶ms->phy[phy_index]; + if (!phy->read_status) + continue; + /* Read link status and params of this ext phy */ + cur_link_up = phy->read_status(phy, params, + &phy_vars[phy_index]); + if (cur_link_up) { + PMD_DRV_LOG(DEBUG, "phy in index %d link is up", + phy_index); + } else { + PMD_DRV_LOG(DEBUG, "phy in index %d link is down", + phy_index); + continue; + } + + if (!ext_phy_link_up) { + ext_phy_link_up = 1; + active_external_phy = phy_index; + } else { + switch (elink_phy_selection(params)) { + case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: + /* In this option, the first PHY makes sure to pass the + * traffic through itself only. + * Its not clear how to reset the link on the second phy + */ + active_external_phy = ELINK_EXT_PHY1; + break; + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: + /* In this option, the first PHY makes sure to pass the + * traffic through the second PHY. 
+ */ + active_external_phy = ELINK_EXT_PHY2; + break; + default: + /* Link indication on both PHYs with the following cases + * is invalid: + * - FIRST_PHY means that second phy wasn't initialized, + * hence its link is expected to be down + * - SECOND_PHY means that first phy should not be able + * to link up by itself (using configuration) + * - DEFAULT should be overriden during initialiazation + */ + PMD_DRV_LOG(DEBUG, "Invalid link indication" + "mpc=0x%x. DISABLING LINK !!!", + params->multi_phy_config); + ext_phy_link_up = 0; + break; + } + } + } + prev_line_speed = vars->line_speed; + /* Step 2: + * Read the status of the internal phy. In case of + * DIRECT_SINGLE_MEDIA board, this link is the external link, + * otherwise this is the link between the 577xx and the first + * external phy + */ + if (params->phy[ELINK_INT_PHY].read_status) + params->phy[ELINK_INT_PHY].read_status(¶ms-> + phy[ELINK_INT_PHY], + params, vars); + /* The INT_PHY flow control reside in the vars. This include the + * case where the speed or flow control are not set to AUTO. + * Otherwise, the active external phy flow control result is set + * to the vars. The ext_phy_line_speed is needed to check if the + * speed is different between the internal phy and external phy. + * This case may be result of intermediate link speed change. + */ + if (active_external_phy > ELINK_INT_PHY) { + vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl; + /* Link speed is taken from the XGXS. AN and FC result from + * the external phy. + */ + vars->link_status |= phy_vars[active_external_phy].link_status; + + /* if active_external_phy is first PHY and link is up - disable + * disable TX on second external PHY + */ + if (active_external_phy == ELINK_EXT_PHY1) { + if (params->phy[ELINK_EXT_PHY2].phy_specific_func) { + PMD_DRV_LOG(DEBUG, "Disabling TX on EXT_PHY2"); + params->phy[ELINK_EXT_PHY2]. + phy_specific_func(¶ms-> + phy[ELINK_EXT_PHY2], + params, ELINK_DISABLE_TX); + } + } + + ext_phy_line_speed = phy_vars[active_external_phy].line_speed; + vars->duplex = phy_vars[active_external_phy].duplex; + if (params->phy[active_external_phy].supported & + ELINK_SUPPORTED_FIBRE) + vars->link_status |= LINK_STATUS_SERDES_LINK; + else + vars->link_status &= ~LINK_STATUS_SERDES_LINK; + + vars->eee_status = phy_vars[active_external_phy].eee_status; + + PMD_DRV_LOG(DEBUG, "Active external phy selected: %x", + active_external_phy); + } + + for (phy_index = ELINK_EXT_PHY1; phy_index < params->num_phys; + phy_index++) { + if (params->phy[phy_index].flags & + ELINK_FLAGS_REARM_LATCH_SIGNAL) { + elink_rearm_latch_signal(sc, port, + phy_index == + active_external_phy); + break; + } + } + PMD_DRV_LOG(DEBUG, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x," + " ext_phy_line_speed = %d", vars->flow_ctrl, + vars->link_status, ext_phy_line_speed); + /* Upon link speed change set the NIG into drain mode. 
This deals + * with a possible FIFO glitch due to clk change when the speed + * is decreased without a link down indicator + */ + + if (vars->phy_link_up) { + if (!(ELINK_SINGLE_MEDIA_DIRECT(params)) && ext_phy_link_up && + (ext_phy_line_speed != vars->line_speed)) { + PMD_DRV_LOG(DEBUG, "Internal link speed %d is" + " different than the external" + " link speed %d", vars->line_speed, + ext_phy_line_speed); + vars->phy_link_up = 0; + } else if (prev_line_speed != vars->line_speed) { + REG_WR(sc, + NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, + 0); + DELAY(1000 * 1); + } + } + + /* Anything 10G and over uses the bmac */ + link_10g_plus = (vars->line_speed >= ELINK_SPEED_10000); + + elink_link_int_ack(params, vars, link_10g_plus); + + /* In case the external phy link is up and the internal link is down + * (not initialized yet, probably after link initialization), it + * needs to be initialized. + * Note that after a link down-up as a result of a cable plug, the xgxs + * link would probably become up again without the need to + * initialize it + */ + if (!(ELINK_SINGLE_MEDIA_DIRECT(params))) { + PMD_DRV_LOG(DEBUG, "ext_phy_link_up = %d, int_link_up = %d," + " init_preceding = %d", ext_phy_link_up, + vars->phy_link_up, + params->phy[ELINK_EXT_PHY1].flags & + ELINK_FLAGS_INIT_XGXS_FIRST); + if (!(params->phy[ELINK_EXT_PHY1].flags & + ELINK_FLAGS_INIT_XGXS_FIRST) + && ext_phy_link_up && !vars->phy_link_up) { + vars->line_speed = ext_phy_line_speed; + if (vars->line_speed < ELINK_SPEED_1000) + vars->phy_flags |= PHY_SGMII_FLAG; + else + vars->phy_flags &= ~PHY_SGMII_FLAG; + + if (params->phy[ELINK_INT_PHY].config_init) + params->phy[ELINK_INT_PHY].config_init(&params-> + phy + [ELINK_INT_PHY], + params, + vars); + } + } + /* Link is up only if both the local phy and the external phy (in case + * of a non-direct board) are up and no fault is detected on the active PHY.
+ */ + vars->link_up = (vars->phy_link_up && + (ext_phy_link_up || + ELINK_SINGLE_MEDIA_DIRECT(params)) && + (phy_vars[active_external_phy].fault_detected == 0)); + + /* Update the PFC configuration in case it was changed */ + if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED) + vars->link_status |= LINK_STATUS_PFC_ENABLED; + else + vars->link_status &= ~LINK_STATUS_PFC_ENABLED; + + if (vars->link_up) + rc = elink_update_link_up(params, vars, link_10g_plus); + else + rc = elink_update_link_down(params, vars); + + /* Update MCP link status was changed */ + if (params-> + feature_config_flags & ELINK_FEATURE_CONFIG_BC_SUPPORTS_AFEX) + elink_cb_fw_command(sc, DRV_MSG_CODE_LINK_STATUS_CHANGED, 0); + + return rc; +} + +/*****************************************************************************/ +/* External Phy section */ +/*****************************************************************************/ +static void elink_ext_phy_hw_reset(struct bnx2x_softc *sc, uint8_t port) +{ + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, port); + DELAY(1000 * 1); + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); +} + +static void elink_save_spirom_version(struct bnx2x_softc *sc, + __rte_unused uint8_t port, + uint32_t spirom_ver, uint32_t ver_addr) +{ + PMD_DRV_LOG(DEBUG, "FW version 0x%x:0x%x for port %d", + (uint16_t) (spirom_ver >> 16), (uint16_t) spirom_ver, port); + + if (ver_addr) + REG_WR(sc, ver_addr, spirom_ver); +} + +static void elink_save_bnx2x_spirom_ver(struct bnx2x_softc *sc, + struct elink_phy *phy, uint8_t port) +{ + uint16_t fw_ver1, fw_ver2; + + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER1, &fw_ver1); + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, &fw_ver2); + elink_save_spirom_version(sc, port, + (uint32_t) (fw_ver1 << 16 | fw_ver2), + phy->ver_addr); +} + +static void elink_ext_phy_10G_an_resolve(struct bnx2x_softc *sc, + struct elink_phy *phy, + struct elink_vars *vars) +{ + uint16_t val; + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_STATUS, &val); + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_STATUS, &val); + if (val & (1 << 5)) + vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; + if ((val & (1 << 0)) == 0) + vars->link_status |= LINK_STATUS_PARALLEL_DETECTION_USED; +} + +/******************************************************************/ +/* common BNX2X8073/BNX2X8727 PHY SECTION */ +/******************************************************************/ +static void elink_8073_resolve_fc(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + if (phy->req_line_speed == ELINK_SPEED_10 || + phy->req_line_speed == ELINK_SPEED_100) { + vars->flow_ctrl = phy->req_flow_ctrl; + return; + } + + if (elink_ext_phy_resolve_fc(phy, params, vars) && + (vars->flow_ctrl == ELINK_FLOW_CTRL_NONE)) { + uint16_t pause_result; + uint16_t ld_pause; /* local */ + uint16_t lp_pause; /* link partner */ + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_CL37_FC_LD, &ld_pause); + + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_CL37_FC_LP, &lp_pause); + pause_result = (ld_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5; + pause_result |= (lp_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7; + + elink_pause_resolve(vars, pause_result); + PMD_DRV_LOG(DEBUG, "Ext PHY CL37 pause result 0x%x", + pause_result); + } +} + +static elink_status_t 
elink_8073_8727_external_rom_boot(struct bnx2x_softc *sc, + struct elink_phy *phy, + uint8_t port) +{ + uint32_t count = 0; + uint16_t fw_ver1, fw_msgout; + elink_status_t rc = ELINK_STATUS_OK; + + /* Boot port from external ROM */ + /* EDC grst */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001); + + /* Ucode reboot and rst */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x008c); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL1, 0x0001); + + /* Reset internal microprocessor */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); + + /* Release srst bit */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); + + /* Delay 100ms per the PHY specifications */ + DELAY(1000 * 100); + + /* 8073 sometimes taking longer to download */ + do { + count++; + if (count > 300) { + PMD_DRV_LOG(DEBUG, + "elink_8073_8727_external_rom_boot port %x:" + "Download failed. fw version = 0x%x", + port, fw_ver1); + rc = ELINK_STATUS_ERROR; + break; + } + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER1, &fw_ver1); + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout); + + DELAY(1000 * 1); + } while (fw_ver1 == 0 || fw_ver1 == 0x4321 || + ((fw_msgout & 0xff) != 0x03 && (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8073))); + + /* Clear ser_boot_ctl bit */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL1, 0x0000); + elink_save_bnx2x_spirom_ver(sc, phy, port); + + PMD_DRV_LOG(DEBUG, + "elink_8073_8727_external_rom_boot port %x:" + "Download complete. fw version = 0x%x", port, fw_ver1); + + return rc; +} + +/******************************************************************/ +/* BNX2X8073 PHY SECTION */ +/******************************************************************/ +static elink_status_t elink_8073_is_snr_needed(struct bnx2x_softc *sc, + struct elink_phy *phy) +{ + /* This is only required for 8073A1, version 102 only */ + uint16_t val; + + /* Read 8073 HW revision */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val); + + if (val != 1) { + /* No need to workaround in 8073 A1 */ + return ELINK_STATUS_OK; + } + + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER2, &val); + + /* SNR should be applied only for version 0x102 */ + if (val != 0x102) + return ELINK_STATUS_OK; + + return 1; +} + +static elink_status_t elink_8073_xaui_wa(struct bnx2x_softc *sc, + struct elink_phy *phy) +{ + uint16_t val, cnt, cnt1; + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val); + + if (val > 0) { + /* No need to workaround in 8073 A1 */ + return ELINK_STATUS_OK; + } + /* XAUI workaround in 8073 A0: */ + + /* After loading the boot ROM and restarting Autoneg, poll + * Dev1, Reg $C820: + */ + + for (cnt = 0; cnt < 1000; cnt++) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &val); + /* If bit [14] = 0 or bit [13] = 0, continue on with + * system initialization (XAUI work-around not required, as + * these bits indicate 2.5G or 1G link up). 
+ */ + if (!(val & (1 << 14)) || !(val & (1 << 13))) { + PMD_DRV_LOG(DEBUG, "XAUI work-around not required"); + return ELINK_STATUS_OK; + } else if (!(val & (1 << 15))) { + PMD_DRV_LOG(DEBUG, "bit 15 went off"); + /* If bit 15 is 0, then poll Dev1, Reg $C841 until it's + * MSB (bit15) goes to 1 (indicating that the XAUI + * workaround has completed), then continue on with + * system initialization. + */ + for (cnt1 = 0; cnt1 < 1000; cnt1++) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_XAUI_WA, + &val); + if (val & (1 << 15)) { + PMD_DRV_LOG(DEBUG, + "XAUI workaround has completed"); + return ELINK_STATUS_OK; + } + DELAY(1000 * 3); + } + break; + } + DELAY(1000 * 3); + } + PMD_DRV_LOG(DEBUG, "Warning: XAUI work-around timeout !!!"); + return ELINK_STATUS_ERROR; +} + +static void elink_807x_force_10G(struct bnx2x_softc *sc, struct elink_phy *phy) +{ + /* Force KR or KX */ + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0x000b); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_BNX2X_CTRL, 0x0000); + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000); +} + +static void elink_8073_set_pause_cl37(struct elink_params *params, + struct elink_phy *phy, + struct elink_vars *vars) +{ + uint16_t cl37_val; + struct bnx2x_softc *sc = params->sc; + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &cl37_val); + + cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; + /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ + elink_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); + if ((vars->ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) { + cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC; + } + if ((vars->ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { + cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; + } + if ((vars->ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { + cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; + } + PMD_DRV_LOG(DEBUG, "Ext phy AN advertize cl37 0x%x", cl37_val); + + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, cl37_val); + DELAY(1000 * 500); +} + +static void elink_8073_specific_func(struct elink_phy *phy, + struct elink_params *params, + uint32_t action) +{ + struct bnx2x_softc *sc = params->sc; + switch (action) { + case ELINK_PHY_INIT: + /* Enable LASI */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, + (1 << 2)); + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, + 0x0004); + break; + } +} + +static elink_status_t elink_8073_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val = 0, tmp1; + uint8_t gpio_port; + PMD_DRV_LOG(DEBUG, "Init 8073"); + + if (CHIP_IS_E2(sc)) + gpio_port = SC_PATH(sc); + else + gpio_port = params->port; + /* Restore normal power mode */ + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); + + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); + + elink_8073_specific_func(phy, params, ELINK_PHY_INIT); + elink_8073_set_pause_cl37(params, phy, vars); + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); + + elink_cl45_read(sc, 
phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); + + PMD_DRV_LOG(DEBUG, "Before rom RX_ALARM(port1): 0x%x", tmp1); + + /* Swap polarity if required - Must be done only in non-1G mode */ + if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) { + /* Configure the 8073 to swap _P and _N of the KR lines */ + PMD_DRV_LOG(DEBUG, "Swapping polarity for the 8073"); + /* 10G Rx/Tx and 1G Tx signal polarity swap */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, + (val | (3 << 9))); + } + + /* Enable CL37 BAM */ + if (REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port]. + default_cfg)) & + PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) { + + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8073_BAM, &val); + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8073_BAM, val | 1); + PMD_DRV_LOG(DEBUG, "Enable CL37 BAM on KR"); + } + if (params->loopback_mode == ELINK_LOOPBACK_EXT) { + elink_807x_force_10G(sc, phy); + PMD_DRV_LOG(DEBUG, "Forced speed 10G on 807X"); + return ELINK_STATUS_OK; + } else { + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_BNX2X_CTRL, 0x0002); + } + if (phy->req_line_speed != ELINK_SPEED_AUTO_NEG) { + if (phy->req_line_speed == ELINK_SPEED_10000) { + val = (1 << 7); + } else if (phy->req_line_speed == ELINK_SPEED_2500) { + val = (1 << 5); + /* Note that 2.5G works only when used with 1G + * advertisement + */ + } else + val = (1 << 5); + } else { + val = 0; + if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) + val |= (1 << 7); + + /* Note that 2.5G works only when used with 1G advertisement */ + if (phy->speed_cap_mask & + (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G | + PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) + val |= (1 << 5); + PMD_DRV_LOG(DEBUG, "807x autoneg val = 0x%x", val); + } + + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val); + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, &tmp1); + + if (((phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) && + (phy->req_line_speed == ELINK_SPEED_AUTO_NEG)) || + (phy->req_line_speed == ELINK_SPEED_2500)) { + uint16_t phy_ver; + /* Allow 2.5G for A1 and above */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, + &phy_ver); + PMD_DRV_LOG(DEBUG, "Add 2.5G"); + if (phy_ver > 0) + tmp1 |= 1; + else + tmp1 &= 0xfffe; + } else { + PMD_DRV_LOG(DEBUG, "Disable 2.5G"); + tmp1 &= 0xfffe; + } + + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, tmp1); + /* Add support for CL37 (passive mode) II */ + + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &tmp1); + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, + (tmp1 | ((phy->req_duplex == DUPLEX_FULL) ? + 0x20 : 0x40))); + + /* Add support for CL37 (passive mode) III */ + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); + + /* The SNR will improve about 2db by changing BW and FEE main + * tap. 
Rest commands are executed after link is up + * Change FFE main cursor to 5 in EDC register + */ + if (elink_8073_is_snr_needed(sc, phy)) + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN, + 0xFB0C); + + /* Enable FEC (Forware Error Correction) Request in the AN */ + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, &tmp1); + tmp1 |= (1 << 15); + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, tmp1); + + elink_ext_phy_set_pause(params, phy, vars); + + /* Restart autoneg */ + DELAY(1000 * 500); + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); + PMD_DRV_LOG(DEBUG, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x", + ((val & (1 << 5)) > 0), ((val & (1 << 7)) > 0)); + return ELINK_STATUS_OK; +} + +static uint8_t elink_8073_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t link_up = 0; + uint16_t val1, val2; + uint16_t link_status = 0; + uint16_t an1000_status = 0; + + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); + + PMD_DRV_LOG(DEBUG, "8703 LASI status 0x%x", val1); + + /* Clear the interrupt LASI status register */ + elink_cl45_read(sc, phy, MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2); + elink_cl45_read(sc, phy, MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val1); + PMD_DRV_LOG(DEBUG, "807x PCS status 0x%x->0x%x", val2, val1); + /* Clear MSG-OUT */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1); + + /* Check the LASI */ + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2); + + PMD_DRV_LOG(DEBUG, "KR 0x9003 0x%x", val2); + + /* Check the link status */ + elink_cl45_read(sc, phy, MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2); + PMD_DRV_LOG(DEBUG, "KR PCS status 0x%x", val2); + + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2); + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1); + link_up = ((val1 & 4) == 4); + PMD_DRV_LOG(DEBUG, "PMA_REG_STATUS=0x%x", val1); + + if (link_up && ((phy->req_line_speed != ELINK_SPEED_10000))) { + if (elink_8073_xaui_wa(sc, phy) != 0) + return 0; + } + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status); + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status); + + /* Check the link status on 1.1.2 */ + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2); + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1); + PMD_DRV_LOG(DEBUG, "KR PMA status 0x%x->0x%x," + "an_link_status=0x%x", val2, val1, an1000_status); + + link_up = (((val1 & 4) == 4) || (an1000_status & (1 << 1))); + if (link_up && elink_8073_is_snr_needed(sc, phy)) { + /* The SNR will improve about 2dbby changing the BW and FEE main + * tap. The 1st write to change FFE main tap is set before + * restart AN. 
Change PLL Bandwidth in EDC register + */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH, + 0x26BC); + + /* Change CDR Bandwidth in EDC register */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CDR_BANDWIDTH, + 0x0333); + } + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS, + &link_status); + + /* Bits 0..2 --> speed detected, bits 13..15--> link is down */ + if ((link_status & (1 << 2)) && (!(link_status & (1 << 15)))) { + link_up = 1; + vars->line_speed = ELINK_SPEED_10000; + PMD_DRV_LOG(DEBUG, "port %x: External link up in 10G", + params->port); + } else if ((link_status & (1 << 1)) && (!(link_status & (1 << 14)))) { + link_up = 1; + vars->line_speed = ELINK_SPEED_2500; + PMD_DRV_LOG(DEBUG, "port %x: External link up in 2.5G", + params->port); + } else if ((link_status & (1 << 0)) && (!(link_status & (1 << 13)))) { + link_up = 1; + vars->line_speed = ELINK_SPEED_1000; + PMD_DRV_LOG(DEBUG, "port %x: External link up in 1G", + params->port); + } else { + link_up = 0; + PMD_DRV_LOG(DEBUG, "port %x: External link is down", + params->port); + } + + if (link_up) { + /* Swap polarity if required */ + if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) { + /* Configure the 8073 to swap P and N of the KR lines */ + elink_cl45_read(sc, phy, + MDIO_XS_DEVAD, + MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1); + /* Set bit 3 to invert Rx in 1G mode and clear this bit + * when it`s in 10G mode. + */ + if (vars->line_speed == ELINK_SPEED_1000) { + PMD_DRV_LOG(DEBUG, "Swapping 1G polarity for" + "the 8073"); + val1 |= (1 << 3); + } else + val1 &= ~(1 << 3); + + elink_cl45_write(sc, phy, + MDIO_XS_DEVAD, + MDIO_XS_REG_8073_RX_CTRL_PCIE, val1); + } + elink_ext_phy_10G_an_resolve(sc, phy, vars); + elink_8073_resolve_fc(phy, params, vars); + vars->duplex = DUPLEX_FULL; + } + + if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG2, &val1); + + if (val1 & (1 << 5)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; + if (val1 & (1 << 7)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + } + + return link_up; +} + +static void elink_8073_link_reset(__rte_unused struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t gpio_port; + if (CHIP_IS_E2(sc)) + gpio_port = SC_PATH(sc); + else + gpio_port = params->port; + PMD_DRV_LOG(DEBUG, "Setting 8073 port %d into low power mode", + gpio_port); + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_LOW, gpio_port); +} + +/******************************************************************/ +/* BNX2X8705 PHY SECTION */ +/******************************************************************/ +static elink_status_t elink_8705_config_init(struct elink_phy *phy, + struct elink_params *params, + __rte_unused struct elink_vars + *vars) +{ + struct bnx2x_softc *sc = params->sc; + PMD_DRV_LOG(DEBUG, "init 8705"); + /* Restore normal power mode */ + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + /* HW reset */ + elink_ext_phy_hw_reset(sc, params->port); + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); + elink_wait_reset_complete(sc, phy, params); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, 0x7fbf); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, 
MDIO_PMA_REG_CMU_PLL_BYPASS, 0x0100); + elink_cl45_write(sc, phy, MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1); + /* BNX2X8705 doesn't have microcode, hence the 0 */ + elink_save_spirom_version(sc, params->port, params->shmem_base, 0); + return ELINK_STATUS_OK; +} + +static uint8_t elink_8705_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + uint8_t link_up = 0; + uint16_t val1, rx_sd; + struct bnx2x_softc *sc = params->sc; + PMD_DRV_LOG(DEBUG, "read status 8705"); + elink_cl45_read(sc, phy, + MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1); + PMD_DRV_LOG(DEBUG, "8705 LASI status 0x%x", val1); + + elink_cl45_read(sc, phy, + MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1); + PMD_DRV_LOG(DEBUG, "8705 LASI status 0x%x", val1); + + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd); + + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xc809, &val1); + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xc809, &val1); + + PMD_DRV_LOG(DEBUG, "8705 1.c809 val=0x%x", val1); + link_up = ((rx_sd & 0x1) && (val1 & (1 << 9)) + && ((val1 & (1 << 8)) == 0)); + if (link_up) { + vars->line_speed = ELINK_SPEED_10000; + elink_ext_phy_resolve_fc(phy, params, vars); + } + return link_up; +} + +/******************************************************************/ +/* SFP+ module Section */ +/******************************************************************/ +static void elink_set_disable_pmd_transmit(struct elink_params *params, + struct elink_phy *phy, + uint8_t pmd_dis) +{ + struct bnx2x_softc *sc = params->sc; + /* Disable transmitter only for bootcodes which can enable it afterwards + * (for D3 link) + */ + if (pmd_dis) { + if (params->feature_config_flags & + ELINK_FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED) { + PMD_DRV_LOG(DEBUG, "Disabling PMD transmitter"); + } else { + PMD_DRV_LOG(DEBUG, "NOT disabling PMD transmitter"); + return; + } + } else { + PMD_DRV_LOG(DEBUG, "Enabling PMD transmitter"); + } + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_TX_DISABLE, pmd_dis); +} + +static uint8_t elink_get_gpio_port(struct elink_params *params) +{ + uint8_t gpio_port; + uint32_t swap_val, swap_override; + struct bnx2x_softc *sc = params->sc; + if (CHIP_IS_E2(sc)) { + gpio_port = SC_PATH(sc); + } else { + gpio_port = params->port; + } + swap_val = REG_RD(sc, NIG_REG_PORT_SWAP); + swap_override = REG_RD(sc, NIG_REG_STRAP_OVERRIDE); + return gpio_port ^ (swap_val && swap_override); +} + +static void elink_sfp_e1e2_set_transmitter(struct elink_params *params, + struct elink_phy *phy, uint8_t tx_en) +{ + uint16_t val; + uint8_t port = params->port; + struct bnx2x_softc *sc = params->sc; + uint32_t tx_en_mode; + + /* Disable/Enable transmitter ( TX laser of the SFP+ module.) 
*/ + tx_en_mode = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].sfp_ctrl)) & + PORT_HW_CFG_TX_LASER_MASK; + PMD_DRV_LOG(DEBUG, "Setting transmitter tx_en=%x for port %x " + "mode = %x", tx_en, port, tx_en_mode); + switch (tx_en_mode) { + case PORT_HW_CFG_TX_LASER_MDIO: + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, &val); + + if (tx_en) + val &= ~(1 << 15); + else + val |= (1 << 15); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, val); + break; + case PORT_HW_CFG_TX_LASER_GPIO0: + case PORT_HW_CFG_TX_LASER_GPIO1: + case PORT_HW_CFG_TX_LASER_GPIO2: + case PORT_HW_CFG_TX_LASER_GPIO3: + { + uint16_t gpio_pin; + uint8_t gpio_port, gpio_mode; + if (tx_en) + gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH; + else + gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW; + + gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0; + gpio_port = elink_get_gpio_port(params); + elink_cb_gpio_write(sc, gpio_pin, gpio_mode, gpio_port); + break; + } + default: + PMD_DRV_LOG(DEBUG, "Invalid TX_LASER_MDIO 0x%x", tx_en_mode); + break; + } +} + +static void elink_sfp_set_transmitter(struct elink_params *params, + struct elink_phy *phy, uint8_t tx_en) +{ + struct bnx2x_softc *sc = params->sc; + PMD_DRV_LOG(DEBUG, "Setting SFP+ transmitter to %d", tx_en); + if (CHIP_IS_E3(sc)) + elink_sfp_e3_set_transmitter(params, phy, tx_en); + else + elink_sfp_e1e2_set_transmitter(params, phy, tx_en); +} + +static elink_status_t elink_8726_read_sfp_module_eeprom(struct elink_phy *phy, + struct elink_params + *params, + uint8_t dev_addr, + uint16_t addr, + uint8_t byte_cnt, + uint8_t * o_buf, + __rte_unused uint8_t + is_init) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val = 0; + uint16_t i; + if (byte_cnt > ELINK_SFP_EEPROM_PAGE_SIZE) { + PMD_DRV_LOG(DEBUG, "Reading from eeprom is limited to 0xf"); + return ELINK_STATUS_ERROR; + } + /* Set the read command byte count */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, + (byte_cnt | (dev_addr << 8))); + + /* Set the read command address */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, + addr); + + /* Activate read command */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, + 0x2c0f); + + /* Wait up to 500us for command complete status */ + for (i = 0; i < 100; i++) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); + if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == + MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) + break; + DELAY(5); + } + + if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) != + MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) { + PMD_DRV_LOG(DEBUG, + "Got bad status 0x%x when reading from SFP+ EEPROM", + (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK)); + return ELINK_STATUS_ERROR; + } + + /* Read the buffer */ + for (i = 0; i < byte_cnt; i++) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val); + o_buf[i] = + (uint8_t) (val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK); + } + + for (i = 0; i < 100; i++) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); + if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == + MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) + return ELINK_STATUS_OK; + DELAY(1000 * 1); + } + return ELINK_STATUS_ERROR; +} + +static void elink_warpcore_power_module(struct elink_params *params, + uint8_t power) +{ + uint32_t pin_cfg; 
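+ /* The power-disable pin assignment for this port is described by the + * e3_sfp_ctrl word in shared memory (read below); if no pin is + * assigned (PIN_CFG_NA) the module power cannot be controlled and the + * request is silently ignored. + */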
+ struct bnx2x_softc *sc = params->sc; + + pin_cfg = (REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port]. + e3_sfp_ctrl)) & PORT_HW_CFG_E3_PWR_DIS_MASK) + >> PORT_HW_CFG_E3_PWR_DIS_SHIFT; + + if (pin_cfg == PIN_CFG_NA) + return; + PMD_DRV_LOG(DEBUG, "Setting SFP+ module power to %d using pin cfg %d", + power, pin_cfg); + /* Low ==> corresponding SFP+ module is powered + * high ==> the SFP+ module is powered down + */ + elink_set_cfg_pin(sc, pin_cfg, power ^ 1); +} + +static elink_status_t elink_warpcore_read_sfp_module_eeprom(__rte_unused struct + elink_phy *phy, + struct elink_params + *params, + uint8_t dev_addr, + uint16_t addr, + uint8_t byte_cnt, + uint8_t * o_buf, + uint8_t is_init) +{ + elink_status_t rc = ELINK_STATUS_OK; + uint8_t i, j = 0, cnt = 0; + uint32_t data_array[4]; + uint16_t addr32; + struct bnx2x_softc *sc = params->sc; + + if (byte_cnt > ELINK_SFP_EEPROM_PAGE_SIZE) { + PMD_DRV_LOG(DEBUG, + "Reading from eeprom is limited to 16 bytes"); + return ELINK_STATUS_ERROR; + } + + /* 4 byte aligned address */ + addr32 = addr & (~0x3); + do { + if ((!is_init) && (cnt == I2C_WA_PWR_ITER)) { + elink_warpcore_power_module(params, 0); + /* Note that 100us are not enough here */ + DELAY(1000 * 1); + elink_warpcore_power_module(params, 1); + } + rc = elink_bsc_read(params, sc, dev_addr, addr32, 0, byte_cnt, + data_array); + } while ((rc != ELINK_STATUS_OK) && (++cnt < I2C_WA_RETRY_CNT)); + + if (rc == ELINK_STATUS_OK) { + for (i = (addr - addr32); i < byte_cnt + (addr - addr32); i++) { + o_buf[j] = *((uint8_t *) data_array + i); + j++; + } + } + + return rc; +} + +static elink_status_t elink_8727_read_sfp_module_eeprom(struct elink_phy *phy, + struct elink_params + *params, + uint8_t dev_addr, + uint16_t addr, + uint8_t byte_cnt, + uint8_t * o_buf, + __rte_unused uint8_t + is_init) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val, i; + + if (byte_cnt > ELINK_SFP_EEPROM_PAGE_SIZE) { + PMD_DRV_LOG(DEBUG, "Reading from eeprom is limited to 0xf"); + return ELINK_STATUS_ERROR; + } + + /* Set 2-wire transfer rate of SFP+ module EEPROM + * to 100Khz since some DACs(direct attached cables) do + * not work at 400Khz. + */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR, + ((dev_addr << 8) | 1)); + + /* Need to read from 1.8000 to clear it */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); + + /* Set the read command byte count */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, + ((byte_cnt < 2) ? 
2 : byte_cnt)); + + /* Set the read command address */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, addr); + /* Set the destination address */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + 0x8004, MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF); + + /* Activate read command */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, 0x8002); + /* Wait appropriate time for two-wire command to finish before + * polling the status register + */ + DELAY(1000 * 1); + + /* Wait up to 500us for command complete status */ + for (i = 0; i < 100; i++) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); + if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == + MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) + break; + DELAY(5); + } + + if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) != + MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) { + PMD_DRV_LOG(DEBUG, + "Got bad status 0x%x when reading from SFP+ EEPROM", + (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK)); + return ELINK_STATUS_TIMEOUT; + } + + /* Read the buffer */ + for (i = 0; i < byte_cnt; i++) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val); + o_buf[i] = + (uint8_t) (val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK); + } + + for (i = 0; i < 100; i++) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); + if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == + MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) + return ELINK_STATUS_OK; + DELAY(1000 * 1); + } + + return ELINK_STATUS_ERROR; +} + +static elink_status_t elink_read_sfp_module_eeprom(struct elink_phy *phy, + struct elink_params *params, + uint8_t dev_addr, + uint16_t addr, + uint16_t byte_cnt, + uint8_t * o_buf) +{ + elink_status_t rc = 0; + uint8_t xfer_size; + uint8_t *user_data = o_buf; + read_sfp_module_eeprom_func_p read_func; + + if ((dev_addr != 0xa0) && (dev_addr != 0xa2)) { + PMD_DRV_LOG(DEBUG, "invalid dev_addr 0x%x", dev_addr); + return ELINK_STATUS_ERROR; + } + + switch (phy->type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726: + read_func = elink_8726_read_sfp_module_eeprom; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722: + read_func = elink_8727_read_sfp_module_eeprom; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: + read_func = elink_warpcore_read_sfp_module_eeprom; + break; + default: + return ELINK_OP_NOT_SUPPORTED; + } + + while (!rc && (byte_cnt > 0)) { + xfer_size = (byte_cnt > ELINK_SFP_EEPROM_PAGE_SIZE) ? 
+ ELINK_SFP_EEPROM_PAGE_SIZE : byte_cnt; + rc = read_func(phy, params, dev_addr, addr, xfer_size, + user_data, 0); + byte_cnt -= xfer_size; + user_data += xfer_size; + addr += xfer_size; + } + return rc; +} + +static elink_status_t elink_get_edc_mode(struct elink_phy *phy, + struct elink_params *params, + uint16_t * edc_mode) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t sync_offset = 0, phy_idx, media_types; + uint8_t gport, val[2], check_limiting_mode = 0; + *edc_mode = ELINK_EDC_MODE_LIMITING; + phy->media_type = ELINK_ETH_PHY_UNSPECIFIED; + /* First check for copper cable */ + if (elink_read_sfp_module_eeprom(phy, + params, + ELINK_I2C_DEV_ADDR_A0, + ELINK_SFP_EEPROM_CON_TYPE_ADDR, + 2, (uint8_t *) val) != 0) { + PMD_DRV_LOG(DEBUG, "Failed to read from SFP+ module EEPROM"); + return ELINK_STATUS_ERROR; + } + + switch (val[0]) { + case ELINK_SFP_EEPROM_CON_TYPE_VAL_COPPER: + { + uint8_t copper_module_type; + phy->media_type = ELINK_ETH_PHY_DA_TWINAX; + /* Check if its active cable (includes SFP+ module) + * of passive cable + */ + if (elink_read_sfp_module_eeprom(phy, + params, + ELINK_I2C_DEV_ADDR_A0, + ELINK_SFP_EEPROM_FC_TX_TECH_ADDR, + 1, + &copper_module_type) != + 0) { + PMD_DRV_LOG(DEBUG, + "Failed to read copper-cable-type" + " from SFP+ EEPROM"); + return ELINK_STATUS_ERROR; + } + + if (copper_module_type & + ELINK_SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { + PMD_DRV_LOG(DEBUG, + "Active Copper cable detected"); + if (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) + *edc_mode = ELINK_EDC_MODE_ACTIVE_DAC; + else + check_limiting_mode = 1; + } else if (copper_module_type & + ELINK_SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) + { + PMD_DRV_LOG(DEBUG, + "Passive Copper cable detected"); + *edc_mode = ELINK_EDC_MODE_PASSIVE_DAC; + } else { + PMD_DRV_LOG(DEBUG, + "Unknown copper-cable-type 0x%x !!!", + copper_module_type); + return ELINK_STATUS_ERROR; + } + break; + } + case ELINK_SFP_EEPROM_CON_TYPE_VAL_LC: + case ELINK_SFP_EEPROM_CON_TYPE_VAL_RJ45: + check_limiting_mode = 1; + if ((val[1] & (ELINK_SFP_EEPROM_COMP_CODE_SR_MASK | + ELINK_SFP_EEPROM_COMP_CODE_LR_MASK | + ELINK_SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) { + PMD_DRV_LOG(DEBUG, "1G SFP module detected"); + gport = params->port; + phy->media_type = ELINK_ETH_PHY_SFP_1G_FIBER; + if (phy->req_line_speed != ELINK_SPEED_1000) { + phy->req_line_speed = ELINK_SPEED_1000; + if (!CHIP_IS_E1x(sc)) { + gport = SC_PATH(sc) + + (params->port << 1); + } + elink_cb_event_log(sc, ELINK_LOG_ID_NON_10G_MODULE, gport); //"Warning: Link speed was forced to 1000Mbps." 
+ // " Current SFP module in port %d is not" + // " compliant with 10G Ethernet", + + } + } else { + int idx, cfg_idx = 0; + PMD_DRV_LOG(DEBUG, "10G Optic module detected"); + for (idx = ELINK_INT_PHY; idx < ELINK_MAX_PHYS; idx++) { + if (params->phy[idx].type == phy->type) { + cfg_idx = ELINK_LINK_CONFIG_IDX(idx); + break; + } + } + phy->media_type = ELINK_ETH_PHY_SFPP_10G_FIBER; + phy->req_line_speed = params->req_line_speed[cfg_idx]; + } + break; + default: + PMD_DRV_LOG(DEBUG, "Unable to determine module type 0x%x !!!", + val[0]); + return ELINK_STATUS_ERROR; + } + sync_offset = params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].media_type); + media_types = REG_RD(sc, sync_offset); + /* Update media type for non-PMF sync */ + for (phy_idx = ELINK_INT_PHY; phy_idx < ELINK_MAX_PHYS; phy_idx++) { + if (&(params->phy[phy_idx]) == phy) { + media_types &= ~(PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK << + (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * + phy_idx)); + media_types |= + ((phy-> + media_type & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) << + (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx)); + break; + } + } + REG_WR(sc, sync_offset, media_types); + if (check_limiting_mode) { + uint8_t options[ELINK_SFP_EEPROM_OPTIONS_SIZE]; + if (elink_read_sfp_module_eeprom(phy, + params, + ELINK_I2C_DEV_ADDR_A0, + ELINK_SFP_EEPROM_OPTIONS_ADDR, + ELINK_SFP_EEPROM_OPTIONS_SIZE, + options) != 0) { + PMD_DRV_LOG(DEBUG, + "Failed to read Option field from module EEPROM"); + return ELINK_STATUS_ERROR; + } + if ((options[0] & ELINK_SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK)) + *edc_mode = ELINK_EDC_MODE_LINEAR; + else + *edc_mode = ELINK_EDC_MODE_LIMITING; + } + PMD_DRV_LOG(DEBUG, "EDC mode is set to 0x%x", *edc_mode); + return ELINK_STATUS_OK; +} + +/* This function read the relevant field from the module (SFP+), and verify it + * is compliant with this board + */ +static elink_status_t elink_verify_sfp_module(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t val, cmd; + uint32_t fw_resp, fw_cmd_param; + char vendor_name[ELINK_SFP_EEPROM_VENDOR_NAME_SIZE + 1]; + char vendor_pn[ELINK_SFP_EEPROM_PART_NO_SIZE + 1]; + phy->flags &= ~ELINK_FLAGS_SFP_NOT_APPROVED; + val = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_feature_config[params->port]. 
+ config)); + if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == + PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT) { + PMD_DRV_LOG(DEBUG, "NOT enforcing module verification"); + return ELINK_STATUS_OK; + } + + if (params->feature_config_flags & + ELINK_FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY) { + /* Use specific phy request */ + cmd = DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL; + } else if (params->feature_config_flags & + ELINK_FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY) { + /* Use first phy request only in case of non-dual media */ + if (ELINK_DUAL_MEDIA(params)) { + PMD_DRV_LOG(DEBUG, + "FW does not support OPT MDL verification"); + return ELINK_STATUS_ERROR; + } + cmd = DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL; + } else { + /* No support in OPT MDL detection */ + PMD_DRV_LOG(DEBUG, "FW does not support OPT MDL verification"); + return ELINK_STATUS_ERROR; + } + + fw_cmd_param = ELINK_FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl); + fw_resp = elink_cb_fw_command(sc, cmd, fw_cmd_param); + if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) { + PMD_DRV_LOG(DEBUG, "Approved module"); + return ELINK_STATUS_OK; + } + + /* Format the warning message */ + if (elink_read_sfp_module_eeprom(phy, + params, + ELINK_I2C_DEV_ADDR_A0, + ELINK_SFP_EEPROM_VENDOR_NAME_ADDR, + ELINK_SFP_EEPROM_VENDOR_NAME_SIZE, + (uint8_t *) vendor_name)) + vendor_name[0] = '\0'; + else + vendor_name[ELINK_SFP_EEPROM_VENDOR_NAME_SIZE] = '\0'; + if (elink_read_sfp_module_eeprom(phy, + params, + ELINK_I2C_DEV_ADDR_A0, + ELINK_SFP_EEPROM_PART_NO_ADDR, + ELINK_SFP_EEPROM_PART_NO_SIZE, + (uint8_t *) vendor_pn)) + vendor_pn[0] = '\0'; + else + vendor_pn[ELINK_SFP_EEPROM_PART_NO_SIZE] = '\0'; + + elink_cb_event_log(sc, ELINK_LOG_ID_UNQUAL_IO_MODULE, params->port, vendor_name, vendor_pn); // "Warning: Unqualified SFP+ module detected," + // " Port %d from %s part number %s", + + if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) != + PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG) + phy->flags |= ELINK_FLAGS_SFP_NOT_APPROVED; + return ELINK_STATUS_ERROR; +} + +static elink_status_t elink_wait_for_sfp_module_initialized(struct elink_phy + *phy, + struct elink_params + *params) +{ + uint8_t val; + elink_status_t rc; + uint16_t timeout; + /* Initialization time after hot-plug may take up to 300ms for + * some phys type ( e.g. JDSU ) + */ + + for (timeout = 0; timeout < 60; timeout++) { + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) + rc = elink_warpcore_read_sfp_module_eeprom(phy, params, + ELINK_I2C_DEV_ADDR_A0, + 1, 1, &val, + 1); + else + rc = elink_read_sfp_module_eeprom(phy, params, + ELINK_I2C_DEV_ADDR_A0, + 1, 1, &val); + if (rc == 0) { + PMD_DRV_LOG(DEBUG, + "SFP+ module initialization took %d ms", + timeout * 5); + return ELINK_STATUS_OK; + } + DELAY(1000 * 5); + } + rc = elink_read_sfp_module_eeprom(phy, params, ELINK_I2C_DEV_ADDR_A0, + 1, 1, &val); + return rc; +} + +static void elink_8727_power_module(struct bnx2x_softc *sc, + struct elink_phy *phy, uint8_t is_power_up) +{ + /* Make sure GPIOs are not using for LED mode */ + uint16_t val; + /* In the GPIO register, bit 4 is use to determine if the GPIOs are + * operating as INPUT or as OUTPUT. 
Bit 1 is for input, and 0 for + * output + * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0 + * Bits 8-9 determine the GPIOs value for INPUT in case bit 4 val is 1 + * where the 1st bit is the over-current(only input), and 2nd bit is + * for power( only output ) + * + * In case of NOC feature is disabled and power is up, set GPIO control + * as input to enable listening of over-current indication + */ + if (phy->flags & ELINK_FLAGS_NOC) + return; + if (is_power_up) + val = (1 << 4); + else + /* Set GPIO control to OUTPUT, and set the power bit + * to according to the is_power_up + */ + val = (1 << 1); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL, val); +} + +static elink_status_t elink_8726_set_limiting_mode(struct bnx2x_softc *sc, + struct elink_phy *phy, + uint16_t edc_mode) +{ + uint16_t cur_limiting_mode; + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, &cur_limiting_mode); + PMD_DRV_LOG(DEBUG, "Current Limiting mode is 0x%x", cur_limiting_mode); + + if (edc_mode == ELINK_EDC_MODE_LIMITING) { + PMD_DRV_LOG(DEBUG, "Setting LIMITING MODE"); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, + ELINK_EDC_MODE_LIMITING); + } else { /* LRM mode ( default ) */ + + PMD_DRV_LOG(DEBUG, "Setting LRM MODE"); + + /* Changing to LRM mode takes quite few seconds. So do it only + * if current mode is limiting (default is LRM) + */ + if (cur_limiting_mode != ELINK_EDC_MODE_LIMITING) + return ELINK_STATUS_OK; + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_LRM_MODE, 0); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER2, 0x128); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL0, 0x4008); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_LRM_MODE, 0xaaaa); + } + return ELINK_STATUS_OK; +} + +static elink_status_t elink_8727_set_limiting_mode(struct bnx2x_softc *sc, + struct elink_phy *phy, + uint16_t edc_mode) +{ + uint16_t phy_identifier; + uint16_t rom_ver2_val; + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, &phy_identifier); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + (phy_identifier & ~(1 << 9))); + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER2, &rom_ver2_val); + /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, + (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff)); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + (phy_identifier | (1 << 9))); + + return ELINK_STATUS_OK; +} + +static void elink_8727_specific_func(struct elink_phy *phy, + struct elink_params *params, + uint32_t action) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val; + switch (action) { + case ELINK_DISABLE_TX: + elink_sfp_set_transmitter(params, phy, 0); + break; + case ELINK_ENABLE_TX: + if (!(phy->flags & ELINK_FLAGS_SFP_NOT_APPROVED)) + elink_sfp_set_transmitter(params, phy, 1); + break; + case ELINK_PHY_INIT: + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, + (1 << 2) | (1 << 5)); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, 0); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0006); + /* Make MOD_ABS give interrupt on change */ + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_OPT_CTRL, &val); + val |= (1 << 12); + if (phy->flags & ELINK_FLAGS_NOC) + val |= (3 << 5); + /* Set 8727 GPIOs to 
input to allow reading from the 8727 GPIO0 + * status which reflect SFP+ module over-current + */ + if (!(phy->flags & ELINK_FLAGS_NOC)) + val &= 0xff8f; /* Reset bits 4-6 */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, + val); + break; + default: + PMD_DRV_LOG(DEBUG, "Function 0x%x not supported by 8727", + action); + return; + } +} + +static void elink_set_e1e2_module_fault_led(struct elink_params *params, + uint8_t gpio_mode) +{ + struct bnx2x_softc *sc = params->sc; + + uint32_t fault_led_gpio = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info. + port_hw_config[params->port]. + sfp_ctrl)) & + PORT_HW_CFG_FAULT_MODULE_LED_MASK; + switch (fault_led_gpio) { + case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED: + return; + case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0: + case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1: + case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2: + case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3: + { + uint8_t gpio_port = elink_get_gpio_port(params); + uint16_t gpio_pin = fault_led_gpio - + PORT_HW_CFG_FAULT_MODULE_LED_GPIO0; + PMD_DRV_LOG(DEBUG, "Set fault module-detected led " + "pin %x port %x mode %x", + gpio_pin, gpio_port, gpio_mode); + elink_cb_gpio_write(sc, gpio_pin, gpio_mode, gpio_port); + } + break; + default: + PMD_DRV_LOG(DEBUG, "Error: Invalid fault led mode 0x%x", + fault_led_gpio); + } +} + +static void elink_set_e3_module_fault_led(struct elink_params *params, + uint8_t gpio_mode) +{ + uint32_t pin_cfg; + uint8_t port = params->port; + struct bnx2x_softc *sc = params->sc; + pin_cfg = (REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_sfp_ctrl)) & + PORT_HW_CFG_E3_FAULT_MDL_LED_MASK) >> + PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT; + PMD_DRV_LOG(DEBUG, "Setting Fault LED to %d using pin cfg %d", + gpio_mode, pin_cfg); + elink_set_cfg_pin(sc, pin_cfg, gpio_mode); +} + +static void elink_set_sfp_module_fault_led(struct elink_params *params, + uint8_t gpio_mode) +{ + struct bnx2x_softc *sc = params->sc; + PMD_DRV_LOG(DEBUG, "Setting SFP+ module fault LED to %d", gpio_mode); + if (CHIP_IS_E3(sc)) { + /* Low ==> if SFP+ module is supported otherwise + * High ==> if SFP+ module is not on the approved vendor list + */ + elink_set_e3_module_fault_led(params, gpio_mode); + } else + elink_set_e1e2_module_fault_led(params, gpio_mode); +} + +static void elink_warpcore_hw_reset(__rte_unused struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + elink_warpcore_power_module(params, 0); + /* Put Warpcore in low power mode */ + REG_WR(sc, MISC_REG_WC0_RESET, 0x0c0e); + + /* Put LCPLL in low power mode */ + REG_WR(sc, MISC_REG_LCPLL_E40_PWRDWN, 1); + REG_WR(sc, MISC_REG_LCPLL_E40_RESETB_ANA, 0); + REG_WR(sc, MISC_REG_LCPLL_E40_RESETB_DIG, 0); +} + +static void elink_power_sfp_module(struct elink_params *params, + struct elink_phy *phy, uint8_t power) +{ + PMD_DRV_LOG(DEBUG, "Setting SFP+ power to %x", power); + + switch (phy->type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722: + elink_8727_power_module(params->sc, phy, power); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: + elink_warpcore_power_module(params, power); + break; + default: + break; + } +} + +static void elink_warpcore_set_limiting_mode(struct elink_params *params, + struct elink_phy *phy, + uint16_t edc_mode) +{ + uint16_t val = 0; + uint16_t mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; + struct bnx2x_softc *sc = params->sc; + + 
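+ /* Each lane owns a 4-bit field of the shared firmware-mode register; + * the field for this lane is rewritten with the mode matching the + * detected EDC mode, and the lane microcode is then reset so that the + * new mode is picked up. + */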
uint8_t lane = elink_get_warpcore_lane(params); + /* This is a global register which controls all lanes */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val); + val &= ~(0xf << (lane << 2)); + + switch (edc_mode) { + case ELINK_EDC_MODE_LINEAR: + case ELINK_EDC_MODE_LIMITING: + mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; + break; + case ELINK_EDC_MODE_PASSIVE_DAC: + case ELINK_EDC_MODE_ACTIVE_DAC: + mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC; + break; + default: + break; + } + + val |= (mode << (lane << 2)); + elink_cl45_write(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, val); + /* A must read */ + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val); + + /* Restart microcode to re-read the new mode */ + elink_warpcore_reset_lane(sc, phy, 1); + elink_warpcore_reset_lane(sc, phy, 0); + +} + +static void elink_set_limiting_mode(struct elink_params *params, + struct elink_phy *phy, uint16_t edc_mode) +{ + switch (phy->type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726: + elink_8726_set_limiting_mode(params->sc, phy, edc_mode); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722: + elink_8727_set_limiting_mode(params->sc, phy, edc_mode); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: + elink_warpcore_set_limiting_mode(params, phy, edc_mode); + break; + } +} + +static elink_status_t elink_sfp_module_detection(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t edc_mode; + elink_status_t rc = ELINK_STATUS_OK; + + uint32_t val = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_feature_config[params-> + port]. + config)); + /* Enabled transmitter by default */ + elink_sfp_set_transmitter(params, phy, 1); + PMD_DRV_LOG(DEBUG, "SFP+ module plugged in/out detected on port %d", + params->port); + /* Power up module */ + elink_power_sfp_module(params, phy, 1); + if (elink_get_edc_mode(phy, params, &edc_mode) != 0) { + PMD_DRV_LOG(DEBUG, "Failed to get valid module type"); + return ELINK_STATUS_ERROR; + } else if (elink_verify_sfp_module(phy, params) != 0) { + /* Check SFP+ module compatibility */ + PMD_DRV_LOG(DEBUG, "Module verification failed!!"); + rc = ELINK_STATUS_ERROR; + /* Turn on fault module-detected led */ + elink_set_sfp_module_fault_led(params, + MISC_REGISTERS_GPIO_HIGH); + + /* Check if need to power down the SFP+ module */ + if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == + PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN) { + PMD_DRV_LOG(DEBUG, "Shutdown SFP+ module!!"); + elink_power_sfp_module(params, phy, 0); + return rc; + } + } else { + /* Turn off fault module-detected led */ + elink_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW); + } + + /* Check and set limiting mode / LRM mode on 8726. On 8727 it + * is done automatically + */ + elink_set_limiting_mode(params, phy, edc_mode); + + /* Disable transmit for this module if the module is not approved, and + * laser needs to be disabled. 
+ */ + if ((rc != 0) && + ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == + PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)) + elink_sfp_set_transmitter(params, phy, 0); + + return rc; +} + +void elink_handle_module_detect_int(struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + struct elink_phy *phy; + uint32_t gpio_val; + uint8_t gpio_num, gpio_port; + if (CHIP_IS_E3(sc)) { + phy = &params->phy[ELINK_INT_PHY]; + /* Always enable TX laser, will be disabled in case of fault */ + elink_sfp_set_transmitter(params, phy, 1); + } else { + phy = &params->phy[ELINK_EXT_PHY1]; + } + if (elink_get_mod_abs_int_cfg(sc, params->shmem_base, + params->port, &gpio_num, &gpio_port) == + ELINK_STATUS_ERROR) { + PMD_DRV_LOG(DEBUG, "Failed to get MOD_ABS interrupt config"); + return; + } + + /* Set valid module led off */ + elink_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH); + + /* Get current gpio val reflecting module plugged in / out */ + gpio_val = elink_cb_gpio_read(sc, gpio_num, gpio_port); + + /* Call the handling function in case a module is detected */ + if (gpio_val == 0) { + elink_set_mdio_emac_per_phy(sc, params); + elink_set_aer_mmd(params, phy); + + elink_power_sfp_module(params, phy, 1); + elink_cb_gpio_int_write(sc, gpio_num, + MISC_REGISTERS_GPIO_INT_OUTPUT_CLR, + gpio_port); + if (elink_wait_for_sfp_module_initialized(phy, params) == 0) { + elink_sfp_module_detection(phy, params); + if (CHIP_IS_E3(sc)) { + uint16_t rx_tx_in_reset; + /* In case WC is out of reset, reconfigure the + * link speed while taking into account 1G + * module limitation. + */ + elink_cl45_read(sc, phy, + MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC6, + &rx_tx_in_reset); + if ((!rx_tx_in_reset) && + (params->link_flags & + ELINK_PHY_INITIALIZED)) { + elink_warpcore_reset_lane(sc, phy, 1); + elink_warpcore_config_sfi(phy, params); + elink_warpcore_reset_lane(sc, phy, 0); + } + } + } else { + PMD_DRV_LOG(DEBUG, "SFP+ module is not initialized"); + } + } else { + elink_cb_gpio_int_write(sc, gpio_num, + MISC_REGISTERS_GPIO_INT_OUTPUT_SET, + gpio_port); + /* Module was plugged out. + * Disable transmit for this module + */ + phy->media_type = ELINK_ETH_PHY_NOT_PRESENT; + } +} + +/******************************************************************/ +/* Used by 8706 and 8727 */ +/******************************************************************/ +static void elink_sfp_mask_fault(struct bnx2x_softc *sc, + struct elink_phy *phy, + uint16_t alarm_status_offset, + uint16_t alarm_ctrl_offset) +{ + uint16_t alarm_status, val; + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, alarm_status_offset, &alarm_status); + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, alarm_status_offset, &alarm_status); + /* Mask or enable the fault event.
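+ * Reading the alarm status twice above clears the latched indication; + * bit 0 of the control register is then cleared while a fault is + * present and set again once it is gone.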
*/ + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, &val); + if (alarm_status & (1 << 0)) + val &= ~(1 << 0); + else + val |= (1 << 0); + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, val); +} + +/******************************************************************/ +/* common BNX2X8706/BNX2X8726 PHY SECTION */ +/******************************************************************/ +static uint8_t elink_8706_8726_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + uint8_t link_up = 0; + uint16_t val1, val2, rx_sd, pcs_status; + struct bnx2x_softc *sc = params->sc; + PMD_DRV_LOG(DEBUG, "XGXS 8706/8726"); + /* Clear RX Alarm */ + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2); + + elink_sfp_mask_fault(sc, phy, MDIO_PMA_LASI_TXSTAT, + MDIO_PMA_LASI_TXCTRL); + + /* Clear LASI indication */ + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2); + PMD_DRV_LOG(DEBUG, "8706/8726 LASI status 0x%x--> 0x%x", val1, val2); + + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd); + elink_cl45_read(sc, phy, + MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &pcs_status); + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2); + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2); + + PMD_DRV_LOG(DEBUG, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps" + " link_status 0x%x", rx_sd, pcs_status, val2); + /* Link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status + * are set, or if the autoneg bit 1 is set + */ + link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1 << 1))); + if (link_up) { + if (val2 & (1 << 1)) + vars->line_speed = ELINK_SPEED_1000; + else + vars->line_speed = ELINK_SPEED_10000; + elink_ext_phy_resolve_fc(phy, params, vars); + vars->duplex = DUPLEX_FULL; + } + + /* Capture 10G link fault. Read twice to clear stale value. 
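+ * A Tx fault left latched in the LASI Tx status (bit 0) is reported + * back to the caller through vars->fault_detected.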
*/ + if (vars->line_speed == ELINK_SPEED_10000) { + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, + MDIO_PMA_LASI_TXSTAT, &val1); + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, + MDIO_PMA_LASI_TXSTAT, &val1); + if (val1 & (1 << 0)) + vars->fault_detected = 1; + } + + return link_up; +} + +/******************************************************************/ +/* BNX2X8706 PHY SECTION */ +/******************************************************************/ +static uint8_t elink_8706_config_init(struct elink_phy *phy, + struct elink_params *params, + __rte_unused struct elink_vars *vars) +{ + uint32_t tx_en_mode; + uint16_t cnt, val, tmp1; + struct bnx2x_softc *sc = params->sc; + + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + /* HW reset */ + elink_ext_phy_hw_reset(sc, params->port); + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); + elink_wait_reset_complete(sc, phy, params); + + /* Wait until fw is loaded */ + for (cnt = 0; cnt < 100; cnt++) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val); + if (val) + break; + DELAY(1000 * 10); + } + PMD_DRV_LOG(DEBUG, "XGXS 8706 is initialized after %d ms", cnt); + if ((params->feature_config_flags & + ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) { + uint8_t i; + uint16_t reg; + for (i = 0; i < 4; i++) { + reg = MDIO_XS_8706_REG_BANK_RX0 + + i * (MDIO_XS_8706_REG_BANK_RX1 - + MDIO_XS_8706_REG_BANK_RX0); + elink_cl45_read(sc, phy, MDIO_XS_DEVAD, reg, &val); + /* Clear first 3 bits of the control */ + val &= ~0x7; + /* Set control bits according to configuration */ + val |= (phy->rx_preemphasis[i] & 0x7); + PMD_DRV_LOG(DEBUG, "Setting RX Equalizer to BNX2X8706" + " reg 0x%x <-- val 0x%x", reg, val); + elink_cl45_write(sc, phy, MDIO_XS_DEVAD, reg, val); + } + } + /* Force speed */ + if (phy->req_line_speed == ELINK_SPEED_10000) { + PMD_DRV_LOG(DEBUG, "XGXS 8706 force 10Gbps"); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_DIGITAL_CTRL, 0x400); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, 0); + /* Arm LASI for link and Tx fault. */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 3); + } else { + /* Force 1Gbps using autoneg with 1G advertisement */ + + /* Allow CL37 through CL73 */ + PMD_DRV_LOG(DEBUG, "XGXS 8706 AutoNeg"); + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c); + + /* Enable Full-Duplex advertisement on CL37 */ + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LP, 0x0020); + /* Enable CL37 AN */ + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); + /* 1G support */ + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_ADV, (1 << 5)); + + /* Enable clause 73 AN */ + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, 0x0400); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004); + } + elink_save_bnx2x_spirom_ver(sc, phy, params->port); + + /* If TX Laser is controlled by GPIO_0, do not let PHY go into low + * power mode, if TX Laser is disabled + */ + + tx_en_mode = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port]. 
+ sfp_ctrl)) + & PORT_HW_CFG_TX_LASER_MASK; + + if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) { + PMD_DRV_LOG(DEBUG, "Enabling TXONOFF_PWRDN_DIS"); + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, + &tmp1); + tmp1 |= 0x1; + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, + tmp1); + } + + return ELINK_STATUS_OK; +} + +static elink_status_t elink_8706_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + return elink_8706_8726_read_status(phy, params, vars); +} + +/******************************************************************/ +/* BNX2X8726 PHY SECTION */ +/******************************************************************/ +static void elink_8726_config_loopback(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + PMD_DRV_LOG(DEBUG, "PMA/PMD ext_phy_loopback: 8726"); + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001); +} + +static void elink_8726_external_rom_boot(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + /* Need to wait 100ms after reset */ + DELAY(1000 * 100); + + /* Micro controller re-boot */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x018B); + + /* Set soft reset */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL1, 0x0001); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); + + /* Wait for 150ms for microcode load */ + DELAY(1000 * 150); + + /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL1, 0x0000); + + DELAY(1000 * 200); + elink_save_bnx2x_spirom_ver(sc, phy, params->port); +} + +static uint8_t elink_8726_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val1; + uint8_t link_up = elink_8706_8726_read_status(phy, params, vars); + if (link_up) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, + &val1); + if (val1 & (1 << 15)) { + PMD_DRV_LOG(DEBUG, "Tx is disabled"); + link_up = 0; + vars->line_speed = 0; + } + } + return link_up; +} + +static elink_status_t elink_8726_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + PMD_DRV_LOG(DEBUG, "Initializing BNX2X8726"); + + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1 << 15); + elink_wait_reset_complete(sc, phy, params); + + elink_8726_external_rom_boot(phy, params); + + /* Need to call module detected on initialization since the module + * detection triggered by actual module insertion might occur before + * driver is loaded, and when driver is loaded, it reset all + * registers, including the transmitter + */ + elink_sfp_module_detection(phy, params); + + if (phy->req_line_speed == ELINK_SPEED_1000) { + PMD_DRV_LOG(DEBUG, "Setting 1G force"); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x5); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, 0x400); + } else if ((phy->req_line_speed == 
ELINK_SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) && + ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) != + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { + PMD_DRV_LOG(DEBUG, "Setting 1G clause37"); + /* Set Flow control */ + elink_ext_phy_set_pause(params, phy, vars); + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20); + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c); + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, 0x0020); + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); + /* Enable RX-ALARM control to receive interrupt for 1G speed + * change + */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x4); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, 0x400); + + } else { /* Default 10G. Set only LASI control */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 1); + } + + /* Set TX PreEmphasis if needed */ + if ((params->feature_config_flags & + ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) { + PMD_DRV_LOG(DEBUG, + "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x", + phy->tx_preemphasis[0], phy->tx_preemphasis[1]); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8726_TX_CTRL1, + phy->tx_preemphasis[0]); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8726_TX_CTRL2, + phy->tx_preemphasis[1]); + } + + return ELINK_STATUS_OK; + +} + +static void elink_8726_link_reset(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + PMD_DRV_LOG(DEBUG, "elink_8726_link_reset port %d", params->port); + /* Set serial boot control for external load */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001); +} + +/******************************************************************/ +/* BNX2X8727 PHY SECTION */ +/******************************************************************/ + +static void elink_8727_set_link_led(struct elink_phy *phy, + struct elink_params *params, uint8_t mode) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t led_mode_bitmask = 0; + uint16_t gpio_pins_bitmask = 0; + uint16_t val; + /* Only NOC flavor requires to set the LED specifically */ + if (!(phy->flags & ELINK_FLAGS_NOC)) + return; + switch (mode) { + case ELINK_LED_MODE_FRONT_PANEL_OFF: + case ELINK_LED_MODE_OFF: + led_mode_bitmask = 0; + gpio_pins_bitmask = 0x03; + break; + case ELINK_LED_MODE_ON: + led_mode_bitmask = 0; + gpio_pins_bitmask = 0x02; + break; + case ELINK_LED_MODE_OPER: + led_mode_bitmask = 0x60; + gpio_pins_bitmask = 0x11; + break; + } + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, &val); + val &= 0xff8f; + val |= led_mode_bitmask; + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val); + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL, &val); + val &= 0xffe0; + val |= gpio_pins_bitmask; + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL, val); +} + +static void elink_8727_hw_reset(__rte_unused struct elink_phy *phy, + struct elink_params *params) +{ + uint32_t swap_val, swap_override; + uint8_t port; + /* The PHY reset is controlled by GPIO 1. 
Fake the port number + * to cancel the swap done in set_gpio() + */ + struct bnx2x_softc *sc = params->sc; + swap_val = REG_RD(sc, NIG_REG_PORT_SWAP); + swap_override = REG_RD(sc, NIG_REG_STRAP_OVERRIDE); + port = (swap_val && swap_override) ^ 1; + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, port); +} + +static void elink_8727_config_speed(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t tmp1, val; + /* Set option 1G speed */ + if ((phy->req_line_speed == ELINK_SPEED_1000) || + (phy->media_type == ELINK_ETH_PHY_SFP_1G_FIBER)) { + PMD_DRV_LOG(DEBUG, "Setting 1G force"); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD); + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1); + PMD_DRV_LOG(DEBUG, "1.7 = 0x%x", tmp1); + /* Power down the XAUI until link is up in case of dual-media + * and 1G + */ + if (ELINK_DUAL_MEDIA(params)) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_GP, &val); + val |= (3 << 10); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_GP, val); + } + } else if ((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) && + ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) && + ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) != + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { + + PMD_DRV_LOG(DEBUG, "Setting 1G clause37"); + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0); + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300); + } else { + /* Since the 8727 has only single reset pin, need to set the 10G + * registers although it is default + */ + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, + 0x0020); + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, + 0x0008); + } +} + +static elink_status_t elink_8727_config_init(struct elink_phy *phy, + struct elink_params *params, + __rte_unused struct elink_vars + *vars) +{ + uint32_t tx_en_mode; + uint16_t tmp1, mod_abs, tmp2; + struct bnx2x_softc *sc = params->sc; + /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */ + + elink_wait_reset_complete(sc, phy, params); + + PMD_DRV_LOG(DEBUG, "Initializing BNX2X8727"); + + elink_8727_specific_func(phy, params, ELINK_PHY_INIT); + /* Initially configure MOD_ABS to interrupt when module is + * presence( bit 8) + */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); + /* Set EDC off by setting OPTXLOS signal input to low (bit 9). 
+ * When the EDC is off it locks onto a reference clock and avoids + * becoming 'lost' + */ + mod_abs &= ~(1 << 8); + if (!(phy->flags & ELINK_FLAGS_NOC)) + mod_abs &= ~(1 << 9); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); + + /* Enable/Disable PHY transmitter output */ + elink_set_disable_pmd_transmit(params, phy, 0); + + elink_8727_power_module(sc, phy, 1); + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); + + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); + + elink_8727_config_speed(phy, params); + + /* Set TX PreEmphasis if needed */ + if ((params->feature_config_flags & + ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) { + PMD_DRV_LOG(DEBUG, "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x", + phy->tx_preemphasis[0], phy->tx_preemphasis[1]); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL1, + phy->tx_preemphasis[0]); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL2, + phy->tx_preemphasis[1]); + } + + /* If TX Laser is controlled by GPIO_0, do not let PHY go into low + * power mode, if TX Laser is disabled + */ + tx_en_mode = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port]. + sfp_ctrl)) + & PORT_HW_CFG_TX_LASER_MASK; + + if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) { + + PMD_DRV_LOG(DEBUG, "Enabling TXONOFF_PWRDN_DIS"); + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, + &tmp2); + tmp2 |= 0x1000; + tmp2 &= 0xFFEF; + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, + tmp2); + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, &tmp2); + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, (tmp2 & 0x7fff)); + } + + return ELINK_STATUS_OK; +} + +static void elink_8727_handle_mod_abs(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t mod_abs, rx_alarm_status; + uint32_t val = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_feature_config[params-> + port].config)); + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, + &mod_abs); + if (mod_abs & (1 << 8)) { + + /* Module is absent */ + PMD_DRV_LOG(DEBUG, "MOD_ABS indication show module is absent"); + phy->media_type = ELINK_ETH_PHY_NOT_PRESENT; + /* 1. Set mod_abs to detect next module + * presence event + * 2. Set EDC off by setting OPTXLOS signal input to low + * (bit 9). + * When the EDC is off it locks onto a reference clock and + * avoids becoming 'lost'. + */ + mod_abs &= ~(1 << 8); + if (!(phy->flags & ELINK_FLAGS_NOC)) + mod_abs &= ~(1 << 9); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); + + /* Clear RX alarm since it stays up as long as + * the mod_abs wasn't changed + */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); + + } else { + /* Module is present */ + PMD_DRV_LOG(DEBUG, "MOD_ABS indication show module is present"); + /* First disable transmitter, and if the module is ok, the + * module_detection will enable it + * 1. Set mod_abs to detect next module absent event ( bit 8) + * 2. Restore the default polarity of the OPRXLOS signal and + * this signal will then correctly indicate the presence or + * absence of the Rx signal. 
(bit 9) + */ + mod_abs |= (1 << 8); + if (!(phy->flags & ELINK_FLAGS_NOC)) + mod_abs |= (1 << 9); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); + + /* Clear RX alarm since it stays up as long as the mod_abs + * wasn't changed. This is need to be done before calling the + * module detection, otherwise it will clear* the link update + * alarm + */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); + + if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == + PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) + elink_sfp_set_transmitter(params, phy, 0); + + if (elink_wait_for_sfp_module_initialized(phy, params) == 0) { + elink_sfp_module_detection(phy, params); + } else { + PMD_DRV_LOG(DEBUG, "SFP+ module is not initialized"); + } + + /* Reconfigure link speed based on module type limitations */ + elink_8727_config_speed(phy, params); + } + + PMD_DRV_LOG(DEBUG, "8727 RX_ALARM_STATUS 0x%x", rx_alarm_status); + /* No need to check link status in case of module plugged in/out */ +} + +static uint8_t elink_8727_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t link_up = 0, oc_port = params->port; + uint16_t link_status = 0; + uint16_t rx_alarm_status, lasi_ctrl, val1; + + /* If PHY is not initialized, do not check link status */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, &lasi_ctrl); + if (!lasi_ctrl) + return 0; + + /* Check the LASI on Rx */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); + vars->line_speed = 0; + PMD_DRV_LOG(DEBUG, "8727 RX_ALARM_STATUS 0x%x", rx_alarm_status); + + elink_sfp_mask_fault(sc, phy, MDIO_PMA_LASI_TXSTAT, + MDIO_PMA_LASI_TXCTRL); + + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); + + PMD_DRV_LOG(DEBUG, "8727 LASI status 0x%x", val1); + + /* Clear MSG-OUT */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1); + + /* If a module is present and there is need to check + * for over current + */ + if (!(phy->flags & ELINK_FLAGS_NOC) && !(rx_alarm_status & (1 << 5))) { + /* Check over-current using 8727 GPIO0 input */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL, + &val1); + + if ((val1 & (1 << 8)) == 0) { + if (!CHIP_IS_E1x(sc)) + oc_port = SC_PATH(sc) + (params->port << 1); + PMD_DRV_LOG(DEBUG, + "8727 Power fault has been detected on port %d", + oc_port); + elink_cb_event_log(sc, ELINK_LOG_ID_OVER_CURRENT, oc_port); //"Error: Power fault on Port %d has " + // "been detected and the power to " + // "that SFP+ module has been removed " + // "to prevent failure of the card. 
" + // "Please remove the SFP+ module and " + // "restart the system to clear this " + // "error.", + /* Disable all RX_ALARMs except for mod_abs */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_LASI_RXCTRL, (1 << 5)); + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, &val1); + /* Wait for module_absent_event */ + val1 |= (1 << 8); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, val1); + /* Clear RX alarm */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); + elink_8727_power_module(params->sc, phy, 0); + return 0; + } + } + + /* Over current check */ + /* When module absent bit is set, check module */ + if (rx_alarm_status & (1 << 5)) { + elink_8727_handle_mod_abs(phy, params); + /* Enable all mod_abs and link detection bits */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, + ((1 << 5) | (1 << 2))); + } + + if (!(phy->flags & ELINK_FLAGS_SFP_NOT_APPROVED)) { + PMD_DRV_LOG(DEBUG, "Enabling 8727 TX laser"); + elink_sfp_set_transmitter(params, phy, 1); + } else { + PMD_DRV_LOG(DEBUG, "Tx is disabled"); + return 0; + } + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status); + + /* Bits 0..2 --> speed detected, + * Bits 13..15--> link is down + */ + if ((link_status & (1 << 2)) && (!(link_status & (1 << 15)))) { + link_up = 1; + vars->line_speed = ELINK_SPEED_10000; + PMD_DRV_LOG(DEBUG, "port %x: External link up in 10G", + params->port); + } else if ((link_status & (1 << 0)) && (!(link_status & (1 << 13)))) { + link_up = 1; + vars->line_speed = ELINK_SPEED_1000; + PMD_DRV_LOG(DEBUG, "port %x: External link up in 1G", + params->port); + } else { + link_up = 0; + PMD_DRV_LOG(DEBUG, "port %x: External link is down", + params->port); + } + + /* Capture 10G link fault. */ + if (vars->line_speed == ELINK_SPEED_10000) { + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, + MDIO_PMA_LASI_TXSTAT, &val1); + + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, + MDIO_PMA_LASI_TXSTAT, &val1); + + if (val1 & (1 << 0)) { + vars->fault_detected = 1; + } + } + + if (link_up) { + elink_ext_phy_resolve_fc(phy, params, vars); + vars->duplex = DUPLEX_FULL; + PMD_DRV_LOG(DEBUG, "duplex = 0x%x", vars->duplex); + } + + if ((ELINK_DUAL_MEDIA(params)) && + (phy->req_line_speed == ELINK_SPEED_1000)) { + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_GP, &val1); + /* In case of dual-media board and 1G, power up the XAUI side, + * otherwise power it down. 
For 10G it is done automatically + */ + if (link_up) + val1 &= ~(3 << 10); + else + val1 |= (3 << 10); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_GP, val1); + } + return link_up; +} + +static void elink_8727_link_reset(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + + /* Enable/Disable PHY transmitter output */ + elink_set_disable_pmd_transmit(params, phy, 1); + + /* Disable Transmitter */ + elink_sfp_set_transmitter(params, phy, 0); + /* Clear LASI */ + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0); + +} + +/******************************************************************/ +/* BNX2X8481/BNX2X84823/BNX2X84833 PHY SECTION */ +/******************************************************************/ +static void elink_save_848xx_spirom_version(struct elink_phy *phy, + struct bnx2x_softc *sc, uint8_t port) +{ + uint16_t val, fw_ver2, cnt, i; + static struct elink_reg_set reg_set[] = { + {MDIO_PMA_DEVAD, 0xA819, 0x0014}, + {MDIO_PMA_DEVAD, 0xA81A, 0xc200}, + {MDIO_PMA_DEVAD, 0xA81B, 0x0000}, + {MDIO_PMA_DEVAD, 0xA81C, 0x0300}, + {MDIO_PMA_DEVAD, 0xA817, 0x0009} + }; + uint16_t fw_ver1; + + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834)) { + elink_cl45_read(sc, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1); + elink_save_spirom_version(sc, port, fw_ver1 & 0xfff, + phy->ver_addr); + } else { + /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */ + /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + elink_cl45_write(sc, phy, reg_set[i].devad, + reg_set[i].reg, reg_set[i].val); + + for (cnt = 0; cnt < 100; cnt++) { + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xA818, &val); + if (val & 1) + break; + DELAY(5); + } + if (cnt == 100) { + PMD_DRV_LOG(DEBUG, "Unable to read 848xx " + "phy fw version(1)"); + elink_save_spirom_version(sc, port, 0, phy->ver_addr); + return; + } + + /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */ + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000); + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A); + for (cnt = 0; cnt < 100; cnt++) { + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xA818, &val); + if (val & 1) + break; + DELAY(5); + } + if (cnt == 100) { + PMD_DRV_LOG(DEBUG, "Unable to read 848xx phy fw " + "version(2)"); + elink_save_spirom_version(sc, port, 0, phy->ver_addr); + return; + } + + /* lower 16 bits of the register SPI_FW_STATUS */ + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1); + /* upper 16 bits of register SPI_FW_STATUS */ + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2); + + elink_save_spirom_version(sc, port, (fw_ver2 << 16) | fw_ver1, + phy->ver_addr); + } + +} + +static void elink_848xx_set_led(struct bnx2x_softc *sc, struct elink_phy *phy) +{ + uint16_t val, offset, i; + static struct elink_reg_set reg_set[] = { + {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080}, + {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018}, + {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006}, + {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_BLINK, 0x0000}, + {MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH, + MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ}, + {MDIO_AN_DEVAD, 0xFFFB, 0xFFFD} + }; + /* PHYC_CTL_LED_CTL */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LINK_SIGNAL, &val); + val &= 0xFE00; + val |= 0x0092; + + 
elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LINK_SIGNAL, val); + + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg, + reg_set[i].val); + + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834)) + offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1; + else + offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1; + + /* stretch_en for LED3 */ + elink_cl45_read_or_write(sc, phy, + MDIO_PMA_DEVAD, offset, + MDIO_PMA_REG_84823_LED3_STRETCH_EN); +} + +static void elink_848xx_specific_func(struct elink_phy *phy, + struct elink_params *params, + uint32_t action) +{ + struct bnx2x_softc *sc = params->sc; + switch (action) { + case ELINK_PHY_INIT: + if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) && + (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834)) { + /* Save spirom version */ + elink_save_848xx_spirom_version(phy, sc, params->port); + } + /* This phy uses the NIG latch mechanism since link indication + * arrives through its LED4 and not via its LASI signal, so we + * get steady signal instead of clear on read + */ + elink_bits_en(sc, NIG_REG_LATCH_BC_0 + params->port * 4, + 1 << ELINK_NIG_LATCH_BC_ENABLE_MI_INT); + + elink_848xx_set_led(sc, phy); + break; + } +} + +static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t autoneg_val, an_1000_val, an_10_100_val; + + elink_848xx_specific_func(phy, params, ELINK_PHY_INIT); + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000); + + /* set 1000 speed advertisement */ + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, + &an_1000_val); + + elink_ext_phy_set_pause(params, phy, vars); + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_LEGACY_AN_ADV, &an_10_100_val); + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_MII_CTRL, + &autoneg_val); + /* Disable forced speed */ + autoneg_val &= + ~((1 << 6) | (1 << 8) | (1 << 9) | (1 << 12) | (1 << 13)); + an_10_100_val &= ~((1 << 5) | (1 << 6) | (1 << 7) | (1 << 8)); + + if (((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || + (phy->req_line_speed == ELINK_SPEED_1000)) { + an_1000_val |= (1 << 8); + autoneg_val |= (1 << 9 | 1 << 12); + if (phy->req_duplex == DUPLEX_FULL) + an_1000_val |= (1 << 9); + PMD_DRV_LOG(DEBUG, "Advertising 1G"); + } else + an_1000_val &= ~((1 << 8) | (1 << 9)); + + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, + an_1000_val); + + /* Set 10/100 speed advertisement */ + if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG) { + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) { + /* Enable autoneg and restart autoneg for legacy speeds + */ + autoneg_val |= (1 << 9 | 1 << 12); + an_10_100_val |= (1 << 8); + PMD_DRV_LOG(DEBUG, "Advertising 100M-FD"); + } + + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) { + /* Enable autoneg and restart autoneg for legacy speeds + */ + autoneg_val |= (1 << 9 | 1 << 12); + an_10_100_val |= (1 << 7); + PMD_DRV_LOG(DEBUG, "Advertising 100M-HD"); + } + + if ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) && + (phy->supported & ELINK_SUPPORTED_10baseT_Full)) { + an_10_100_val |= (1 << 6); + autoneg_val |= (1 << 9 | 1 << 12); + PMD_DRV_LOG(DEBUG, "Advertising 10M-FD"); + } + + if 
((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) && + (phy->supported & ELINK_SUPPORTED_10baseT_Half)) { + an_10_100_val |= (1 << 5); + autoneg_val |= (1 << 9 | 1 << 12); + PMD_DRV_LOG(DEBUG, "Advertising 10M-HD"); + } + } + + /* Only 10/100 are allowed to work in FORCE mode */ + if ((phy->req_line_speed == ELINK_SPEED_100) && + (phy->supported & + (ELINK_SUPPORTED_100baseT_Half | ELINK_SUPPORTED_100baseT_Full))) { + autoneg_val |= (1 << 13); + /* Enabled AUTO-MDIX when autoneg is disabled */ + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL, + (1 << 15 | 1 << 9 | 7 << 0)); + /* The PHY needs this set even for forced link. */ + an_10_100_val |= (1 << 8) | (1 << 7); + PMD_DRV_LOG(DEBUG, "Setting 100M force"); + } + if ((phy->req_line_speed == ELINK_SPEED_10) && + (phy->supported & + (ELINK_SUPPORTED_10baseT_Half | ELINK_SUPPORTED_10baseT_Full))) { + /* Enabled AUTO-MDIX when autoneg is disabled */ + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL, + (1 << 15 | 1 << 9 | 7 << 0)); + PMD_DRV_LOG(DEBUG, "Setting 10M force"); + } + + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_AN_ADV, + an_10_100_val); + + if (phy->req_duplex == DUPLEX_FULL) + autoneg_val |= (1 << 8); + + /* Always write this if this is not 84833/4. + * For 84833/4, write it only when it's a forced speed. + */ + if (((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) && + (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834)) || + ((autoneg_val & (1 << 12)) == 0)) + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val); + + if (((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) || + (phy->req_line_speed == ELINK_SPEED_10000)) { + PMD_DRV_LOG(DEBUG, "Advertising 10G"); + /* Restart autoneg for 10G */ + + elink_cl45_read_or_write(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, + 0x1000); + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x3200); + } else + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, 1); + + return ELINK_STATUS_OK; +} + +static elink_status_t elink_8481_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + /* Restore normal power mode */ + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + + /* HW reset */ + elink_ext_phy_hw_reset(sc, params->port); + elink_wait_reset_complete(sc, phy, params); + + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1 << 15); + return elink_848xx_cmn_config_init(phy, params, vars); +} + +#define PHY84833_CMDHDLR_WAIT 300 +#define PHY84833_CMDHDLR_MAX_ARGS 5 +static elink_status_t elink_84833_cmd_hdlr(struct elink_phy *phy, + struct elink_params *params, + uint16_t fw_cmd, uint16_t cmd_args[], + int argc) +{ + int idx; + uint16_t val; + struct bnx2x_softc *sc = params->sc; + /* Write CMD_OPEN_OVERRIDE to STATUS reg */ + elink_cl45_write(sc, phy, MDIO_CTL_DEVAD, + MDIO_84833_CMD_HDLR_STATUS, + PHY84833_STATUS_CMD_OPEN_OVERRIDE); + for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) { + elink_cl45_read(sc, phy, MDIO_CTL_DEVAD, + MDIO_84833_CMD_HDLR_STATUS, &val); + if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS) + break; + DELAY(1000 * 1); + } + if (idx >= PHY84833_CMDHDLR_WAIT) { + PMD_DRV_LOG(DEBUG, "FW cmd: FW not ready."); + return ELINK_STATUS_ERROR; + } + + /* Prepare argument(s) and issue 
command */ + for (idx = 0; idx < argc; idx++) { + elink_cl45_write(sc, phy, MDIO_CTL_DEVAD, + MDIO_84833_CMD_HDLR_DATA1 + idx, + cmd_args[idx]); + } + elink_cl45_write(sc, phy, MDIO_CTL_DEVAD, + MDIO_84833_CMD_HDLR_COMMAND, fw_cmd); + for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) { + elink_cl45_read(sc, phy, MDIO_CTL_DEVAD, + MDIO_84833_CMD_HDLR_STATUS, &val); + if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) || + (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) + break; + DELAY(1000 * 1); + } + if ((idx >= PHY84833_CMDHDLR_WAIT) || + (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { + PMD_DRV_LOG(DEBUG, "FW cmd failed."); + return ELINK_STATUS_ERROR; + } + /* Gather returning data */ + for (idx = 0; idx < argc; idx++) { + elink_cl45_read(sc, phy, MDIO_CTL_DEVAD, + MDIO_84833_CMD_HDLR_DATA1 + idx, + &cmd_args[idx]); + } + elink_cl45_write(sc, phy, MDIO_CTL_DEVAD, + MDIO_84833_CMD_HDLR_STATUS, + PHY84833_STATUS_CMD_CLEAR_COMPLETE); + return ELINK_STATUS_OK; +} + +static elink_status_t elink_84833_pair_swap_cfg(struct elink_phy *phy, + struct elink_params *params, + __rte_unused struct elink_vars + *vars) +{ + uint32_t pair_swap; + uint16_t data[PHY84833_CMDHDLR_MAX_ARGS]; + elink_status_t status; + struct bnx2x_softc *sc = params->sc; + + /* Check for configuration. */ + pair_swap = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port]. + xgbt_phy_cfg)) & + PORT_HW_CFG_RJ45_PAIR_SWAP_MASK; + + if (pair_swap == 0) + return ELINK_STATUS_OK; + + /* Only the second argument is used for this command */ + data[1] = (uint16_t) pair_swap; + + status = elink_84833_cmd_hdlr(phy, params, + PHY84833_CMD_SET_PAIR_SWAP, data, + PHY84833_CMDHDLR_MAX_ARGS); + if (status == ELINK_STATUS_OK) { + PMD_DRV_LOG(DEBUG, "Pairswap OK, val=0x%x", data[1]); + } + + return status; +} + +static uint8_t elink_84833_get_reset_gpios(struct bnx2x_softc *sc, + uint32_t shmem_base_path[], + __rte_unused uint32_t chip_id) +{ + uint32_t reset_pin[2]; + uint32_t idx; + uint8_t reset_gpios; + if (CHIP_IS_E3(sc)) { + /* Assume that these will be GPIOs, not EPIOs. */ + for (idx = 0; idx < 2; idx++) { + /* Map config param to register bit. */ + reset_pin[idx] = REG_RD(sc, shmem_base_path[idx] + + offsetof(struct shmem_region, + dev_info. + port_hw_config[0]. + e3_cmn_pin_cfg)); + reset_pin[idx] = + (reset_pin[idx] & PORT_HW_CFG_E3_PHY_RESET_MASK) >> + PORT_HW_CFG_E3_PHY_RESET_SHIFT; + reset_pin[idx] -= PIN_CFG_GPIO0_P0; + reset_pin[idx] = (1 << reset_pin[idx]); + } + reset_gpios = (uint8_t) (reset_pin[0] | reset_pin[1]); + } else { + /* E2, look from diff place of shmem. */ + for (idx = 0; idx < 2; idx++) { + reset_pin[idx] = REG_RD(sc, shmem_base_path[idx] + + offsetof(struct shmem_region, + dev_info. + port_hw_config[0]. 
+ default_cfg)); + reset_pin[idx] &= PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK; + reset_pin[idx] -= PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0; + reset_pin[idx] >>= PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT; + reset_pin[idx] = (1 << reset_pin[idx]); + } + reset_gpios = (uint8_t) (reset_pin[0] | reset_pin[1]); + } + + return reset_gpios; +} + +static elink_status_t elink_84833_hw_reset_phy(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t reset_gpios; + uint32_t other_shmem_base_addr = REG_RD(sc, params->shmem2_base + + offsetof(struct shmem2_region, + other_shmem_base_addr)); + + uint32_t shmem_base_path[2]; + + /* Work around for 84833 LED failure inside RESET status */ + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_8481_LEGACY_MII_CTRL, + MDIO_AN_REG_8481_MII_CTRL_FORCE_1G); + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_8481_1G_100T_EXT_CTRL, + MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF); + + shmem_base_path[0] = params->shmem_base; + shmem_base_path[1] = other_shmem_base_addr; + + reset_gpios = elink_84833_get_reset_gpios(sc, shmem_base_path, + params->chip_id); + + elink_cb_gpio_mult_write(sc, reset_gpios, + MISC_REGISTERS_GPIO_OUTPUT_LOW); + DELAY(10); + PMD_DRV_LOG(DEBUG, "84833 hw reset on pin values 0x%x", reset_gpios); + + return ELINK_STATUS_OK; +} + +static elink_status_t elink_8483x_disable_eee(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + elink_status_t rc; + uint16_t cmd_args = 0; + + PMD_DRV_LOG(DEBUG, "Don't Advertise 10GBase-T EEE"); + + /* Prevent Phy from working in EEE and advertising it */ + rc = elink_84833_cmd_hdlr(phy, params, + PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); + if (rc != ELINK_STATUS_OK) { + PMD_DRV_LOG(DEBUG, "EEE disable failed."); + return rc; + } + + return elink_eee_disable(phy, params, vars); +} + +static elink_status_t elink_8483x_enable_eee(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + elink_status_t rc; + uint16_t cmd_args = 1; + + rc = elink_84833_cmd_hdlr(phy, params, + PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); + if (rc != ELINK_STATUS_OK) { + PMD_DRV_LOG(DEBUG, "EEE enable failed."); + return rc; + } + + return elink_eee_advertise(phy, params, vars, SHMEM_EEE_10G_ADV); +} + +#define PHY84833_CONSTANT_LATENCY 1193 +static elink_status_t elink_848x3_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port, initialize = 1; + uint16_t val; + uint32_t actual_phy_selection; + uint16_t cmd_args[PHY84833_CMDHDLR_MAX_ARGS]; + elink_status_t rc = ELINK_STATUS_OK; + + DELAY(1000 * 1); + + if (!(CHIP_IS_E1x(sc))) + port = SC_PATH(sc); + else + port = params->port; + + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823) { + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_3, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); + } else { + /* MDIO reset */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x8000); + } + + elink_wait_reset_complete(sc, phy, params); + + /* Wait for GPHY to come out of reset */ + DELAY(1000 * 50); + if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) && + (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834)) { + /* BNX2X84823 requires that XGXS links up first @ 10G for normal + * behavior. 
+ */ + uint16_t temp; + temp = vars->line_speed; + vars->line_speed = ELINK_SPEED_10000; + elink_set_autoneg(¶ms->phy[ELINK_INT_PHY], params, vars, 0); + elink_program_serdes(¶ms->phy[ELINK_INT_PHY], params, vars); + vars->line_speed = temp; + } + + elink_cl45_read(sc, phy, MDIO_CTL_DEVAD, + MDIO_CTL_REG_84823_MEDIA, &val); + val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK | + MDIO_CTL_REG_84823_MEDIA_LINE_MASK | + MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN | + MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK | + MDIO_CTL_REG_84823_MEDIA_FIBER_1G); + + if (CHIP_IS_E3(sc)) { + val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK | + MDIO_CTL_REG_84823_MEDIA_LINE_MASK); + } else { + val |= (MDIO_CTL_REG_84823_CTRL_MAC_XFI | + MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L); + } + + actual_phy_selection = elink_phy_selection(params); + + switch (actual_phy_selection) { + case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: + /* Do nothing. Essentially this is like the priority copper */ + break; + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: + val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER; + break; + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: + val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER; + break; + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: + /* Do nothing here. The first PHY won't be initialized at all */ + break; + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: + val |= MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN; + initialize = 0; + break; + } + if (params->phy[ELINK_EXT_PHY2].req_line_speed == ELINK_SPEED_1000) + val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G; + + elink_cl45_write(sc, phy, MDIO_CTL_DEVAD, + MDIO_CTL_REG_84823_MEDIA, val); + PMD_DRV_LOG(DEBUG, "Multi_phy config = 0x%x, Media control = 0x%x", + params->multi_phy_config, val); + + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834)) { + elink_84833_pair_swap_cfg(phy, params, vars); + + /* Keep AutogrEEEn disabled. */ + cmd_args[0] = 0x0; + cmd_args[1] = 0x0; + cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1; + cmd_args[3] = PHY84833_CONSTANT_LATENCY; + rc = elink_84833_cmd_hdlr(phy, params, + PHY84833_CMD_SET_EEE_MODE, cmd_args, + PHY84833_CMDHDLR_MAX_ARGS); + if (rc != ELINK_STATUS_OK) { + PMD_DRV_LOG(DEBUG, "Cfg AutogrEEEn failed."); + } + } + if (initialize) { + rc = elink_848xx_cmn_config_init(phy, params, vars); + } else { + elink_save_848xx_spirom_version(phy, sc, params->port); + } + /* 84833 PHY has a better feature and doesn't need to support this. */ + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823) { + uint32_t cms_enable = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info. + port_hw_config[params-> + port]. 
+ default_cfg)) & + PORT_HW_CFG_ENABLE_CMS_MASK; + + elink_cl45_read(sc, phy, MDIO_CTL_DEVAD, + MDIO_CTL_REG_84823_USER_CTRL_REG, &val); + if (cms_enable) + val |= MDIO_CTL_REG_84823_USER_CTRL_CMS; + else + val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS; + elink_cl45_write(sc, phy, MDIO_CTL_DEVAD, + MDIO_CTL_REG_84823_USER_CTRL_REG, val); + } + + elink_cl45_read(sc, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_FW_REV, &val); + + /* Configure EEE support */ + if ((val >= MDIO_84833_TOP_CFG_FW_EEE) && + (val != MDIO_84833_TOP_CFG_FW_NO_EEE) && + elink_eee_has_cap(params)) { + rc = elink_eee_initial_config(params, vars, SHMEM_EEE_10G_ADV); + if (rc != ELINK_STATUS_OK) { + PMD_DRV_LOG(DEBUG, "Failed to configure EEE timers"); + elink_8483x_disable_eee(phy, params, vars); + return rc; + } + + if ((phy->req_duplex == DUPLEX_FULL) && + (params->eee_mode & ELINK_EEE_MODE_ADV_LPI) && + (elink_eee_calc_timer(params) || + !(params->eee_mode & ELINK_EEE_MODE_ENABLE_LPI))) + rc = elink_8483x_enable_eee(phy, params, vars); + else + rc = elink_8483x_disable_eee(phy, params, vars); + if (rc != ELINK_STATUS_OK) { + PMD_DRV_LOG(DEBUG, "Failed to set EEE advertisement"); + return rc; + } + } else { + vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK; + } + + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834)) { + /* Bring PHY out of super isolate mode as the final step. */ + elink_cl45_read_and_write(sc, phy, + MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_XGPHY_STRAP1, + (uint16_t) ~ + MDIO_84833_SUPER_ISOLATE); + } + return rc; +} + +static uint8_t elink_848xx_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val, val1, val2; + uint8_t link_up = 0; + + /* Check 10G-BaseT link status */ + /* Check PMD signal ok */ + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, 0xFFFA, &val1); + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL, &val2); + PMD_DRV_LOG(DEBUG, "BNX2X848xx: PMD_SIGNAL 1.a811 = 0x%x", val2); + + /* Check link 10G */ + if (val2 & (1 << 11)) { + vars->line_speed = ELINK_SPEED_10000; + vars->duplex = DUPLEX_FULL; + link_up = 1; + elink_ext_phy_10G_an_resolve(sc, phy, vars); + } else { /* Check Legacy speed link */ + uint16_t legacy_status, legacy_speed, mii_ctrl; + + /* Enable expansion register 0x42 (Operation mode status) */ + elink_cl45_write(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf42); + + /* Get legacy speed operation status */ + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, + &legacy_status); + + PMD_DRV_LOG(DEBUG, "Legacy speed status = 0x%x", legacy_status); + link_up = ((legacy_status & (1 << 11)) == (1 << 11)); + legacy_speed = (legacy_status & (3 << 9)); + if (legacy_speed == (0 << 9)) + vars->line_speed = ELINK_SPEED_10; + else if (legacy_speed == (1 << 9)) + vars->line_speed = ELINK_SPEED_100; + else if (legacy_speed == (2 << 9)) + vars->line_speed = ELINK_SPEED_1000; + else { /* Should not happen: Treat as link down */ + vars->line_speed = 0; + link_up = 0; + } + + if (params->feature_config_flags & + ELINK_FEATURE_CONFIG_IEEE_PHY_TEST) { + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_LEGACY_MII_CTRL, + &mii_ctrl); + /* For IEEE testing, check for a fake link. 
*/ + link_up |= ((mii_ctrl & 0x3040) == 0x40); + } + + if (link_up) { + if (legacy_status & (1 << 8)) + vars->duplex = DUPLEX_FULL; + else + vars->duplex = DUPLEX_HALF; + + PMD_DRV_LOG(DEBUG, + "Link is up in %dMbps, is_duplex_full= %d", + vars->line_speed, + (vars->duplex == DUPLEX_FULL)); + /* Check legacy speed AN resolution */ + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_LEGACY_MII_STATUS, + &val); + if (val & (1 << 5)) + vars->link_status |= + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_LEGACY_AN_EXPANSION, + &val); + if ((val & (1 << 0)) == 0) + vars->link_status |= + LINK_STATUS_PARALLEL_DETECTION_USED; + } + } + if (link_up) { + PMD_DRV_LOG(DEBUG, "BNX2X848x3: link speed is %d", + vars->line_speed); + elink_ext_phy_resolve_fc(phy, params, vars); + + /* Read LP advertised speeds */ + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_CL37_FC_LP, &val); + if (val & (1 << 5)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10THD_CAPABLE; + if (val & (1 << 6)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE; + if (val & (1 << 7)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE; + if (val & (1 << 8)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE; + if (val & (1 << 9)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100T4_CAPABLE; + + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_1000T_STATUS, &val); + + if (val & (1 << 10)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE; + if (val & (1 << 11)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; + + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_MASTER_STATUS, &val); + + if (val & (1 << 11)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + + /* Determine if EEE was negotiated */ + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834)) + elink_eee_an_resolve(phy, params, vars); + } + + return link_up; +} + +static elink_status_t elink_848xx_format_ver(uint32_t raw_ver, uint8_t * str, + uint16_t * len) +{ + elink_status_t status = ELINK_STATUS_OK; + uint32_t spirom_ver; + spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F); + status = elink_format_ver(spirom_ver, str, len); + return status; +} + +static void elink_8481_hw_reset(__rte_unused struct elink_phy *phy, + struct elink_params *params) +{ + elink_cb_gpio_write(params->sc, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, 0); + elink_cb_gpio_write(params->sc, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, 1); +} + +static void elink_8481_link_reset(struct elink_phy *phy, + struct elink_params *params) +{ + elink_cl45_write(params->sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000); + elink_cl45_write(params->sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1); +} + +static void elink_848x3_link_reset(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port; + uint16_t val16; + + if (!(CHIP_IS_E1x(sc))) + port = SC_PATH(sc); + else + port = params->port; + + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823) { + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_3, + MISC_REGISTERS_GPIO_OUTPUT_LOW, port); + } else { + elink_cl45_read(sc, phy, + MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val16); + val16 |= MDIO_84833_SUPER_ISOLATE; + elink_cl45_write(sc, phy, + MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_XGPHY_STRAP1, val16); + } +} + 
+static void elink_848xx_set_link_led(struct elink_phy *phy, + struct elink_params *params, uint8_t mode) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val; + __rte_unused uint8_t port; + + if (!(CHIP_IS_E1x(sc))) + port = SC_PATH(sc); + else + port = params->port; + + switch (mode) { + case ELINK_LED_MODE_OFF: + + PMD_DRV_LOG(DEBUG, "Port 0x%x: LED MODE OFF", port); + + if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY1) { + + /* Set LED masks */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, 0x0); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, 0x0); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, 0x0); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, 0x0); + + } else { + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, 0x0); + } + break; + case ELINK_LED_MODE_FRONT_PANEL_OFF: + + PMD_DRV_LOG(DEBUG, "Port 0x%x: LED MODE FRONT PANEL OFF", port); + + if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY1) { + + /* Set LED masks */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, 0x0); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, 0x0); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, 0x0); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, 0x20); + + } else { + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, 0x0); + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834) { + /* Disable MI_INT interrupt before setting LED4 + * source to constant off. + */ + if (REG_RD(sc, NIG_REG_MASK_INTERRUPT_PORT0 + + params->port * 4) & + ELINK_NIG_MASK_MI_INT) { + params->link_flags |= + ELINK_LINK_FLAGS_INT_DISABLED; + + elink_bits_dis(sc, + NIG_REG_MASK_INTERRUPT_PORT0 + + params->port * 4, + ELINK_NIG_MASK_MI_INT); + } + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_SIGNAL_MASK, + 0x0); + } + } + break; + case ELINK_LED_MODE_ON: + + PMD_DRV_LOG(DEBUG, "Port 0x%x: LED MODE ON", port); + + if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY1) { + /* Set control reg */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, &val); + val &= 0x8000; + val |= 0x2492; + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, val); + + /* Set LED masks */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, 0x0); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, 0x20); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, 0x20); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, 0x0); + } else { + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, 0x20); + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834) { + /* Disable MI_INT interrupt before setting LED4 + * source to constant on. 
+ */ + if (REG_RD(sc, NIG_REG_MASK_INTERRUPT_PORT0 + + params->port * 4) & + ELINK_NIG_MASK_MI_INT) { + params->link_flags |= + ELINK_LINK_FLAGS_INT_DISABLED; + + elink_bits_dis(sc, + NIG_REG_MASK_INTERRUPT_PORT0 + + params->port * 4, + ELINK_NIG_MASK_MI_INT); + } + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_SIGNAL_MASK, + 0x20); + } + } + break; + + case ELINK_LED_MODE_OPER: + + PMD_DRV_LOG(DEBUG, "Port 0x%x: LED MODE OPER", port); + + if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY1) { + + /* Set control reg */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, &val); + + if (!((val & + MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK) + >> + MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) + { + PMD_DRV_LOG(DEBUG, "Setting LINK_SIGNAL"); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + 0xa492); + } + + /* Set LED masks */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, 0x10); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, 0x80); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, 0x98); + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, 0x40); + + } else { + /* EXTPHY2 LED mode indicate that the 100M/1G/10G LED + * sources are all wired through LED1, rather than only + * 10G in other modes. + */ + val = ((params->hw_led_mode << + SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY2) ? 0x98 : 0x80; + + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, val); + + /* Tell LED3 to blink on source */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, &val); + val &= ~(7 << 6); + val |= (1 << 6); /* A83B[8:6]= 1 */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, val); + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834) { + /* Restore LED4 source to external link, + * and re-enable interrupts. + */ + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_SIGNAL_MASK, + 0x40); + if (params->link_flags & + ELINK_LINK_FLAGS_INT_DISABLED) { + elink_link_int_enable(params); + params->link_flags &= + ~ELINK_LINK_FLAGS_INT_DISABLED; + } + } + } + break; + } + + /* This is a workaround for E3+84833 until autoneg + * restart is fixed in f/w + */ + if (CHIP_IS_E3(sc)) { + elink_cl45_read(sc, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_1, &val); + } +} + +/******************************************************************/ +/* 54618SE PHY SECTION */ +/******************************************************************/ +static void elink_54618se_specific_func(struct elink_phy *phy, + struct elink_params *params, + uint32_t action) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t temp; + switch (action) { + case ELINK_PHY_INIT: + /* Configure LED4: set to INTR (0x6). */ + /* Accessing shadow register 0xe. */ + elink_cl22_write(sc, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_LED_SEL2); + elink_cl22_read(sc, phy, MDIO_REG_GPHY_SHADOW, &temp); + temp &= ~(0xf << 4); + temp |= (0x6 << 4); + elink_cl22_write(sc, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_WR_ENA | temp); + /* Configure INTR based on link status change. 
*/ + elink_cl22_write(sc, phy, + MDIO_REG_INTR_MASK, + ~MDIO_REG_INTR_MASK_LINK_STATUS); + break; + } +} + +static elink_status_t elink_54618se_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t port; + uint16_t autoneg_val, an_1000_val, an_10_100_val, fc_val, temp; + uint32_t cfg_pin; + + PMD_DRV_LOG(DEBUG, "54618SE cfg init"); + DELAY(1000 * 1); + + /* This works with E3 only, no need to check the chip + * before determining the port. + */ + port = params->port; + + cfg_pin = (REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port]. + e3_cmn_pin_cfg)) & + PORT_HW_CFG_E3_PHY_RESET_MASK) >> + PORT_HW_CFG_E3_PHY_RESET_SHIFT; + + /* Drive pin high to bring the GPHY out of reset. */ + elink_set_cfg_pin(sc, cfg_pin, 1); + + /* wait for GPHY to reset */ + DELAY(1000 * 50); + + /* reset phy */ + elink_cl22_write(sc, phy, MDIO_PMA_REG_CTRL, 0x8000); + elink_wait_reset_complete(sc, phy, params); + + /* Wait for GPHY to reset */ + DELAY(1000 * 50); + + elink_54618se_specific_func(phy, params, ELINK_PHY_INIT); + /* Flip the signal detect polarity (set 0x1c.0x1e[8]). */ + elink_cl22_write(sc, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_AUTO_DET_MED); + elink_cl22_read(sc, phy, MDIO_REG_GPHY_SHADOW, &temp); + temp |= MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD; + elink_cl22_write(sc, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_WR_ENA | temp); + + /* Set up fc */ + /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ + elink_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); + fc_val = 0; + if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) + fc_val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; + + if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) + fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE; + + /* Read all advertisement */ + elink_cl22_read(sc, phy, 0x09, &an_1000_val); + + elink_cl22_read(sc, phy, 0x04, &an_10_100_val); + + elink_cl22_read(sc, phy, MDIO_PMA_REG_CTRL, &autoneg_val); + + /* Disable forced speed */ + autoneg_val &= + ~((1 << 6) | (1 << 8) | (1 << 9) | (1 << 12) | (1 << 13)); + an_10_100_val &= + ~((1 << 5) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 10) | + (1 << 11)); + + if (((phy->req_line_speed == ELINK_SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || + (phy->req_line_speed == ELINK_SPEED_1000)) { + an_1000_val |= (1 << 8); + autoneg_val |= (1 << 9 | 1 << 12); + if (phy->req_duplex == DUPLEX_FULL) + an_1000_val |= (1 << 9); + PMD_DRV_LOG(DEBUG, "Advertising 1G"); + } else + an_1000_val &= ~((1 << 8) | (1 << 9)); + + elink_cl22_write(sc, phy, 0x09, an_1000_val); + elink_cl22_read(sc, phy, 0x09, &an_1000_val); + + /* Advertise 10/100 link speed */ + if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG) { + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) { + an_10_100_val |= (1 << 5); + autoneg_val |= (1 << 9 | 1 << 12); + PMD_DRV_LOG(DEBUG, "Advertising 10M-HD"); + } + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) { + an_10_100_val |= (1 << 6); + autoneg_val |= (1 << 9 | 1 << 12); + PMD_DRV_LOG(DEBUG, "Advertising 10M-FD"); + } + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) { + an_10_100_val |= (1 << 7); + autoneg_val |= (1 << 9 | 1 << 12); + PMD_DRV_LOG(DEBUG, "Advertising 100M-HD"); + } + if (phy->speed_cap_mask & + 
PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) { + an_10_100_val |= (1 << 8); + autoneg_val |= (1 << 9 | 1 << 12); + PMD_DRV_LOG(DEBUG, "Advertising 100M-FD"); + } + } + + /* Only 10/100 are allowed to work in FORCE mode */ + if (phy->req_line_speed == ELINK_SPEED_100) { + autoneg_val |= (1 << 13); + /* Enabled AUTO-MDIX when autoneg is disabled */ + elink_cl22_write(sc, phy, 0x18, (1 << 15 | 1 << 9 | 7 << 0)); + PMD_DRV_LOG(DEBUG, "Setting 100M force"); + } + if (phy->req_line_speed == ELINK_SPEED_10) { + /* Enabled AUTO-MDIX when autoneg is disabled */ + elink_cl22_write(sc, phy, 0x18, (1 << 15 | 1 << 9 | 7 << 0)); + PMD_DRV_LOG(DEBUG, "Setting 10M force"); + } + + if ((phy->flags & ELINK_FLAGS_EEE) && elink_eee_has_cap(params)) { + elink_status_t rc; + + elink_cl22_write(sc, phy, MDIO_REG_GPHY_EXP_ACCESS, + MDIO_REG_GPHY_EXP_ACCESS_TOP | + MDIO_REG_GPHY_EXP_TOP_2K_BUF); + elink_cl22_read(sc, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, &temp); + temp &= 0xfffe; + elink_cl22_write(sc, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, temp); + + rc = elink_eee_initial_config(params, vars, SHMEM_EEE_1G_ADV); + if (rc != ELINK_STATUS_OK) { + PMD_DRV_LOG(DEBUG, "Failed to configure EEE timers"); + elink_eee_disable(phy, params, vars); + } else if ((params->eee_mode & ELINK_EEE_MODE_ADV_LPI) && + (phy->req_duplex == DUPLEX_FULL) && + (elink_eee_calc_timer(params) || + !(params->eee_mode & ELINK_EEE_MODE_ENABLE_LPI))) { + /* Need to advertise EEE only when requested, + * and either no LPI assertion was requested, + * or it was requested and a valid timer was set. + * Also notice full duplex is required for EEE. + */ + elink_eee_advertise(phy, params, vars, + SHMEM_EEE_1G_ADV); + } else { + PMD_DRV_LOG(DEBUG, "Don't Advertise 1GBase-T EEE"); + elink_eee_disable(phy, params, vars); + } + } else { + vars->eee_status &= ~SHMEM_EEE_1G_ADV << + SHMEM_EEE_SUPPORTED_SHIFT; + + if (phy->flags & ELINK_FLAGS_EEE) { + /* Handle legacy auto-grEEEn */ + if (params->feature_config_flags & + ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED) { + temp = 6; + PMD_DRV_LOG(DEBUG, "Enabling Auto-GrEEEn"); + } else { + temp = 0; + PMD_DRV_LOG(DEBUG, "Don't Adv. EEE"); + } + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_EEE_ADV, temp); + } + } + + elink_cl22_write(sc, phy, 0x04, an_10_100_val | fc_val); + + if (phy->req_duplex == DUPLEX_FULL) + autoneg_val |= (1 << 8); + + elink_cl22_write(sc, phy, MDIO_PMA_REG_CTRL, autoneg_val); + + return ELINK_STATUS_OK; +} + +static void elink_5461x_set_link_led(struct elink_phy *phy, + struct elink_params *params, uint8_t mode) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t temp; + + elink_cl22_write(sc, phy, + MDIO_REG_GPHY_SHADOW, MDIO_REG_GPHY_SHADOW_LED_SEL1); + elink_cl22_read(sc, phy, MDIO_REG_GPHY_SHADOW, &temp); + temp &= 0xff00; + + PMD_DRV_LOG(DEBUG, "54618x set link led (mode=%x)", mode); + switch (mode) { + case ELINK_LED_MODE_FRONT_PANEL_OFF: + case ELINK_LED_MODE_OFF: + temp |= 0x00ee; + break; + case ELINK_LED_MODE_OPER: + temp |= 0x0001; + break; + case ELINK_LED_MODE_ON: + temp |= 0x00ff; + break; + default: + break; + } + elink_cl22_write(sc, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_WR_ENA | temp); + return; +} + +static void elink_54618se_link_reset(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t cfg_pin; + uint8_t port; + + /* In case of no EPIO routed to reset the GPHY, put it + * in low power mode. 
+ */ + elink_cl22_write(sc, phy, MDIO_PMA_REG_CTRL, 0x800); + /* This works with E3 only, no need to check the chip + * before determining the port. + */ + port = params->port; + cfg_pin = (REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port]. + e3_cmn_pin_cfg)) & + PORT_HW_CFG_E3_PHY_RESET_MASK) >> + PORT_HW_CFG_E3_PHY_RESET_SHIFT; + + /* Drive pin low to put GPHY in reset. */ + elink_set_cfg_pin(sc, cfg_pin, 0); +} + +static uint8_t elink_54618se_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val; + uint8_t link_up = 0; + uint16_t legacy_status, legacy_speed; + + /* Get speed operation status */ + elink_cl22_read(sc, phy, MDIO_REG_GPHY_AUX_STATUS, &legacy_status); + PMD_DRV_LOG(DEBUG, "54618SE read_status: 0x%x", legacy_status); + + /* Read status to clear the PHY interrupt. */ + elink_cl22_read(sc, phy, MDIO_REG_INTR_STATUS, &val); + + link_up = ((legacy_status & (1 << 2)) == (1 << 2)); + + if (link_up) { + legacy_speed = (legacy_status & (7 << 8)); + if (legacy_speed == (7 << 8)) { + vars->line_speed = ELINK_SPEED_1000; + vars->duplex = DUPLEX_FULL; + } else if (legacy_speed == (6 << 8)) { + vars->line_speed = ELINK_SPEED_1000; + vars->duplex = DUPLEX_HALF; + } else if (legacy_speed == (5 << 8)) { + vars->line_speed = ELINK_SPEED_100; + vars->duplex = DUPLEX_FULL; + } + /* Omitting 100Base-T4 for now */ + else if (legacy_speed == (3 << 8)) { + vars->line_speed = ELINK_SPEED_100; + vars->duplex = DUPLEX_HALF; + } else if (legacy_speed == (2 << 8)) { + vars->line_speed = ELINK_SPEED_10; + vars->duplex = DUPLEX_FULL; + } else if (legacy_speed == (1 << 8)) { + vars->line_speed = ELINK_SPEED_10; + vars->duplex = DUPLEX_HALF; + } else /* Should not happen */ + vars->line_speed = 0; + + PMD_DRV_LOG(DEBUG, + "Link is up in %dMbps, is_duplex_full= %d", + vars->line_speed, (vars->duplex == DUPLEX_FULL)); + + /* Check legacy speed AN resolution */ + elink_cl22_read(sc, phy, 0x01, &val); + if (val & (1 << 5)) + vars->link_status |= + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; + elink_cl22_read(sc, phy, 0x06, &val); + if ((val & (1 << 0)) == 0) + vars->link_status |= + LINK_STATUS_PARALLEL_DETECTION_USED; + + PMD_DRV_LOG(DEBUG, "BNX2X54618SE: link speed is %d", + vars->line_speed); + + elink_ext_phy_resolve_fc(phy, params, vars); + + if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { + /* Report LP advertised speeds */ + elink_cl22_read(sc, phy, 0x5, &val); + + if (val & (1 << 5)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10THD_CAPABLE; + if (val & (1 << 6)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE; + if (val & (1 << 7)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE; + if (val & (1 << 8)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE; + if (val & (1 << 9)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100T4_CAPABLE; + + elink_cl22_read(sc, phy, 0xa, &val); + if (val & (1 << 10)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE; + if (val & (1 << 11)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; + + if ((phy->flags & ELINK_FLAGS_EEE) && + elink_eee_has_cap(params)) + elink_eee_an_resolve(phy, params, vars); + } + } + return link_up; +} + +static void elink_54618se_config_loopback(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t val; + uint32_t umac_base = params->port ? 
GRCBASE_UMAC1 : GRCBASE_UMAC0; + + PMD_DRV_LOG(DEBUG, "2PMA/PMD ext_phy_loopback: 54618se"); + + /* Enable master/slave manual mmode and set to master */ + /* mii write 9 [bits set 11 12] */ + elink_cl22_write(sc, phy, 0x09, 3 << 11); + + /* forced 1G and disable autoneg */ + /* set val [mii read 0] */ + /* set val [expr $val & [bits clear 6 12 13]] */ + /* set val [expr $val | [bits set 6 8]] */ + /* mii write 0 $val */ + elink_cl22_read(sc, phy, 0x00, &val); + val &= ~((1 << 6) | (1 << 12) | (1 << 13)); + val |= (1 << 6) | (1 << 8); + elink_cl22_write(sc, phy, 0x00, val); + + /* Set external loopback and Tx using 6dB coding */ + /* mii write 0x18 7 */ + /* set val [mii read 0x18] */ + /* mii write 0x18 [expr $val | [bits set 10 15]] */ + elink_cl22_write(sc, phy, 0x18, 7); + elink_cl22_read(sc, phy, 0x18, &val); + elink_cl22_write(sc, phy, 0x18, val | (1 << 10) | (1 << 15)); + + /* This register opens the gate for the UMAC despite its name */ + REG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + params->port * 4, 1); + + /* Maximum Frame Length (RW). Defines a 14-Bit maximum frame + * length used by the MAC receive logic to check frames. + */ + REG_WR(sc, umac_base + UMAC_REG_MAXFR, 0x2710); +} + +/******************************************************************/ +/* SFX7101 PHY SECTION */ +/******************************************************************/ +static void elink_7101_config_loopback(struct elink_phy *phy, + struct elink_params *params) +{ + struct bnx2x_softc *sc = params->sc; + /* SFX7101_XGXS_TEST1 */ + elink_cl45_write(sc, phy, + MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100); +} + +static elink_status_t elink_7101_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + uint16_t fw_ver1, fw_ver2, val; + struct bnx2x_softc *sc = params->sc; + PMD_DRV_LOG(DEBUG, "Setting the SFX7101 LASI indication"); + + /* Restore normal power mode */ + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + /* HW reset */ + elink_ext_phy_hw_reset(sc, params->port); + elink_wait_reset_complete(sc, phy, params); + + elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x1); + PMD_DRV_LOG(DEBUG, "Setting the SFX7101 LED to blink on traffic"); + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1 << 3)); + + elink_ext_phy_set_pause(params, phy, vars); + /* Restart autoneg */ + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, &val); + val |= 0x200; + elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, val); + + /* Save spirom version */ + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER1, &fw_ver1); + + elink_cl45_read(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2); + elink_save_spirom_version(sc, params->port, + (uint32_t) (fw_ver1 << 16 | fw_ver2), + phy->ver_addr); + return ELINK_STATUS_OK; +} + +static uint8_t elink_7101_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t link_up; + uint16_t val1, val2; + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2); + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); + PMD_DRV_LOG(DEBUG, "10G-base-T LASI status 0x%x->0x%x", val2, val1); + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2); + elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1); + PMD_DRV_LOG(DEBUG, "10G-base-T PMA status 0x%x->0x%x", val2, val1); + link_up = ((val1 & 
4) == 4); + /* If link is up print the AN outcome of the SFX7101 PHY */ + if (link_up) { + elink_cl45_read(sc, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS, + &val2); + vars->line_speed = ELINK_SPEED_10000; + vars->duplex = DUPLEX_FULL; + PMD_DRV_LOG(DEBUG, "SFX7101 AN status 0x%x->Master=%x", + val2, (val2 & (1 << 14))); + elink_ext_phy_10G_an_resolve(sc, phy, vars); + elink_ext_phy_resolve_fc(phy, params, vars); + + /* Read LP advertised speeds */ + if (val2 & (1 << 11)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + } + return link_up; +} + +static elink_status_t elink_7101_format_ver(uint32_t spirom_ver, uint8_t * str, + uint16_t * len) +{ + if (*len < 5) + return ELINK_STATUS_ERROR; + str[0] = (spirom_ver & 0xFF); + str[1] = (spirom_ver & 0xFF00) >> 8; + str[2] = (spirom_ver & 0xFF0000) >> 16; + str[3] = (spirom_ver & 0xFF000000) >> 24; + str[4] = '\0'; + *len -= 5; + return ELINK_STATUS_OK; +} + +static void elink_7101_hw_reset(__rte_unused struct elink_phy *phy, + struct elink_params *params) +{ + /* Low power mode is controlled by GPIO 2 */ + elink_cb_gpio_write(params->sc, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); + /* The PHY reset is controlled by GPIO 1 */ + elink_cb_gpio_write(params->sc, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); +} + +static void elink_7101_set_link_led(struct elink_phy *phy, + struct elink_params *params, uint8_t mode) +{ + uint16_t val = 0; + struct bnx2x_softc *sc = params->sc; + switch (mode) { + case ELINK_LED_MODE_FRONT_PANEL_OFF: + case ELINK_LED_MODE_OFF: + val = 2; + break; + case ELINK_LED_MODE_ON: + val = 1; + break; + case ELINK_LED_MODE_OPER: + val = 0; + break; + } + elink_cl45_write(sc, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LINK_LED_CNTL, val); +} + +/******************************************************************/ +/* STATIC PHY DECLARATION */ +/******************************************************************/ + +static const struct elink_phy phy_null = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN, + .addr = 0, + .def_md_devad = 0, + .flags = ELINK_FLAGS_INIT_XGXS_FIRST, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = 0, + .media_type = ELINK_ETH_PHY_NOT_PRESENT, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t) NULL, + .read_status = (read_status_t) NULL, + .link_reset = (link_reset_t) NULL, + .config_loopback = (config_loopback_t) NULL, + .format_fw_ver = (format_fw_ver_t) NULL, + .hw_reset = (hw_reset_t) NULL, + .set_link_led = (set_link_led_t) NULL, + .phy_specific_func = (phy_specific_func_t) NULL +}; + +static const struct elink_phy phy_serdes = { + .type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT, + .addr = 0xff, + .def_md_devad = 0, + .flags = 0, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10baseT_Half | + ELINK_SUPPORTED_10baseT_Full | + ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_2500baseX_Full | + ELINK_SUPPORTED_TP | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + 
.config_init = (config_init_t) elink_xgxs_config_init, + .read_status = (read_status_t) elink_link_settings_status, + .link_reset = (link_reset_t) elink_int_link_reset, + .config_loopback = (config_loopback_t) NULL, + .format_fw_ver = (format_fw_ver_t) NULL, + .hw_reset = (hw_reset_t) NULL, + .set_link_led = (set_link_led_t) NULL, + .phy_specific_func = (phy_specific_func_t) NULL +}; + +static const struct elink_phy phy_xgxs = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, + .addr = 0xff, + .def_md_devad = 0, + .flags = 0, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10baseT_Half | + ELINK_SUPPORTED_10baseT_Full | + ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_2500baseX_Full | + ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_CX4, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t) elink_xgxs_config_init, + .read_status = (read_status_t) elink_link_settings_status, + .link_reset = (link_reset_t) elink_int_link_reset, + .config_loopback = (config_loopback_t) elink_set_xgxs_loopback, + .format_fw_ver = (format_fw_ver_t) NULL, + .hw_reset = (hw_reset_t) NULL, + .set_link_led = (set_link_led_t) NULL, + .phy_specific_func = (phy_specific_func_t) elink_xgxs_specific_func +}; + +static const struct elink_phy phy_warpcore = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, + .addr = 0xff, + .def_md_devad = 0, + .flags = ELINK_FLAGS_TX_ERROR_CHECK, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10baseT_Half | + ELINK_SUPPORTED_10baseT_Full | + ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_20000baseKR2_Full | + ELINK_SUPPORTED_20000baseMLD2_Full | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_UNSPECIFIED, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + /* req_duplex = */ 0, + /* rsrv = */ 0, + .config_init = (config_init_t) elink_warpcore_config_init, + .read_status = (read_status_t) elink_warpcore_read_status, + .link_reset = (link_reset_t) elink_warpcore_link_reset, + .config_loopback = (config_loopback_t) elink_set_warpcore_loopback, + .format_fw_ver = (format_fw_ver_t) NULL, + .hw_reset = (hw_reset_t) elink_warpcore_hw_reset, + .set_link_led = (set_link_led_t) NULL, + .phy_specific_func = (phy_specific_func_t) NULL +}; + +static const struct elink_phy phy_7101 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101, + .addr = 0xff, + .def_md_devad = 0, + .flags = ELINK_FLAGS_FAN_FAILURE_DET_REQ, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_TP | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = 
(config_init_t) elink_7101_config_init, + .read_status = (read_status_t) elink_7101_read_status, + .link_reset = (link_reset_t) elink_common_ext_link_reset, + .config_loopback = (config_loopback_t) elink_7101_config_loopback, + .format_fw_ver = (format_fw_ver_t) elink_7101_format_ver, + .hw_reset = (hw_reset_t) elink_7101_hw_reset, + .set_link_led = (set_link_led_t) elink_7101_set_link_led, + .phy_specific_func = (phy_specific_func_t) NULL +}; + +static const struct elink_phy phy_8073 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8073, + .addr = 0xff, + .def_md_devad = 0, + .flags = 0, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_2500baseX_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_KR, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t) elink_8073_config_init, + .read_status = (read_status_t) elink_8073_read_status, + .link_reset = (link_reset_t) elink_8073_link_reset, + .config_loopback = (config_loopback_t) NULL, + .format_fw_ver = (format_fw_ver_t) elink_format_ver, + .hw_reset = (hw_reset_t) NULL, + .set_link_led = (set_link_led_t) NULL, + .phy_specific_func = (phy_specific_func_t) elink_8073_specific_func +}; + +static const struct elink_phy phy_8705 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8705, + .addr = 0xff, + .def_md_devad = 0, + .flags = ELINK_FLAGS_INIT_XGXS_FIRST, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_XFP_FIBER, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t) elink_8705_config_init, + .read_status = (read_status_t) elink_8705_read_status, + .link_reset = (link_reset_t) elink_common_ext_link_reset, + .config_loopback = (config_loopback_t) NULL, + .format_fw_ver = (format_fw_ver_t) elink_null_format_ver, + .hw_reset = (hw_reset_t) NULL, + .set_link_led = (set_link_led_t) NULL, + .phy_specific_func = (phy_specific_func_t) NULL +}; + +static const struct elink_phy phy_8706 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8706, + .addr = 0xff, + .def_md_devad = 0, + .flags = ELINK_FLAGS_INIT_XGXS_FIRST, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_SFPP_10G_FIBER, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t) elink_8706_config_init, + .read_status = (read_status_t) elink_8706_read_status, + .link_reset = (link_reset_t) elink_common_ext_link_reset, + .config_loopback = (config_loopback_t) NULL, + .format_fw_ver = (format_fw_ver_t) elink_format_ver, + .hw_reset = (hw_reset_t) NULL, + .set_link_led = (set_link_led_t) NULL, + .phy_specific_func = (phy_specific_func_t) NULL +}; + 
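+/* The static elink_phy structures in this section are read-only templates: + * elink_populate_int_phy() and elink_populate_ext_phy() below copy the + * matching template into params->phy[] and then fill in the board-specific + * fields (addr, mdio_ctrl, ver_addr and the rx/tx preemphasis values) from + * the shmem port configuration. + */ +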
+static const struct elink_phy phy_8726 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726, + .addr = 0xff, + .def_md_devad = 0, + .flags = (ELINK_FLAGS_INIT_XGXS_FIRST | ELINK_FLAGS_TX_ERROR_CHECK), + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_NOT_PRESENT, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t) elink_8726_config_init, + .read_status = (read_status_t) elink_8726_read_status, + .link_reset = (link_reset_t) elink_8726_link_reset, + .config_loopback = (config_loopback_t) elink_8726_config_loopback, + .format_fw_ver = (format_fw_ver_t) elink_format_ver, + .hw_reset = (hw_reset_t) NULL, + .set_link_led = (set_link_led_t) NULL, + .phy_specific_func = (phy_specific_func_t) NULL +}; + +static const struct elink_phy phy_8727 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727, + .addr = 0xff, + .def_md_devad = 0, + .flags = (ELINK_FLAGS_FAN_FAILURE_DET_REQ | ELINK_FLAGS_TX_ERROR_CHECK), + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_NOT_PRESENT, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t) elink_8727_config_init, + .read_status = (read_status_t) elink_8727_read_status, + .link_reset = (link_reset_t) elink_8727_link_reset, + .config_loopback = (config_loopback_t) NULL, + .format_fw_ver = (format_fw_ver_t) elink_format_ver, + .hw_reset = (hw_reset_t) elink_8727_hw_reset, + .set_link_led = (set_link_led_t) elink_8727_set_link_led, + .phy_specific_func = (phy_specific_func_t) elink_8727_specific_func +}; + +static const struct elink_phy phy_8481 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8481, + .addr = 0xff, + .def_md_devad = 0, + .flags = ELINK_FLAGS_FAN_FAILURE_DET_REQ | + ELINK_FLAGS_REARM_LATCH_SIGNAL, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10baseT_Half | + ELINK_SUPPORTED_10baseT_Full | + ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_TP | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t) elink_8481_config_init, + .read_status = (read_status_t) elink_848xx_read_status, + .link_reset = (link_reset_t) elink_8481_link_reset, + .config_loopback = (config_loopback_t) NULL, + .format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver, + .hw_reset = (hw_reset_t) elink_8481_hw_reset, + .set_link_led = (set_link_led_t) elink_848xx_set_link_led, + .phy_specific_func = (phy_specific_func_t) NULL +}; + +static const struct elink_phy phy_84823 = { + .type = 
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823, + .addr = 0xff, + .def_md_devad = 0, + .flags = (ELINK_FLAGS_FAN_FAILURE_DET_REQ | + ELINK_FLAGS_REARM_LATCH_SIGNAL | ELINK_FLAGS_TX_ERROR_CHECK), + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10baseT_Half | + ELINK_SUPPORTED_10baseT_Full | + ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_TP | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t) elink_848x3_config_init, + .read_status = (read_status_t) elink_848xx_read_status, + .link_reset = (link_reset_t) elink_848x3_link_reset, + .config_loopback = (config_loopback_t) NULL, + .format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver, + .hw_reset = (hw_reset_t) NULL, + .set_link_led = (set_link_led_t) elink_848xx_set_link_led, + .phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func +}; + +static const struct elink_phy phy_84833 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833, + .addr = 0xff, + .def_md_devad = 0, + .flags = (ELINK_FLAGS_FAN_FAILURE_DET_REQ | + ELINK_FLAGS_REARM_LATCH_SIGNAL | + ELINK_FLAGS_TX_ERROR_CHECK | ELINK_FLAGS_TEMPERATURE), + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_TP | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t) elink_848x3_config_init, + .read_status = (read_status_t) elink_848xx_read_status, + .link_reset = (link_reset_t) elink_848x3_link_reset, + .config_loopback = (config_loopback_t) NULL, + .format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver, + .hw_reset = (hw_reset_t) elink_84833_hw_reset_phy, + .set_link_led = (set_link_led_t) elink_848xx_set_link_led, + .phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func +}; + +static const struct elink_phy phy_84834 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834, + .addr = 0xff, + .def_md_devad = 0, + .flags = ELINK_FLAGS_FAN_FAILURE_DET_REQ | + ELINK_FLAGS_REARM_LATCH_SIGNAL, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_TP | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t) elink_848x3_config_init, + .read_status = (read_status_t) elink_848xx_read_status, + .link_reset = (link_reset_t) elink_848x3_link_reset, + .config_loopback = (config_loopback_t) NULL, + .format_fw_ver = (format_fw_ver_t) 
elink_848xx_format_ver, + .hw_reset = (hw_reset_t) elink_84833_hw_reset_phy, + .set_link_led = (set_link_led_t) elink_848xx_set_link_led, + .phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func +}; + +static const struct elink_phy phy_54618se = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE, + .addr = 0xff, + .def_md_devad = 0, + .flags = ELINK_FLAGS_INIT_XGXS_FIRST, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (ELINK_SUPPORTED_10baseT_Half | + ELINK_SUPPORTED_10baseT_Full | + ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_TP | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause), + .media_type = ELINK_ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + /* req_duplex = */ 0, + /* rsrv = */ 0, + .config_init = (config_init_t) elink_54618se_config_init, + .read_status = (read_status_t) elink_54618se_read_status, + .link_reset = (link_reset_t) elink_54618se_link_reset, + .config_loopback = (config_loopback_t) elink_54618se_config_loopback, + .format_fw_ver = (format_fw_ver_t) NULL, + .hw_reset = (hw_reset_t) NULL, + .set_link_led = (set_link_led_t) elink_5461x_set_link_led, + .phy_specific_func = (phy_specific_func_t) elink_54618se_specific_func +}; + +/*****************************************************************/ +/* */ +/* Populate the phy according. Main function: elink_populate_phy */ +/* */ +/*****************************************************************/ + +static void elink_populate_preemphasis(struct bnx2x_softc *sc, + uint32_t shmem_base, + struct elink_phy *phy, uint8_t port, + uint8_t phy_index) +{ + /* Get the 4 lanes xgxs config rx and tx */ + uint32_t rx = 0, tx = 0, i; + for (i = 0; i < 2; i++) { + /* INT_PHY and ELINK_EXT_PHY1 share the same value location in + * the shmem. When num_phys is greater than 1, than this value + * applies only to ELINK_EXT_PHY1 + */ + if (phy_index == ELINK_INT_PHY || phy_index == ELINK_EXT_PHY1) { + rx = REG_RD(sc, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port]. + xgxs_config_rx[i << 1])); + + tx = REG_RD(sc, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port]. + xgxs_config_tx[i << 1])); + } else { + rx = REG_RD(sc, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port]. + xgxs_config2_rx[i << 1])); + + tx = REG_RD(sc, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port]. + xgxs_config2_rx[i << 1])); + } + + phy->rx_preemphasis[i << 1] = ((rx >> 16) & 0xffff); + phy->rx_preemphasis[(i << 1) + 1] = (rx & 0xffff); + + phy->tx_preemphasis[i << 1] = ((tx >> 16) & 0xffff); + phy->tx_preemphasis[(i << 1) + 1] = (tx & 0xffff); + } +} + +static uint32_t elink_get_ext_phy_config(struct bnx2x_softc *sc, + uint32_t shmem_base, uint8_t phy_index, + uint8_t port) +{ + uint32_t ext_phy_config = 0; + switch (phy_index) { + case ELINK_EXT_PHY1: + ext_phy_config = REG_RD(sc, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port]. + external_phy_config)); + break; + case ELINK_EXT_PHY2: + ext_phy_config = REG_RD(sc, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port]. 
+ external_phy_config2)); + break; + default: + PMD_DRV_LOG(DEBUG, "Invalid phy_index %d", phy_index); + return ELINK_STATUS_ERROR; + } + + return ext_phy_config; +} + +static elink_status_t elink_populate_int_phy(struct bnx2x_softc *sc, + uint32_t shmem_base, uint8_t port, + struct elink_phy *phy) +{ + uint32_t phy_addr; + __rte_unused uint32_t chip_id; + uint32_t switch_cfg = (REG_RD(sc, shmem_base + + offsetof(struct shmem_region, + dev_info. + port_feature_config[port]. + link_config)) & + PORT_FEATURE_CONNECTED_SWITCH_MASK); + chip_id = + (REG_RD(sc, MISC_REG_CHIP_NUM) << 16) | + ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12); + + PMD_DRV_LOG(DEBUG, ":chip_id = 0x%x", chip_id); + if (USES_WARPCORE(sc)) { + uint32_t serdes_net_if; + phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR); + *phy = phy_warpcore; + if (REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR) == 0x3) + phy->flags |= ELINK_FLAGS_4_PORT_MODE; + else + phy->flags &= ~ELINK_FLAGS_4_PORT_MODE; + /* Check Dual mode */ + serdes_net_if = (REG_RD(sc, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port]. + default_cfg)) & + PORT_HW_CFG_NET_SERDES_IF_MASK); + /* Set the appropriate supported and flags indications per + * interface type of the chip + */ + switch (serdes_net_if) { + case PORT_HW_CFG_NET_SERDES_IF_SGMII: + phy->supported &= (ELINK_SUPPORTED_10baseT_Half | + ELINK_SUPPORTED_10baseT_Full | + ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause); + phy->media_type = ELINK_ETH_PHY_BASE_T; + break; + case PORT_HW_CFG_NET_SERDES_IF_XFI: + phy->supported &= (ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause); + phy->media_type = ELINK_ETH_PHY_XFP_FIBER; + break; + case PORT_HW_CFG_NET_SERDES_IF_SFI: + phy->supported &= (ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause); + phy->media_type = ELINK_ETH_PHY_SFPP_10G_FIBER; + break; + case PORT_HW_CFG_NET_SERDES_IF_KR: + phy->media_type = ELINK_ETH_PHY_KR; + phy->supported &= (ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause); + break; + case PORT_HW_CFG_NET_SERDES_IF_DXGXS: + phy->media_type = ELINK_ETH_PHY_KR; + phy->flags |= ELINK_FLAGS_WC_DUAL_MODE; + phy->supported &= (ELINK_SUPPORTED_20000baseMLD2_Full | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause); + break; + case PORT_HW_CFG_NET_SERDES_IF_KR2: + phy->media_type = ELINK_ETH_PHY_KR; + phy->flags |= ELINK_FLAGS_WC_DUAL_MODE; + phy->supported &= (ELINK_SUPPORTED_20000baseKR2_Full | + ELINK_SUPPORTED_10000baseT_Full | + ELINK_SUPPORTED_1000baseT_Full | + ELINK_SUPPORTED_Autoneg | + ELINK_SUPPORTED_FIBRE | + ELINK_SUPPORTED_Pause | + ELINK_SUPPORTED_Asym_Pause); + phy->flags &= ~ELINK_FLAGS_TX_ERROR_CHECK; + break; + default: + PMD_DRV_LOG(DEBUG, "Unknown WC interface type 0x%x", + serdes_net_if); + break; + } + + /* Enable MDC/MDIO work-around for E3 A0 since free running MDC + * was not set as expected. 
For B0, ECO will be enabled so there + * won't be an issue there + */ + if (CHIP_REV(sc) == CHIP_REV_Ax) + phy->flags |= ELINK_FLAGS_MDC_MDIO_WA; + else + phy->flags |= ELINK_FLAGS_MDC_MDIO_WA_B0; + } else { + switch (switch_cfg) { + case ELINK_SWITCH_CFG_1G: + phy_addr = REG_RD(sc, + NIG_REG_SERDES0_CTRL_PHY_ADDR + + port * 0x10); + *phy = phy_serdes; + break; + case ELINK_SWITCH_CFG_10G: + phy_addr = REG_RD(sc, + NIG_REG_XGXS0_CTRL_PHY_ADDR + + port * 0x18); + *phy = phy_xgxs; + break; + default: + PMD_DRV_LOG(DEBUG, "Invalid switch_cfg"); + return ELINK_STATUS_ERROR; + } + } + phy->addr = (uint8_t) phy_addr; + phy->mdio_ctrl = elink_get_emac_base(sc, + SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH, + port); + if (CHIP_IS_E2(sc)) + phy->def_md_devad = ELINK_E2_DEFAULT_PHY_DEV_ADDR; + else + phy->def_md_devad = ELINK_DEFAULT_PHY_DEV_ADDR; + + PMD_DRV_LOG(DEBUG, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x", + port, phy->addr, phy->mdio_ctrl); + + elink_populate_preemphasis(sc, shmem_base, phy, port, ELINK_INT_PHY); + return ELINK_STATUS_OK; +} + +static elink_status_t elink_populate_ext_phy(struct bnx2x_softc *sc, + uint8_t phy_index, + uint32_t shmem_base, + uint32_t shmem2_base, + uint8_t port, + struct elink_phy *phy) +{ + uint32_t ext_phy_config, phy_type, config2; + uint32_t mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH; + ext_phy_config = elink_get_ext_phy_config(sc, shmem_base, + phy_index, port); + phy_type = ELINK_XGXS_EXT_PHY_TYPE(ext_phy_config); + /* Select the phy type */ + switch (phy_type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8073: + mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED; + *phy = phy_8073; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8705: + *phy = phy_8705; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8706: + *phy = phy_8706; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726: + mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1; + *phy = phy_8726; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727_NOC: + /* BNX2X8727_NOC => BNX2X8727 no over current */ + mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1; + *phy = phy_8727; + phy->flags |= ELINK_FLAGS_NOC; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727: + mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1; + *phy = phy_8727; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8481: + *phy = phy_8481; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84823: + *phy = phy_84823; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833: + *phy = phy_84833; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834: + *phy = phy_84834; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54616: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE: + *phy = phy_54618se; + if (phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X54618SE) + phy->flags |= ELINK_FLAGS_EEE; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: + *phy = phy_7101; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: + *phy = phy_null; + return ELINK_STATUS_ERROR; + default: + *phy = phy_null; + /* In case external PHY wasn't found */ + if ((phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) && + (phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) + return ELINK_STATUS_ERROR; + return ELINK_STATUS_OK; + } + + phy->addr = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config); + elink_populate_preemphasis(sc, shmem_base, phy, port, phy_index); + + /* The shmem address of the phy version is located on different + * structures. 
In case this structure is too old, do not set + * the address + */ + config2 = REG_RD(sc, shmem_base + offsetof(struct shmem_region, + dev_info.shared_hw_config. + config2)); + if (phy_index == ELINK_EXT_PHY1) { + phy->ver_addr = shmem_base + offsetof(struct shmem_region, + port_mb[port]. + ext_phy_fw_version); + + /* Check specific mdc mdio settings */ + if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK) + mdc_mdio_access = config2 & + SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK; + } else { + uint32_t size = REG_RD(sc, shmem2_base); + + if (size > offsetof(struct shmem2_region, ext_phy_fw_version2)) { + phy->ver_addr = shmem2_base + + offsetof(struct shmem2_region, + ext_phy_fw_version2[port]); + } + /* Check specific mdc mdio settings */ + if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) + mdc_mdio_access = (config2 & + SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) + >> (SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT - + SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT); + } + phy->mdio_ctrl = elink_get_emac_base(sc, mdc_mdio_access, port); + + if (((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834)) && + (phy->ver_addr)) { + /* Remove 100Mb link supported for BNX2X84833/4 when phy fw + * version lower than or equal to 1.39 + */ + uint32_t raw_ver = REG_RD(sc, phy->ver_addr); + if (((raw_ver & 0x7F) <= 39) && (((raw_ver & 0xF80) >> 7) <= 1)) + phy->supported &= ~(ELINK_SUPPORTED_100baseT_Half | + ELINK_SUPPORTED_100baseT_Full); + } + + PMD_DRV_LOG(DEBUG, "phy_type 0x%x port %d found in index %d", + phy_type, port, phy_index); + PMD_DRV_LOG(DEBUG, " addr=0x%x, mdio_ctl=0x%x", + phy->addr, phy->mdio_ctrl); + return ELINK_STATUS_OK; +} + +static elink_status_t elink_populate_phy(struct bnx2x_softc *sc, + uint8_t phy_index, uint32_t shmem_base, + uint32_t shmem2_base, uint8_t port, + struct elink_phy *phy) +{ + elink_status_t status = ELINK_STATUS_OK; + phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN; + if (phy_index == ELINK_INT_PHY) + return elink_populate_int_phy(sc, shmem_base, port, phy); + status = elink_populate_ext_phy(sc, phy_index, shmem_base, shmem2_base, + port, phy); + return status; +} + +static void elink_phy_def_cfg(struct elink_params *params, + struct elink_phy *phy, uint8_t phy_index) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t link_config; + /* Populate the default phy configuration for MF mode */ + if (phy_index == ELINK_EXT_PHY2) { + link_config = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_feature_config + [params->port].link_config2)); + phy->speed_cap_mask = + REG_RD(sc, + params->shmem_base + offsetof(struct shmem_region, + dev_info.port_hw_config + [params->port]. + speed_capability_mask2)); + } else { + link_config = REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_feature_config + [params->port].link_config)); + phy->speed_cap_mask = + REG_RD(sc, + params->shmem_base + offsetof(struct shmem_region, + dev_info.port_hw_config + [params->port]. 
+ speed_capability_mask)); + } + + PMD_DRV_LOG(DEBUG, + "Default config phy idx %x cfg 0x%x speed_cap_mask 0x%x", + phy_index, link_config, phy->speed_cap_mask); + + phy->req_duplex = DUPLEX_FULL; + switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { + case PORT_FEATURE_LINK_SPEED_10M_HALF: + phy->req_duplex = DUPLEX_HALF; + case PORT_FEATURE_LINK_SPEED_10M_FULL: + phy->req_line_speed = ELINK_SPEED_10; + break; + case PORT_FEATURE_LINK_SPEED_100M_HALF: + phy->req_duplex = DUPLEX_HALF; + case PORT_FEATURE_LINK_SPEED_100M_FULL: + phy->req_line_speed = ELINK_SPEED_100; + break; + case PORT_FEATURE_LINK_SPEED_1G: + phy->req_line_speed = ELINK_SPEED_1000; + break; + case PORT_FEATURE_LINK_SPEED_2_5G: + phy->req_line_speed = ELINK_SPEED_2500; + break; + case PORT_FEATURE_LINK_SPEED_10G_CX4: + phy->req_line_speed = ELINK_SPEED_10000; + break; + default: + phy->req_line_speed = ELINK_SPEED_AUTO_NEG; + break; + } + + switch (link_config & PORT_FEATURE_FLOW_CONTROL_MASK) { + case PORT_FEATURE_FLOW_CONTROL_AUTO: + phy->req_flow_ctrl = ELINK_FLOW_CTRL_AUTO; + break; + case PORT_FEATURE_FLOW_CONTROL_TX: + phy->req_flow_ctrl = ELINK_FLOW_CTRL_TX; + break; + case PORT_FEATURE_FLOW_CONTROL_RX: + phy->req_flow_ctrl = ELINK_FLOW_CTRL_RX; + break; + case PORT_FEATURE_FLOW_CONTROL_BOTH: + phy->req_flow_ctrl = ELINK_FLOW_CTRL_BOTH; + break; + default: + phy->req_flow_ctrl = ELINK_FLOW_CTRL_NONE; + break; + } +} + +uint32_t elink_phy_selection(struct elink_params *params) +{ + uint32_t phy_config_swapped, prio_cfg; + uint32_t return_cfg = PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT; + + phy_config_swapped = params->multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED; + + prio_cfg = params->multi_phy_config & PORT_HW_CFG_PHY_SELECTION_MASK; + + if (phy_config_swapped) { + switch (prio_cfg) { + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: + return_cfg = + PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY; + break; + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: + return_cfg = + PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY; + break; + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: + return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY; + break; + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: + return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; + break; + } + } else + return_cfg = prio_cfg; + + return return_cfg; +} + +elink_status_t elink_phy_probe(struct elink_params * params) +{ + uint8_t phy_index, actual_phy_idx; + uint32_t phy_config_swapped, sync_offset, media_types; + struct bnx2x_softc *sc = params->sc; + struct elink_phy *phy; + params->num_phys = 0; + PMD_DRV_LOG(DEBUG, "Begin phy probe"); +#ifdef ELINK_INCLUDE_EMUL + if (CHIP_REV_IS_EMUL(sc)) + return ELINK_STATUS_OK; +#endif + phy_config_swapped = params->multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED; + + for (phy_index = ELINK_INT_PHY; phy_index < ELINK_MAX_PHYS; phy_index++) { + actual_phy_idx = phy_index; + if (phy_config_swapped) { + if (phy_index == ELINK_EXT_PHY1) + actual_phy_idx = ELINK_EXT_PHY2; + else if (phy_index == ELINK_EXT_PHY2) + actual_phy_idx = ELINK_EXT_PHY1; + } + PMD_DRV_LOG(DEBUG, "phy_config_swapped %x, phy_index %x," + " actual_phy_idx %x", phy_config_swapped, + phy_index, actual_phy_idx); + phy = &params->phy[actual_phy_idx]; + if (elink_populate_phy(sc, phy_index, params->shmem_base, + params->shmem2_base, params->port, + phy) != ELINK_STATUS_OK) { + params->num_phys = 0; + PMD_DRV_LOG(DEBUG, "phy probe failed in phy index %d", + phy_index); + for (phy_index = ELINK_INT_PHY; + phy_index < ELINK_MAX_PHYS; phy_index++) + *phy = 
phy_null; + return ELINK_STATUS_ERROR; + } + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) + break; + + if (params->feature_config_flags & + ELINK_FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET) + phy->flags &= ~ELINK_FLAGS_TX_ERROR_CHECK; + + if (!(params->feature_config_flags & + ELINK_FEATURE_CONFIG_MT_SUPPORT)) + phy->flags |= ELINK_FLAGS_MDC_MDIO_WA_G; + + sync_offset = params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].media_type); + media_types = REG_RD(sc, sync_offset); + + /* Update media type for non-PMF sync only for the first time + * In case the media type changes afterwards, it will be updated + * using the update_status function + */ + if ((media_types & (PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK << + (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * + actual_phy_idx))) == 0) { + media_types |= ((phy->media_type & + PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) << + (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * + actual_phy_idx)); + } + REG_WR(sc, sync_offset, media_types); + + elink_phy_def_cfg(params, phy, phy_index); + params->num_phys++; + } + + PMD_DRV_LOG(DEBUG, "End phy probe. #phys found %x", params->num_phys); + return ELINK_STATUS_OK; +} + +#ifdef ELINK_INCLUDE_EMUL +static elink_status_t elink_init_e3_emul_mac(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + vars->line_speed = params->req_line_speed[0]; + /* In case link speed is auto, set speed the highest as possible */ + if (params->req_line_speed[0] == ELINK_SPEED_AUTO_NEG) { + if (params->feature_config_flags & + ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC) + vars->line_speed = ELINK_SPEED_2500; + else if (elink_is_4_port_mode(sc)) + vars->line_speed = ELINK_SPEED_10000; + else + vars->line_speed = ELINK_SPEED_20000; + } + if (vars->line_speed < ELINK_SPEED_10000) { + if ((params->feature_config_flags & + ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC)) { + PMD_DRV_LOG(DEBUG, "Invalid line speed %d while UMAC is" + " disabled!", params->req_line_speed[0]); + return ELINK_STATUS_ERROR; + } + switch (vars->line_speed) { + case ELINK_SPEED_10: + vars->link_status = ELINK_LINK_10TFD; + break; + case ELINK_SPEED_100: + vars->link_status = ELINK_LINK_100TXFD; + break; + case ELINK_SPEED_1000: + vars->link_status = ELINK_LINK_1000TFD; + break; + case ELINK_SPEED_2500: + vars->link_status = ELINK_LINK_2500TFD; + break; + default: + PMD_DRV_LOG(DEBUG, "Invalid line speed %d for UMAC", + vars->line_speed); + return ELINK_STATUS_ERROR; + } + vars->link_status |= LINK_STATUS_LINK_UP; + + if (params->loopback_mode == ELINK_LOOPBACK_UMAC) + elink_umac_enable(params, vars, 1); + else + elink_umac_enable(params, vars, 0); + } else { + /* Link speed >= 10000 requires XMAC enabled */ + if (params->feature_config_flags & + ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC) { + PMD_DRV_LOG(DEBUG, "Invalid line speed %d while XMAC is" + " disabled!", params->req_line_speed[0]); + return ELINK_STATUS_ERROR; + } + /* Check link speed */ + switch (vars->line_speed) { + case ELINK_SPEED_10000: + vars->link_status = ELINK_LINK_10GTFD; + break; + case ELINK_SPEED_20000: + vars->link_status = ELINK_LINK_20GTFD; + break; + default: + PMD_DRV_LOG(DEBUG, "Invalid line speed %d for XMAC", + vars->line_speed); + return ELINK_STATUS_ERROR; + } + vars->link_status |= LINK_STATUS_LINK_UP; + if (params->loopback_mode == ELINK_LOOPBACK_XMAC) + elink_xmac_enable(params, vars, 1); + else + elink_xmac_enable(params, vars, 0); + } + return ELINK_STATUS_OK; +} + +static elink_status_t elink_init_emul(struct elink_params 
*params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + if (CHIP_IS_E3(sc)) { + if (elink_init_e3_emul_mac(params, vars) != ELINK_STATUS_OK) + return ELINK_STATUS_ERROR; + } else { + if (params->feature_config_flags & + ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC) { + vars->line_speed = ELINK_SPEED_1000; + vars->link_status = (LINK_STATUS_LINK_UP | + ELINK_LINK_1000XFD); + if (params->loopback_mode == ELINK_LOOPBACK_EMAC) + elink_emac_enable(params, vars, 1); + else + elink_emac_enable(params, vars, 0); + } else { + vars->line_speed = ELINK_SPEED_10000; + vars->link_status = (LINK_STATUS_LINK_UP | + ELINK_LINK_10GTFD); + if (params->loopback_mode == ELINK_LOOPBACK_BMAC) + elink_bmac_enable(params, vars, 1, 1); + else + elink_bmac_enable(params, vars, 0, 1); + } + } + vars->link_up = 1; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + + if (CHIP_IS_E1x(sc)) + elink_pbf_update(params, vars->flow_ctrl, vars->line_speed); + /* Disable drain */ + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0); + + /* update shared memory */ + elink_update_mng(params, vars->link_status); + return ELINK_STATUS_OK; +} +#endif +#ifdef ELINK_INCLUDE_FPGA +static elink_status_t elink_init_fpga(struct elink_params *params, + struct elink_vars *vars) +{ + /* Enable on E1.5 FPGA */ + struct bnx2x_softc *sc = params->sc; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + vars->flow_ctrl = (ELINK_FLOW_CTRL_TX | ELINK_FLOW_CTRL_RX); + vars->link_status |= (LINK_STATUS_TX_FLOW_CONTROL_ENABLED | + LINK_STATUS_RX_FLOW_CONTROL_ENABLED); + if (CHIP_IS_E3(sc)) { + vars->line_speed = params->req_line_speed[0]; + switch (vars->line_speed) { + case ELINK_SPEED_AUTO_NEG: + vars->line_speed = ELINK_SPEED_2500; + case ELINK_SPEED_2500: + vars->link_status = ELINK_LINK_2500TFD; + break; + case ELINK_SPEED_1000: + vars->link_status = ELINK_LINK_1000XFD; + break; + case ELINK_SPEED_100: + vars->link_status = ELINK_LINK_100TXFD; + break; + case ELINK_SPEED_10: + vars->link_status = ELINK_LINK_10TFD; + break; + default: + PMD_DRV_LOG(DEBUG, "Invalid link speed %d", + params->req_line_speed[0]); + return ELINK_STATUS_ERROR; + } + vars->link_status |= LINK_STATUS_LINK_UP; + if (params->loopback_mode == ELINK_LOOPBACK_UMAC) + elink_umac_enable(params, vars, 1); + else + elink_umac_enable(params, vars, 0); + } else { + vars->line_speed = ELINK_SPEED_10000; + vars->link_status = (LINK_STATUS_LINK_UP | ELINK_LINK_10GTFD); + if (params->loopback_mode == ELINK_LOOPBACK_EMAC) + elink_emac_enable(params, vars, 1); + else + elink_emac_enable(params, vars, 0); + } + vars->link_up = 1; + + if (CHIP_IS_E1x(sc)) + elink_pbf_update(params, vars->flow_ctrl, vars->line_speed); + /* Disable drain */ + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0); + + /* Update shared memory */ + elink_update_mng(params, vars->link_status); + return ELINK_STATUS_OK; +} +#endif +static void elink_init_bmac_loopback(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + vars->link_up = 1; + vars->line_speed = ELINK_SPEED_10000; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + vars->mac_type = ELINK_MAC_TYPE_BMAC; + + vars->phy_flags = PHY_XGXS_FLAG; + + elink_xgxs_deassert(params); + + /* Set bmac loopback */ + elink_bmac_enable(params, vars, 1, 1); + + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0); +} + +static void elink_init_emac_loopback(struct elink_params *params, + struct elink_vars *vars) +{ 
+ struct bnx2x_softc *sc = params->sc; + vars->link_up = 1; + vars->line_speed = ELINK_SPEED_1000; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + vars->mac_type = ELINK_MAC_TYPE_EMAC; + + vars->phy_flags = PHY_XGXS_FLAG; + + elink_xgxs_deassert(params); + /* Set emac loopback */ + elink_emac_enable(params, vars, 1); + elink_emac_program(params, vars); + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0); +} + +static void elink_init_xmac_loopback(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + vars->link_up = 1; + if (!params->req_line_speed[0]) + vars->line_speed = ELINK_SPEED_10000; + else + vars->line_speed = params->req_line_speed[0]; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + vars->mac_type = ELINK_MAC_TYPE_XMAC; + vars->phy_flags = PHY_XGXS_FLAG; + /* Set WC to loopback mode since link is required to provide clock + * to the XMAC in 20G mode + */ + elink_set_aer_mmd(params, &params->phy[0]); + elink_warpcore_reset_lane(sc, &params->phy[0], 0); + params->phy[ELINK_INT_PHY].config_loopback(&params->phy[ELINK_INT_PHY], + params); + + elink_xmac_enable(params, vars, 1); + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0); +} + +static void elink_init_umac_loopback(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + vars->link_up = 1; + vars->line_speed = ELINK_SPEED_1000; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + vars->mac_type = ELINK_MAC_TYPE_UMAC; + vars->phy_flags = PHY_XGXS_FLAG; + elink_umac_enable(params, vars, 1); + + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0); +} + +static void elink_init_xgxs_loopback(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + struct elink_phy *int_phy = &params->phy[ELINK_INT_PHY]; + vars->link_up = 1; + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + vars->duplex = DUPLEX_FULL; + if (params->req_line_speed[0] == ELINK_SPEED_1000) + vars->line_speed = ELINK_SPEED_1000; + else if ((params->req_line_speed[0] == ELINK_SPEED_20000) || + (int_phy->flags & ELINK_FLAGS_WC_DUAL_MODE)) + vars->line_speed = ELINK_SPEED_20000; + else + vars->line_speed = ELINK_SPEED_10000; + + if (!USES_WARPCORE(sc)) + elink_xgxs_deassert(params); + elink_link_initialize(params, vars); + + if (params->req_line_speed[0] == ELINK_SPEED_1000) { + if (USES_WARPCORE(sc)) + elink_umac_enable(params, vars, 0); + else { + elink_emac_program(params, vars); + elink_emac_enable(params, vars, 0); + } + } else { + if (USES_WARPCORE(sc)) + elink_xmac_enable(params, vars, 0); + else + elink_bmac_enable(params, vars, 0, 1); + } + + if (params->loopback_mode == ELINK_LOOPBACK_XGXS) { + /* Set 10G XGXS loopback */ + int_phy->config_loopback(int_phy, params); + } else { + /* Set external phy loopback */ + uint8_t phy_index; + for (phy_index = ELINK_EXT_PHY1; + phy_index < params->num_phys; phy_index++) + if (params->phy[phy_index].config_loopback) + params->phy[phy_index].config_loopback(&params-> + phy + [phy_index], + params); + } + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0); + + elink_set_led(params, vars, ELINK_LED_MODE_OPER, vars->line_speed); +} + +void elink_set_rx_filter(struct elink_params *params, uint8_t en) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t val = en * 0x1F; + + /* Open / close the gate between the NIG and the BRB */ + if (!CHIP_IS_E1x(sc)) + val |= en * 0x20; + REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + 
params->port * 4, val); + + REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + params->port * 4, en * 0x3); + + REG_WR(sc, (params->port ? NIG_REG_LLH1_BRB1_NOT_MCP : + NIG_REG_LLH0_BRB1_NOT_MCP), en); +} + +static elink_status_t elink_avoid_link_flap(struct elink_params *params, + struct elink_vars *vars) +{ + uint32_t phy_idx; + uint32_t dont_clear_stat, lfa_sts; + struct bnx2x_softc *sc = params->sc; + + /* Sync the link parameters */ + elink_link_status_update(params, vars); + + /* + * The module verification was already done by previous link owner, + * so this call is meant only to get warning message + */ + + for (phy_idx = ELINK_INT_PHY; phy_idx < params->num_phys; phy_idx++) { + struct elink_phy *phy = &params->phy[phy_idx]; + if (phy->phy_specific_func) { + PMD_DRV_LOG(DEBUG, "Calling PHY specific func"); + phy->phy_specific_func(phy, params, ELINK_PHY_INIT); + } + if ((phy->media_type == ELINK_ETH_PHY_SFPP_10G_FIBER) || + (phy->media_type == ELINK_ETH_PHY_SFP_1G_FIBER) || + (phy->media_type == ELINK_ETH_PHY_DA_TWINAX)) + elink_verify_sfp_module(phy, params); + } + lfa_sts = REG_RD(sc, params->lfa_base + + offsetof(struct shmem_lfa, lfa_sts)); + + dont_clear_stat = lfa_sts & SHMEM_LFA_DONT_CLEAR_STAT; + + /* Re-enable the NIG/MAC */ + if (CHIP_IS_E3(sc)) { + if (!dont_clear_stat) { + REG_WR(sc, GRCBASE_MISC + + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_MSTAT0 << + params->port)); + REG_WR(sc, GRCBASE_MISC + + MISC_REGISTERS_RESET_REG_2_SET, + (MISC_REGISTERS_RESET_REG_2_MSTAT0 << + params->port)); + } + if (vars->line_speed < ELINK_SPEED_10000) + elink_umac_enable(params, vars, 0); + else + elink_xmac_enable(params, vars, 0); + } else { + if (vars->line_speed < ELINK_SPEED_10000) + elink_emac_enable(params, vars, 0); + else + elink_bmac_enable(params, vars, 0, !dont_clear_stat); + } + + /* Increment LFA count */ + lfa_sts = ((lfa_sts & ~LINK_FLAP_AVOIDANCE_COUNT_MASK) | + (((((lfa_sts & LINK_FLAP_AVOIDANCE_COUNT_MASK) >> + LINK_FLAP_AVOIDANCE_COUNT_OFFSET) + 1) & 0xff) + << LINK_FLAP_AVOIDANCE_COUNT_OFFSET)); + /* Clear link flap reason */ + lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK; + + REG_WR(sc, params->lfa_base + + offsetof(struct shmem_lfa, lfa_sts), lfa_sts); + + /* Disable NIG DRAIN */ + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0); + + /* Enable interrupts */ + elink_link_int_enable(params); + return ELINK_STATUS_OK; +} + +static void elink_cannot_avoid_link_flap(struct elink_params *params, + struct elink_vars *vars, + int lfa_status) +{ + uint32_t lfa_sts, cfg_idx, tmp_val; + struct bnx2x_softc *sc = params->sc; + + elink_link_reset(params, vars, 1); + + if (!params->lfa_base) + return; + /* Store the new link parameters */ + REG_WR(sc, params->lfa_base + + offsetof(struct shmem_lfa, req_duplex), + params->req_duplex[0] | (params->req_duplex[1] << 16)); + + REG_WR(sc, params->lfa_base + + offsetof(struct shmem_lfa, req_flow_ctrl), + params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16)); + + REG_WR(sc, params->lfa_base + + offsetof(struct shmem_lfa, req_line_speed), + params->req_line_speed[0] | (params->req_line_speed[1] << 16)); + + for (cfg_idx = 0; cfg_idx < SHMEM_LINK_CONFIG_SIZE; cfg_idx++) { + REG_WR(sc, params->lfa_base + + offsetof(struct shmem_lfa, + speed_cap_mask[cfg_idx]), + params->speed_cap_mask[cfg_idx]); + } + + tmp_val = REG_RD(sc, params->lfa_base + + offsetof(struct shmem_lfa, additional_config)); + tmp_val &= ~REQ_FC_AUTO_ADV_MASK; + tmp_val |= params->req_fc_auto_adv; + + REG_WR(sc, params->lfa_base + + offsetof(struct 
shmem_lfa, additional_config), tmp_val); + + lfa_sts = REG_RD(sc, params->lfa_base + + offsetof(struct shmem_lfa, lfa_sts)); + + /* Clear the "Don't Clear Statistics" bit, and set reason */ + lfa_sts &= ~SHMEM_LFA_DONT_CLEAR_STAT; + + /* Set link flap reason */ + lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK; + lfa_sts |= ((lfa_status & LFA_LINK_FLAP_REASON_MASK) << + LFA_LINK_FLAP_REASON_OFFSET); + + /* Increment link flap counter */ + lfa_sts = ((lfa_sts & ~LINK_FLAP_COUNT_MASK) | + (((((lfa_sts & LINK_FLAP_COUNT_MASK) >> + LINK_FLAP_COUNT_OFFSET) + 1) & 0xff) + << LINK_FLAP_COUNT_OFFSET)); + REG_WR(sc, params->lfa_base + + offsetof(struct shmem_lfa, lfa_sts), lfa_sts); + /* Proceed with regular link initialization */ +} + +elink_status_t elink_phy_init(struct elink_params *params, + struct elink_vars *vars) +{ + int lfa_status; + struct bnx2x_softc *sc = params->sc; + PMD_DRV_LOG(DEBUG, "Phy Initialization started"); + PMD_DRV_LOG(DEBUG, "(1) req_speed %d, req_flowctrl %d", + params->req_line_speed[0], params->req_flow_ctrl[0]); + PMD_DRV_LOG(DEBUG, "(2) req_speed %d, req_flowctrl %d", + params->req_line_speed[1], params->req_flow_ctrl[1]); + PMD_DRV_LOG(DEBUG, "req_adv_flow_ctrl 0x%x", params->req_fc_auto_adv); + vars->link_status = 0; + vars->phy_link_up = 0; + vars->link_up = 0; + vars->line_speed = 0; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = ELINK_FLOW_CTRL_NONE; + vars->mac_type = ELINK_MAC_TYPE_NONE; + vars->phy_flags = 0; + vars->check_kr2_recovery_cnt = 0; + params->link_flags = ELINK_PHY_INITIALIZED; + /* Driver opens NIG-BRB filters */ + elink_set_rx_filter(params, 1); + /* Check if link flap can be avoided */ + lfa_status = elink_check_lfa(params); + + if (lfa_status == 0) { + PMD_DRV_LOG(DEBUG, "Link Flap Avoidance in progress"); + return elink_avoid_link_flap(params, vars); + } + + PMD_DRV_LOG(DEBUG, "Cannot avoid link flap lfa_sta=0x%x", lfa_status); + elink_cannot_avoid_link_flap(params, vars, lfa_status); + + /* Disable attentions */ + elink_bits_dis(sc, NIG_REG_MASK_INTERRUPT_PORT0 + params->port * 4, + (ELINK_NIG_MASK_XGXS0_LINK_STATUS | + ELINK_NIG_MASK_XGXS0_LINK10G | + ELINK_NIG_MASK_SERDES0_LINK_STATUS | + ELINK_NIG_MASK_MI_INT)); +#ifdef ELINK_INCLUDE_EMUL + if (!(params->feature_config_flags & + ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC)) +#endif + + elink_emac_init(params); + + if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED) + vars->link_status |= LINK_STATUS_PFC_ENABLED; + + if ((params->num_phys == 0) && !CHIP_REV_IS_SLOW(sc)) { + PMD_DRV_LOG(DEBUG, "No phy found for initialization !!"); + return ELINK_STATUS_ERROR; + } + set_phy_vars(params, vars); + + PMD_DRV_LOG(DEBUG, "Num of phys on board: %d", params->num_phys); +#ifdef ELINK_INCLUDE_FPGA + if (CHIP_REV_IS_FPGA(sc)) { + return elink_init_fpga(params, vars); + } else +#endif +#ifdef ELINK_INCLUDE_EMUL + if (CHIP_REV_IS_EMUL(sc)) { + return elink_init_emul(params, vars); + } else +#endif + switch (params->loopback_mode) { + case ELINK_LOOPBACK_BMAC: + elink_init_bmac_loopback(params, vars); + break; + case ELINK_LOOPBACK_EMAC: + elink_init_emac_loopback(params, vars); + break; + case ELINK_LOOPBACK_XMAC: + elink_init_xmac_loopback(params, vars); + break; + case ELINK_LOOPBACK_UMAC: + elink_init_umac_loopback(params, vars); + break; + case ELINK_LOOPBACK_XGXS: + case ELINK_LOOPBACK_EXT_PHY: + elink_init_xgxs_loopback(params, vars); + break; + default: + if (!CHIP_IS_E3(sc)) { + if (params->switch_cfg == ELINK_SWITCH_CFG_10G) + elink_xgxs_deassert(params); + else + 
elink_serdes_deassert(sc, params->port); + } + elink_link_initialize(params, vars); + DELAY(1000 * 30); + elink_link_int_enable(params); + break; + } + elink_update_mng(params, vars->link_status); + + elink_update_mng_eee(params, vars->eee_status); + return ELINK_STATUS_OK; +} + +static elink_status_t elink_link_reset(struct elink_params *params, + struct elink_vars *vars, + uint8_t reset_ext_phy) +{ + struct bnx2x_softc *sc = params->sc; + uint8_t phy_index, port = params->port, clear_latch_ind = 0; + PMD_DRV_LOG(DEBUG, "Resetting the link of port %d", port); + /* Disable attentions */ + vars->link_status = 0; + elink_update_mng(params, vars->link_status); + vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK | + SHMEM_EEE_ACTIVE_BIT); + elink_update_mng_eee(params, vars->eee_status); + elink_bits_dis(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, + (ELINK_NIG_MASK_XGXS0_LINK_STATUS | + ELINK_NIG_MASK_XGXS0_LINK10G | + ELINK_NIG_MASK_SERDES0_LINK_STATUS | + ELINK_NIG_MASK_MI_INT)); + + /* Activate nig drain */ + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + port * 4, 1); + + /* Disable nig egress interface */ + if (!CHIP_IS_E3(sc)) { + REG_WR(sc, NIG_REG_BMAC0_OUT_EN + port * 4, 0); + REG_WR(sc, NIG_REG_EGRESS_EMAC0_OUT_EN + port * 4, 0); + } +#ifdef ELINK_INCLUDE_EMUL + /* Stop BigMac rx */ + if (!(params->feature_config_flags & + ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC)) +#endif + if (!CHIP_IS_E3(sc)) + elink_set_bmac_rx(sc, port, 0); +#ifdef ELINK_INCLUDE_EMUL + /* Stop XMAC/UMAC rx */ + if (!(params->feature_config_flags & + ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC)) +#endif + if (CHIP_IS_E3(sc) && !CHIP_REV_IS_FPGA(sc)) { + elink_set_xmac_rxtx(params, 0); + elink_set_umac_rxtx(params, 0); + } + /* Disable emac */ + if (!CHIP_IS_E3(sc)) + REG_WR(sc, NIG_REG_NIG_EMAC0_EN + port * 4, 0); + + DELAY(1000 * 10); + /* The PHY reset is controlled by GPIO 1 + * Hold it as vars low + */ + /* Clear link led */ + elink_set_mdio_emac_per_phy(sc, params); + elink_set_led(params, vars, ELINK_LED_MODE_OFF, 0); + + if (reset_ext_phy && (!CHIP_REV_IS_SLOW(sc))) { + for (phy_index = ELINK_EXT_PHY1; phy_index < params->num_phys; + phy_index++) { + if (params->phy[phy_index].link_reset) { + elink_set_aer_mmd(params, + ¶ms->phy[phy_index]); + params->phy[phy_index].link_reset(¶ms-> + phy + [phy_index], + params); + } + if (params->phy[phy_index].flags & + ELINK_FLAGS_REARM_LATCH_SIGNAL) + clear_latch_ind = 1; + } + } + + if (clear_latch_ind) { + /* Clear latching indication */ + elink_rearm_latch_signal(sc, port, 0); + elink_bits_dis(sc, NIG_REG_LATCH_BC_0 + port * 4, + 1 << ELINK_NIG_LATCH_BC_ENABLE_MI_INT); + } +#if defined(ELINK_INCLUDE_EMUL) || defined(ELINK_INCLUDE_FPGA) + if (!CHIP_REV_IS_SLOW(sc)) +#endif + if (params->phy[ELINK_INT_PHY].link_reset) + params->phy[ELINK_INT_PHY].link_reset(¶ms-> + phy + [ELINK_INT_PHY], + params); + + /* Disable nig ingress interface */ + if (!CHIP_IS_E3(sc)) { + /* Reset BigMac */ + REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + REG_WR(sc, NIG_REG_BMAC0_IN_EN + port * 4, 0); + REG_WR(sc, NIG_REG_EMAC0_IN_EN + port * 4, 0); + } else { + uint32_t xmac_base = + (params->port) ? 
GRCBASE_XMAC1 : GRCBASE_XMAC0; + elink_set_xumac_nig(params, 0, 0); + if (REG_RD(sc, MISC_REG_RESET_REG_2) & + MISC_REGISTERS_RESET_REG_2_XMAC) + REG_WR(sc, xmac_base + XMAC_REG_CTRL, + XMAC_CTRL_REG_SOFT_RESET); + } + vars->link_up = 0; + vars->phy_flags = 0; + return ELINK_STATUS_OK; +} + +elink_status_t elink_lfa_reset(struct elink_params * params, + struct elink_vars * vars) +{ + struct bnx2x_softc *sc = params->sc; + vars->link_up = 0; + vars->phy_flags = 0; + params->link_flags &= ~ELINK_PHY_INITIALIZED; + if (!params->lfa_base) + return elink_link_reset(params, vars, 1); + /* + * Activate NIG drain so that during this time the device won't send + * anything while it is unable to response. + */ + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 1); + + /* + * Close gracefully the gate from BMAC to NIG such that no half packets + * are passed. + */ + if (!CHIP_IS_E3(sc)) + elink_set_bmac_rx(sc, params->port, 0); + + if (CHIP_IS_E3(sc)) { + elink_set_xmac_rxtx(params, 0); + elink_set_umac_rxtx(params, 0); + } + /* Wait 10ms for the pipe to clean up */ + DELAY(1000 * 10); + + /* Clean the NIG-BRB using the network filters in a way that will + * not cut a packet in the middle. + */ + elink_set_rx_filter(params, 0); + + /* + * Re-open the gate between the BMAC and the NIG, after verifying the + * gate to the BRB is closed, otherwise packets may arrive to the + * firmware before driver had initialized it. The target is to achieve + * minimum management protocol down time. + */ + if (!CHIP_IS_E3(sc)) + elink_set_bmac_rx(sc, params->port, 1); + + if (CHIP_IS_E3(sc)) { + elink_set_xmac_rxtx(params, 1); + elink_set_umac_rxtx(params, 1); + } + /* Disable NIG drain */ + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0); + return ELINK_STATUS_OK; +} + +/****************************************************************************/ +/* Common function */ +/****************************************************************************/ +static elink_status_t elink_8073_common_init_phy(struct bnx2x_softc *sc, + uint32_t shmem_base_path[], + uint32_t shmem2_base_path[], + uint8_t phy_index, + __rte_unused uint32_t chip_id) +{ + struct elink_phy phy[PORT_MAX]; + struct elink_phy *phy_blk[PORT_MAX]; + uint16_t val; + int8_t port = 0; + int8_t port_of_path = 0; + uint32_t swap_val, swap_override; + swap_val = REG_RD(sc, NIG_REG_PORT_SWAP); + swap_override = REG_RD(sc, NIG_REG_STRAP_OVERRIDE); + port ^= (swap_val && swap_override); + elink_ext_phy_hw_reset(sc, port); + /* PART1 - Reset both phys */ + for (port = PORT_MAX - 1; port >= PORT_0; port--) { + uint32_t shmem_base, shmem2_base; + /* In E2, same phy is using for port0 of the two paths */ + if (CHIP_IS_E1x(sc)) { + shmem_base = shmem_base_path[0]; + shmem2_base = shmem2_base_path[0]; + port_of_path = port; + } else { + shmem_base = shmem_base_path[port]; + shmem2_base = shmem2_base_path[port]; + port_of_path = 0; + } + + /* Extract the ext phy address for the port */ + if (elink_populate_phy(sc, phy_index, shmem_base, shmem2_base, + port_of_path, &phy[port]) != + ELINK_STATUS_OK) { + PMD_DRV_LOG(DEBUG, "populate_phy failed"); + return ELINK_STATUS_ERROR; + } + /* Disable attentions */ + elink_bits_dis(sc, NIG_REG_MASK_INTERRUPT_PORT0 + + port_of_path * 4, + (ELINK_NIG_MASK_XGXS0_LINK_STATUS | + ELINK_NIG_MASK_XGXS0_LINK10G | + ELINK_NIG_MASK_SERDES0_LINK_STATUS | + ELINK_NIG_MASK_MI_INT)); + + /* Need to take the phy out of low power mode in order + * to write to access its registers + */ + elink_cb_gpio_write(sc, 
MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); + + /* Reset the phy */ + elink_cl45_write(sc, &phy[port], + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1 << 15); + } + + /* Add delay of 150ms after reset */ + DELAY(1000 * 150); + + if (phy[PORT_0].addr & 0x1) { + phy_blk[PORT_0] = &(phy[PORT_1]); + phy_blk[PORT_1] = &(phy[PORT_0]); + } else { + phy_blk[PORT_0] = &(phy[PORT_0]); + phy_blk[PORT_1] = &(phy[PORT_1]); + } + + /* PART2 - Download firmware to both phys */ + for (port = PORT_MAX - 1; port >= PORT_0; port--) { + if (CHIP_IS_E1x(sc)) + port_of_path = port; + else + port_of_path = 0; + + PMD_DRV_LOG(DEBUG, "Loading spirom for phy address 0x%x", + phy_blk[port]->addr); + if (elink_8073_8727_external_rom_boot(sc, phy_blk[port], + port_of_path)) + return ELINK_STATUS_ERROR; + + /* Only set bit 10 = 1 (Tx power down) */ + elink_cl45_read(sc, phy_blk[port], + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_POWER_DOWN, &val); + + /* Phase1 of TX_POWER_DOWN reset */ + elink_cl45_write(sc, phy_blk[port], + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_POWER_DOWN, (val | 1 << 10)); + } + + /* Toggle Transmitter: Power down and then up with 600ms delay + * between + */ + DELAY(1000 * 600); + + /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */ + for (port = PORT_MAX - 1; port >= PORT_0; port--) { + /* Phase2 of POWER_DOWN_RESET */ + /* Release bit 10 (Release Tx power down) */ + elink_cl45_read(sc, phy_blk[port], + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_POWER_DOWN, &val); + + elink_cl45_write(sc, phy_blk[port], + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_POWER_DOWN, + (val & (~(1 << 10)))); + DELAY(1000 * 15); + + /* Read modify write the SPI-ROM version select register */ + elink_cl45_read(sc, phy_blk[port], + MDIO_PMA_DEVAD, + MDIO_PMA_REG_EDC_FFE_MAIN, &val); + elink_cl45_write(sc, phy_blk[port], + MDIO_PMA_DEVAD, + MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1 << 12))); + + /* set GPIO2 back to LOW */ + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_LOW, port); + } + return ELINK_STATUS_OK; +} + +static elink_status_t elink_8726_common_init_phy(struct bnx2x_softc *sc, + uint32_t shmem_base_path[], + uint32_t shmem2_base_path[], + uint8_t phy_index, + __rte_unused uint32_t chip_id) +{ + uint32_t val; + int8_t port; + struct elink_phy phy; + /* Use port1 because of the static port-swap */ + /* Enable the module detection interrupt */ + val = REG_RD(sc, MISC_REG_GPIO_EVENT_EN); + val |= ((1 << MISC_REGISTERS_GPIO_3) | + (1 << + (MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT))); + REG_WR(sc, MISC_REG_GPIO_EVENT_EN, val); + + elink_ext_phy_hw_reset(sc, 0); + DELAY(1000 * 5); + for (port = 0; port < PORT_MAX; port++) { + uint32_t shmem_base, shmem2_base; + + /* In E2, same phy is using for port0 of the two paths */ + if (CHIP_IS_E1x(sc)) { + shmem_base = shmem_base_path[0]; + shmem2_base = shmem2_base_path[0]; + } else { + shmem_base = shmem_base_path[port]; + shmem2_base = shmem2_base_path[port]; + } + /* Extract the ext phy address for the port */ + if (elink_populate_phy(sc, phy_index, shmem_base, shmem2_base, + port, &phy) != ELINK_STATUS_OK) { + PMD_DRV_LOG(DEBUG, "populate phy failed"); + return ELINK_STATUS_ERROR; + } + + /* Reset phy */ + elink_cl45_write(sc, &phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001); + + /* Set fault module detected LED on */ + elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_0, + MISC_REGISTERS_GPIO_HIGH, port); + } + + return ELINK_STATUS_OK; +} + +static void elink_get_ext_phy_reset_gpio(struct bnx2x_softc *sc, + uint32_t shmem_base, 
uint8_t * io_gpio, + uint8_t * io_port) +{ + + uint32_t phy_gpio_reset = REG_RD(sc, shmem_base + + offsetof(struct shmem_region, + dev_info. + port_hw_config[PORT_0]. + default_cfg)); + switch (phy_gpio_reset) { + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0: + *io_gpio = 0; + *io_port = 0; + break; + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0: + *io_gpio = 1; + *io_port = 0; + break; + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0: + *io_gpio = 2; + *io_port = 0; + break; + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0: + *io_gpio = 3; + *io_port = 0; + break; + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1: + *io_gpio = 0; + *io_port = 1; + break; + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1: + *io_gpio = 1; + *io_port = 1; + break; + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1: + *io_gpio = 2; + *io_port = 1; + break; + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1: + *io_gpio = 3; + *io_port = 1; + break; + default: + /* Don't override the io_gpio and io_port */ + break; + } +} + +static elink_status_t elink_8727_common_init_phy(struct bnx2x_softc *sc, + uint32_t shmem_base_path[], + uint32_t shmem2_base_path[], + uint8_t phy_index, + __rte_unused uint32_t chip_id) +{ + int8_t port, reset_gpio; + uint32_t swap_val, swap_override; + struct elink_phy phy[PORT_MAX]; + struct elink_phy *phy_blk[PORT_MAX]; + int8_t port_of_path; + swap_val = REG_RD(sc, NIG_REG_PORT_SWAP); + swap_override = REG_RD(sc, NIG_REG_STRAP_OVERRIDE); + + reset_gpio = MISC_REGISTERS_GPIO_1; + port = 1; + + /* Retrieve the reset gpio/port which control the reset. + * Default is GPIO1, PORT1 + */ + elink_get_ext_phy_reset_gpio(sc, shmem_base_path[0], + (uint8_t *) & reset_gpio, + (uint8_t *) & port); + + /* Calculate the port based on port swap */ + port ^= (swap_val && swap_override); + + /* Initiate PHY reset */ + elink_cb_gpio_write(sc, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW, + port); + DELAY(1000 * 1); + elink_cb_gpio_write(sc, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH, + port); + + DELAY(1000 * 5); + + /* PART1 - Reset both phys */ + for (port = PORT_MAX - 1; port >= PORT_0; port--) { + uint32_t shmem_base, shmem2_base; + + /* In E2, same phy is using for port0 of the two paths */ + if (CHIP_IS_E1x(sc)) { + shmem_base = shmem_base_path[0]; + shmem2_base = shmem2_base_path[0]; + port_of_path = port; + } else { + shmem_base = shmem_base_path[port]; + shmem2_base = shmem2_base_path[port]; + port_of_path = 0; + } + + /* Extract the ext phy address for the port */ + if (elink_populate_phy(sc, phy_index, shmem_base, shmem2_base, + port_of_path, &phy[port]) != + ELINK_STATUS_OK) { + PMD_DRV_LOG(DEBUG, "populate phy failed"); + return ELINK_STATUS_ERROR; + } + /* disable attentions */ + elink_bits_dis(sc, NIG_REG_MASK_INTERRUPT_PORT0 + + port_of_path * 4, + (ELINK_NIG_MASK_XGXS0_LINK_STATUS | + ELINK_NIG_MASK_XGXS0_LINK10G | + ELINK_NIG_MASK_SERDES0_LINK_STATUS | + ELINK_NIG_MASK_MI_INT)); + + /* Reset the phy */ + elink_cl45_write(sc, &phy[port], + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1 << 15); + } + + /* Add delay of 150ms after reset */ + DELAY(1000 * 150); + if (phy[PORT_0].addr & 0x1) { + phy_blk[PORT_0] = &(phy[PORT_1]); + phy_blk[PORT_1] = &(phy[PORT_0]); + } else { + phy_blk[PORT_0] = &(phy[PORT_0]); + phy_blk[PORT_1] = &(phy[PORT_1]); + } + /* PART2 - Download firmware to both phys */ + for (port = PORT_MAX - 1; port >= PORT_0; port--) { + if (CHIP_IS_E1x(sc)) + port_of_path = port; + else + port_of_path = 0; + PMD_DRV_LOG(DEBUG, "Loading spirom for phy address 0x%x", + phy_blk[port]->addr); + if 
(elink_8073_8727_external_rom_boot(sc, phy_blk[port], + port_of_path)) + return ELINK_STATUS_ERROR; + /* Disable PHY transmitter output */ + elink_cl45_write(sc, phy_blk[port], + MDIO_PMA_DEVAD, MDIO_PMA_REG_TX_DISABLE, 1); + + } + return ELINK_STATUS_OK; +} + +static elink_status_t elink_84833_common_init_phy(struct bnx2x_softc *sc, + uint32_t shmem_base_path[], + __rte_unused uint32_t + shmem2_base_path[], + __rte_unused uint8_t + phy_index, uint32_t chip_id) +{ + uint8_t reset_gpios; + reset_gpios = elink_84833_get_reset_gpios(sc, shmem_base_path, chip_id); + elink_cb_gpio_mult_write(sc, reset_gpios, + MISC_REGISTERS_GPIO_OUTPUT_LOW); + DELAY(10); + elink_cb_gpio_mult_write(sc, reset_gpios, + MISC_REGISTERS_GPIO_OUTPUT_HIGH); + PMD_DRV_LOG(DEBUG, "84833 reset pulse on pin values 0x%x", reset_gpios); + return ELINK_STATUS_OK; +} + +static elink_status_t elink_ext_phy_common_init(struct bnx2x_softc *sc, + uint32_t shmem_base_path[], + uint32_t shmem2_base_path[], + uint8_t phy_index, + uint32_t ext_phy_type, + uint32_t chip_id) +{ + elink_status_t rc = ELINK_STATUS_OK; + + switch (ext_phy_type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8073: + rc = elink_8073_common_init_phy(sc, shmem_base_path, + shmem2_base_path, + phy_index, chip_id); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8722: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727_NOC: + rc = elink_8727_common_init_phy(sc, shmem_base_path, + shmem2_base_path, + phy_index, chip_id); + break; + + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726: + /* GPIO1 affects both ports, so there's need to pull + * it for single port alone + */ + rc = elink_8726_common_init_phy(sc, shmem_base_path, + shmem2_base_path, + phy_index, chip_id); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84834: + /* GPIO3's are linked, and so both need to be toggled + * to obtain required 2us pulse. 
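+		 * For reference, elink_84833_common_init_phy() above implements
+		 * this by driving the linked GPIOs low, waiting 10us via
+		 * DELAY(10) (DELAY() takes microseconds in this file), and then
+		 * driving them high again, which comfortably exceeds the 2us
+		 * minimum.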
+ */ + rc = elink_84833_common_init_phy(sc, shmem_base_path, + shmem2_base_path, + phy_index, chip_id); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: + rc = ELINK_STATUS_ERROR; + break; + default: + PMD_DRV_LOG(DEBUG, + "ext_phy 0x%x common init not required", + ext_phy_type); + break; + } + + if (rc != ELINK_STATUS_OK) + elink_cb_event_log(sc, ELINK_LOG_ID_PHY_UNINITIALIZED, 0); // "Warning: PHY was not initialized," + // " Port %d", + + return rc; +} + +elink_status_t elink_common_init_phy(struct bnx2x_softc * sc, + uint32_t shmem_base_path[], + uint32_t shmem2_base_path[], + uint32_t chip_id, + __rte_unused uint8_t one_port_enabled) +{ + elink_status_t rc = ELINK_STATUS_OK; + uint32_t phy_ver, val; + uint8_t phy_index = 0; + uint32_t ext_phy_type, ext_phy_config; +#if defined(ELINK_INCLUDE_EMUL) || defined(ELINK_INCLUDE_FPGA) + if (CHIP_REV_IS_EMUL(sc) || CHIP_REV_IS_FPGA(sc)) + return ELINK_STATUS_OK; +#endif + + elink_set_mdio_clk(sc, GRCBASE_EMAC0); + elink_set_mdio_clk(sc, GRCBASE_EMAC1); + PMD_DRV_LOG(DEBUG, "Begin common phy init"); + if (CHIP_IS_E3(sc)) { + /* Enable EPIO */ + val = REG_RD(sc, MISC_REG_GEN_PURP_HWG); + REG_WR(sc, MISC_REG_GEN_PURP_HWG, val | 1); + } + /* Check if common init was already done */ + phy_ver = REG_RD(sc, shmem_base_path[0] + + offsetof(struct shmem_region, + port_mb[PORT_0].ext_phy_fw_version)); + if (phy_ver) { + PMD_DRV_LOG(DEBUG, "Not doing common init; phy ver is 0x%x", + phy_ver); + return ELINK_STATUS_OK; + } + + /* Read the ext_phy_type for arbitrary port(0) */ + for (phy_index = ELINK_EXT_PHY1; phy_index < ELINK_MAX_PHYS; + phy_index++) { + ext_phy_config = elink_get_ext_phy_config(sc, + shmem_base_path[0], + phy_index, 0); + ext_phy_type = ELINK_XGXS_EXT_PHY_TYPE(ext_phy_config); + rc |= elink_ext_phy_common_init(sc, shmem_base_path, + shmem2_base_path, + phy_index, ext_phy_type, + chip_id); + } + return rc; +} + +static void elink_check_over_curr(struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t cfg_pin; + uint8_t port = params->port; + uint32_t pin_val; + + cfg_pin = (REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port]. + e3_cmn_pin_cfg1)) & + PORT_HW_CFG_E3_OVER_CURRENT_MASK) >> + PORT_HW_CFG_E3_OVER_CURRENT_SHIFT; + + /* Ignore check if no external input PIN available */ + if (elink_get_cfg_pin(sc, cfg_pin, &pin_val) != ELINK_STATUS_OK) + return; + + if (!pin_val) { + if ((vars->phy_flags & PHY_OVER_CURRENT_FLAG) == 0) { + elink_cb_event_log(sc, ELINK_LOG_ID_OVER_CURRENT, params->port); //"Error: Power fault on Port %d has" + // " been detected and the power to " + // "that SFP+ module has been removed" + // " to prevent failure of the card." + // " Please remove the SFP+ module and" + // " restart the system to clear this" + // " error.", + vars->phy_flags |= PHY_OVER_CURRENT_FLAG; + elink_warpcore_power_module(params, 0); + } + } else + vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG; +} + +/* Returns 0 if no change occured since last check; 1 otherwise. */ +static uint8_t elink_analyze_link_error(struct elink_params *params, + struct elink_vars *vars, + uint32_t status, uint32_t phy_flag, + uint32_t link_flag, uint8_t notify) +{ + struct bnx2x_softc *sc = params->sc; + /* Compare new value with previous value */ + uint8_t led_mode; + uint32_t old_status = (vars->phy_flags & phy_flag) ? 
1 : 0; + + if ((status ^ old_status) == 0) + return 0; + + /* If values differ */ + switch (phy_flag) { + case PHY_HALF_OPEN_CONN_FLAG: + PMD_DRV_LOG(DEBUG, "Analyze Remote Fault"); + break; + case PHY_SFP_TX_FAULT_FLAG: + PMD_DRV_LOG(DEBUG, "Analyze TX Fault"); + break; + default: + PMD_DRV_LOG(DEBUG, "Analyze UNKNOWN"); + } + PMD_DRV_LOG(DEBUG, "Link changed:[%x %x]->%x", vars->link_up, + old_status, status); + + /* a. Update shmem->link_status accordingly + * b. Update elink_vars->link_up + */ + if (status) { + vars->link_status &= ~LINK_STATUS_LINK_UP; + vars->link_status |= link_flag; + vars->link_up = 0; + vars->phy_flags |= phy_flag; + + /* activate nig drain */ + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 1); + /* Set LED mode to off since the PHY doesn't know about these + * errors + */ + led_mode = ELINK_LED_MODE_OFF; + } else { + vars->link_status |= LINK_STATUS_LINK_UP; + vars->link_status &= ~link_flag; + vars->link_up = 1; + vars->phy_flags &= ~phy_flag; + led_mode = ELINK_LED_MODE_OPER; + + /* Clear nig drain */ + REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0); + } + elink_sync_link(params, vars); + /* Update the LED according to the link state */ + elink_set_led(params, vars, led_mode, ELINK_SPEED_10000); + + /* Update link status in the shared memory */ + elink_update_mng(params, vars->link_status); + + /* C. Trigger General Attention */ + vars->periodic_flags |= ELINK_PERIODIC_FLAGS_LINK_EVENT; + if (notify) + elink_cb_notify_link_changed(sc); + + return 1; +} + +/****************************************************************************** +* Description: +* This function checks for half opened connection change indication. +* When such change occurs, it calls the elink_analyze_link_error +* to check if Remote Fault is set or cleared. Reception of remote fault +* status message in the MAC indicates that the peer's MAC has detected +* a fault, for example, due to break in the TX side of fiber. +* +******************************************************************************/ +static elink_status_t elink_check_half_open_conn(struct elink_params *params, + struct elink_vars *vars, + uint8_t notify) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t lss_status = 0; + uint32_t mac_base; + /* In case link status is physically up @ 10G do */ + if (((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) || + (REG_RD(sc, NIG_REG_EGRESS_EMAC0_PORT + params->port * 4))) + return ELINK_STATUS_OK; + + if (CHIP_IS_E3(sc) && + (REG_RD(sc, MISC_REG_RESET_REG_2) & + (MISC_REGISTERS_RESET_REG_2_XMAC))) { + /* Check E3 XMAC */ + /* Note that link speed cannot be queried here, since it may be + * zero while link is down. In case UMAC is active, LSS will + * simply not be set + */ + mac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; + + /* Clear stick bits (Requires rising edge) */ + REG_WR(sc, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0); + REG_WR(sc, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, + XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS | + XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS); + if (REG_RD(sc, mac_base + XMAC_REG_RX_LSS_STATUS)) + lss_status = 1; + + elink_analyze_link_error(params, vars, lss_status, + PHY_HALF_OPEN_CONN_FLAG, + LINK_STATUS_NONE, notify); + } else if (REG_RD(sc, MISC_REG_RESET_REG_2) & + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) { + /* Check E1X / E2 BMAC */ + uint32_t lss_status_reg; + uint32_t wb_data[2]; + mac_base = params->port ? 
NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + /* Read BIGMAC_REGISTER_RX_LSS_STATUS */ + if (CHIP_IS_E2(sc)) + lss_status_reg = BIGMAC2_REGISTER_RX_LSS_STAT; + else + lss_status_reg = BIGMAC_REGISTER_RX_LSS_STATUS; + + REG_RD_DMAE(sc, mac_base + lss_status_reg, wb_data, 2); + lss_status = (wb_data[0] > 0); + + elink_analyze_link_error(params, vars, lss_status, + PHY_HALF_OPEN_CONN_FLAG, + LINK_STATUS_NONE, notify); + } + return ELINK_STATUS_OK; +} + +static void elink_sfp_tx_fault_detection(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) +{ + struct bnx2x_softc *sc = params->sc; + uint32_t cfg_pin, value = 0; + uint8_t led_change, port = params->port; + + /* Get The SFP+ TX_Fault controlling pin ([eg]pio) */ + cfg_pin = (REG_RD(sc, params->shmem_base + offsetof(struct shmem_region, + dev_info. + port_hw_config + [port]. + e3_cmn_pin_cfg)) & + PORT_HW_CFG_E3_TX_FAULT_MASK) >> + PORT_HW_CFG_E3_TX_FAULT_SHIFT; + + if (elink_get_cfg_pin(sc, cfg_pin, &value)) { + PMD_DRV_LOG(DEBUG, "Failed to read pin 0x%02x", cfg_pin); + return; + } + + led_change = elink_analyze_link_error(params, vars, value, + PHY_SFP_TX_FAULT_FLAG, + LINK_STATUS_SFP_TX_FAULT, 1); + + if (led_change) { + /* Change TX_Fault led, set link status for further syncs */ + uint8_t led_mode; + + if (vars->phy_flags & PHY_SFP_TX_FAULT_FLAG) { + led_mode = MISC_REGISTERS_GPIO_HIGH; + vars->link_status |= LINK_STATUS_SFP_TX_FAULT; + } else { + led_mode = MISC_REGISTERS_GPIO_LOW; + vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT; + } + + /* If module is unapproved, led should be on regardless */ + if (!(phy->flags & ELINK_FLAGS_SFP_NOT_APPROVED)) { + PMD_DRV_LOG(DEBUG, "Change TX_Fault LED: ->%x", + led_mode); + elink_set_e3_module_fault_led(params, led_mode); + } + } +} + +static void elink_kr2_recovery(struct elink_params *params, + struct elink_vars *vars, struct elink_phy *phy) +{ + PMD_DRV_LOG(DEBUG, "KR2 recovery"); + + elink_warpcore_enable_AN_KR2(phy, params, vars); + elink_warpcore_restart_AN_KR(phy, params); +} + +static void elink_check_kr2_wa(struct elink_params *params, + struct elink_vars *vars, struct elink_phy *phy) +{ + struct bnx2x_softc *sc = params->sc; + uint16_t base_page, next_page, not_kr2_device, lane; + int sigdet; + + /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery + * Since some switches tend to reinit the AN process and clear the + * the advertised BP/NP after ~2 seconds causing the KR2 to be disabled + * and recovered many times + */ + if (vars->check_kr2_recovery_cnt > 0) { + vars->check_kr2_recovery_cnt--; + return; + } + + sigdet = elink_warpcore_get_sigdet(phy, params); + if (!sigdet) { + if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { + elink_kr2_recovery(params, vars, phy); + PMD_DRV_LOG(DEBUG, "No sigdet"); + } + return; + } + + lane = elink_get_warpcore_lane(params); + CL22_WR_OVER_CL45(sc, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, lane); + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG, &base_page); + elink_cl45_read(sc, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG2, &next_page); + elink_set_aer_mmd(params, phy); + + /* CL73 has not begun yet */ + if (base_page == 0) { + if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { + elink_kr2_recovery(params, vars, phy); + PMD_DRV_LOG(DEBUG, "No BP"); + } + return; + } + + /* In case NP bit is not set in the BasePage, or it is set, + * but only KX is advertised, declare this link partner as non-KR2 + * device. 
+ */ + not_kr2_device = (((base_page & 0x8000) == 0) || + (((base_page & 0x8000) && + ((next_page & 0xe0) == 0x2)))); + + /* In case KR2 is already disabled, check if we need to re-enable it */ + if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { + if (!not_kr2_device) { + PMD_DRV_LOG(DEBUG, "BP=0x%x, NP=0x%x", base_page, + next_page); + elink_kr2_recovery(params, vars, phy); + } + return; + } + /* KR2 is enabled, but not KR2 device */ + if (not_kr2_device) { + /* Disable KR2 on both lanes */ + PMD_DRV_LOG(DEBUG, "BP=0x%x, NP=0x%x", base_page, next_page); + elink_disable_kr2(params, vars, phy); + /* Restart AN on leading lane */ + elink_warpcore_restart_AN_KR(phy, params); + return; + } +} + +void elink_period_func(struct elink_params *params, struct elink_vars *vars) +{ + uint16_t phy_idx; + struct bnx2x_softc *sc = params->sc; + for (phy_idx = ELINK_INT_PHY; phy_idx < ELINK_MAX_PHYS; phy_idx++) { + if (params->phy[phy_idx].flags & ELINK_FLAGS_TX_ERROR_CHECK) { + elink_set_aer_mmd(params, ¶ms->phy[phy_idx]); + if (elink_check_half_open_conn(params, vars, 1) != + ELINK_STATUS_OK) { + PMD_DRV_LOG(DEBUG, "Fault detection failed"); + } + break; + } + } + + if (CHIP_IS_E3(sc)) { + struct elink_phy *phy = ¶ms->phy[ELINK_INT_PHY]; + elink_set_aer_mmd(params, phy); + if ((phy->supported & ELINK_SUPPORTED_20000baseKR2_Full) && + (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) + elink_check_kr2_wa(params, vars, phy); + elink_check_over_curr(params, vars); + if (vars->rx_tx_asic_rst) + elink_warpcore_config_runtime(phy, params, vars); + + if ((REG_RD(sc, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port]. + default_cfg)) + & PORT_HW_CFG_NET_SERDES_IF_MASK) == + PORT_HW_CFG_NET_SERDES_IF_SFI) { + if (elink_is_sfp_module_plugged(params)) { + elink_sfp_tx_fault_detection(phy, params, vars); + } else if (vars->link_status & LINK_STATUS_SFP_TX_FAULT) { + /* Clean trail, interrupt corrects the leds */ + vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT; + vars->phy_flags &= ~PHY_SFP_TX_FAULT_FLAG; + /* Update link status in the shared memory */ + elink_update_mng(params, vars->link_status); + } + } + } +} + +uint8_t elink_fan_failure_det_req(struct bnx2x_softc *sc, + uint32_t shmem_base, + uint32_t shmem2_base, uint8_t port) +{ + uint8_t phy_index, fan_failure_det_req = 0; + struct elink_phy phy; + for (phy_index = ELINK_EXT_PHY1; phy_index < ELINK_MAX_PHYS; + phy_index++) { + if (elink_populate_phy(sc, phy_index, shmem_base, shmem2_base, + port, &phy) + != ELINK_STATUS_OK) { + PMD_DRV_LOG(DEBUG, "populate phy failed"); + return 0; + } + fan_failure_det_req |= (phy.flags & + ELINK_FLAGS_FAN_FAILURE_DET_REQ); + } + return fan_failure_det_req; +} + +void elink_hw_reset_phy(struct elink_params *params) +{ + uint8_t phy_index; + struct bnx2x_softc *sc = params->sc; + elink_update_mng(params, 0); + elink_bits_dis(sc, NIG_REG_MASK_INTERRUPT_PORT0 + params->port * 4, + (ELINK_NIG_MASK_XGXS0_LINK_STATUS | + ELINK_NIG_MASK_XGXS0_LINK10G | + ELINK_NIG_MASK_SERDES0_LINK_STATUS | + ELINK_NIG_MASK_MI_INT)); + + for (phy_index = ELINK_INT_PHY; phy_index < ELINK_MAX_PHYS; phy_index++) { + if (params->phy[phy_index].hw_reset) { + params->phy[phy_index].hw_reset(¶ms->phy[phy_index], + params); + params->phy[phy_index] = phy_null; + } + } +} + +void elink_init_mod_abs_int(struct bnx2x_softc *sc, struct elink_vars *vars, + __rte_unused uint32_t chip_id, uint32_t shmem_base, + uint32_t shmem2_base, uint8_t port) +{ + uint8_t gpio_num = 0xff, gpio_port = 0xff, 
phy_index; + uint32_t val; + uint32_t offset, aeu_mask, swap_val, swap_override, sync_offset; + if (CHIP_IS_E3(sc)) { + if (elink_get_mod_abs_int_cfg(sc, + shmem_base, + port, + &gpio_num, + &gpio_port) != ELINK_STATUS_OK) + return; + } else { + struct elink_phy phy; + for (phy_index = ELINK_EXT_PHY1; phy_index < ELINK_MAX_PHYS; + phy_index++) { + if (elink_populate_phy(sc, phy_index, shmem_base, + shmem2_base, port, &phy) + != ELINK_STATUS_OK) { + PMD_DRV_LOG(DEBUG, "populate phy failed"); + return; + } + if (phy.type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726) { + gpio_num = MISC_REGISTERS_GPIO_3; + gpio_port = port; + break; + } + } + } + + if (gpio_num == 0xff) + return; + + /* Set GPIO3 to trigger SFP+ module insertion/removal */ + elink_cb_gpio_write(sc, gpio_num, MISC_REGISTERS_GPIO_INPUT_HI_Z, + gpio_port); + + swap_val = REG_RD(sc, NIG_REG_PORT_SWAP); + swap_override = REG_RD(sc, NIG_REG_STRAP_OVERRIDE); + gpio_port ^= (swap_val && swap_override); + + vars->aeu_int_mask = AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 << + (gpio_num + (gpio_port << 2)); + + sync_offset = shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].aeu_int_mask); + REG_WR(sc, sync_offset, vars->aeu_int_mask); + + PMD_DRV_LOG(DEBUG, "Setting MOD_ABS (GPIO%d_P%d) AEU to 0x%x", + gpio_num, gpio_port, vars->aeu_int_mask); + + if (port == 0) + offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; + else + offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0; + + /* Open appropriate AEU for interrupts */ + aeu_mask = REG_RD(sc, offset); + aeu_mask |= vars->aeu_int_mask; + REG_WR(sc, offset, aeu_mask); + + /* Enable the GPIO to trigger interrupt */ + val = REG_RD(sc, MISC_REG_GPIO_EVENT_EN); + val |= 1 << (gpio_num + (gpio_port << 2)); + REG_WR(sc, MISC_REG_GPIO_EVENT_EN, val); +} diff --git a/drivers/net/bnx2x/elink.h b/drivers/net/bnx2x/elink.h new file mode 100644 index 00000000..c4f886a7 --- /dev/null +++ b/drivers/net/bnx2x/elink.h @@ -0,0 +1,588 @@ +/* + * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * + * Eric Davis <edavis@broadcom.com> + * David Christensen <davidch@broadcom.com> + * Gary Zambrano <zambrano@broadcom.com> + * + * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. + * Copyright (c) 2015 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.bnx2x_pmd for copyright and licensing details. + */ + +#ifndef ELINK_H +#define ELINK_H + +#define ELINK_DEBUG + + + + + + +/***********************************************************/ +/* CLC Call backs functions */ +/***********************************************************/ +/* CLC device structure */ +struct bnx2x_softc; + +extern uint32_t elink_cb_reg_read(struct bnx2x_softc *sc, uint32_t reg_addr); +extern void elink_cb_reg_write(struct bnx2x_softc *sc, uint32_t reg_addr, uint32_t val); + +/* mode - 0( LOW ) /1(HIGH)*/ +extern uint8_t elink_cb_gpio_write(struct bnx2x_softc *sc, + uint16_t gpio_num, + uint8_t mode, uint8_t port); +extern uint8_t elink_cb_gpio_mult_write(struct bnx2x_softc *sc, + uint8_t pins, + uint8_t mode); + +extern uint32_t elink_cb_gpio_read(struct bnx2x_softc *sc, uint16_t gpio_num, uint8_t port); +extern uint8_t elink_cb_gpio_int_write(struct bnx2x_softc *sc, + uint16_t gpio_num, + uint8_t mode, uint8_t port); + +extern uint32_t elink_cb_fw_command(struct bnx2x_softc *sc, uint32_t command, uint32_t param); + +/* This function is called every 1024 bytes downloading of phy firmware. 
+Driver can use it to print to screen indication for download progress */ +extern void elink_cb_download_progress(struct bnx2x_softc *sc, uint32_t cur, uint32_t total); + +/* Each log type has its own parameters */ +typedef enum elink_log_id { + ELINK_LOG_ID_UNQUAL_IO_MODULE = 0, /* uint8_t port, const char* vendor_name, const char* vendor_pn */ + ELINK_LOG_ID_OVER_CURRENT = 1, /* uint8_t port */ + ELINK_LOG_ID_PHY_UNINITIALIZED = 2, /* uint8_t port */ + ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT= 3, /* No params */ + ELINK_LOG_ID_NON_10G_MODULE = 4, /* uint8_t port */ +}elink_log_id_t; + +typedef enum elink_status { + ELINK_STATUS_OK = 0, + ELINK_STATUS_ERROR, + ELINK_STATUS_TIMEOUT, + ELINK_STATUS_NO_LINK, + ELINK_STATUS_INVALID_IMAGE, + ELINK_OP_NOT_SUPPORTED = 122 +} elink_status_t; +extern void elink_cb_event_log(struct bnx2x_softc *sc, const elink_log_id_t log_id, ...); +extern void elink_cb_load_warpcore_microcode(void); + +extern void elink_cb_notify_link_changed(struct bnx2x_softc *sc); + +#define ELINK_EVENT_LOG_LEVEL_ERROR 1 +#define ELINK_EVENT_LOG_LEVEL_WARNING 2 +#define ELINK_EVENT_ID_SFP_UNQUALIFIED_MODULE 1 +#define ELINK_EVENT_ID_SFP_POWER_FAULT 2 + +#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) +/* Debug prints */ + +/***********************************************************/ +/* Defines */ +/***********************************************************/ +#define ELINK_DEFAULT_PHY_DEV_ADDR 3 +#define ELINK_E2_DEFAULT_PHY_DEV_ADDR 5 + + +#define DUPLEX_FULL 1 +#define DUPLEX_HALF 2 + +#define ELINK_FLOW_CTRL_AUTO PORT_FEATURE_FLOW_CONTROL_AUTO +#define ELINK_FLOW_CTRL_TX PORT_FEATURE_FLOW_CONTROL_TX +#define ELINK_FLOW_CTRL_RX PORT_FEATURE_FLOW_CONTROL_RX +#define ELINK_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH +#define ELINK_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE + +#define ELINK_NET_SERDES_IF_XFI 1 +#define ELINK_NET_SERDES_IF_SFI 2 +#define ELINK_NET_SERDES_IF_KR 3 +#define ELINK_NET_SERDES_IF_DXGXS 4 + +#define ELINK_SPEED_AUTO_NEG 0 +#define ELINK_SPEED_10 10 +#define ELINK_SPEED_100 100 +#define ELINK_SPEED_1000 1000 +#define ELINK_SPEED_2500 2500 +#define ELINK_SPEED_10000 10000 +#define ELINK_SPEED_20000 20000 + +#define ELINK_I2C_DEV_ADDR_A0 0xa0 +#define ELINK_I2C_DEV_ADDR_A2 0xa2 + +#define ELINK_SFP_EEPROM_PAGE_SIZE 16 +#define ELINK_SFP_EEPROM_VENDOR_NAME_ADDR 0x14 +#define ELINK_SFP_EEPROM_VENDOR_NAME_SIZE 16 +#define ELINK_SFP_EEPROM_VENDOR_OUI_ADDR 0x25 +#define ELINK_SFP_EEPROM_VENDOR_OUI_SIZE 3 +#define ELINK_SFP_EEPROM_PART_NO_ADDR 0x28 +#define ELINK_SFP_EEPROM_PART_NO_SIZE 16 +#define ELINK_SFP_EEPROM_REVISION_ADDR 0x38 +#define ELINK_SFP_EEPROM_REVISION_SIZE 4 +#define ELINK_SFP_EEPROM_SERIAL_ADDR 0x44 +#define ELINK_SFP_EEPROM_SERIAL_SIZE 16 +#define ELINK_SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */ +#define ELINK_SFP_EEPROM_DATE_SIZE 6 +#define ELINK_SFP_EEPROM_DIAG_TYPE_ADDR 0x5c +#define ELINK_SFP_EEPROM_DIAG_TYPE_SIZE 1 +#define ELINK_SFP_EEPROM_DIAG_ADDR_CHANGE_REQ (1<<2) +#define ELINK_SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e +#define ELINK_SFP_EEPROM_SFF_8472_COMP_SIZE 1 + +#define ELINK_SFP_EEPROM_A2_CHECKSUM_RANGE 0x5e +#define ELINK_SFP_EEPROM_A2_CC_DMI_ADDR 0x5f + +#define ELINK_PWR_FLT_ERR_MSG_LEN 250 + +#define ELINK_XGXS_EXT_PHY_TYPE(ext_phy_config) \ + ((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) +#define ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config) \ + (((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \ + PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT) +#define ELINK_SERDES_EXT_PHY_TYPE(ext_phy_config) \ + ((ext_phy_config) & 
PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK) + +/* Single Media Direct board is the plain 577xx board with CX4/RJ45 jacks */ +#define ELINK_SINGLE_MEDIA_DIRECT(params) (params->num_phys == 1) +/* Single Media board contains single external phy */ +#define ELINK_SINGLE_MEDIA(params) (params->num_phys == 2) +/* Dual Media board contains two external phy with different media */ +#define ELINK_DUAL_MEDIA(params) (params->num_phys == 3) + +#define ELINK_FW_PARAM_PHY_ADDR_MASK 0x000000FF +#define ELINK_FW_PARAM_PHY_TYPE_MASK 0x0000FF00 +#define ELINK_FW_PARAM_MDIO_CTRL_MASK 0xFFFF0000 +#define ELINK_FW_PARAM_MDIO_CTRL_OFFSET 16 +#define ELINK_FW_PARAM_PHY_ADDR(fw_param) (fw_param & \ + ELINK_FW_PARAM_PHY_ADDR_MASK) +#define ELINK_FW_PARAM_PHY_TYPE(fw_param) (fw_param & \ + ELINK_FW_PARAM_PHY_TYPE_MASK) +#define ELINK_FW_PARAM_MDIO_CTRL(fw_param) ((fw_param & \ + ELINK_FW_PARAM_MDIO_CTRL_MASK) >> \ + ELINK_FW_PARAM_MDIO_CTRL_OFFSET) +#define ELINK_FW_PARAM_SET(phy_addr, phy_type, mdio_access) \ + (phy_addr | phy_type | mdio_access << ELINK_FW_PARAM_MDIO_CTRL_OFFSET) + + +#define ELINK_PFC_BRB_FULL_LB_XOFF_THRESHOLD 170 +#define ELINK_PFC_BRB_FULL_LB_XON_THRESHOLD 250 + +#define ELINK_MAXVAL(a, b) (((a) > (b)) ? (a) : (b)) + +#define ELINK_BMAC_CONTROL_RX_ENABLE 2 +/***********************************************************/ +/* Structs */ +/***********************************************************/ +#define ELINK_INT_PHY 0 +#define ELINK_EXT_PHY1 1 +#define ELINK_EXT_PHY2 2 +#define ELINK_MAX_PHYS 3 + +/* Same configuration is shared between the XGXS and the first external phy */ +#define ELINK_LINK_CONFIG_SIZE (ELINK_MAX_PHYS - 1) +#define ELINK_LINK_CONFIG_IDX(_phy_idx) ((_phy_idx == ELINK_INT_PHY) ? \ + 0 : (_phy_idx - 1)) +/***********************************************************/ +/* elink_phy struct */ +/* Defines the required arguments and function per phy */ +/***********************************************************/ +struct elink_vars; +struct elink_params; +struct elink_phy; + +typedef uint8_t (*config_init_t)(struct elink_phy *phy, struct elink_params *params, + struct elink_vars *vars); +typedef uint8_t (*read_status_t)(struct elink_phy *phy, struct elink_params *params, + struct elink_vars *vars); +typedef void (*link_reset_t)(struct elink_phy *phy, + struct elink_params *params); +typedef void (*config_loopback_t)(struct elink_phy *phy, + struct elink_params *params); +typedef uint8_t (*format_fw_ver_t)(uint32_t raw, uint8_t *str, uint16_t *len); +typedef void (*hw_reset_t)(struct elink_phy *phy, struct elink_params *params); +typedef void (*set_link_led_t)(struct elink_phy *phy, + struct elink_params *params, uint8_t mode); +typedef void (*phy_specific_func_t)(struct elink_phy *phy, + struct elink_params *params, uint32_t action); +struct elink_reg_set { + uint8_t devad; + uint16_t reg; + uint16_t val; +}; + +struct elink_phy { + uint32_t type; + + /* Loaded during init */ + uint8_t addr; + uint8_t def_md_devad; + uint16_t flags; + /* No Over-Current detection */ +#define ELINK_FLAGS_NOC (1<<1) + /* Fan failure detection required */ +#define ELINK_FLAGS_FAN_FAILURE_DET_REQ (1<<2) + /* Initialize first the XGXS and only then the phy itself */ +#define ELINK_FLAGS_INIT_XGXS_FIRST (1<<3) +#define ELINK_FLAGS_WC_DUAL_MODE (1<<4) +#define ELINK_FLAGS_4_PORT_MODE (1<<5) +#define ELINK_FLAGS_REARM_LATCH_SIGNAL (1<<6) +#define ELINK_FLAGS_SFP_NOT_APPROVED (1<<7) +#define ELINK_FLAGS_MDC_MDIO_WA (1<<8) +#define ELINK_FLAGS_DUMMY_READ (1<<9) +#define ELINK_FLAGS_MDC_MDIO_WA_B0 (1<<10) 
+#define ELINK_FLAGS_SFP_MODULE_PLUGGED_IN_WC (1<<11) +#define ELINK_FLAGS_TX_ERROR_CHECK (1<<12) +#define ELINK_FLAGS_EEE (1<<13) +#define ELINK_FLAGS_TEMPERATURE (1<<14) +#define ELINK_FLAGS_MDC_MDIO_WA_G (1<<15) + + /* preemphasis values for the rx side */ + uint16_t rx_preemphasis[4]; + + /* preemphasis values for the tx side */ + uint16_t tx_preemphasis[4]; + + /* EMAC address for access MDIO */ + uint32_t mdio_ctrl; + + uint32_t supported; +#define ELINK_SUPPORTED_10baseT_Half (1<<0) +#define ELINK_SUPPORTED_10baseT_Full (1<<1) +#define ELINK_SUPPORTED_100baseT_Half (1<<2) +#define ELINK_SUPPORTED_100baseT_Full (1<<3) +#define ELINK_SUPPORTED_1000baseT_Full (1<<4) +#define ELINK_SUPPORTED_2500baseX_Full (1<<5) +#define ELINK_SUPPORTED_10000baseT_Full (1<<6) +#define ELINK_SUPPORTED_TP (1<<7) +#define ELINK_SUPPORTED_FIBRE (1<<8) +#define ELINK_SUPPORTED_Autoneg (1<<9) +#define ELINK_SUPPORTED_Pause (1<<10) +#define ELINK_SUPPORTED_Asym_Pause (1<<11) +#define ELINK_SUPPORTED_20000baseMLD2_Full (1<<21) +#define ELINK_SUPPORTED_20000baseKR2_Full (1<<22) + + uint32_t media_type; +#define ELINK_ETH_PHY_UNSPECIFIED 0x0 +#define ELINK_ETH_PHY_SFPP_10G_FIBER 0x1 +#define ELINK_ETH_PHY_XFP_FIBER 0x2 +#define ELINK_ETH_PHY_DA_TWINAX 0x3 +#define ELINK_ETH_PHY_BASE_T 0x4 +#define ELINK_ETH_PHY_SFP_1G_FIBER 0x5 +#define ELINK_ETH_PHY_KR 0xf0 +#define ELINK_ETH_PHY_CX4 0xf1 +#define ELINK_ETH_PHY_NOT_PRESENT 0xff + + /* The address in which version is located*/ + uint32_t ver_addr; + + uint16_t req_flow_ctrl; + + uint16_t req_line_speed; + + uint32_t speed_cap_mask; + + uint16_t req_duplex; + uint16_t rsrv; + /* Called per phy/port init, and it configures LASI, speed, autoneg, + duplex, flow control negotiation, etc. */ + config_init_t config_init; + + /* Called due to interrupt. It determines the link, speed */ + read_status_t read_status; + + /* Called when driver is unloading. 
Should reset the phy */ + link_reset_t link_reset; + + /* Set the loopback configuration for the phy */ + config_loopback_t config_loopback; + + /* Format the given raw number into str up to len */ + format_fw_ver_t format_fw_ver; + + /* Reset the phy (both ports) */ + hw_reset_t hw_reset; + + /* Set link led mode (on/off/oper)*/ + set_link_led_t set_link_led; + + /* PHY Specific tasks */ + phy_specific_func_t phy_specific_func; +#define ELINK_DISABLE_TX 1 +#define ELINK_ENABLE_TX 2 +#define ELINK_PHY_INIT 3 +}; + +/* Inputs parameters to the CLC */ +struct elink_params { + + uint8_t port; + + /* Default / User Configuration */ + uint8_t loopback_mode; +#define ELINK_LOOPBACK_NONE 0 +#define ELINK_LOOPBACK_EMAC 1 +#define ELINK_LOOPBACK_BMAC 2 +#define ELINK_LOOPBACK_XGXS 3 +#define ELINK_LOOPBACK_EXT_PHY 4 +#define ELINK_LOOPBACK_EXT 5 +#define ELINK_LOOPBACK_UMAC 6 +#define ELINK_LOOPBACK_XMAC 7 + + /* Device parameters */ + uint8_t mac_addr[6]; + + uint16_t req_duplex[ELINK_LINK_CONFIG_SIZE]; + uint16_t req_flow_ctrl[ELINK_LINK_CONFIG_SIZE]; + + uint16_t req_line_speed[ELINK_LINK_CONFIG_SIZE]; /* Also determine AutoNeg */ + + /* shmem parameters */ + uint32_t shmem_base; + uint32_t shmem2_base; + uint32_t speed_cap_mask[ELINK_LINK_CONFIG_SIZE]; + uint32_t switch_cfg; +#define ELINK_SWITCH_CFG_1G PORT_FEATURE_CON_SWITCH_1G_SWITCH +#define ELINK_SWITCH_CFG_10G PORT_FEATURE_CON_SWITCH_10G_SWITCH +#define ELINK_SWITCH_CFG_AUTO_DETECT PORT_FEATURE_CON_SWITCH_AUTO_DETECT + + uint32_t lane_config; + + /* Phy register parameter */ + uint32_t chip_id; + + /* features */ + uint32_t feature_config_flags; +#define ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0) +#define ELINK_FEATURE_CONFIG_PFC_ENABLED (1<<1) +#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2) +#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3) +#define ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC (1<<4) +#define ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC (1<<5) +#define ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC (1<<6) +#define ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC (1<<7) +#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_AFEX (1<<8) +#define ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9) +#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10) +#define ELINK_FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET (1<<11) +#define ELINK_FEATURE_CONFIG_IEEE_PHY_TEST (1<<12) +#define ELINK_FEATURE_CONFIG_MT_SUPPORT (1<<13) +#define ELINK_FEATURE_CONFIG_BOOT_FROM_SAN (1<<14) + + /* Will be populated during common init */ + struct elink_phy phy[ELINK_MAX_PHYS]; + + /* Will be populated during common init */ + uint8_t num_phys; + + uint8_t rsrv; + + /* Used to configure the EEE Tx LPI timer, has several modes of + * operation, according to bits 29:28 - + * 2'b00: Timer will be configured by nvram, output will be the value + * from nvram. + * 2'b01: Timer will be configured by nvram, output will be in + * microseconds. + * 2'b10: bits 1:0 contain an nvram value which will be used instead + * of the one located in the nvram. Output will be that value. + * 2'b11: bits 19:0 contain the idle timer in microseconds; output + * will be in microseconds. + * Bits 31:30 should be 2'b11 in order for EEE to be enabled. 
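+	 * As an illustration only (the 5000us value is an arbitrary example,
+	 * not a recommended setting), a caller that wants EEE enabled with
+	 * the idle timer taken from bits 19:0 would set bits 31:28 and the
+	 * timer field using the ELINK_EEE_MODE_* defines below:
+	 *   eee_mode = ELINK_EEE_MODE_ADV_LPI | ELINK_EEE_MODE_ENABLE_LPI |
+	 *              ELINK_EEE_MODE_OVERRIDE_NVRAM |
+	 *              ELINK_EEE_MODE_OUTPUT_TIME |
+	 *              (5000 & ELINK_EEE_MODE_TIMER_MASK);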
+ */ + uint32_t eee_mode; +#define ELINK_EEE_MODE_NVRAM_BALANCED_TIME (0xa00) +#define ELINK_EEE_MODE_NVRAM_AGGRESSIVE_TIME (0x100) +#define ELINK_EEE_MODE_NVRAM_LATENCY_TIME (0x6000) +#define ELINK_EEE_MODE_NVRAM_MASK (0x3) +#define ELINK_EEE_MODE_TIMER_MASK (0xfffff) +#define ELINK_EEE_MODE_OUTPUT_TIME (1<<28) +#define ELINK_EEE_MODE_OVERRIDE_NVRAM (1<<29) +#define ELINK_EEE_MODE_ENABLE_LPI (1<<30) +#define ELINK_EEE_MODE_ADV_LPI (1<<31) + + uint16_t hw_led_mode; /* part of the hw_config read from the shmem */ + uint32_t multi_phy_config; + + /* Device pointer passed to all callback functions */ + struct bnx2x_softc *sc; + uint16_t req_fc_auto_adv; /* Should be set to TX / BOTH when + req_flow_ctrl is set to AUTO */ + uint16_t link_flags; +#define ELINK_LINK_FLAGS_INT_DISABLED (1<<0) +#define ELINK_PHY_INITIALIZED (1<<1) + uint32_t lfa_base; +}; + +/* Output parameters */ +struct elink_vars { + uint8_t phy_flags; +#define PHY_XGXS_FLAG (1<<0) +#define PHY_SGMII_FLAG (1<<1) +#define PHY_PHYSICAL_LINK_FLAG (1<<2) +#define PHY_HALF_OPEN_CONN_FLAG (1<<3) +#define PHY_OVER_CURRENT_FLAG (1<<4) +#define PHY_SFP_TX_FAULT_FLAG (1<<5) + + uint8_t mac_type; +#define ELINK_MAC_TYPE_NONE 0 +#define ELINK_MAC_TYPE_EMAC 1 +#define ELINK_MAC_TYPE_BMAC 2 +#define ELINK_MAC_TYPE_UMAC 3 +#define ELINK_MAC_TYPE_XMAC 4 + + uint8_t phy_link_up; /* internal phy link indication */ + uint8_t link_up; + + uint16_t line_speed; + uint16_t duplex; + + uint16_t flow_ctrl; + uint16_t ieee_fc; + + /* The same definitions as the shmem parameter */ + uint32_t link_status; + uint32_t eee_status; + uint8_t fault_detected; + uint8_t check_kr2_recovery_cnt; +#define ELINK_CHECK_KR2_RECOVERY_CNT 5 + uint16_t periodic_flags; +#define ELINK_PERIODIC_FLAGS_LINK_EVENT 0x0001 + + uint32_t aeu_int_mask; + uint8_t rx_tx_asic_rst; + uint8_t turn_to_run_wc_rt; + uint16_t rsrv2; + /* The same definitions as the shmem2 parameter */ + uint32_t link_attr_sync; +}; + +/***********************************************************/ +/* Functions */ +/***********************************************************/ +elink_status_t elink_phy_init(struct elink_params *params, struct elink_vars *vars); + +/* Reset the link. 
Should be called when driver or interface goes down + Before calling phy firmware upgrade, the reset_ext_phy should be set + to 0 */ +elink_status_t elink_lfa_reset(struct elink_params *params, struct elink_vars *vars); +/* elink_link_update should be called upon link interrupt */ +elink_status_t elink_link_update(struct elink_params *params, struct elink_vars *vars); + +/* Reads the link_status from the shmem, + and update the link vars accordingly */ +void elink_link_status_update(struct elink_params *input, + struct elink_vars *output); + +/* Set/Unset the led + Basically, the CLC takes care of the led for the link, but in case one needs + to set/unset the led unnaturally, set the "mode" to ELINK_LED_MODE_OPER to + blink the led, and ELINK_LED_MODE_OFF to set the led off.*/ +elink_status_t elink_set_led(struct elink_params *params, + struct elink_vars *vars, uint8_t mode, uint32_t speed); +#define ELINK_LED_MODE_OFF 0 +#define ELINK_LED_MODE_ON 1 +#define ELINK_LED_MODE_OPER 2 +#define ELINK_LED_MODE_FRONT_PANEL_OFF 3 + +/* elink_handle_module_detect_int should be called upon module detection + interrupt */ +void elink_handle_module_detect_int(struct elink_params *params); + +/* One-time initialization for external phy after power up */ +elink_status_t elink_common_init_phy(struct bnx2x_softc *sc, uint32_t shmem_base_path[], + uint32_t shmem2_base_path[], uint32_t chip_id, uint8_t one_port_enabled); + +void elink_hw_reset_phy(struct elink_params *params); + +/* Check swap bit and adjust PHY order */ +uint32_t elink_phy_selection(struct elink_params *params); + +/* Probe the phys on board, and populate them in "params" */ +elink_status_t elink_phy_probe(struct elink_params *params); + +/* Checks if fan failure detection is required on one of the phys on board */ +uint8_t elink_fan_failure_det_req(struct bnx2x_softc *sc, uint32_t shmem_base, + uint32_t shmem2_base, uint8_t port); + +/* Open / close the gate between the NIG and the BRB */ +void elink_set_rx_filter(struct elink_params *params, uint8_t en); + +/* DCBX structs */ + +/* Number of maximum COS per chip */ +#define ELINK_DCBX_E2E3_MAX_NUM_COS (2) +#define ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0 (6) +#define ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1 (3) +#define ELINK_DCBX_E3B0_MAX_NUM_COS ( \ + ELINK_MAXVAL(ELINK_DCBX_E3B0_MAX_NUM_COS_PORT0, \ + ELINK_DCBX_E3B0_MAX_NUM_COS_PORT1)) + +#define ELINK_DCBX_MAX_NUM_COS ( \ + ELINK_MAXVAL(ELINK_DCBX_E3B0_MAX_NUM_COS, \ + ELINK_DCBX_E2E3_MAX_NUM_COS)) + +/* PFC port configuration params */ +struct elink_nig_brb_pfc_port_params { + /* NIG */ + uint32_t pause_enable; + uint32_t llfc_out_en; + uint32_t llfc_enable; + uint32_t pkt_priority_to_cos; + uint8_t num_of_rx_cos_priority_mask; + uint32_t rx_cos_priority_mask[ELINK_DCBX_MAX_NUM_COS]; + uint32_t llfc_high_priority_classes; + uint32_t llfc_low_priority_classes; +}; + + +/* ETS port configuration params */ +struct elink_ets_bw_params { + uint8_t bw; +}; + +struct elink_ets_sp_params { + /** + * valid values are 0 - 5. 0 is highest strict priority. + * There can't be two COS's with the same pri. 
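+	 * Purely as an illustrative sketch (the values are assumptions, not
+	 * taken from the driver), a strict-priority entry in the cos[] array
+	 * of struct elink_ets_params (defined below) would be expressed as:
+	 *   cos[0].state = elink_cos_state_strict;
+	 *   cos[0].params.sp_params.pri = 0;
+	 * with pri 0 being the highest strict priority, as noted above.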
+ */ + uint8_t pri; +}; + +enum elink_cos_state { + elink_cos_state_strict = 0, + elink_cos_state_bw = 1, +}; + +struct elink_ets_cos_params { + enum elink_cos_state state ; + union { + struct elink_ets_bw_params bw_params; + struct elink_ets_sp_params sp_params; + } params; +}; + +struct elink_ets_params { + uint8_t num_of_cos; /* Number of valid COS entries*/ + struct elink_ets_cos_params cos[ELINK_DCBX_MAX_NUM_COS]; +}; + +/* Used to update the PFC attributes in EMAC, BMAC, NIG and BRB + * when link is already up + */ +elink_status_t elink_update_pfc(struct elink_params *params, + struct elink_vars *vars, + struct elink_nig_brb_pfc_port_params *pfc_params); + +void elink_init_mod_abs_int(struct bnx2x_softc *sc, struct elink_vars *vars, + uint32_t chip_id, uint32_t shmem_base, uint32_t shmem2_base, + uint8_t port); + +void elink_period_func(struct elink_params *params, struct elink_vars *vars); + +void elink_enable_pmd_tx(struct elink_params *params); + + + +#endif /* ELINK_H */ diff --git a/drivers/net/bnx2x/rte_pmd_bnx2x_version.map b/drivers/net/bnx2x/rte_pmd_bnx2x_version.map new file mode 100644 index 00000000..bd8138a0 --- /dev/null +++ b/drivers/net/bnx2x/rte_pmd_bnx2x_version.map @@ -0,0 +1,4 @@ +DPDK_2.1 { + + local: *; +}; diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile new file mode 100644 index 00000000..10c794c4 --- /dev/null +++ b/drivers/net/bonding/Makefile @@ -0,0 +1,68 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2014 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_bond.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +EXPORT_MAP := rte_eth_bond_version.map + +LIBABIVER := 1 + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_api.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_pmd.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_args.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_8023ad.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_alb.c + +# +# Export include files +# +SYMLINK-y-include += rte_eth_bond.h +SYMLINK-y-include += rte_eth_bond_8023ad.h + +# this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_eal +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_kvargs +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_cmdline + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/bonding/rte_eth_bond.h b/drivers/net/bonding/rte_eth_bond.h new file mode 100644 index 00000000..8efbf071 --- /dev/null +++ b/drivers/net/bonding/rte_eth_bond.h @@ -0,0 +1,377 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _RTE_ETH_BOND_H_ +#define _RTE_ETH_BOND_H_ + +/** + * @file rte_eth_bond.h + * + * RTE Link Bonding Ethernet Device + * Link Bonding for 1GbE and 10GbE ports to allow the aggregation of multiple + * (slave) NICs into a single logical interface. The bonded device processes + * these interfaces based on the mode of operation specified and supported. + * This implementation supports 4 modes of operation round robin, active backup + * balance and broadcast. 
Providing redundant links, fault tolerance and/or + * load balancing of network ports + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include <rte_ether.h> + +/* Supported modes of operation of link bonding library */ + +#define BONDING_MODE_ROUND_ROBIN (0) +/**< Round Robin (Mode 0). + * In this mode all transmitted packets will be balanced equally across all + * active slaves of the bonded in a round robin fashion. */ +#define BONDING_MODE_ACTIVE_BACKUP (1) +/**< Active Backup (Mode 1). + * In this mode all packets transmitted will be transmitted on the primary + * slave until such point as the primary slave is no longer available and then + * transmitted packets will be sent on the next available slaves. The primary + * slave can be defined by the user but defaults to the first active slave + * available if not specified. */ +#define BONDING_MODE_BALANCE (2) +/**< Balance (Mode 2). + * In this mode all packets transmitted will be balanced across the available + * slaves using one of three available transmit policies - l2, l2+3 or l3+4. + * See BALANCE_XMIT_POLICY macros definitions for further details on transmit + * policies. */ +#define BONDING_MODE_BROADCAST (3) +/**< Broadcast (Mode 3). + * In this mode all transmitted packets will be transmitted on all available + * active slaves of the bonded. */ +#define BONDING_MODE_8023AD (4) +/**< 802.3AD (Mode 4). + * + * This mode provides auto negotiation/configuration + * of peers and well as link status changes monitoring using out of band + * LACP (link aggregation control protocol) messages. For further details of + * LACP specification see the IEEE 802.3ad/802.1AX standards. It is also + * described here + * https://www.kernel.org/doc/Documentation/networking/bonding.txt. + * + * Important Usage Notes: + * - for LACP mode to work the rx/tx burst functions must be invoked + * at least once every 100ms, otherwise the out-of-band LACP messages will not + * be handled with the expected latency and this may cause the link status to be + * incorrectly marked as down or failure to correctly negotiate with peers. + * - For optimal performance during initial handshaking the array of mbufs provided + * to rx_burst should be at least 2 times the slave count size. + * + */ +#define BONDING_MODE_TLB (5) +/**< Adaptive TLB (Mode 5) + * This mode provides an adaptive transmit load balancing. It dynamically + * changes the transmitting slave, according to the computed load. Statistics + * are collected in 100ms intervals and scheduled every 10ms */ +#define BONDING_MODE_ALB (6) +/**< Adaptive Load Balancing (Mode 6) + * This mode includes adaptive TLB and receive load balancing (RLB). In RLB the + * bonding driver intercepts ARP replies send by local system and overwrites its + * source MAC address, so that different peers send data to the server on + * different slave interfaces. When local system sends ARP request, it saves IP + * information from it. When ARP reply from that peer is received, its MAC is + * stored, one of slave MACs assigned and ARP reply send to that peer. + */ + +/* Balance Mode Transmit Policies */ +#define BALANCE_XMIT_POLICY_LAYER2 (0) +/**< Layer 2 (Ethernet MAC) */ +#define BALANCE_XMIT_POLICY_LAYER23 (1) +/**< Layer 2+3 (Ethernet MAC + IP Addresses) transmit load balancing */ +#define BALANCE_XMIT_POLICY_LAYER34 (2) +/**< Layer 3+4 (IP Addresses + UDP Ports) transmit load balancing */ + +/** + * Create a bonded rte_eth_dev device + * + * @param name Name of new link bonding device. 
+ * @param mode Mode to initialize bonding device in. + * @param socket_id Socket Id on which to allocate eth_dev resources. + * + * @return + * Port Id of created rte_eth_dev on success, negative value otherwise + */ +int +rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id); + +/** + * Free a bonded rte_eth_dev device + * + * @param name Name of the link bonding device. + * + * @return + * 0 on success, negative value otherwise + */ +int +rte_eth_bond_free(const char *name); + +/** + * Add a rte_eth_dev device as a slave to the bonded device + * + * @param bonded_port_id Port ID of bonded device. + * @param slave_port_id Port ID of slave device. + * + * @return + * 0 on success, negative value otherwise + */ +int +rte_eth_bond_slave_add(uint8_t bonded_port_id, uint8_t slave_port_id); + +/** + * Remove a slave rte_eth_dev device from the bonded device + * + * @param bonded_port_id Port ID of bonded device. + * @param slave_port_id Port ID of slave device. + * + * @return + * 0 on success, negative value otherwise + */ +int +rte_eth_bond_slave_remove(uint8_t bonded_port_id, uint8_t slave_port_id); + +/** + * Set link bonding mode of bonded device + * + * @param bonded_port_id Port ID of bonded device. + * @param mode Bonding mode to set + * + * @return + * 0 on success, negative value otherwise + */ +int +rte_eth_bond_mode_set(uint8_t bonded_port_id, uint8_t mode); + +/** + * Get link bonding mode of bonded device + * + * @param bonded_port_id Port ID of bonded device. + * + * @return + * link bonding mode on success, negative value otherwise + */ +int +rte_eth_bond_mode_get(uint8_t bonded_port_id); + +/** + * Set slave rte_eth_dev as primary slave of bonded device + * + * @param bonded_port_id Port ID of bonded device. + * @param slave_port_id Port ID of slave device. + * + * @return + * 0 on success, negative value otherwise + */ +int +rte_eth_bond_primary_set(uint8_t bonded_port_id, uint8_t slave_port_id); + +/** + * Get primary slave of bonded device + * + * @param bonded_port_id Port ID of bonded device. + * + * @return + * Port Id of primary slave on success, -1 on failure + */ +int +rte_eth_bond_primary_get(uint8_t bonded_port_id); + +/** + * Populate an array with list of the slaves port id's of the bonded device + * + * @param bonded_port_id Port ID of bonded eth_dev to interrogate + * @param slaves Array to be populated with the current active slaves + * @param len Length of slaves array + * + * @return + * Number of slaves associated with bonded device on success, + * negative value otherwise + */ +int +rte_eth_bond_slaves_get(uint8_t bonded_port_id, uint8_t slaves[], uint8_t len); + +/** + * Populate an array with list of the active slaves port id's of the bonded + * device. + * + * @param bonded_port_id Port ID of bonded eth_dev to interrogate + * @param slaves Array to be populated with the current active slaves + * @param len Length of slaves array + * + * @return + * Number of active slaves associated with bonded device on success, + * negative value otherwise + */ +int +rte_eth_bond_active_slaves_get(uint8_t bonded_port_id, uint8_t slaves[], + uint8_t len); + +/** + * Set explicit MAC address to use on bonded device and it's slaves. + * + * @param bonded_port_id Port ID of bonded device. 
+ * @param mac_addr MAC Address to use on bonded device overriding + * slaves MAC addresses + * + * @return + * 0 on success, negative value otherwise + */ +int +rte_eth_bond_mac_address_set(uint8_t bonded_port_id, + struct ether_addr *mac_addr); + +/** + * Reset bonded device to use MAC from primary slave on bonded device and it's + * slaves. + * + * @param bonded_port_id Port ID of bonded device. + * + * @return + * 0 on success, negative value otherwise + */ +int +rte_eth_bond_mac_address_reset(uint8_t bonded_port_id); + +/** + * Set the transmit policy for bonded device to use when it is operating in + * balance mode, this parameter is otherwise ignored in other modes of + * operation. + * + * @param bonded_port_id Port ID of bonded device. + * @param policy Balance mode transmission policy. + * + * @return + * 0 on success, negative value otherwise. + */ +int +rte_eth_bond_xmit_policy_set(uint8_t bonded_port_id, uint8_t policy); + +/** + * Get the transmit policy set on bonded device for balance mode operation + * + * @param bonded_port_id Port ID of bonded device. + * + * @return + * Balance transmit policy on success, negative value otherwise. + */ +int +rte_eth_bond_xmit_policy_get(uint8_t bonded_port_id); + +/** + * Set the link monitoring frequency (in ms) for monitoring the link status of + * slave devices + * + * @param bonded_port_id Port ID of bonded device. + * @param internal_ms Monitoring interval in milliseconds + * + * @return + * 0 on success, negative value otherwise. + */ + +int +rte_eth_bond_link_monitoring_set(uint8_t bonded_port_id, uint32_t internal_ms); + +/** + * Get the current link monitoring frequency (in ms) for monitoring of the link + * status of slave devices + * + * @param bonded_port_id Port ID of bonded device. + * + * @return + * Monitoring interval on success, negative value otherwise. + */ +int +rte_eth_bond_link_monitoring_get(uint8_t bonded_port_id); + + +/** + * Set the period in milliseconds for delaying the disabling of a bonded link + * when the link down status has been detected + * + * @param bonded_port_id Port ID of bonded device. + * @param delay_ms Delay period in milliseconds. + * + * @return + * 0 on success, negative value otherwise. + */ +int +rte_eth_bond_link_down_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms); + +/** + * Get the period in milliseconds set for delaying the disabling of a bonded + * link when the link down status has been detected + * + * @param bonded_port_id Port ID of bonded device. + * + * @return + * Delay period on success, negative value otherwise. + */ +int +rte_eth_bond_link_down_prop_delay_get(uint8_t bonded_port_id); + +/** + * Set the period in milliseconds for delaying the enabling of a bonded link + * when the link up status has been detected + * + * @param bonded_port_id Port ID of bonded device. + * @param delay_ms Delay period in milliseconds. + * + * @return + * 0 on success, negative value otherwise. + */ +int +rte_eth_bond_link_up_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms); + +/** + * Get the period in milliseconds set for delaying the enabling of a bonded + * link when the link up status has been detected + * + * @param bonded_port_id Port ID of bonded device. + * + * @return + * Delay period on success, negative value otherwise. 
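[Editorial illustration, not part of the original header] The sketch below shows how the calls declared above fit together to build a two-slave bonded port in balance mode. The device name "bond0", the slave port ids 0 and 1 and the socket id are hypothetical, and the usual rte_eth_dev configure/queue-setup/start steps are omitted.

#include <rte_eth_bond.h>

static int
bonded_port_setup_example(void)
{
	/* Create the bonded device in balance mode on NUMA socket 0. */
	int bond_port = rte_eth_bond_create("bond0", BONDING_MODE_BALANCE, 0);

	if (bond_port < 0)
		return bond_port;

	/* Attach two (hypothetical) physical ports as slaves. */
	if (rte_eth_bond_slave_add(bond_port, 0) != 0 ||
			rte_eth_bond_slave_add(bond_port, 1) != 0)
		return -1;

	/* Hash transmitted traffic on IP addresses and UDP ports. */
	rte_eth_bond_xmit_policy_set(bond_port, BALANCE_XMIT_POLICY_LAYER34);

	/* Poll slave link status every 100 ms. */
	rte_eth_bond_link_monitoring_set(bond_port, 100);

	return bond_port;
}

If BONDING_MODE_8023AD were selected instead, the application would additionally have to keep invoking the bonded port's rx/tx burst functions at least once every 100 ms, as the mode 4 notes earlier in this header require.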
+ */ +int +rte_eth_bond_link_up_prop_delay_get(uint8_t bonded_port_id); + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c new file mode 100644 index 00000000..8b4db507 --- /dev/null +++ b/drivers/net/bonding/rte_eth_bond_8023ad.c @@ -0,0 +1,1223 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <stddef.h> +#include <string.h> +#include <stdbool.h> + +#include <rte_alarm.h> +#include <rte_malloc.h> +#include <rte_errno.h> +#include <rte_cycles.h> + +#include "rte_eth_bond_private.h" + +#ifdef RTE_LIBRTE_BOND_DEBUG_8023AD +#define MODE4_DEBUG(fmt, ...) 
RTE_LOG(DEBUG, PMD, "%6u [Port %u: %s] " fmt, \ + bond_dbg_get_time_diff_ms(), slave_id, \ + __func__, ##__VA_ARGS__) + +static uint64_t start_time; + +static unsigned +bond_dbg_get_time_diff_ms(void) +{ + uint64_t now; + + now = rte_rdtsc(); + if (start_time == 0) + start_time = now; + + return ((now - start_time) * 1000) / rte_get_tsc_hz(); +} + +static void +bond_print_lacp(struct lacpdu *l) +{ + char a_address[18]; + char p_address[18]; + char a_state[256] = { 0 }; + char p_state[256] = { 0 }; + + static const char * const state_labels[] = { + "ACT", "TIMEOUT", "AGG", "SYNC", "COL", "DIST", "DEF", "EXP" + }; + + int a_len = 0; + int p_len = 0; + uint8_t i; + uint8_t *addr; + + addr = l->actor.port_params.system.addr_bytes; + snprintf(a_address, sizeof(a_address), "%02X:%02X:%02X:%02X:%02X:%02X", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + + addr = l->partner.port_params.system.addr_bytes; + snprintf(p_address, sizeof(p_address), "%02X:%02X:%02X:%02X:%02X:%02X", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + + for (i = 0; i < 8; i++) { + if ((l->actor.state >> i) & 1) { + a_len += snprintf(&a_state[a_len], RTE_DIM(a_state) - a_len, "%s ", + state_labels[i]); + } + + if ((l->partner.state >> i) & 1) { + p_len += snprintf(&p_state[p_len], RTE_DIM(p_state) - p_len, "%s ", + state_labels[i]); + } + } + + if (a_len && a_state[a_len-1] == ' ') + a_state[a_len-1] = '\0'; + + if (p_len && p_state[p_len-1] == ' ') + p_state[p_len-1] = '\0'; + + RTE_LOG(DEBUG, PMD, "LACP: {\n"\ + " subtype= %02X\n"\ + " ver_num=%02X\n"\ + " actor={ tlv=%02X, len=%02X\n"\ + " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\ + " state={ %s }\n"\ + " }\n"\ + " partner={ tlv=%02X, len=%02X\n"\ + " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\ + " state={ %s }\n"\ + " }\n"\ + " collector={info=%02X, length=%02X, max_delay=%04X\n, " \ + "type_term=%02X, terminator_length = %02X}\n",\ + l->subtype,\ + l->version_number,\ + l->actor.tlv_type_info,\ + l->actor.info_length,\ + l->actor.port_params.system_priority,\ + a_address,\ + l->actor.port_params.key,\ + l->actor.port_params.port_priority,\ + l->actor.port_params.port_number,\ + a_state,\ + l->partner.tlv_type_info,\ + l->partner.info_length,\ + l->partner.port_params.system_priority,\ + p_address,\ + l->partner.port_params.key,\ + l->partner.port_params.port_priority,\ + l->partner.port_params.port_number,\ + p_state,\ + l->tlv_type_collector_info,\ + l->collector_info_length,\ + l->collector_max_delay,\ + l->tlv_type_terminator,\ + l->terminator_length); + +} +#define BOND_PRINT_LACP(lacpdu) bond_print_lacp(lacpdu) +#else +#define BOND_PRINT_LACP(lacpdu) do { } while (0) +#define MODE4_DEBUG(fmt, ...) do { } while (0) +#endif + +static const struct ether_addr lacp_mac_addr = { + .addr_bytes = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02 } +}; + +struct port mode_8023ad_ports[RTE_MAX_ETHPORTS]; + +static void +timer_cancel(uint64_t *timer) +{ + *timer = 0; +} + +static void +timer_set(uint64_t *timer, uint64_t timeout) +{ + *timer = rte_rdtsc() + timeout; +} + +/* Forces given timer to be in expired state. 
*/ +static void +timer_force_expired(uint64_t *timer) +{ + *timer = rte_rdtsc(); +} + +static bool +timer_is_stopped(uint64_t *timer) +{ + return *timer == 0; +} + +static bool +timer_is_expired(uint64_t *timer) +{ + return *timer < rte_rdtsc(); +} + +/* Timer is in running state if it is not stopped nor expired */ +static bool +timer_is_running(uint64_t *timer) +{ + return !timer_is_stopped(timer) && !timer_is_expired(timer); +} + +static void +set_warning_flags(struct port *port, uint16_t flags) +{ + int retval; + uint16_t old; + uint16_t new_flag = 0; + + do { + old = port->warnings_to_show; + new_flag = old | flags; + retval = rte_atomic16_cmpset(&port->warnings_to_show, old, new_flag); + } while (unlikely(retval == 0)); +} + +static void +show_warnings(uint8_t slave_id) +{ + struct port *port = &mode_8023ad_ports[slave_id]; + uint8_t warnings; + + do { + warnings = port->warnings_to_show; + } while (rte_atomic16_cmpset(&port->warnings_to_show, warnings, 0) == 0); + + if (!warnings) + return; + + if (!timer_is_expired(&port->warning_timer)) + return; + + + timer_set(&port->warning_timer, BOND_8023AD_WARNINGS_PERIOD_MS * + rte_get_tsc_hz() / 1000); + + if (warnings & WRN_RX_QUEUE_FULL) { + RTE_LOG(DEBUG, PMD, + "Slave %u: failed to enqueue LACP packet into RX ring.\n" + "Receive and transmit functions must be invoked on bonded\n" + "interface at least 10 times per second or LACP will not\n" + "work correctly\n", slave_id); + } + + if (warnings & WRN_TX_QUEUE_FULL) { + RTE_LOG(DEBUG, PMD, + "Slave %u: failed to enqueue LACP packet into TX ring.\n" + "Receive and transmit functions must be invoked on bonded\n" + "interface at least 10 times per second or LACP will not\n" + "work correctly\n", slave_id); + } + + if (warnings & WRN_RX_MARKER_TO_FAST) + RTE_LOG(INFO, PMD, "Slave %u: marker to early - ignoring.\n", slave_id); + + if (warnings & WRN_UNKNOWN_SLOW_TYPE) { + RTE_LOG(INFO, PMD, + "Slave %u: ignoring unknown slow protocol frame type", slave_id); + } + + if (warnings & WRN_UNKNOWN_MARKER_TYPE) + RTE_LOG(INFO, PMD, "Slave %u: ignoring unknown marker type", slave_id); + + if (warnings & WRN_NOT_LACP_CAPABLE) + MODE4_DEBUG("Port %u is not LACP capable!\n", slave_id); +} + +static void +record_default(struct port *port) +{ + /* Record default parameters for partner. Partner admin parameters + * are not implemented so set them to arbitrary default (last known) and + * mark actor that parner is in defaulted state. */ + port->partner_state = STATE_LACP_ACTIVE; + ACTOR_STATE_SET(port, DEFAULTED); +} + +/** Function handles rx state machine. + * + * This function implements Receive State Machine from point 5.4.12 in + * 802.1AX documentation. It should be called periodically. + * + * @param lacpdu LACPDU received. + * @param port Port on which LACPDU was received. 
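 * (Editorial note summarizing the handling below: each received LACPDU
 *  re-arms the current_while timer with the short or long timeout, chosen
 *  by the actor's LACP_SHORT_TIMEOUT bit, and records the peer's actor
 *  parameters as this port's partner parameters; if the timer runs out the
 *  port is marked EXPIRED and partner synchronization is cleared. The
 *  timeouts are absolute TSC tick values, i.e. millisecond settings scaled
 *  by rte_get_tsc_hz() / 1000 before being handed to timer_set().)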
+ */ +static void +rx_machine(struct bond_dev_private *internals, uint8_t slave_id, + struct lacpdu *lacp) +{ + struct port *agg, *port = &mode_8023ad_ports[slave_id]; + uint64_t timeout; + + if (SM_FLAG(port, BEGIN)) { + /* Initialize stuff */ + MODE4_DEBUG("-> INITIALIZE\n"); + SM_FLAG_CLR(port, MOVED); + port->selected = UNSELECTED; + + record_default(port); + + ACTOR_STATE_CLR(port, EXPIRED); + timer_cancel(&port->current_while_timer); + + /* DISABLED: On initialization partner is out of sync */ + PARTNER_STATE_CLR(port, SYNCHRONIZATION); + + /* LACP DISABLED stuff if LACP not enabled on this port */ + if (!SM_FLAG(port, LACP_ENABLED)) + PARTNER_STATE_CLR(port, AGGREGATION); + else + PARTNER_STATE_SET(port, AGGREGATION); + } + + if (!SM_FLAG(port, LACP_ENABLED)) { + /* Update parameters only if state changed */ + if (!timer_is_stopped(&port->current_while_timer)) { + port->selected = UNSELECTED; + record_default(port); + PARTNER_STATE_CLR(port, AGGREGATION); + ACTOR_STATE_CLR(port, EXPIRED); + timer_cancel(&port->current_while_timer); + } + return; + } + + if (lacp) { + MODE4_DEBUG("LACP -> CURRENT\n"); + BOND_PRINT_LACP(lacp); + /* Update selected flag. If partner parameters are defaulted assume they + * are match. If not defaulted compare LACP actor with ports parner + * params. */ + if (!ACTOR_STATE(port, DEFAULTED) && + (ACTOR_STATE(port, AGGREGATION) != PARTNER_STATE(port, AGGREGATION) + || memcmp(&port->partner, &lacp->actor.port_params, + sizeof(port->partner)) != 0)) { + MODE4_DEBUG("selected <- UNSELECTED\n"); + port->selected = UNSELECTED; + } + + /* Record this PDU actor params as partner params */ + memcpy(&port->partner, &lacp->actor.port_params, + sizeof(struct port_params)); + port->partner_state = lacp->actor.state; + + /* Partner parameters are not defaulted any more */ + ACTOR_STATE_CLR(port, DEFAULTED); + + /* If LACP partner params match this port actor params */ + agg = &mode_8023ad_ports[port->aggregator_port_id]; + bool match = port->actor.system_priority == + lacp->partner.port_params.system_priority && + is_same_ether_addr(&agg->actor.system, + &lacp->partner.port_params.system) && + port->actor.port_priority == + lacp->partner.port_params.port_priority && + port->actor.port_number == + lacp->partner.port_params.port_number; + + /* Update NTT if partners information are outdated (xored and masked + * bits are set)*/ + uint8_t state_mask = STATE_LACP_ACTIVE | STATE_LACP_SHORT_TIMEOUT | + STATE_SYNCHRONIZATION | STATE_AGGREGATION; + + if (((port->actor_state ^ lacp->partner.state) & state_mask) || + match == false) { + SM_FLAG_SET(port, NTT); + } + + /* If LACP partner params match this port actor params */ + if (match == true && ACTOR_STATE(port, AGGREGATION) == + PARTNER_STATE(port, AGGREGATION)) + PARTNER_STATE_SET(port, SYNCHRONIZATION); + else if (!PARTNER_STATE(port, AGGREGATION) && ACTOR_STATE(port, + AGGREGATION)) + PARTNER_STATE_SET(port, SYNCHRONIZATION); + else + PARTNER_STATE_CLR(port, SYNCHRONIZATION); + + if (ACTOR_STATE(port, LACP_SHORT_TIMEOUT)) + timeout = internals->mode4.short_timeout; + else + timeout = internals->mode4.long_timeout; + + timer_set(&port->current_while_timer, timeout); + ACTOR_STATE_CLR(port, EXPIRED); + return; /* No state change */ + } + + /* If CURRENT state timer is not running (stopped or expired) + * transit to EXPIRED state from DISABLED or CURRENT */ + if (!timer_is_running(&port->current_while_timer)) { + ACTOR_STATE_SET(port, EXPIRED); + PARTNER_STATE_CLR(port, SYNCHRONIZATION); + PARTNER_STATE_SET(port, 
LACP_SHORT_TIMEOUT); + timer_set(&port->current_while_timer, internals->mode4.short_timeout); + } +} + +/** + * Function handles periodic tx state machine. + * + * Function implements Periodic Transmission state machine from point 5.4.13 + * in 802.1AX documentation. It should be called periodically. + * + * @param port Port to handle state machine. + */ +static void +periodic_machine(struct bond_dev_private *internals, uint8_t slave_id) +{ + struct port *port = &mode_8023ad_ports[slave_id]; + /* Calculate if either site is LACP enabled */ + uint64_t timeout; + uint8_t active = ACTOR_STATE(port, LACP_ACTIVE) || + PARTNER_STATE(port, LACP_ACTIVE); + + uint8_t is_partner_fast, was_partner_fast; + /* No periodic is on BEGIN, LACP DISABLE or when both sides are pasive */ + if (SM_FLAG(port, BEGIN) || !SM_FLAG(port, LACP_ENABLED) || !active) { + timer_cancel(&port->periodic_timer); + timer_force_expired(&port->tx_machine_timer); + SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT); + + MODE4_DEBUG("-> NO_PERIODIC ( %s%s%s)\n", + SM_FLAG(port, BEGIN) ? "begind " : "", + SM_FLAG(port, LACP_ENABLED) ? "" : "LACP disabled ", + active ? "LACP active " : "LACP pasive "); + return; + } + + is_partner_fast = PARTNER_STATE(port, LACP_SHORT_TIMEOUT); + was_partner_fast = SM_FLAG(port, PARTNER_SHORT_TIMEOUT); + + /* If periodic timer is not started, transit from NO PERIODIC to FAST/SLOW. + * Other case: check if timer expire or partners settings changed. */ + if (!timer_is_stopped(&port->periodic_timer)) { + if (timer_is_expired(&port->periodic_timer)) { + SM_FLAG_SET(port, NTT); + } else if (is_partner_fast != was_partner_fast) { + /* Partners timeout was slow and now it is fast -> send LACP. + * In other case (was fast and now it is slow) just switch + * timeout to slow without forcing send of LACP (because standard + * say so)*/ + if (!is_partner_fast) + SM_FLAG_SET(port, NTT); + } else + return; /* Nothing changed */ + } + + /* Handle state transition to FAST/SLOW LACP timeout */ + if (is_partner_fast) { + timeout = internals->mode4.fast_periodic_timeout; + SM_FLAG_SET(port, PARTNER_SHORT_TIMEOUT); + } else { + timeout = internals->mode4.slow_periodic_timeout; + SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT); + } + + timer_set(&port->periodic_timer, timeout); +} + +/** + * Function handles mux state machine. + * + * Function implements Mux Machine from point 5.4.15 in 802.1AX documentation. + * It should be called periodically. + * + * @param port Port to handle state machine. 
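 * (Editorial note: the transitions implemented below run DETACHED ->
 *  WAITING -> ATTACHED -> COLLECTING -> DISTRIBUTING, and drop back toward
 *  ATTACHED or DETACHED whenever the port becomes unselected or the partner
 *  falls out of synchronization.)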
+ */ +static void +mux_machine(struct bond_dev_private *internals, uint8_t slave_id) +{ + struct port *port = &mode_8023ad_ports[slave_id]; + + /* Save current state for later use */ + const uint8_t state_mask = STATE_SYNCHRONIZATION | STATE_DISTRIBUTING | + STATE_COLLECTING; + + /* Enter DETACHED state on BEGIN condition or from any other state if + * port was unselected */ + if (SM_FLAG(port, BEGIN) || + port->selected == UNSELECTED || (port->selected == STANDBY && + (port->actor_state & state_mask) != 0)) { + /* detach mux from aggregator */ + port->actor_state &= ~state_mask; + /* Set ntt to true if BEGIN condition or transition from any other state + * which is indicated that wait_while_timer was started */ + if (SM_FLAG(port, BEGIN) || + !timer_is_stopped(&port->wait_while_timer)) { + SM_FLAG_SET(port, NTT); + MODE4_DEBUG("-> DETACHED\n"); + } + timer_cancel(&port->wait_while_timer); + } + + if (timer_is_stopped(&port->wait_while_timer)) { + if (port->selected == SELECTED || port->selected == STANDBY) { + timer_set(&port->wait_while_timer, + internals->mode4.aggregate_wait_timeout); + + MODE4_DEBUG("DETACHED -> WAITING\n"); + } + /* Waiting state entered */ + return; + } + + /* Transit next state if port is ready */ + if (!timer_is_expired(&port->wait_while_timer)) + return; + + if ((ACTOR_STATE(port, DISTRIBUTING) || ACTOR_STATE(port, COLLECTING)) && + !PARTNER_STATE(port, SYNCHRONIZATION)) { + /* If in COLLECTING or DISTRIBUTING state and partner becomes out of + * sync transit to ATACHED state. */ + ACTOR_STATE_CLR(port, DISTRIBUTING); + ACTOR_STATE_CLR(port, COLLECTING); + /* Clear actor sync to activate transit ATACHED in condition bellow */ + ACTOR_STATE_CLR(port, SYNCHRONIZATION); + MODE4_DEBUG("Out of sync -> ATTACHED\n"); + } + + if (!ACTOR_STATE(port, SYNCHRONIZATION)) { + /* attach mux to aggregator */ + RTE_VERIFY((port->actor_state & (STATE_COLLECTING | + STATE_DISTRIBUTING)) == 0); + + ACTOR_STATE_SET(port, SYNCHRONIZATION); + SM_FLAG_SET(port, NTT); + MODE4_DEBUG("ATTACHED Entered\n"); + } else if (!ACTOR_STATE(port, COLLECTING)) { + /* Start collecting if in sync */ + if (PARTNER_STATE(port, SYNCHRONIZATION)) { + MODE4_DEBUG("ATTACHED -> COLLECTING\n"); + ACTOR_STATE_SET(port, COLLECTING); + SM_FLAG_SET(port, NTT); + } + } else if (ACTOR_STATE(port, COLLECTING)) { + /* Check if partner is in COLLECTING state. If so this port can + * distribute frames to it */ + if (!ACTOR_STATE(port, DISTRIBUTING)) { + if (PARTNER_STATE(port, COLLECTING)) { + /* Enable DISTRIBUTING if partner is collecting */ + ACTOR_STATE_SET(port, DISTRIBUTING); + SM_FLAG_SET(port, NTT); + MODE4_DEBUG("COLLECTING -> DISTRIBUTING\n"); + RTE_LOG(INFO, PMD, + "Bond %u: slave id %u distributing started.\n", + internals->port_id, slave_id); + } + } else { + if (!PARTNER_STATE(port, COLLECTING)) { + /* Disable DISTRIBUTING (enter COLLECTING state) if partner + * is not collecting */ + ACTOR_STATE_CLR(port, DISTRIBUTING); + SM_FLAG_SET(port, NTT); + MODE4_DEBUG("DISTRIBUTING -> COLLECTING\n"); + RTE_LOG(INFO, PMD, + "Bond %u: slave id %u distributing stopped.\n", + internals->port_id, slave_id); + } + } + } +} + +/** + * Function handles transmit state machine. + * + * Function implements Transmit Machine from point 5.4.16 in 802.1AX + * documentation. 
+ * + * @param port + */ +static void +tx_machine(struct bond_dev_private *internals, uint8_t slave_id) +{ + struct port *agg, *port = &mode_8023ad_ports[slave_id]; + + struct rte_mbuf *lacp_pkt = NULL; + struct lacpdu_header *hdr; + struct lacpdu *lacpdu; + + /* If periodic timer is not running periodic machine is in NO PERIODIC and + * according to 802.3ax standard tx machine should not transmit any frames + * and set ntt to false. */ + if (timer_is_stopped(&port->periodic_timer)) + SM_FLAG_CLR(port, NTT); + + if (!SM_FLAG(port, NTT)) + return; + + if (!timer_is_expired(&port->tx_machine_timer)) + return; + + lacp_pkt = rte_pktmbuf_alloc(port->mbuf_pool); + if (lacp_pkt == NULL) { + RTE_LOG(ERR, PMD, "Failed to allocate LACP packet from pool\n"); + return; + } + + lacp_pkt->data_len = sizeof(*hdr); + lacp_pkt->pkt_len = sizeof(*hdr); + + hdr = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *); + + /* Source and destination MAC */ + ether_addr_copy(&lacp_mac_addr, &hdr->eth_hdr.d_addr); + rte_eth_macaddr_get(slave_id, &hdr->eth_hdr.s_addr); + hdr->eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_SLOW); + + lacpdu = &hdr->lacpdu; + memset(lacpdu, 0, sizeof(*lacpdu)); + + /* Initialize LACP part */ + lacpdu->subtype = SLOW_SUBTYPE_LACP; + lacpdu->version_number = 1; + + /* ACTOR */ + lacpdu->actor.tlv_type_info = TLV_TYPE_ACTOR_INFORMATION; + lacpdu->actor.info_length = sizeof(struct lacpdu_actor_partner_params); + memcpy(&hdr->lacpdu.actor.port_params, &port->actor, + sizeof(port->actor)); + agg = &mode_8023ad_ports[port->aggregator_port_id]; + ether_addr_copy(&agg->actor.system, &hdr->lacpdu.actor.port_params.system); + lacpdu->actor.state = port->actor_state; + + /* PARTNER */ + lacpdu->partner.tlv_type_info = TLV_TYPE_PARTNER_INFORMATION; + lacpdu->partner.info_length = sizeof(struct lacpdu_actor_partner_params); + memcpy(&lacpdu->partner.port_params, &port->partner, + sizeof(struct port_params)); + lacpdu->partner.state = port->partner_state; + + /* Other fields */ + lacpdu->tlv_type_collector_info = TLV_TYPE_COLLECTOR_INFORMATION; + lacpdu->collector_info_length = 0x10; + lacpdu->collector_max_delay = 0; + + lacpdu->tlv_type_terminator = TLV_TYPE_TERMINATOR_INFORMATION; + lacpdu->terminator_length = 0; + + if (rte_ring_enqueue(port->tx_ring, lacp_pkt) == -ENOBUFS) { + /* If TX ring full, drop packet and free message. Retransmission + * will happen in next function call. */ + rte_pktmbuf_free(lacp_pkt); + set_warning_flags(port, WRN_TX_QUEUE_FULL); + return; + } + + MODE4_DEBUG("sending LACP frame\n"); + BOND_PRINT_LACP(lacpdu); + + timer_set(&port->tx_machine_timer, internals->mode4.tx_period_timeout); + SM_FLAG_CLR(port, NTT); +} + +/** + * Function assigns port to aggregator. + * + * @param bond_dev_private Pointer to bond_dev_private structure. + * @param port_pos Port to assign. + */ +static void +selection_logic(struct bond_dev_private *internals, uint8_t slave_id) +{ + struct port *agg, *port; + uint8_t slaves_count, new_agg_id, i; + uint8_t *slaves; + + slaves = internals->active_slaves; + slaves_count = internals->active_slave_count; + port = &mode_8023ad_ports[slave_id]; + + /* Search for aggregator suitable for this port */ + for (i = 0; i < slaves_count; ++i) { + agg = &mode_8023ad_ports[slaves[i]]; + /* Skip ports that are not aggreagators */ + if (agg->aggregator_port_id != slaves[i]) + continue; + + /* Actors system ID is not checked since all slave device have the same + * ID (MAC address). 
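 * (Editorial note: the test below accepts an existing aggregator only when
 *  the actor keys match, the partner's system address, system priority and
 *  key all match, the partner system address is non-zero and the key has
 *  the full-duplex bit set; otherwise the port falls back to aggregating
 *  with itself.)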
*/ + if ((agg->actor.key == port->actor.key && + agg->partner.system_priority == port->partner.system_priority && + is_same_ether_addr(&agg->partner.system, &port->partner.system) == 1 + && (agg->partner.key == port->partner.key)) && + is_zero_ether_addr(&port->partner.system) != 1 && + (agg->actor.key & + rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) != 0) { + + break; + } + } + + /* By default, port uses it self as agregator */ + if (i == slaves_count) + new_agg_id = slave_id; + else + new_agg_id = slaves[i]; + + if (new_agg_id != port->aggregator_port_id) { + port->aggregator_port_id = new_agg_id; + + MODE4_DEBUG("-> SELECTED: ID=%3u\n" + "\t%s aggregator ID=%3u\n", + port->aggregator_port_id, + port->aggregator_port_id == slave_id ? + "aggregator not found, using default" : "aggregator found", + port->aggregator_port_id); + } + + port->selected = SELECTED; +} + +/* Function maps DPDK speed to bonding speed stored in key field */ +static uint16_t +link_speed_key(uint16_t speed) { + uint16_t key_speed; + + switch (speed) { + case ETH_SPEED_NUM_NONE: + key_speed = 0x00; + break; + case ETH_SPEED_NUM_10M: + key_speed = BOND_LINK_SPEED_KEY_10M; + break; + case ETH_SPEED_NUM_100M: + key_speed = BOND_LINK_SPEED_KEY_100M; + break; + case ETH_SPEED_NUM_1G: + key_speed = BOND_LINK_SPEED_KEY_1000M; + break; + case ETH_SPEED_NUM_10G: + key_speed = BOND_LINK_SPEED_KEY_10G; + break; + case ETH_SPEED_NUM_20G: + key_speed = BOND_LINK_SPEED_KEY_20G; + break; + case ETH_SPEED_NUM_40G: + key_speed = BOND_LINK_SPEED_KEY_40G; + break; + default: + /* Unknown speed*/ + key_speed = 0xFFFF; + } + + return key_speed; +} + +static void +bond_mode_8023ad_periodic_cb(void *arg) +{ + struct rte_eth_dev *bond_dev = arg; + struct bond_dev_private *internals = bond_dev->data->dev_private; + struct port *port; + struct rte_eth_link link_info; + struct ether_addr slave_addr; + + void *pkt = NULL; + uint8_t i, slave_id; + + + /* Update link status on each port */ + for (i = 0; i < internals->active_slave_count; i++) { + uint16_t key; + + slave_id = internals->active_slaves[i]; + rte_eth_link_get(slave_id, &link_info); + rte_eth_macaddr_get(slave_id, &slave_addr); + + if (link_info.link_status != 0) { + key = link_speed_key(link_info.link_speed) << 1; + if (link_info.link_duplex == ETH_LINK_FULL_DUPLEX) + key |= BOND_LINK_FULL_DUPLEX_KEY; + } else + key = 0; + + port = &mode_8023ad_ports[slave_id]; + + key = rte_cpu_to_be_16(key); + if (key != port->actor.key) { + if (!(key & rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY))) + set_warning_flags(port, WRN_NOT_LACP_CAPABLE); + + port->actor.key = key; + SM_FLAG_SET(port, NTT); + } + + if (!is_same_ether_addr(&port->actor.system, &slave_addr)) { + ether_addr_copy(&slave_addr, &port->actor.system); + if (port->aggregator_port_id == slave_id) + SM_FLAG_SET(port, NTT); + } + } + + for (i = 0; i < internals->active_slave_count; i++) { + slave_id = internals->active_slaves[i]; + port = &mode_8023ad_ports[slave_id]; + + if ((port->actor.key & + rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) == 0) { + + SM_FLAG_SET(port, BEGIN); + + /* LACP is disabled on half duples or link is down */ + if (SM_FLAG(port, LACP_ENABLED)) { + /* If port was enabled set it to BEGIN state */ + SM_FLAG_CLR(port, LACP_ENABLED); + ACTOR_STATE_CLR(port, DISTRIBUTING); + ACTOR_STATE_CLR(port, COLLECTING); + } + + /* Skip this port processing */ + continue; + } + + SM_FLAG_SET(port, LACP_ENABLED); + + /* Find LACP packet to this port. 
Do not check subtype, it is done in + * function that queued packet */ + if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) { + struct rte_mbuf *lacp_pkt = pkt; + struct lacpdu_header *lacp; + + lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *); + RTE_VERIFY(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP); + + /* This is LACP frame so pass it to rx_machine */ + rx_machine(internals, slave_id, &lacp->lacpdu); + rte_pktmbuf_free(lacp_pkt); + } else + rx_machine(internals, slave_id, NULL); + + periodic_machine(internals, slave_id); + mux_machine(internals, slave_id); + tx_machine(internals, slave_id); + selection_logic(internals, slave_id); + + SM_FLAG_CLR(port, BEGIN); + show_warnings(slave_id); + } + + rte_eal_alarm_set(internals->mode4.update_timeout_us, + bond_mode_8023ad_periodic_cb, arg); +} + +void +bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id) +{ + struct bond_dev_private *internals = bond_dev->data->dev_private; + + struct port *port = &mode_8023ad_ports[slave_id]; + struct port_params initial = { + .system = { { 0 } }, + .system_priority = rte_cpu_to_be_16(0xFFFF), + .key = rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY), + .port_priority = rte_cpu_to_be_16(0x00FF), + .port_number = 0, + }; + + char mem_name[RTE_ETH_NAME_MAX_LEN]; + int socket_id; + unsigned element_size; + uint32_t total_tx_desc; + struct bond_tx_queue *bd_tx_q; + uint16_t q_id; + + /* Given slave mus not be in active list */ + RTE_VERIFY(find_slave_by_id(internals->active_slaves, + internals->active_slave_count, slave_id) == internals->active_slave_count); + + memcpy(&port->actor, &initial, sizeof(struct port_params)); + /* Standard requires that port ID must be grater than 0. + * Add 1 do get corresponding port_number */ + port->actor.port_number = rte_cpu_to_be_16((uint16_t)slave_id + 1); + + memcpy(&port->partner, &initial, sizeof(struct port_params)); + + /* default states */ + port->actor_state = STATE_AGGREGATION | STATE_LACP_ACTIVE | STATE_DEFAULTED; + port->partner_state = STATE_LACP_ACTIVE; + port->sm_flags = SM_FLAGS_BEGIN; + + /* use this port as agregator */ + port->aggregator_port_id = slave_id; + rte_eth_promiscuous_enable(slave_id); + + timer_cancel(&port->warning_timer); + + if (port->mbuf_pool != NULL) + return; + + RTE_VERIFY(port->rx_ring == NULL); + RTE_VERIFY(port->tx_ring == NULL); + socket_id = rte_eth_devices[slave_id].data->numa_node; + + element_size = sizeof(struct slow_protocol_frame) + sizeof(struct rte_mbuf) + + RTE_PKTMBUF_HEADROOM; + + /* The size of the mempool should be at least: + * the sum of the TX descriptors + BOND_MODE_8023AX_SLAVE_TX_PKTS */ + total_tx_desc = BOND_MODE_8023AX_SLAVE_TX_PKTS; + for (q_id = 0; q_id < bond_dev->data->nb_tx_queues; q_id++) { + bd_tx_q = (struct bond_tx_queue*)bond_dev->data->tx_queues[q_id]; + total_tx_desc += bd_tx_q->nb_tx_desc; + } + + snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_pool", slave_id); + port->mbuf_pool = rte_mempool_create(mem_name, + total_tx_desc, element_size, + RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ? 32 : RTE_MEMPOOL_CACHE_MAX_SIZE, + sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, + NULL, rte_pktmbuf_init, NULL, socket_id, MEMPOOL_F_NO_SPREAD); + + /* Any memory allocation failure in initalization is critical because + * resources can't be free, so reinitialization is impossible. 
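 * (For scale, an editorial illustration with hypothetical numbers: with
 *  BOND_MODE_8023AX_SLAVE_TX_PKTS equal to 1 and, say, two TX queues of
 *  512 descriptors each, the pool created above holds 1 + 2 * 512 = 1025
 *  elements, each sized for a slow_protocol_frame plus the rte_mbuf header
 *  and headroom.)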
*/ + if (port->mbuf_pool == NULL) { + rte_panic("Slave %u: Failed to create memory pool '%s': %s\n", + slave_id, mem_name, rte_strerror(rte_errno)); + } + + snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_rx", slave_id); + port->rx_ring = rte_ring_create(mem_name, + rte_align32pow2(BOND_MODE_8023AX_SLAVE_RX_PKTS), socket_id, 0); + + if (port->rx_ring == NULL) { + rte_panic("Slave %u: Failed to create rx ring '%s': %s\n", slave_id, + mem_name, rte_strerror(rte_errno)); + } + + /* TX ring is at least one pkt longer to make room for marker packet. */ + snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_tx", slave_id); + port->tx_ring = rte_ring_create(mem_name, + rte_align32pow2(BOND_MODE_8023AX_SLAVE_TX_PKTS + 1), socket_id, 0); + + if (port->tx_ring == NULL) { + rte_panic("Slave %u: Failed to create tx ring '%s': %s\n", slave_id, + mem_name, rte_strerror(rte_errno)); + } +} + +int +bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev, + uint8_t slave_id) +{ + struct bond_dev_private *internals = bond_dev->data->dev_private; + void *pkt = NULL; + struct port *port; + uint8_t i; + + /* Given slave must be in active list */ + RTE_VERIFY(find_slave_by_id(internals->active_slaves, + internals->active_slave_count, slave_id) < internals->active_slave_count); + + /* Exclude slave from transmit policy. If this slave is an aggregator + * make all aggregated slaves unselected to force selection logic + * to select suitable aggregator for this port. */ + for (i = 0; i < internals->active_slave_count; i++) { + port = &mode_8023ad_ports[internals->active_slaves[i]]; + if (port->aggregator_port_id != slave_id) + continue; + + port->selected = UNSELECTED; + + /* Use default aggregator */ + port->aggregator_port_id = internals->active_slaves[i]; + } + + port = &mode_8023ad_ports[slave_id]; + port->selected = UNSELECTED; + port->actor_state &= ~(STATE_SYNCHRONIZATION | STATE_DISTRIBUTING | + STATE_COLLECTING); + + while (rte_ring_dequeue(port->rx_ring, &pkt) == 0) + rte_pktmbuf_free((struct rte_mbuf *)pkt); + + while (rte_ring_dequeue(port->tx_ring, &pkt) == 0) + rte_pktmbuf_free((struct rte_mbuf *)pkt); + return 0; +} + +void +bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev) +{ + struct bond_dev_private *internals = bond_dev->data->dev_private; + struct ether_addr slave_addr; + struct port *slave, *agg_slave; + uint8_t slave_id, i, j; + + bond_mode_8023ad_stop(bond_dev); + + for (i = 0; i < internals->active_slave_count; i++) { + slave_id = internals->active_slaves[i]; + slave = &mode_8023ad_ports[slave_id]; + rte_eth_macaddr_get(slave_id, &slave_addr); + + if (is_same_ether_addr(&slave_addr, &slave->actor.system)) + continue; + + ether_addr_copy(&slave_addr, &slave->actor.system); + /* Do nothing if this port is not an aggregator. In other case + * Set NTT flag on every port that use this aggregator. 
*/ + if (slave->aggregator_port_id != slave_id) + continue; + + for (j = 0; j < internals->active_slave_count; j++) { + agg_slave = &mode_8023ad_ports[internals->active_slaves[j]]; + if (agg_slave->aggregator_port_id == slave_id) + SM_FLAG_SET(agg_slave, NTT); + } + } + + if (bond_dev->data->dev_started) + bond_mode_8023ad_start(bond_dev); +} + +void +bond_mode_8023ad_conf_get(struct rte_eth_dev *dev, + struct rte_eth_bond_8023ad_conf *conf) +{ + struct bond_dev_private *internals = dev->data->dev_private; + struct mode8023ad_private *mode4 = &internals->mode4; + uint64_t ms_ticks = rte_get_tsc_hz() / 1000; + + conf->fast_periodic_ms = mode4->fast_periodic_timeout / ms_ticks; + conf->slow_periodic_ms = mode4->slow_periodic_timeout / ms_ticks; + conf->short_timeout_ms = mode4->short_timeout / ms_ticks; + conf->long_timeout_ms = mode4->long_timeout / ms_ticks; + conf->aggregate_wait_timeout_ms = mode4->aggregate_wait_timeout / ms_ticks; + conf->tx_period_ms = mode4->tx_period_timeout / ms_ticks; + conf->update_timeout_ms = mode4->update_timeout_us / 1000; + conf->rx_marker_period_ms = mode4->rx_marker_timeout / ms_ticks; +} + +void +bond_mode_8023ad_setup(struct rte_eth_dev *dev, + struct rte_eth_bond_8023ad_conf *conf) +{ + struct rte_eth_bond_8023ad_conf def_conf; + struct bond_dev_private *internals = dev->data->dev_private; + struct mode8023ad_private *mode4 = &internals->mode4; + uint64_t ms_ticks = rte_get_tsc_hz() / 1000; + + if (conf == NULL) { + conf = &def_conf; + conf->fast_periodic_ms = BOND_8023AD_FAST_PERIODIC_MS; + conf->slow_periodic_ms = BOND_8023AD_SLOW_PERIODIC_MS; + conf->short_timeout_ms = BOND_8023AD_SHORT_TIMEOUT_MS; + conf->long_timeout_ms = BOND_8023AD_LONG_TIMEOUT_MS; + conf->aggregate_wait_timeout_ms = BOND_8023AD_AGGREGATE_WAIT_TIMEOUT_MS; + conf->tx_period_ms = BOND_8023AD_TX_MACHINE_PERIOD_MS; + conf->rx_marker_period_ms = BOND_8023AD_RX_MARKER_PERIOD_MS; + conf->update_timeout_ms = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS; + } + + mode4->fast_periodic_timeout = conf->fast_periodic_ms * ms_ticks; + mode4->slow_periodic_timeout = conf->slow_periodic_ms * ms_ticks; + mode4->short_timeout = conf->short_timeout_ms * ms_ticks; + mode4->long_timeout = conf->long_timeout_ms * ms_ticks; + mode4->aggregate_wait_timeout = conf->aggregate_wait_timeout_ms * ms_ticks; + mode4->tx_period_timeout = conf->tx_period_ms * ms_ticks; + mode4->rx_marker_timeout = conf->rx_marker_period_ms * ms_ticks; + mode4->update_timeout_us = conf->update_timeout_ms * 1000; +} + +int +bond_mode_8023ad_enable(struct rte_eth_dev *bond_dev) +{ + struct bond_dev_private *internals = bond_dev->data->dev_private; + uint8_t i; + + for (i = 0; i < internals->active_slave_count; i++) + bond_mode_8023ad_activate_slave(bond_dev, i); + + return 0; +} + +int +bond_mode_8023ad_start(struct rte_eth_dev *bond_dev) +{ + return rte_eal_alarm_set(BOND_MODE_8023AX_UPDATE_TIMEOUT_MS * 1000, + &bond_mode_8023ad_periodic_cb, bond_dev); +} + +void +bond_mode_8023ad_stop(struct rte_eth_dev *bond_dev) +{ + rte_eal_alarm_cancel(&bond_mode_8023ad_periodic_cb, bond_dev); +} + +void +bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals, + uint8_t slave_id, struct rte_mbuf *pkt) +{ + struct mode8023ad_private *mode4 = &internals->mode4; + struct port *port = &mode_8023ad_ports[slave_id]; + struct marker_header *m_hdr; + uint64_t marker_timer, old_marker_timer; + int retval; + uint8_t wrn, subtype; + /* If packet is a marker, we send response now by reusing given packet + * and update only source MAC, destination MAC 
is multicast so don't + * update it. Other frames will be handled later by state machines */ + subtype = rte_pktmbuf_mtod(pkt, + struct slow_protocol_frame *)->slow_protocol.subtype; + + if (subtype == SLOW_SUBTYPE_MARKER) { + m_hdr = rte_pktmbuf_mtod(pkt, struct marker_header *); + + if (likely(m_hdr->marker.tlv_type_marker != MARKER_TLV_TYPE_INFO)) { + wrn = WRN_UNKNOWN_MARKER_TYPE; + goto free_out; + } + + /* Setup marker timer. Do it in loop in case concurrent access. */ + do { + old_marker_timer = port->rx_marker_timer; + if (!timer_is_expired(&old_marker_timer)) { + wrn = WRN_RX_MARKER_TO_FAST; + goto free_out; + } + + timer_set(&marker_timer, mode4->rx_marker_timeout); + retval = rte_atomic64_cmpset(&port->rx_marker_timer, + old_marker_timer, marker_timer); + } while (unlikely(retval == 0)); + + m_hdr->marker.tlv_type_marker = MARKER_TLV_TYPE_RESP; + rte_eth_macaddr_get(slave_id, &m_hdr->eth_hdr.s_addr); + + if (unlikely(rte_ring_enqueue(port->tx_ring, pkt) == -ENOBUFS)) { + /* reset timer */ + port->rx_marker_timer = 0; + wrn = WRN_TX_QUEUE_FULL; + goto free_out; + } + } else if (likely(subtype == SLOW_SUBTYPE_LACP)) { + if (unlikely(rte_ring_enqueue(port->rx_ring, pkt) == -ENOBUFS)) { + /* If RX fing full free lacpdu message and drop packet */ + wrn = WRN_RX_QUEUE_FULL; + goto free_out; + } + } else { + wrn = WRN_UNKNOWN_SLOW_TYPE; + goto free_out; + } + + return; + +free_out: + set_warning_flags(port, wrn); + rte_pktmbuf_free(pkt); +} + +int +rte_eth_bond_8023ad_conf_get(uint8_t port_id, + struct rte_eth_bond_8023ad_conf *conf) +{ + struct rte_eth_dev *bond_dev; + + if (valid_bonded_port_id(port_id) != 0) + return -EINVAL; + + if (conf == NULL) + return -EINVAL; + + bond_dev = &rte_eth_devices[port_id]; + bond_mode_8023ad_conf_get(bond_dev, conf); + return 0; +} + +int +rte_eth_bond_8023ad_setup(uint8_t port_id, + struct rte_eth_bond_8023ad_conf *conf) +{ + struct rte_eth_dev *bond_dev; + + if (valid_bonded_port_id(port_id) != 0) + return -EINVAL; + + if (conf != NULL) { + /* Basic sanity check */ + if (conf->slow_periodic_ms == 0 || + conf->fast_periodic_ms >= conf->slow_periodic_ms || + conf->long_timeout_ms == 0 || + conf->short_timeout_ms >= conf->long_timeout_ms || + conf->aggregate_wait_timeout_ms == 0 || + conf->tx_period_ms == 0 || + conf->rx_marker_period_ms == 0 || + conf->update_timeout_ms == 0) { + RTE_LOG(ERR, PMD, "given mode 4 configuration is invalid\n"); + return -EINVAL; + } + } + + bond_dev = &rte_eth_devices[port_id]; + bond_mode_8023ad_setup(bond_dev, conf); + + return 0; +} + +int +rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id, + struct rte_eth_bond_8023ad_slave_info *info) +{ + struct rte_eth_dev *bond_dev; + struct bond_dev_private *internals; + struct port *port; + + if (info == NULL || valid_bonded_port_id(port_id) != 0 || + rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD) + return -EINVAL; + + bond_dev = &rte_eth_devices[port_id]; + + internals = bond_dev->data->dev_private; + if (find_slave_by_id(internals->active_slaves, + internals->active_slave_count, slave_id) == + internals->active_slave_count) + return -EINVAL; + + port = &mode_8023ad_ports[slave_id]; + info->selected = port->selected; + + info->actor_state = port->actor_state; + rte_memcpy(&info->actor, &port->actor, sizeof(port->actor)); + + info->partner_state = port->partner_state; + rte_memcpy(&info->partner, &port->partner, sizeof(port->partner)); + + info->agg_port_id = port->aggregator_port_id; + return 0; +} diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.h 
b/drivers/net/bonding/rte_eth_bond_8023ad.h new file mode 100644 index 00000000..ebd0e934 --- /dev/null +++ b/drivers/net/bonding/rte_eth_bond_8023ad.h @@ -0,0 +1,222 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RTE_ETH_BOND_8023AD_H_ +#define RTE_ETH_BOND_8023AD_H_ + +#include <rte_ether.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Actor/partner states + */ +#define STATE_LACP_ACTIVE 0x01 +#define STATE_LACP_SHORT_TIMEOUT 0x02 +#define STATE_AGGREGATION 0x04 +#define STATE_SYNCHRONIZATION 0x08 +#define STATE_COLLECTING 0x10 +#define STATE_DISTRIBUTING 0x20 +/** Partners parameters are defaulted */ +#define STATE_DEFAULTED 0x40 +#define STATE_EXPIRED 0x80 + +#define TLV_TYPE_ACTOR_INFORMATION 0x01 +#define TLV_TYPE_PARTNER_INFORMATION 0x02 +#define TLV_TYPE_COLLECTOR_INFORMATION 0x03 +#define TLV_TYPE_TERMINATOR_INFORMATION 0x00 + +#define SLOW_SUBTYPE_LACP 0x01 +#define SLOW_SUBTYPE_MARKER 0x02 + +#define MARKER_TLV_TYPE_INFO 0x01 +#define MARKER_TLV_TYPE_RESP 0x02 + +enum rte_bond_8023ad_selection { + UNSELECTED, + STANDBY, + SELECTED +}; + +/** Generic slow protocol structure */ +struct slow_protocol { + uint8_t subtype; + uint8_t reserved_119[119]; +} __attribute__((__packed__)); + +/** Generic slow protocol frame type structure */ +struct slow_protocol_frame { + struct ether_hdr eth_hdr; + struct slow_protocol slow_protocol; +} __attribute__((__packed__)); + +struct port_params { + uint16_t system_priority; + /**< System priority (unused in current implementation) */ + struct ether_addr system; + /**< System ID - Slave MAC address, same as bonding MAC address */ + uint16_t key; + /**< Speed information (implementation dependednt) and duplex. */ + uint16_t port_priority; + /**< Priority of this (unused in current implementation) */ + uint16_t port_number; + /**< Port number. It corresponds to slave port id. 
*/ +} __attribute__((__packed__)); + +struct lacpdu_actor_partner_params { + uint8_t tlv_type_info; + uint8_t info_length; + struct port_params port_params; + uint8_t state; + uint8_t reserved_3[3]; +} __attribute__((__packed__)); + +/** LACPDU structure (5.4.2 in 802.1AX documentation). */ +struct lacpdu { + uint8_t subtype; + uint8_t version_number; + + struct lacpdu_actor_partner_params actor; + struct lacpdu_actor_partner_params partner; + + uint8_t tlv_type_collector_info; + uint8_t collector_info_length; + uint16_t collector_max_delay; + uint8_t reserved_12[12]; + + uint8_t tlv_type_terminator; + uint8_t terminator_length; + uint8_t reserved_50[50]; +} __attribute__((__packed__)); + +/** LACPDU frame: Contains ethernet header and LACPDU. */ +struct lacpdu_header { + struct ether_hdr eth_hdr; + struct lacpdu lacpdu; +} __attribute__((__packed__)); + +struct marker { + uint8_t subtype; + uint8_t version_number; + + uint8_t tlv_type_marker; + uint8_t info_length; + uint16_t requester_port; + struct ether_addr requester_system; + uint32_t requester_transaction_id; + uint8_t reserved_2[2]; + + uint8_t tlv_type_terminator; + uint8_t terminator_length; + uint8_t reserved_90[90]; +} __attribute__((__packed__)); + +struct marker_header { + struct ether_hdr eth_hdr; + struct marker marker; +} __attribute__((__packed__)); + +struct rte_eth_bond_8023ad_conf { + uint32_t fast_periodic_ms; + uint32_t slow_periodic_ms; + uint32_t short_timeout_ms; + uint32_t long_timeout_ms; + uint32_t aggregate_wait_timeout_ms; + uint32_t tx_period_ms; + uint32_t rx_marker_period_ms; + uint32_t update_timeout_ms; +}; + +struct rte_eth_bond_8023ad_slave_info { + enum rte_bond_8023ad_selection selected; + uint8_t actor_state; + struct port_params actor; + uint8_t partner_state; + struct port_params partner; + uint8_t agg_port_id; +}; + +/** + * @internal + * + * Function returns current configuration of 802.3AX mode. + * + * @param port_id Bonding device id + * @param conf Pointer to timeout structure. + * + * @return + * 0 - if ok + * -EINVAL if conf is NULL + */ +int +rte_eth_bond_8023ad_conf_get(uint8_t port_id, + struct rte_eth_bond_8023ad_conf *conf); + +/** + * @internal + * + * Function set new configuration of 802.3AX mode. + * + * @param port_id Bonding device id + * @param conf Configuration, if NULL set default configuration. + * @return + * 0 - if ok + * -EINVAL if configuration is invalid. + */ +int +rte_eth_bond_8023ad_setup(uint8_t port_id, + struct rte_eth_bond_8023ad_conf *conf); + +/** + * @internal + * + * Function returns current state of given slave device. + * + * @param slave_id Port id of valid slave. + * @param conf buffer for configuration + * @return + * 0 - if ok + * -EINVAL if conf is NULL or slave id is invalid (not a slave of given + * bonded device or is not inactive). + */ +int +rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id, + struct rte_eth_bond_8023ad_slave_info *conf); + +#ifdef __cplusplus +} +#endif + +#endif /* RTE_ETH_BOND_8023AD_H_ */ diff --git a/drivers/net/bonding/rte_eth_bond_8023ad_private.h b/drivers/net/bonding/rte_eth_bond_8023ad_private.h new file mode 100644 index 00000000..8adee70b --- /dev/null +++ b/drivers/net/bonding/rte_eth_bond_8023ad_private.h @@ -0,0 +1,308 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RTE_ETH_BOND_8023AD_PRIVATE_H_ +#define RTE_ETH_BOND_8023AD_PRIVATE_H_ + +#include <stdint.h> + +#include <rte_ether.h> +#include <rte_byteorder.h> +#include <rte_atomic.h> + +#include "rte_eth_bond_8023ad.h" + +#define BOND_MODE_8023AX_UPDATE_TIMEOUT_MS 100 +/** Maximum number of packets to one slave queued in TX ring. */ +#define BOND_MODE_8023AX_SLAVE_RX_PKTS 3 +/** Maximum number of LACP packets from one slave queued in TX ring. */ +#define BOND_MODE_8023AX_SLAVE_TX_PKTS 1 +/** + * Timeouts deffinitions (5.4.4 in 802.1AX documentation). + */ +#define BOND_8023AD_FAST_PERIODIC_MS 900 +#define BOND_8023AD_SLOW_PERIODIC_MS 29000 +#define BOND_8023AD_SHORT_TIMEOUT_MS 3000 +#define BOND_8023AD_LONG_TIMEOUT_MS 90000 +#define BOND_8023AD_CHURN_DETECTION_TIMEOUT_MS 60000 +#define BOND_8023AD_AGGREGATE_WAIT_TIMEOUT_MS 2000 +#define BOND_8023AD_TX_MACHINE_PERIOD_MS 500 +#define BOND_8023AD_RX_MARKER_PERIOD_MS 2000 + +/** + * Interval of showing warning message from state machines. All messages will + * be held (and gathered together) to prevent flooding. + * This is no parto of 802.1AX standard. 
+ */
+#define BOND_8023AD_WARNINGS_PERIOD_MS 1000
+
+
+
+/**
+ * State machine flags
+ */
+#define SM_FLAGS_BEGIN 0x0001
+#define SM_FLAGS_LACP_ENABLED 0x0002
+#define SM_FLAGS_ACTOR_CHURN 0x0004
+#define SM_FLAGS_PARTNER_CHURN 0x0008
+#define SM_FLAGS_MOVED 0x0100
+#define SM_FLAGS_PARTNER_SHORT_TIMEOUT 0x0200
+#define SM_FLAGS_NTT 0x0400
+
+#define BOND_LINK_FULL_DUPLEX_KEY 0x01
+#define BOND_LINK_SPEED_KEY_10M 0x02
+#define BOND_LINK_SPEED_KEY_100M 0x04
+#define BOND_LINK_SPEED_KEY_1000M 0x08
+#define BOND_LINK_SPEED_KEY_10G 0x10
+#define BOND_LINK_SPEED_KEY_20G 0x11
+#define BOND_LINK_SPEED_KEY_40G 0x12
+
+#define WRN_RX_MARKER_TO_FAST 0x01
+#define WRN_UNKNOWN_SLOW_TYPE 0x02
+#define WRN_UNKNOWN_MARKER_TYPE 0x04
+#define WRN_NOT_LACP_CAPABLE 0x08
+#define WRN_RX_QUEUE_FULL 0x10
+#define WRN_TX_QUEUE_FULL 0x20
+
+#define CHECK_FLAGS(_variable, _f) ((_variable) & (_f))
+#define SET_FLAGS(_variable, _f) ((_variable) |= (_f))
+#define CLEAR_FLAGS(_variable, _f) ((_variable) &= ~(_f))
+
+#define SM_FLAG(_p, _f) (!!CHECK_FLAGS((_p)->sm_flags, SM_FLAGS_ ## _f))
+#define SM_FLAG_SET(_p, _f) SET_FLAGS((_p)->sm_flags, SM_FLAGS_ ## _f)
+#define SM_FLAG_CLR(_p, _f) CLEAR_FLAGS((_p)->sm_flags, SM_FLAGS_ ## _f)
+
+#define ACTOR_STATE(_p, _f) (!!CHECK_FLAGS((_p)->actor_state, STATE_ ## _f))
+#define ACTOR_STATE_SET(_p, _f) SET_FLAGS((_p)->actor_state, STATE_ ## _f)
+#define ACTOR_STATE_CLR(_p, _f) CLEAR_FLAGS((_p)->actor_state, STATE_ ## _f)
+
+#define PARTNER_STATE(_p, _f) (!!CHECK_FLAGS((_p)->partner_state, STATE_ ## _f))
+#define PARTNER_STATE_SET(_p, _f) SET_FLAGS((_p)->partner_state, STATE_ ## _f)
+#define PARTNER_STATE_CLR(_p, _f) CLEAR_FLAGS((_p)->partner_state, STATE_ ## _f)
+
+/** Variables associated with each port (5.4.7 in 802.1AX documentation). */
+struct port {
+	/**
+	 * The operational values of the Actor's state parameters. Bitmask
+	 * of port states.
+	 */
+	uint8_t actor_state;
+
+	/** The operational Actor's port parameters */
+	struct port_params actor;
+
+	/**
+	 * The operational value of the Actor's view of the current values of
+	 * the Partner's state parameters. The Actor sets this variable either
+	 * to the value received from the Partner in an LACPDU, or to the value
+	 * of Partner_Admin_Port_State. Bitmask of port states.
+	 */
+	uint8_t partner_state;
+
+	/** The operational Partner's port parameters */
+	struct port_params partner;
+
+	/* Additional port parameters not listed in documentation */
+	/** State machine flags */
+	uint16_t sm_flags;
+	enum rte_bond_8023ad_selection selected;
+
+	uint64_t current_while_timer;
+	uint64_t periodic_timer;
+	uint64_t wait_while_timer;
+	uint64_t tx_machine_timer;
+	uint64_t tx_marker_timer;
+	/* Aggregator parameters */
+	/** Used aggregator port ID */
+	uint16_t aggregator_port_id;
+
+	/** Memory pool used to allocate rings */
+	struct rte_mempool *mbuf_pool;
+
+	/** Ring of LACP packets from RX burst function */
+	struct rte_ring *rx_ring;
+
+	/** Ring of slow protocol packets (LACP and MARKERS) to TX burst function */
+	struct rte_ring *tx_ring;
+
+	/** Timer which also serves as a mutex. If it is 0 (not running), an RX
+	 * marker packet may be answered; otherwise it shall be dropped. It is
+	 * zeroed in the mode 4 callback function after it expires.
*/ + volatile uint64_t rx_marker_timer; + + uint64_t warning_timer; + volatile uint16_t warnings_to_show; +}; + +struct mode8023ad_private { + uint64_t fast_periodic_timeout; + uint64_t slow_periodic_timeout; + uint64_t short_timeout; + uint64_t long_timeout; + uint64_t aggregate_wait_timeout; + uint64_t tx_period_timeout; + uint64_t rx_marker_timeout; + uint64_t update_timeout_us; +}; + +/** + * @internal + * The pool of *port* structures. The size of the pool + * is configured at compile-time in the <rte_eth_bond_8023ad.c> file. + */ +extern struct port mode_8023ad_ports[]; + +/* Forward declaration */ +struct bond_dev_private; + +/** + * @internal + * + * Get configuration of bonded interface. + * + * + * @param dev Bonded interface + * @param conf returned configuration + */ +void +bond_mode_8023ad_conf_get(struct rte_eth_dev *dev, + struct rte_eth_bond_8023ad_conf *conf); + +/** + * @internal + * + * Set mode 4 configuration of bonded interface. + * + * @pre Bonded interface must be stopped. + * + * @param dev Bonded interface + * @param conf new configuration. If NULL set default configuration. + */ +void +bond_mode_8023ad_setup(struct rte_eth_dev *dev, + struct rte_eth_bond_8023ad_conf *conf); + +/** + * @internal + * + * Enables 802.1AX mode and all active slaves on bonded interface. + * + * @param dev Bonded interface + * @return + * 0 on success, negative value otherwise. + */ +int +bond_mode_8023ad_enable(struct rte_eth_dev *dev); + +/** + * @internal + * + * Disables 802.1AX mode of the bonded interface and slaves. + * + * @param dev Bonded interface + * @return + * 0 on success, negative value otherwise. + */ +int bond_mode_8023ad_disable(struct rte_eth_dev *dev); + +/** + * @internal + * + * Starts 802.3AX state machines management logic. + * @param dev Bonded interface + * @return + * 0 if machines was started, 1 if machines was already running, + * negative value otherwise. + */ +int +bond_mode_8023ad_start(struct rte_eth_dev *dev); + +/** + * @internal + * + * Stops 802.3AX state machines management logic. + * @param dev Bonded interface + * @return + * 0 if this call stopped state machines, -ENOENT if alarm was not set. + */ +void +bond_mode_8023ad_stop(struct rte_eth_dev *dev); + +/** + * @internal + * + * Passes given slow packet to state machines management logic. + * @param internals Bonded device private data. + * @param slave_id Slave port id. + * @param slot_pkt Slow packet. + */ +void +bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals, + uint8_t slave_id, struct rte_mbuf *pkt); + +/** + * @internal + * + * Appends given slave used slave + * + * @param dev Bonded interface. + * @param port_id Slave port ID to be added + * + * @return + * 0 on success, negative value otherwise. + */ +void +bond_mode_8023ad_activate_slave(struct rte_eth_dev *dev, uint8_t port_id); + +/** + * @internal + * + * Denitializes and removes given slave from 802.1AX mode. + * + * @param dev Bonded interface. + * @param slave_num Position of slave in active_slaves array + * + * @return + * 0 on success, negative value otherwise. + */ +int +bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *dev, uint8_t slave_pos); + +/** + * Updates state when MAC was changed on bonded device or one of its slaves. 
+ * @param bond_dev Bonded device + */ +void +bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev); + +#endif /* RTE_ETH_BOND_8023AD_H_ */ diff --git a/drivers/net/bonding/rte_eth_bond_alb.c b/drivers/net/bonding/rte_eth_bond_alb.c new file mode 100644 index 00000000..3157543e --- /dev/null +++ b/drivers/net/bonding/rte_eth_bond_alb.c @@ -0,0 +1,287 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "rte_eth_bond_private.h" +#include "rte_eth_bond_alb.h" + +static inline uint8_t +simple_hash(uint8_t *hash_start, int hash_size) +{ + int i; + uint8_t hash; + + hash = 0; + for (i = 0; i < hash_size; ++i) + hash ^= hash_start[i]; + + return hash; +} + +static uint8_t +calculate_slave(struct bond_dev_private *internals) +{ + uint8_t idx; + + idx = (internals->mode6.last_slave + 1) % internals->active_slave_count; + internals->mode6.last_slave = idx; + return internals->active_slaves[idx]; +} + +int +bond_mode_alb_enable(struct rte_eth_dev *bond_dev) +{ + struct bond_dev_private *internals = bond_dev->data->dev_private; + struct client_data *hash_table = internals->mode6.client_table; + + uint16_t data_size; + char mem_name[RTE_ETH_NAME_MAX_LEN]; + int socket_id = bond_dev->data->numa_node; + + /* Fill hash table with initial values */ + memset(hash_table, 0, sizeof(struct client_data) * ALB_HASH_TABLE_SIZE); + rte_spinlock_init(&internals->mode6.lock); + internals->mode6.last_slave = ALB_NULL_INDEX; + internals->mode6.ntt = 0; + + /* Initialize memory pool for ARP packets to send */ + if (internals->mode6.mempool == NULL) { + /* + * 256 is size of ETH header, ARP header and nested VLAN headers. + * The value is chosen to be cache aligned. + */ + data_size = 256 + RTE_PKTMBUF_HEADROOM; + snprintf(mem_name, sizeof(mem_name), "%s_MODE6", bond_dev->data->name); + internals->mode6.mempool = rte_pktmbuf_pool_create(mem_name, + 512 * RTE_MAX_ETHPORTS, + RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ? 
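/* Editor's note: the per-lcore mempool cache size in this expression is 32,
 * clamped to the build-time RTE_MEMPOOL_CACHE_MAX_SIZE when that maximum is
 * smaller. */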
+ 32 : RTE_MEMPOOL_CACHE_MAX_SIZE, + 0, data_size, socket_id); + + if (internals->mode6.mempool == NULL) { + RTE_LOG(ERR, PMD, "%s: Failed to initialize ALB mempool.\n", + bond_dev->data->name); + rte_panic( + "Failed to allocate memory pool ('%s')\n" + "for bond device '%s'\n", + mem_name, bond_dev->data->name); + } + } + + return 0; +} + +void bond_mode_alb_arp_recv(struct ether_hdr *eth_h, uint16_t offset, + struct bond_dev_private *internals) { + struct arp_hdr *arp; + + struct client_data *hash_table = internals->mode6.client_table; + struct client_data *client_info; + + uint8_t hash_index; + + arp = (struct arp_hdr *) ((char *) (eth_h + 1) + offset); + + /* ARP Requests are forwarded to the application with no changes */ + if (arp->arp_op != rte_cpu_to_be_16(ARP_OP_REPLY)) + return; + + /* From now on, we analyze only ARP Reply packets */ + hash_index = simple_hash((uint8_t *) &arp->arp_data.arp_sip, + sizeof(arp->arp_data.arp_sip)); + client_info = &hash_table[hash_index]; + + /* + * We got reply for ARP Request send by the application. We need to + * update client table when received data differ from what is stored + * in ALB table and issue sending update packet to that slave. + */ + rte_spinlock_lock(&internals->mode6.lock); + if (client_info->in_use == 0 || + client_info->app_ip != arp->arp_data.arp_tip || + client_info->cli_ip != arp->arp_data.arp_sip || + !is_same_ether_addr(&client_info->cli_mac, &arp->arp_data.arp_sha) || + client_info->vlan_count != offset / sizeof(struct vlan_hdr) || + memcmp(client_info->vlan, eth_h + 1, offset) != 0 + ) { + client_info->in_use = 1; + client_info->app_ip = arp->arp_data.arp_tip; + client_info->cli_ip = arp->arp_data.arp_sip; + ether_addr_copy(&arp->arp_data.arp_sha, &client_info->cli_mac); + client_info->slave_idx = calculate_slave(internals); + rte_eth_macaddr_get(client_info->slave_idx, &client_info->app_mac); + ether_addr_copy(&client_info->app_mac, &arp->arp_data.arp_tha); + memcpy(client_info->vlan, eth_h + 1, offset); + client_info->vlan_count = offset / sizeof(struct vlan_hdr); + } + internals->mode6.ntt = 1; + rte_spinlock_unlock(&internals->mode6.lock); +} + +uint8_t +bond_mode_alb_arp_xmit(struct ether_hdr *eth_h, uint16_t offset, + struct bond_dev_private *internals) +{ + struct arp_hdr *arp; + + struct client_data *hash_table = internals->mode6.client_table; + struct client_data *client_info; + + uint8_t hash_index; + + struct ether_addr bonding_mac; + + arp = (struct arp_hdr *)((char *)(eth_h + 1) + offset); + + /* + * Traffic with src MAC other than bonding should be sent on + * current primary port. 
+ */ + rte_eth_macaddr_get(internals->port_id, &bonding_mac); + if (!is_same_ether_addr(&bonding_mac, &arp->arp_data.arp_sha)) { + rte_eth_macaddr_get(internals->current_primary_port, + &arp->arp_data.arp_sha); + return internals->current_primary_port; + } + + hash_index = simple_hash((uint8_t *)&arp->arp_data.arp_tip, + sizeof(uint32_t)); + client_info = &hash_table[hash_index]; + + rte_spinlock_lock(&internals->mode6.lock); + if (arp->arp_op == rte_cpu_to_be_16(ARP_OP_REPLY)) { + if (client_info->in_use) { + if (client_info->app_ip == arp->arp_data.arp_sip && + client_info->cli_ip == arp->arp_data.arp_tip) { + /* Entry is already assigned to this client */ + if (!is_broadcast_ether_addr(&arp->arp_data.arp_tha)) { + ether_addr_copy(&arp->arp_data.arp_tha, + &client_info->cli_mac); + } + rte_eth_macaddr_get(client_info->slave_idx, + &client_info->app_mac); + ether_addr_copy(&client_info->app_mac, &arp->arp_data.arp_sha); + memcpy(client_info->vlan, eth_h + 1, offset); + client_info->vlan_count = offset / sizeof(struct vlan_hdr); + rte_spinlock_unlock(&internals->mode6.lock); + return client_info->slave_idx; + } + } + + /* Assign new slave to this client and update src mac in ARP */ + client_info->in_use = 1; + client_info->ntt = 0; + client_info->app_ip = arp->arp_data.arp_sip; + ether_addr_copy(&arp->arp_data.arp_tha, &client_info->cli_mac); + client_info->cli_ip = arp->arp_data.arp_tip; + client_info->slave_idx = calculate_slave(internals); + rte_eth_macaddr_get(client_info->slave_idx, &client_info->app_mac); + ether_addr_copy(&client_info->app_mac, &arp->arp_data.arp_sha); + memcpy(client_info->vlan, eth_h + 1, offset); + client_info->vlan_count = offset / sizeof(struct vlan_hdr); + rte_spinlock_unlock(&internals->mode6.lock); + return client_info->slave_idx; + } + + /* If packet is not ARP Reply, send it on current primary port. 
*/ + rte_spinlock_unlock(&internals->mode6.lock); + rte_eth_macaddr_get(internals->current_primary_port, + &arp->arp_data.arp_sha); + return internals->current_primary_port; +} + +uint8_t +bond_mode_alb_arp_upd(struct client_data *client_info, + struct rte_mbuf *pkt, struct bond_dev_private *internals) +{ + struct ether_hdr *eth_h; + struct arp_hdr *arp_h; + uint8_t slave_idx; + + rte_spinlock_lock(&internals->mode6.lock); + eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *); + + ether_addr_copy(&client_info->app_mac, ð_h->s_addr); + ether_addr_copy(&client_info->cli_mac, ð_h->d_addr); + if (client_info->vlan_count > 0) + eth_h->ether_type = rte_cpu_to_be_16(ETHER_TYPE_VLAN); + else + eth_h->ether_type = rte_cpu_to_be_16(ETHER_TYPE_ARP); + + arp_h = (struct arp_hdr *)((char *)eth_h + sizeof(struct ether_hdr) + + client_info->vlan_count * sizeof(struct vlan_hdr)); + + memcpy(eth_h + 1, client_info->vlan, + client_info->vlan_count * sizeof(struct vlan_hdr)); + + ether_addr_copy(&client_info->app_mac, &arp_h->arp_data.arp_sha); + arp_h->arp_data.arp_sip = client_info->app_ip; + ether_addr_copy(&client_info->cli_mac, &arp_h->arp_data.arp_tha); + arp_h->arp_data.arp_tip = client_info->cli_ip; + + arp_h->arp_hrd = rte_cpu_to_be_16(ARP_HRD_ETHER); + arp_h->arp_pro = rte_cpu_to_be_16(ETHER_TYPE_IPv4); + arp_h->arp_hln = ETHER_ADDR_LEN; + arp_h->arp_pln = sizeof(uint32_t); + arp_h->arp_op = rte_cpu_to_be_16(ARP_OP_REPLY); + + slave_idx = client_info->slave_idx; + rte_spinlock_unlock(&internals->mode6.lock); + + return slave_idx; +} + +void +bond_mode_alb_client_list_upd(struct rte_eth_dev *bond_dev) +{ + struct bond_dev_private *internals = bond_dev->data->dev_private; + struct client_data *client_info; + + int i; + + /* If active slave count is 0, it's pointless to refresh alb table */ + if (internals->active_slave_count <= 0) + return; + + rte_spinlock_lock(&internals->mode6.lock); + internals->mode6.last_slave = ALB_NULL_INDEX; + + for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) { + client_info = &internals->mode6.client_table[i]; + if (client_info->in_use) { + client_info->slave_idx = calculate_slave(internals); + rte_eth_macaddr_get(client_info->slave_idx, &client_info->app_mac); + internals->mode6.ntt = 1; + } + } + rte_spinlock_unlock(&internals->mode6.lock); +} diff --git a/drivers/net/bonding/rte_eth_bond_alb.h b/drivers/net/bonding/rte_eth_bond_alb.h new file mode 100644 index 00000000..fd7c3aeb --- /dev/null +++ b/drivers/net/bonding/rte_eth_bond_alb.h @@ -0,0 +1,142 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RTE_ETH_BOND_ALB_H_ +#define RTE_ETH_BOND_ALB_H_ + +#include <rte_ether.h> +#include <rte_arp.h> + +#define ALB_HASH_TABLE_SIZE 256 +#define ALB_NULL_INDEX 0xFFFFFFFF + +struct client_data { + /** ARP data of single client */ + struct ether_addr app_mac; + /**< MAC address of application running DPDK */ + uint32_t app_ip; + /**< IP address of application running DPDK */ + struct ether_addr cli_mac; + /**< Client MAC address */ + uint32_t cli_ip; + /**< Client IP address */ + + uint8_t slave_idx; + /**< Index of slave on which we connect with that client */ + uint8_t in_use; + /**< Flag indicating if entry in client table is currently used */ + uint8_t ntt; + /**< Flag indicating if we need to send update to this client on next tx */ + + struct vlan_hdr vlan[2]; + /**< Content of vlan headers */ + uint8_t vlan_count; + /**< Number of nested vlan headers */ +}; + +struct mode_alb_private { + struct client_data client_table[ALB_HASH_TABLE_SIZE]; + /**< Hash table storing ARP data of every client connected */ + struct rte_mempool *mempool; + /**< Mempool for creating ARP update packets */ + uint8_t ntt; + /**< Flag indicating if we need to send update to any client on next tx */ + uint32_t last_slave; + /**< Index of last used slave in client table */ + rte_spinlock_t lock; +}; + +/** + * ALB mode initialization. + * + * @param bond_dev Pointer to bonding device. + * + * @return + * Error code - 0 on success. + */ +int +bond_mode_alb_enable(struct rte_eth_dev *bond_dev); + +/** + * Function handles ARP packet reception. If received ARP request, it is + * forwarded to application without changes. If it is ARP reply, client table + * is updated. + * + * @param eth_h ETH header of received packet. + * @param offset Vlan header offset. + * @param internals Bonding data. + */ +void +bond_mode_alb_arp_recv(struct ether_hdr *eth_h, uint16_t offset, + struct bond_dev_private *internals); + +/** + * Function handles ARP packet transmission. It also decides on which slave + * send that packet. If packet is ARP Request, it is send on primary slave. + * If it is ARP Reply, it is send on slave stored in client table for that + * connection. On Reply function also updates data in client table. + * + * @param eth_h ETH header of transmitted packet. + * @param offset Vlan header offset. + * @param internals Bonding data. + * + * @return + * Index of slave on which packet should be sent. + */ +uint8_t +bond_mode_alb_arp_xmit(struct ether_hdr *eth_h, uint16_t offset, + struct bond_dev_private *internals); + +/** + * Function fills packet with ARP data from client_info. + * + * @param client_info Data of client to which packet is sent. + * @param pkt Pointer to packet which is sent. 
+ * @param internals Bonding data.
+ *
+ * @return
+ * Index of slave on which packet should be sent.
+ */
+uint8_t
+bond_mode_alb_arp_upd(struct client_data *client_info,
+		struct rte_mbuf *pkt, struct bond_dev_private *internals);
+
+/**
+ * Function updates slave indexes of active connections.
+ *
+ * @param bond_dev Pointer to bonded device struct.
+ */
+void
+bond_mode_alb_client_list_upd(struct rte_eth_dev *bond_dev);
+
+#endif /* RTE_ETH_BOND_ALB_H_ */
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
new file mode 100644
index 00000000..e9247b5f
--- /dev/null
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -0,0 +1,848 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */ + +#include <string.h> + +#include <rte_mbuf.h> +#include <rte_malloc.h> +#include <rte_ethdev.h> +#include <rte_tcp.h> + +#include "rte_eth_bond.h" +#include "rte_eth_bond_private.h" +#include "rte_eth_bond_8023ad_private.h" + +#define DEFAULT_POLLING_INTERVAL_10_MS (10) + +const char pmd_bond_driver_name[] = "rte_bond_pmd"; + +int +check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev) +{ + /* Check valid pointer */ + if (eth_dev->data->drv_name == NULL) + return -1; + + /* return 0 if driver name matches */ + return eth_dev->data->drv_name != pmd_bond_driver_name; +} + +int +valid_bonded_port_id(uint8_t port_id) +{ + if (!rte_eth_dev_is_valid_port(port_id)) + return -1; + + return check_for_bonded_ethdev(&rte_eth_devices[port_id]); +} + +int +valid_slave_port_id(uint8_t port_id) +{ + /* Verify that port id's are valid */ + if (!rte_eth_dev_is_valid_port(port_id)) + return -1; + + /* Verify that port_id refers to a non bonded port */ + if (check_for_bonded_ethdev(&rte_eth_devices[port_id]) == 0) + return -1; + + return 0; +} + +void +activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id) +{ + struct bond_dev_private *internals = eth_dev->data->dev_private; + uint8_t active_count = internals->active_slave_count; + + if (internals->mode == BONDING_MODE_8023AD) + bond_mode_8023ad_activate_slave(eth_dev, port_id); + + if (internals->mode == BONDING_MODE_TLB + || internals->mode == BONDING_MODE_ALB) { + + internals->tlb_slaves_order[active_count] = port_id; + } + + RTE_VERIFY(internals->active_slave_count < + (RTE_DIM(internals->active_slaves) - 1)); + + internals->active_slaves[internals->active_slave_count] = port_id; + internals->active_slave_count++; + + if (internals->mode == BONDING_MODE_TLB) + bond_tlb_activate_slave(internals); + if (internals->mode == BONDING_MODE_ALB) + bond_mode_alb_client_list_upd(eth_dev); +} + +void +deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id) +{ + uint8_t slave_pos; + struct bond_dev_private *internals = eth_dev->data->dev_private; + uint8_t active_count = internals->active_slave_count; + + if (internals->mode == BONDING_MODE_8023AD) { + bond_mode_8023ad_stop(eth_dev); + bond_mode_8023ad_deactivate_slave(eth_dev, port_id); + } else if (internals->mode == BONDING_MODE_TLB + || internals->mode == BONDING_MODE_ALB) + bond_tlb_disable(internals); + + slave_pos = find_slave_by_id(internals->active_slaves, active_count, + port_id); + + /* If slave was not at the end of the list + * shift active slaves up active array list */ + if (slave_pos < active_count) { + active_count--; + memmove(internals->active_slaves + slave_pos, + internals->active_slaves + slave_pos + 1, + (active_count - slave_pos) * + sizeof(internals->active_slaves[0])); + } + + RTE_VERIFY(active_count < RTE_DIM(internals->active_slaves)); + internals->active_slave_count = active_count; + + if (eth_dev->data->dev_started) { + if (internals->mode == BONDING_MODE_8023AD) { + bond_mode_8023ad_start(eth_dev); + } else if (internals->mode == BONDING_MODE_TLB) { + bond_tlb_enable(internals); + } else if (internals->mode == BONDING_MODE_ALB) { + bond_tlb_enable(internals); + bond_mode_alb_client_list_upd(eth_dev); + } + } +} + +uint8_t +number_of_sockets(void) +{ + int sockets = 0; + int i; + const struct rte_memseg *ms = rte_eal_get_physmem_layout(); + + for (i = 0; ((i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL)); i++) { + if (sockets < ms[i].socket_id) + sockets = ms[i].socket_id; + } + + /* Number of sockets = maximum socket_id + 1 */ + return ++sockets; +} + +int 
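/*
 * Editor's note: illustrative sketch, not part of this patch. It shows how an
 * application might combine rte_eth_bond_create() (defined below) with
 * rte_eth_bond_slave_add(); the device name "bond0" and slave port ids 0 and 1
 * are hypothetical.
 *
 *	int ret = rte_eth_bond_create("bond0", BONDING_MODE_ACTIVE_BACKUP, 0);
 *	if (ret < 0)
 *		rte_exit(EXIT_FAILURE, "Failed to create bonded device\n");
 *
 *	uint8_t bond_port = (uint8_t)ret;
 *	if (rte_eth_bond_slave_add(bond_port, 0) != 0 ||
 *			rte_eth_bond_slave_add(bond_port, 1) != 0)
 *		rte_exit(EXIT_FAILURE, "Failed to add slave ports\n");
 */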
+rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id) +{ + struct bond_dev_private *internals = NULL; + struct rte_eth_dev *eth_dev = NULL; + + /* now do all data allocation - for eth_dev structure, dummy pci driver + * and internal (private) data + */ + + if (name == NULL) { + RTE_BOND_LOG(ERR, "Invalid name specified"); + goto err; + } + + if (socket_id >= number_of_sockets()) { + RTE_BOND_LOG(ERR, + "Invalid socket id specified to create bonded device on."); + goto err; + } + + internals = rte_zmalloc_socket(name, sizeof(*internals), 0, socket_id); + if (internals == NULL) { + RTE_BOND_LOG(ERR, "Unable to malloc internals on socket"); + goto err; + } + + /* reserve an ethdev entry */ + eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL); + if (eth_dev == NULL) { + RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev"); + goto err; + } + + eth_dev->data->dev_private = internals; + eth_dev->data->nb_rx_queues = (uint16_t)1; + eth_dev->data->nb_tx_queues = (uint16_t)1; + + TAILQ_INIT(&(eth_dev->link_intr_cbs)); + + eth_dev->data->dev_link.link_status = ETH_LINK_DOWN; + + eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN, 0, + socket_id); + if (eth_dev->data->mac_addrs == NULL) { + RTE_BOND_LOG(ERR, "Unable to malloc mac_addrs"); + goto err; + } + + eth_dev->data->dev_started = 0; + eth_dev->data->promiscuous = 0; + eth_dev->data->scattered_rx = 0; + eth_dev->data->all_multicast = 0; + + eth_dev->dev_ops = &default_dev_ops; + eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC | + RTE_ETH_DEV_DETACHABLE; + eth_dev->driver = NULL; + eth_dev->data->kdrv = RTE_KDRV_NONE; + eth_dev->data->drv_name = pmd_bond_driver_name; + eth_dev->data->numa_node = socket_id; + + rte_spinlock_init(&internals->lock); + + internals->port_id = eth_dev->data->port_id; + internals->mode = BONDING_MODE_INVALID; + internals->current_primary_port = RTE_MAX_ETHPORTS + 1; + internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2; + internals->xmit_hash = xmit_l2_hash; + internals->user_defined_mac = 0; + internals->link_props_set = 0; + + internals->link_status_polling_enabled = 0; + + internals->link_status_polling_interval_ms = DEFAULT_POLLING_INTERVAL_10_MS; + internals->link_down_delay_ms = 0; + internals->link_up_delay_ms = 0; + + internals->slave_count = 0; + internals->active_slave_count = 0; + internals->rx_offload_capa = 0; + internals->tx_offload_capa = 0; + + /* Initially allow to choose any offload type */ + internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK; + + memset(internals->active_slaves, 0, sizeof(internals->active_slaves)); + memset(internals->slaves, 0, sizeof(internals->slaves)); + + /* Set mode 4 default configuration */ + bond_mode_8023ad_setup(eth_dev, NULL); + if (bond_ethdev_mode_set(eth_dev, mode)) { + RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode too %d", + eth_dev->data->port_id, mode); + goto err; + } + + return eth_dev->data->port_id; + +err: + rte_free(internals); + if (eth_dev != NULL) { + rte_free(eth_dev->data->mac_addrs); + rte_eth_dev_release_port(eth_dev); + } + return -1; +} + +int +rte_eth_bond_free(const char *name) +{ + struct rte_eth_dev *eth_dev = NULL; + struct bond_dev_private *internals; + + /* now free all data allocation - for eth_dev structure, + * dummy pci driver and internal (private) data + */ + + /* find an ethdev entry */ + eth_dev = rte_eth_dev_allocated(name); + if (eth_dev == NULL) + return -ENODEV; + + internals = eth_dev->data->dev_private; + if (internals->slave_count != 0) + return -EBUSY; + + if 
(eth_dev->data->dev_started == 1) { + bond_ethdev_stop(eth_dev); + bond_ethdev_close(eth_dev); + } + + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + + rte_free(eth_dev->data->dev_private); + rte_free(eth_dev->data->mac_addrs); + + rte_eth_dev_release_port(eth_dev); + + return 0; +} + +static int +__eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id) +{ + struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev; + struct bond_dev_private *internals; + struct rte_eth_link link_props; + struct rte_eth_dev_info dev_info; + + if (valid_slave_port_id(slave_port_id) != 0) + return -1; + + bonded_eth_dev = &rte_eth_devices[bonded_port_id]; + internals = bonded_eth_dev->data->dev_private; + + slave_eth_dev = &rte_eth_devices[slave_port_id]; + if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_BONDED_SLAVE) { + RTE_BOND_LOG(ERR, "Slave device is already a slave of a bonded device"); + return -1; + } + + /* Add slave details to bonded device */ + slave_eth_dev->data->dev_flags |= RTE_ETH_DEV_BONDED_SLAVE; + slave_add(internals, slave_eth_dev); + + rte_eth_dev_info_get(slave_port_id, &dev_info); + + /* We need to store slaves reta_size to be able to synchronize RETA for all + * slave devices even if its sizes are different. + */ + internals->slaves[internals->slave_count].reta_size = dev_info.reta_size; + + if (internals->slave_count < 1) { + /* if MAC is not user defined then use MAC of first slave add to + * bonded device */ + if (!internals->user_defined_mac) + mac_address_set(bonded_eth_dev, slave_eth_dev->data->mac_addrs); + + /* Inherit eth dev link properties from first slave */ + link_properties_set(bonded_eth_dev, + &(slave_eth_dev->data->dev_link)); + + /* Make primary slave */ + internals->primary_port = slave_port_id; + internals->current_primary_port = slave_port_id; + + /* Inherit queues settings from first slave */ + internals->nb_rx_queues = slave_eth_dev->data->nb_rx_queues; + internals->nb_tx_queues = slave_eth_dev->data->nb_tx_queues; + + internals->reta_size = dev_info.reta_size; + + /* Take the first dev's offload capabilities */ + internals->rx_offload_capa = dev_info.rx_offload_capa; + internals->tx_offload_capa = dev_info.tx_offload_capa; + internals->flow_type_rss_offloads = dev_info.flow_type_rss_offloads; + + } else { + /* Check slave link properties are supported if props are set, + * all slaves must be the same */ + if (internals->link_props_set) { + if (link_properties_valid(&(bonded_eth_dev->data->dev_link), + &(slave_eth_dev->data->dev_link))) { + slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE); + RTE_BOND_LOG(ERR, + "Slave port %d link speed/duplex not supported", + slave_port_id); + return -1; + } + } else { + link_properties_set(bonded_eth_dev, + &(slave_eth_dev->data->dev_link)); + } + internals->rx_offload_capa &= dev_info.rx_offload_capa; + internals->tx_offload_capa &= dev_info.tx_offload_capa; + internals->flow_type_rss_offloads &= dev_info.flow_type_rss_offloads; + + /* RETA size is GCD of all slaves RETA sizes, so, if all sizes will be + * the power of 2, the lower one is GCD + */ + if (internals->reta_size > dev_info.reta_size) + internals->reta_size = dev_info.reta_size; + + } + + bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &= + internals->flow_type_rss_offloads; + + internals->slave_count++; + + /* Update all slave devices MACs*/ + mac_address_slaves_update(bonded_eth_dev); + + if (bonded_eth_dev->data->dev_started) { + if (slave_configure(bonded_eth_dev, 
slave_eth_dev) != 0) { + slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE); + RTE_BOND_LOG(ERR, "rte_bond_slaves_configure: port=%d", + slave_port_id); + return -1; + } + } + + /* Register link status change callback with bonded device pointer as + * argument*/ + rte_eth_dev_callback_register(slave_port_id, RTE_ETH_EVENT_INTR_LSC, + bond_ethdev_lsc_event_callback, &bonded_eth_dev->data->port_id); + + /* If bonded device is started then we can add the slave to our active + * slave array */ + if (bonded_eth_dev->data->dev_started) { + rte_eth_link_get_nowait(slave_port_id, &link_props); + + if (link_props.link_status == ETH_LINK_UP) { + if (internals->active_slave_count == 0 && + !internals->user_defined_primary_port) + bond_ethdev_primary_set(internals, + slave_port_id); + + if (find_slave_by_id(internals->active_slaves, + internals->active_slave_count, + slave_port_id) == internals->active_slave_count) + activate_slave(bonded_eth_dev, slave_port_id); + } + } + return 0; + +} + +int +rte_eth_bond_slave_add(uint8_t bonded_port_id, uint8_t slave_port_id) +{ + struct rte_eth_dev *bonded_eth_dev; + struct bond_dev_private *internals; + + int retval; + + /* Verify that port id's are valid bonded and slave ports */ + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + bonded_eth_dev = &rte_eth_devices[bonded_port_id]; + internals = bonded_eth_dev->data->dev_private; + + rte_spinlock_lock(&internals->lock); + + retval = __eth_bond_slave_add_lock_free(bonded_port_id, slave_port_id); + + rte_spinlock_unlock(&internals->lock); + + return retval; +} + +static int +__eth_bond_slave_remove_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id) +{ + struct rte_eth_dev *bonded_eth_dev; + struct bond_dev_private *internals; + struct rte_eth_dev *slave_eth_dev; + int i, slave_idx; + + if (valid_slave_port_id(slave_port_id) != 0) + return -1; + + bonded_eth_dev = &rte_eth_devices[bonded_port_id]; + internals = bonded_eth_dev->data->dev_private; + + /* first remove from active slave list */ + slave_idx = find_slave_by_id(internals->active_slaves, + internals->active_slave_count, slave_port_id); + + if (slave_idx < internals->active_slave_count) + deactivate_slave(bonded_eth_dev, slave_port_id); + + slave_idx = -1; + /* now find in slave list */ + for (i = 0; i < internals->slave_count; i++) + if (internals->slaves[i].port_id == slave_port_id) { + slave_idx = i; + break; + } + + if (slave_idx < 0) { + RTE_BOND_LOG(ERR, "Couldn't find slave in port list, slave count %d", + internals->slave_count); + return -1; + } + + /* Un-register link status change callback with bonded device pointer as + * argument*/ + rte_eth_dev_callback_unregister(slave_port_id, RTE_ETH_EVENT_INTR_LSC, + bond_ethdev_lsc_event_callback, + &rte_eth_devices[bonded_port_id].data->port_id); + + /* Restore original MAC address of slave device */ + mac_address_set(&rte_eth_devices[slave_port_id], + &(internals->slaves[slave_idx].persisted_mac_addr)); + + slave_eth_dev = &rte_eth_devices[slave_port_id]; + slave_remove(internals, slave_eth_dev); + slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE); + + /* first slave in the active list will be the primary by default, + * otherwise use first device in list */ + if (internals->current_primary_port == slave_port_id) { + if (internals->active_slave_count > 0) + internals->current_primary_port = internals->active_slaves[0]; + else if (internals->slave_count > 0) + internals->current_primary_port = internals->slaves[0].port_id; + else + internals->primary_port = 0; 
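/*
 * Editor's note: illustrative sketch, not part of this patch. The fallback
 * above picks the first active slave, then the first configured slave, and
 * finally resets the stored primary when no slaves remain. An application that
 * needs a specific primary can pin it explicitly after removing a slave
 * (bond_port and preferred_slave are hypothetical port ids):
 *
 *	if (rte_eth_bond_primary_set(bond_port, preferred_slave) != 0)
 *		RTE_LOG(ERR, PMD, "Failed to set primary slave\n");
 */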
+ } + + if (internals->active_slave_count < 1) { + /* reset device link properties as no slaves are active */ + link_properties_reset(&rte_eth_devices[bonded_port_id]); + + /* if no slaves are any longer attached to bonded device and MAC is not + * user defined then clear MAC of bonded device as it will be reset + * when a new slave is added */ + if (internals->slave_count < 1 && !internals->user_defined_mac) + memset(rte_eth_devices[bonded_port_id].data->mac_addrs, 0, + sizeof(*(rte_eth_devices[bonded_port_id].data->mac_addrs))); + } + if (internals->slave_count == 0) { + internals->rx_offload_capa = 0; + internals->tx_offload_capa = 0; + internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK; + internals->reta_size = 0; + } + return 0; +} + +int +rte_eth_bond_slave_remove(uint8_t bonded_port_id, uint8_t slave_port_id) +{ + struct rte_eth_dev *bonded_eth_dev; + struct bond_dev_private *internals; + int retval; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + bonded_eth_dev = &rte_eth_devices[bonded_port_id]; + internals = bonded_eth_dev->data->dev_private; + + rte_spinlock_lock(&internals->lock); + + retval = __eth_bond_slave_remove_lock_free(bonded_port_id, slave_port_id); + + rte_spinlock_unlock(&internals->lock); + + return retval; +} + +int +rte_eth_bond_mode_set(uint8_t bonded_port_id, uint8_t mode) +{ + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + return bond_ethdev_mode_set(&rte_eth_devices[bonded_port_id], mode); +} + +int +rte_eth_bond_mode_get(uint8_t bonded_port_id) +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + + return internals->mode; +} + +int +rte_eth_bond_primary_set(uint8_t bonded_port_id, uint8_t slave_port_id) +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + if (valid_slave_port_id(slave_port_id) != 0) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + + internals->user_defined_primary_port = 1; + internals->primary_port = slave_port_id; + + bond_ethdev_primary_set(internals, slave_port_id); + + return 0; +} + +int +rte_eth_bond_primary_get(uint8_t bonded_port_id) +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + + if (internals->slave_count < 1) + return -1; + + return internals->current_primary_port; +} + +int +rte_eth_bond_slaves_get(uint8_t bonded_port_id, uint8_t slaves[], uint8_t len) +{ + struct bond_dev_private *internals; + uint8_t i; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + if (slaves == NULL) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + + if (internals->slave_count > len) + return -1; + + for (i = 0; i < internals->slave_count; i++) + slaves[i] = internals->slaves[i].port_id; + + return internals->slave_count; +} + +int +rte_eth_bond_active_slaves_get(uint8_t bonded_port_id, uint8_t slaves[], + uint8_t len) +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + if (slaves == NULL) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + + if (internals->active_slave_count > len) + return -1; + + memcpy(slaves, internals->active_slaves, internals->active_slave_count); + + return internals->active_slave_count; +} + +int 
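/*
 * Editor's note: illustrative sketch, not part of this patch, for the MAC
 * helpers declared below; the locally administered address and bond_port are
 * hypothetical. A user-set MAC is propagated to every slave and kept until
 * rte_eth_bond_mac_address_reset() restores the primary slave's persisted
 * address.
 *
 *	struct ether_addr addr = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
 *	};
 *
 *	if (rte_eth_bond_mac_address_set(bond_port, &addr) != 0)
 *		RTE_LOG(ERR, PMD, "Failed to set bonded device MAC\n");
 */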
+rte_eth_bond_mac_address_set(uint8_t bonded_port_id, + struct ether_addr *mac_addr) +{ + struct rte_eth_dev *bonded_eth_dev; + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + bonded_eth_dev = &rte_eth_devices[bonded_port_id]; + internals = bonded_eth_dev->data->dev_private; + + /* Set MAC Address of Bonded Device */ + if (mac_address_set(bonded_eth_dev, mac_addr)) + return -1; + + internals->user_defined_mac = 1; + + /* Update all slave devices MACs*/ + if (internals->slave_count > 0) + return mac_address_slaves_update(bonded_eth_dev); + + return 0; +} + +int +rte_eth_bond_mac_address_reset(uint8_t bonded_port_id) +{ + struct rte_eth_dev *bonded_eth_dev; + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + bonded_eth_dev = &rte_eth_devices[bonded_port_id]; + internals = bonded_eth_dev->data->dev_private; + + internals->user_defined_mac = 0; + + if (internals->slave_count > 0) { + /* Set MAC Address of Bonded Device */ + if (mac_address_set(bonded_eth_dev, + &internals->slaves[internals->primary_port].persisted_mac_addr) + != 0) { + RTE_BOND_LOG(ERR, "Failed to set MAC address on bonded device"); + return -1; + } + /* Update all slave devices MAC addresses */ + return mac_address_slaves_update(bonded_eth_dev); + } + /* No need to update anything as no slaves present */ + return 0; +} + +int +rte_eth_bond_xmit_policy_set(uint8_t bonded_port_id, uint8_t policy) +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + + switch (policy) { + case BALANCE_XMIT_POLICY_LAYER2: + internals->balance_xmit_policy = policy; + internals->xmit_hash = xmit_l2_hash; + break; + case BALANCE_XMIT_POLICY_LAYER23: + internals->balance_xmit_policy = policy; + internals->xmit_hash = xmit_l23_hash; + break; + case BALANCE_XMIT_POLICY_LAYER34: + internals->balance_xmit_policy = policy; + internals->xmit_hash = xmit_l34_hash; + break; + + default: + return -1; + } + return 0; +} + +int +rte_eth_bond_xmit_policy_get(uint8_t bonded_port_id) +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + + return internals->balance_xmit_policy; +} + +int +rte_eth_bond_link_monitoring_set(uint8_t bonded_port_id, uint32_t internal_ms) +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + internals->link_status_polling_interval_ms = internal_ms; + + return 0; +} + +int +rte_eth_bond_link_monitoring_get(uint8_t bonded_port_id) +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + + return internals->link_status_polling_interval_ms; +} + +int +rte_eth_bond_link_down_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms) + +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + internals->link_down_delay_ms = delay_ms; + + return 0; +} + +int +rte_eth_bond_link_down_prop_delay_get(uint8_t bonded_port_id) +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + internals = 
rte_eth_devices[bonded_port_id].data->dev_private; + + return internals->link_down_delay_ms; +} + +int +rte_eth_bond_link_up_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms) + +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + internals->link_up_delay_ms = delay_ms; + + return 0; +} + +int +rte_eth_bond_link_up_prop_delay_get(uint8_t bonded_port_id) +{ + struct bond_dev_private *internals; + + if (valid_bonded_port_id(bonded_port_id) != 0) + return -1; + + internals = rte_eth_devices[bonded_port_id].data->dev_private; + + return internals->link_up_delay_ms; +} diff --git a/drivers/net/bonding/rte_eth_bond_args.c b/drivers/net/bonding/rte_eth_bond_args.c new file mode 100644 index 00000000..02ecde64 --- /dev/null +++ b/drivers/net/bonding/rte_eth_bond_args.c @@ -0,0 +1,278 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <rte_devargs.h> +#include <rte_kvargs.h> + +#include <cmdline_parse.h> +#include <cmdline_parse_etheraddr.h> + +#include "rte_eth_bond.h" +#include "rte_eth_bond_private.h" + +const char *pmd_bond_init_valid_arguments[] = { + PMD_BOND_SLAVE_PORT_KVARG, + PMD_BOND_PRIMARY_SLAVE_KVARG, + PMD_BOND_MODE_KVARG, + PMD_BOND_XMIT_POLICY_KVARG, + PMD_BOND_SOCKET_ID_KVARG, + PMD_BOND_MAC_ADDR_KVARG, + + NULL +}; + +static inline int +find_port_id_by_pci_addr(const struct rte_pci_addr *pci_addr) +{ + struct rte_pci_addr *eth_pci_addr; + unsigned i; + + for (i = 0; i < rte_eth_dev_count(); i++) { + + if (rte_eth_devices[i].pci_dev == NULL) + continue; + + eth_pci_addr = &(rte_eth_devices[i].pci_dev->addr); + + if (pci_addr->bus == eth_pci_addr->bus && + pci_addr->devid == eth_pci_addr->devid && + pci_addr->domain == eth_pci_addr->domain && + pci_addr->function == eth_pci_addr->function) + return i; + } + return -1; +} + +static inline int +find_port_id_by_dev_name(const char *name) +{ + unsigned i; + + for (i = 0; i < rte_eth_dev_count(); i++) { + if (rte_eth_devices[i].data == NULL) + continue; + + if (strcmp(rte_eth_devices[i].data->name, name) == 0) + return i; + } + return -1; +} + +/** + * Parses a port identifier string to a port id by pci address, then by name, + * and finally port id. + */ +static inline int +parse_port_id(const char *port_str) +{ + struct rte_pci_addr dev_addr; + int port_id; + + /* try parsing as pci address, physical devices */ + if (eal_parse_pci_DomBDF(port_str, &dev_addr) == 0) { + port_id = find_port_id_by_pci_addr(&dev_addr); + if (port_id < 0) + return -1; + } else { + /* try parsing as device name, virtual devices */ + port_id = find_port_id_by_dev_name(port_str); + if (port_id < 0) { + char *end; + errno = 0; + + /* try parsing as port id */ + port_id = strtol(port_str, &end, 10); + if (*end != 0 || errno != 0) + return -1; + } + } + + if (port_id < 0 || port_id > RTE_MAX_ETHPORTS) { + RTE_BOND_LOG(ERR, "Slave port specified (%s) outside expected range", + port_str); + return -1; + } + return port_id; +} + +int +bond_ethdev_parse_slave_port_kvarg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + struct bond_ethdev_slave_ports *slave_ports; + + if (value == NULL || extra_args == NULL) + return -1; + + slave_ports = extra_args; + + if (strcmp(key, PMD_BOND_SLAVE_PORT_KVARG) == 0) { + int port_id = parse_port_id(value); + if (port_id < 0) { + RTE_BOND_LOG(ERR, "Invalid slave port value (%s) specified", value); + return -1; + } else + slave_ports->slaves[slave_ports->slave_count++] = + (uint8_t)port_id; + } + return 0; +} + +int +bond_ethdev_parse_slave_mode_kvarg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + uint8_t *mode; + char *endptr; + + if (value == NULL || extra_args == NULL) + return -1; + + mode = extra_args; + + errno = 0; + *mode = strtol(value, &endptr, 10); + if (*endptr != 0 || errno != 0) + return -1; + + /* validate mode value */ + switch (*mode) { + case BONDING_MODE_ROUND_ROBIN: + case BONDING_MODE_ACTIVE_BACKUP: + case BONDING_MODE_BALANCE: + case BONDING_MODE_BROADCAST: + case BONDING_MODE_8023AD: + case BONDING_MODE_TLB: + case BONDING_MODE_ALB: + return 0; + default: + RTE_BOND_LOG(ERR, "Invalid slave mode value (%s) specified", value); + return -1; + } +} + +int +bond_ethdev_parse_socket_id_kvarg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + int socket_id; + char *endptr; + + if (value == NULL || extra_args == NULL) + return -1; + + errno = 0; + 
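/*
 * Editor's note: the kvarg handlers in this file back the bonding vdev
 * arguments. Assuming the conventional "mode", "slave" and "socket_id" keys
 * (and hypothetical PCI addresses), a typical invocation looks like:
 *
 *	--vdev 'eth_bond0,mode=2,slave=0000:02:00.0,slave=0000:02:00.1,socket_id=0'
 */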
socket_id = (uint8_t)strtol(value, &endptr, 10); + if (*endptr != 0 || errno != 0) + return -1; + + /* validate mode value */ + if (socket_id >= 0 && socket_id < number_of_sockets()) { + *(uint8_t *)extra_args = (uint8_t)socket_id; + return 0; + } + return -1; +} + +int +bond_ethdev_parse_primary_slave_port_id_kvarg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + int primary_slave_port_id; + + if (value == NULL || extra_args == NULL) + return -1; + + primary_slave_port_id = parse_port_id(value); + if (primary_slave_port_id < 0) + return -1; + + *(uint8_t *)extra_args = (uint8_t)primary_slave_port_id; + + return 0; +} + +int +bond_ethdev_parse_balance_xmit_policy_kvarg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + uint8_t *xmit_policy; + + if (value == NULL || extra_args == NULL) + return -1; + + xmit_policy = extra_args; + + if (strcmp(PMD_BOND_XMIT_POLICY_LAYER2_KVARG, value) == 0) + *xmit_policy = BALANCE_XMIT_POLICY_LAYER2; + else if (strcmp(PMD_BOND_XMIT_POLICY_LAYER23_KVARG, value) == 0) + *xmit_policy = BALANCE_XMIT_POLICY_LAYER23; + else if (strcmp(PMD_BOND_XMIT_POLICY_LAYER34_KVARG, value) == 0) + *xmit_policy = BALANCE_XMIT_POLICY_LAYER34; + else + return -1; + + return 0; +} + +int +bond_ethdev_parse_bond_mac_addr_kvarg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + if (value == NULL || extra_args == NULL) + return -1; + + /* Parse MAC */ + return cmdline_parse_etheraddr(NULL, value, extra_args, + sizeof(struct ether_addr)); +} + +int +bond_ethdev_parse_time_ms_kvarg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + uint32_t time_ms; + char *endptr; + + if (value == NULL || extra_args == NULL) + return -1; + + errno = 0; + time_ms = (uint32_t)strtol(value, &endptr, 10); + if (*endptr != 0 || errno != 0) + return -1; + + *(uint32_t *)extra_args = time_ms; + + return 0; +} diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c new file mode 100644 index 00000000..54788cf8 --- /dev/null +++ b/drivers/net/bonding/rte_eth_bond_pmd.c @@ -0,0 +1,2515 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include <stdlib.h> +#include <netinet/in.h> + +#include <rte_mbuf.h> +#include <rte_malloc.h> +#include <rte_ethdev.h> +#include <rte_tcp.h> +#include <rte_udp.h> +#include <rte_ip.h> +#include <rte_ip_frag.h> +#include <rte_devargs.h> +#include <rte_kvargs.h> +#include <rte_dev.h> +#include <rte_alarm.h> +#include <rte_cycles.h> + +#include "rte_eth_bond.h" +#include "rte_eth_bond_private.h" +#include "rte_eth_bond_8023ad_private.h" + +#define REORDER_PERIOD_MS 10 + +#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port) + +/* Table for statistics in mode 5 TLB */ +static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS]; + +static inline size_t +get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto) +{ + size_t vlan_offset = 0; + + if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) { + struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1); + + vlan_offset = sizeof(struct vlan_hdr); + *proto = vlan_hdr->eth_proto; + + if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) { + vlan_hdr = vlan_hdr + 1; + *proto = vlan_hdr->eth_proto; + vlan_offset += sizeof(struct vlan_hdr); + } + } + return vlan_offset; +} + +static uint16_t +bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + struct bond_dev_private *internals; + + uint16_t num_rx_slave = 0; + uint16_t num_rx_total = 0; + + int i; + + /* Cast to structure, containing bonded device's port id and queue id */ + struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue; + + internals = bd_rx_q->dev_private; + + + for (i = 0; i < internals->active_slave_count && nb_pkts; i++) { + /* Offset of pointer to *bufs increases as packets are received + * from other slaves */ + num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i], + bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts); + if (num_rx_slave) { + num_rx_total += num_rx_slave; + nb_pkts -= num_rx_slave; + } + } + + return num_rx_total; +} + +static uint16_t +bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs, + uint16_t nb_pkts) +{ + struct bond_dev_private *internals; + + /* Cast to structure, containing bonded device's port id and queue id */ + struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue; + + internals = bd_rx_q->dev_private; + + return rte_eth_rx_burst(internals->current_primary_port, + bd_rx_q->queue_id, bufs, nb_pkts); +} + +static uint16_t +bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, + uint16_t nb_pkts) +{ + /* Cast to structure, containing bonded device's port id and queue id */ + struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue; + struct bond_dev_private *internals = bd_rx_q->dev_private; + struct ether_addr bond_mac; + + struct ether_hdr *hdr; + + const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW); + uint16_t num_rx_total = 0; /* Total number of received packets */ + uint8_t slaves[RTE_MAX_ETHPORTS]; + uint8_t slave_count; + + uint8_t collecting; /* current slave collecting status */ + const uint8_t promisc = 
internals->promiscuous_en; + uint8_t i, j, k; + + rte_eth_macaddr_get(internals->port_id, &bond_mac); + /* Copy slave list to protect against slave up/down changes during tx + * bursting */ + slave_count = internals->active_slave_count; + memcpy(slaves, internals->active_slaves, + sizeof(internals->active_slaves[0]) * slave_count); + + for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) { + j = num_rx_total; + collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[i]], COLLECTING); + + /* Read packets from this slave */ + num_rx_total += rte_eth_rx_burst(slaves[i], bd_rx_q->queue_id, + &bufs[num_rx_total], nb_pkts - num_rx_total); + + for (k = j; k < 2 && k < num_rx_total; k++) + rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *)); + + /* Handle slow protocol packets. */ + while (j < num_rx_total) { + if (j + 3 < num_rx_total) + rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *)); + + hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *); + /* Remove packet from array if it is slow packet or slave is not + * in collecting state or bondign interface is not in promiscus + * mode and packet address does not match. */ + if (unlikely(hdr->ether_type == ether_type_slow_be || + !collecting || (!promisc && + !is_multicast_ether_addr(&hdr->d_addr) && + !is_same_ether_addr(&bond_mac, &hdr->d_addr)))) { + + if (hdr->ether_type == ether_type_slow_be) { + bond_mode_8023ad_handle_slow_pkt(internals, slaves[i], + bufs[j]); + } else + rte_pktmbuf_free(bufs[j]); + + /* Packet is managed by mode 4 or dropped, shift the array */ + num_rx_total--; + if (j < num_rx_total) { + memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) * + (num_rx_total - j)); + } + } else + j++; + } + } + + return num_rx_total; +} + +#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1) +uint32_t burstnumberRX; +uint32_t burstnumberTX; + +#ifdef RTE_LIBRTE_BOND_DEBUG_ALB + +static void +arp_op_name(uint16_t arp_op, char *buf) +{ + switch (arp_op) { + case ARP_OP_REQUEST: + snprintf(buf, sizeof("ARP Request"), "%s", "ARP Request"); + return; + case ARP_OP_REPLY: + snprintf(buf, sizeof("ARP Reply"), "%s", "ARP Reply"); + return; + case ARP_OP_REVREQUEST: + snprintf(buf, sizeof("Reverse ARP Request"), "%s", + "Reverse ARP Request"); + return; + case ARP_OP_REVREPLY: + snprintf(buf, sizeof("Reverse ARP Reply"), "%s", + "Reverse ARP Reply"); + return; + case ARP_OP_INVREQUEST: + snprintf(buf, sizeof("Peer Identify Request"), "%s", + "Peer Identify Request"); + return; + case ARP_OP_INVREPLY: + snprintf(buf, sizeof("Peer Identify Reply"), "%s", + "Peer Identify Reply"); + return; + default: + break; + } + snprintf(buf, sizeof("Unknown"), "%s", "Unknown"); + return; +} +#endif +#define MaxIPv4String 16 +static void +ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size) +{ + uint32_t ipv4_addr; + + ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr); + snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF, + (ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF, + ipv4_addr & 0xFF); +} + +#define MAX_CLIENTS_NUMBER 128 +uint8_t active_clients; +struct client_stats_t { + uint8_t port; + uint32_t ipv4_addr; + uint32_t ipv4_rx_packets; + uint32_t ipv4_tx_packets; +}; +struct client_stats_t client_stats[MAX_CLIENTS_NUMBER]; + +static void +update_client_stats(uint32_t addr, uint8_t port, uint32_t *TXorRXindicator) +{ + int i = 0; + + for (; i < MAX_CLIENTS_NUMBER; i++) { + if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port)) { + /* Just update RX packets number for this client */ + if 
(TXorRXindicator == &burstnumberRX) + client_stats[i].ipv4_rx_packets++; + else + client_stats[i].ipv4_tx_packets++; + return; + } + } + /* We have a new client. Insert him to the table, and increment stats */ + if (TXorRXindicator == &burstnumberRX) + client_stats[active_clients].ipv4_rx_packets++; + else + client_stats[active_clients].ipv4_tx_packets++; + client_stats[active_clients].ipv4_addr = addr; + client_stats[active_clients].port = port; + active_clients++; + +} + +#ifdef RTE_LIBRTE_BOND_DEBUG_ALB +#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \ + RTE_LOG(DEBUG, PMD, \ + "%s " \ + "port:%d " \ + "SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X " \ + "SrcIP:%s " \ + "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X " \ + "DstIP:%s " \ + "%s " \ + "%d\n", \ + info, \ + port, \ + eth_h->s_addr.addr_bytes[0], \ + eth_h->s_addr.addr_bytes[1], \ + eth_h->s_addr.addr_bytes[2], \ + eth_h->s_addr.addr_bytes[3], \ + eth_h->s_addr.addr_bytes[4], \ + eth_h->s_addr.addr_bytes[5], \ + src_ip, \ + eth_h->d_addr.addr_bytes[0], \ + eth_h->d_addr.addr_bytes[1], \ + eth_h->d_addr.addr_bytes[2], \ + eth_h->d_addr.addr_bytes[3], \ + eth_h->d_addr.addr_bytes[4], \ + eth_h->d_addr.addr_bytes[5], \ + dst_ip, \ + arp_op, \ + ++burstnumber) +#endif + +static void +mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h, + uint8_t port, uint32_t __attribute__((unused)) *burstnumber) +{ + struct ipv4_hdr *ipv4_h; +#ifdef RTE_LIBRTE_BOND_DEBUG_ALB + struct arp_hdr *arp_h; + char dst_ip[16]; + char ArpOp[24]; + char buf[16]; +#endif + char src_ip[16]; + + uint16_t ether_type = eth_h->ether_type; + uint16_t offset = get_vlan_offset(eth_h, ðer_type); + +#ifdef RTE_LIBRTE_BOND_DEBUG_ALB + snprintf(buf, 16, "%s", info); +#endif + + if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) { + ipv4_h = (struct ipv4_hdr *)((char *)(eth_h + 1) + offset); + ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String); +#ifdef RTE_LIBRTE_BOND_DEBUG_ALB + ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String); + MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber); +#endif + update_client_stats(ipv4_h->src_addr, port, burstnumber); + } +#ifdef RTE_LIBRTE_BOND_DEBUG_ALB + else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) { + arp_h = (struct arp_hdr *)((char *)(eth_h + 1) + offset); + ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String); + ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String); + arp_op_name(rte_be_to_cpu_16(arp_h->arp_op), ArpOp); + MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber); + } +#endif +} +#endif + +static uint16_t +bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue; + struct bond_dev_private *internals = bd_tx_q->dev_private; + struct ether_hdr *eth_h; + uint16_t ether_type, offset; + uint16_t nb_recv_pkts; + int i; + + nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts); + + for (i = 0; i < nb_recv_pkts; i++) { + eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *); + ether_type = eth_h->ether_type; + offset = get_vlan_offset(eth_h, ðer_type); + + if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) { +#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1) + mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX); +#endif + bond_mode_alb_arp_recv(eth_h, offset, internals); + } +#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1) + else if (ether_type == 
rte_cpu_to_be_16(ETHER_TYPE_IPv4)) + mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX); +#endif + } + + return nb_recv_pkts; +} + +static uint16_t +bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs, + uint16_t nb_pkts) +{ + struct bond_dev_private *internals; + struct bond_tx_queue *bd_tx_q; + + struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts]; + uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 }; + + uint8_t num_of_slaves; + uint8_t slaves[RTE_MAX_ETHPORTS]; + + uint16_t num_tx_total = 0, num_tx_slave; + + static int slave_idx = 0; + int i, cslave_idx = 0, tx_fail_total = 0; + + bd_tx_q = (struct bond_tx_queue *)queue; + internals = bd_tx_q->dev_private; + + /* Copy slave list to protect against slave up/down changes during tx + * bursting */ + num_of_slaves = internals->active_slave_count; + memcpy(slaves, internals->active_slaves, + sizeof(internals->active_slaves[0]) * num_of_slaves); + + if (num_of_slaves < 1) + return num_tx_total; + + /* Populate slaves mbuf with which packets are to be sent on it */ + for (i = 0; i < nb_pkts; i++) { + cslave_idx = (slave_idx + i) % num_of_slaves; + slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i]; + } + + /* increment current slave index so the next call to tx burst starts on the + * next slave */ + slave_idx = ++cslave_idx; + + /* Send packet burst on each slave device */ + for (i = 0; i < num_of_slaves; i++) { + if (slave_nb_pkts[i] > 0) { + num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id, + slave_bufs[i], slave_nb_pkts[i]); + + /* if tx burst fails move packets to end of bufs */ + if (unlikely(num_tx_slave < slave_nb_pkts[i])) { + int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave; + + tx_fail_total += tx_fail_slave; + + memcpy(&bufs[nb_pkts - tx_fail_total], + &slave_bufs[i][num_tx_slave], + tx_fail_slave * sizeof(bufs[0])); + } + num_tx_total += num_tx_slave; + } + } + + return num_tx_total; +} + +static uint16_t +bond_ethdev_tx_burst_active_backup(void *queue, + struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + struct bond_dev_private *internals; + struct bond_tx_queue *bd_tx_q; + + bd_tx_q = (struct bond_tx_queue *)queue; + internals = bd_tx_q->dev_private; + + if (internals->active_slave_count < 1) + return 0; + + return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id, + bufs, nb_pkts); +} + +static inline uint16_t +ether_hash(struct ether_hdr *eth_hdr) +{ + unaligned_uint16_t *word_src_addr = + (unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes; + unaligned_uint16_t *word_dst_addr = + (unaligned_uint16_t *)eth_hdr->d_addr.addr_bytes; + + return (word_src_addr[0] ^ word_dst_addr[0]) ^ + (word_src_addr[1] ^ word_dst_addr[1]) ^ + (word_src_addr[2] ^ word_dst_addr[2]); +} + +static inline uint32_t +ipv4_hash(struct ipv4_hdr *ipv4_hdr) +{ + return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr; +} + +static inline uint32_t +ipv6_hash(struct ipv6_hdr *ipv6_hdr) +{ + unaligned_uint32_t *word_src_addr = + (unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]); + unaligned_uint32_t *word_dst_addr = + (unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]); + + return (word_src_addr[0] ^ word_dst_addr[0]) ^ + (word_src_addr[1] ^ word_dst_addr[1]) ^ + (word_src_addr[2] ^ word_dst_addr[2]) ^ + (word_src_addr[3] ^ word_dst_addr[3]); +} + +uint16_t +xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count) +{ + struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *); + + uint32_t hash = ether_hash(eth_hdr); + + return (hash ^= hash >> 8) % slave_count; +} + +uint16_t 
+xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count) +{ + struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *); + uint16_t proto = eth_hdr->ether_type; + size_t vlan_offset = get_vlan_offset(eth_hdr, &proto); + uint32_t hash, l3hash = 0; + + hash = ether_hash(eth_hdr); + + if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) { + struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *) + ((char *)(eth_hdr + 1) + vlan_offset); + l3hash = ipv4_hash(ipv4_hdr); + + } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) { + struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *) + ((char *)(eth_hdr + 1) + vlan_offset); + l3hash = ipv6_hash(ipv6_hdr); + } + + hash = hash ^ l3hash; + hash ^= hash >> 16; + hash ^= hash >> 8; + + return hash % slave_count; +} + +uint16_t +xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count) +{ + struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *); + uint16_t proto = eth_hdr->ether_type; + size_t vlan_offset = get_vlan_offset(eth_hdr, &proto); + + struct udp_hdr *udp_hdr = NULL; + struct tcp_hdr *tcp_hdr = NULL; + uint32_t hash, l3hash = 0, l4hash = 0; + + if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) { + struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *) + ((char *)(eth_hdr + 1) + vlan_offset); + size_t ip_hdr_offset; + + l3hash = ipv4_hash(ipv4_hdr); + + /* there is no L4 header in fragmented packet */ + if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr) == 0)) { + ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) * + IPV4_IHL_MULTIPLIER; + + if (ipv4_hdr->next_proto_id == IPPROTO_TCP) { + tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + + ip_hdr_offset); + l4hash = HASH_L4_PORTS(tcp_hdr); + } else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) { + udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr + + ip_hdr_offset); + l4hash = HASH_L4_PORTS(udp_hdr); + } + } + } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) { + struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *) + ((char *)(eth_hdr + 1) + vlan_offset); + l3hash = ipv6_hash(ipv6_hdr); + + if (ipv6_hdr->proto == IPPROTO_TCP) { + tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1); + l4hash = HASH_L4_PORTS(tcp_hdr); + } else if (ipv6_hdr->proto == IPPROTO_UDP) { + udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1); + l4hash = HASH_L4_PORTS(udp_hdr); + } + } + + hash = l3hash ^ l4hash; + hash ^= hash >> 16; + hash ^= hash >> 8; + + return hash % slave_count; +} + +struct bwg_slave { + uint64_t bwg_left_int; + uint64_t bwg_left_remainder; + uint8_t slave; +}; + +void +bond_tlb_activate_slave(struct bond_dev_private *internals) { + int i; + + for (i = 0; i < internals->active_slave_count; i++) { + tlb_last_obytets[internals->active_slaves[i]] = 0; + } +} + +static int +bandwidth_cmp(const void *a, const void *b) +{ + const struct bwg_slave *bwg_a = a; + const struct bwg_slave *bwg_b = b; + int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int; + int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder - + (int64_t)bwg_a->bwg_left_remainder; + if (diff > 0) + return 1; + else if (diff < 0) + return -1; + else if (diff2 > 0) + return 1; + else if (diff2 < 0) + return -1; + else + return 0; +} + +static void +bandwidth_left(uint8_t port_id, uint64_t load, uint8_t update_idx, + struct bwg_slave *bwg_slave) +{ + struct rte_eth_link link_status; + + rte_eth_link_get(port_id, &link_status); + uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8; + if (link_bwg == 0) + return; + link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS; + bwg_slave->bwg_left_int = (link_bwg - 
1000*load) / link_bwg; + bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg; +} + +static void +bond_ethdev_update_tlb_slave_cb(void *arg) +{ + struct bond_dev_private *internals = arg; + struct rte_eth_stats slave_stats; + struct bwg_slave bwg_array[RTE_MAX_ETHPORTS]; + uint8_t slave_count; + uint64_t tx_bytes; + + uint8_t update_stats = 0; + uint8_t i, slave_id; + + internals->slave_update_idx++; + + + if (internals->slave_update_idx >= REORDER_PERIOD_MS) + update_stats = 1; + + for (i = 0; i < internals->active_slave_count; i++) { + slave_id = internals->active_slaves[i]; + rte_eth_stats_get(slave_id, &slave_stats); + tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id]; + bandwidth_left(slave_id, tx_bytes, + internals->slave_update_idx, &bwg_array[i]); + bwg_array[i].slave = slave_id; + + if (update_stats) { + tlb_last_obytets[slave_id] = slave_stats.obytes; + } + } + + if (update_stats == 1) + internals->slave_update_idx = 0; + + slave_count = i; + qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp); + for (i = 0; i < slave_count; i++) + internals->tlb_slaves_order[i] = bwg_array[i].slave; + + rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb, + (struct bond_dev_private *)internals); +} + +static uint16_t +bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue; + struct bond_dev_private *internals = bd_tx_q->dev_private; + + struct rte_eth_dev *primary_port = + &rte_eth_devices[internals->primary_port]; + uint16_t num_tx_total = 0; + uint8_t i, j; + + uint8_t num_of_slaves = internals->active_slave_count; + uint8_t slaves[RTE_MAX_ETHPORTS]; + + struct ether_hdr *ether_hdr; + struct ether_addr primary_slave_addr; + struct ether_addr active_slave_addr; + + if (num_of_slaves < 1) + return num_tx_total; + + memcpy(slaves, internals->tlb_slaves_order, + sizeof(internals->tlb_slaves_order[0]) * num_of_slaves); + + + ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr); + + if (nb_pkts > 3) { + for (i = 0; i < 3; i++) + rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*)); + } + + for (i = 0; i < num_of_slaves; i++) { + rte_eth_macaddr_get(slaves[i], &active_slave_addr); + for (j = num_tx_total; j < nb_pkts; j++) { + if (j + 3 < nb_pkts) + rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*)); + + ether_hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *); + if (is_same_ether_addr(ðer_hdr->s_addr, &primary_slave_addr)) + ether_addr_copy(&active_slave_addr, ðer_hdr->s_addr); +#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1) + mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX); +#endif + } + + num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id, + bufs + num_tx_total, nb_pkts - num_tx_total); + + if (num_tx_total == nb_pkts) + break; + } + + return num_tx_total; +} + +void +bond_tlb_disable(struct bond_dev_private *internals) +{ + rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals); +} + +void +bond_tlb_enable(struct bond_dev_private *internals) +{ + bond_ethdev_update_tlb_slave_cb(internals); +} + +static uint16_t +bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue; + struct bond_dev_private *internals = bd_tx_q->dev_private; + + struct ether_hdr *eth_h; + uint16_t ether_type, offset; + + struct client_data *client_info; + + /* + * We create transmit buffers for every slave and 
one additional to send + * through tlb. In worst case every packet will be send on one port. + */ + struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts]; + uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 }; + + /* + * We create separate transmit buffers for update packets as they wont be + * counted in num_tx_total. + */ + struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE]; + uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 }; + + struct rte_mbuf *upd_pkt; + size_t pkt_size; + + uint16_t num_send, num_not_send = 0; + uint16_t num_tx_total = 0; + uint8_t slave_idx; + + int i, j; + + /* Search tx buffer for ARP packets and forward them to alb */ + for (i = 0; i < nb_pkts; i++) { + eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *); + ether_type = eth_h->ether_type; + offset = get_vlan_offset(eth_h, ðer_type); + + if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) { + slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals); + + /* Change src mac in eth header */ + rte_eth_macaddr_get(slave_idx, ð_h->s_addr); + + /* Add packet to slave tx buffer */ + slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i]; + slave_bufs_pkts[slave_idx]++; + } else { + /* If packet is not ARP, send it with TLB policy */ + slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] = + bufs[i]; + slave_bufs_pkts[RTE_MAX_ETHPORTS]++; + } + } + + /* Update connected client ARP tables */ + if (internals->mode6.ntt) { + for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) { + client_info = &internals->mode6.client_table[i]; + + if (client_info->in_use) { + /* Allocate new packet to send ARP update on current slave */ + upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool); + if (upd_pkt == NULL) { + RTE_LOG(ERR, PMD, "Failed to allocate ARP packet from pool\n"); + continue; + } + pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr) + + client_info->vlan_count * sizeof(struct vlan_hdr); + upd_pkt->data_len = pkt_size; + upd_pkt->pkt_len = pkt_size; + + slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt, + internals); + + /* Add packet to update tx buffer */ + update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt; + update_bufs_pkts[slave_idx]++; + } + } + internals->mode6.ntt = 0; + } + + /* Send ARP packets on proper slaves */ + for (i = 0; i < RTE_MAX_ETHPORTS; i++) { + if (slave_bufs_pkts[i] > 0) { + num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, + slave_bufs[i], slave_bufs_pkts[i]); + for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) { + bufs[nb_pkts - 1 - num_not_send - j] = + slave_bufs[i][nb_pkts - 1 - j]; + } + + num_tx_total += num_send; + num_not_send += slave_bufs_pkts[i] - num_send; + +#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1) + /* Print TX stats including update packets */ + for (j = 0; j < slave_bufs_pkts[i]; j++) { + eth_h = rte_pktmbuf_mtod(slave_bufs[i][j], struct ether_hdr *); + mode6_debug("TX ARP:", eth_h, i, &burstnumberTX); + } +#endif + } + } + + /* Send update packets on proper slaves */ + for (i = 0; i < RTE_MAX_ETHPORTS; i++) { + if (update_bufs_pkts[i] > 0) { + num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i], + update_bufs_pkts[i]); + for (j = num_send; j < update_bufs_pkts[i]; j++) { + rte_pktmbuf_free(update_bufs[i][j]); + } +#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1) + for (j = 0; j < update_bufs_pkts[i]; j++) { + eth_h = rte_pktmbuf_mtod(update_bufs[i][j], struct ether_hdr *); + mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX); + } 
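+ /* Note: update packets that could not be transmitted were freed above
+  * rather than handed back to the caller; they are generated inside the
+  * PMD and are deliberately not counted in num_tx_total. */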
+#endif + } + } + + /* Send non-ARP packets using tlb policy */ + if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) { + num_send = bond_ethdev_tx_burst_tlb(queue, + slave_bufs[RTE_MAX_ETHPORTS], + slave_bufs_pkts[RTE_MAX_ETHPORTS]); + + for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) { + bufs[nb_pkts - 1 - num_not_send - j] = + slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j]; + } + + num_tx_total += num_send; + num_not_send += slave_bufs_pkts[RTE_MAX_ETHPORTS] - num_send; + } + + return num_tx_total; +} + +static uint16_t +bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs, + uint16_t nb_pkts) +{ + struct bond_dev_private *internals; + struct bond_tx_queue *bd_tx_q; + + uint8_t num_of_slaves; + uint8_t slaves[RTE_MAX_ETHPORTS]; + + uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0; + + int i, op_slave_id; + + struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts]; + uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 }; + + bd_tx_q = (struct bond_tx_queue *)queue; + internals = bd_tx_q->dev_private; + + /* Copy slave list to protect against slave up/down changes during tx + * bursting */ + num_of_slaves = internals->active_slave_count; + memcpy(slaves, internals->active_slaves, + sizeof(internals->active_slaves[0]) * num_of_slaves); + + if (num_of_slaves < 1) + return num_tx_total; + + /* Populate slaves mbuf with the packets which are to be sent on it */ + for (i = 0; i < nb_pkts; i++) { + /* Select output slave using hash based on xmit policy */ + op_slave_id = internals->xmit_hash(bufs[i], num_of_slaves); + + /* Populate slave mbuf arrays with mbufs for that slave */ + slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i]; + } + + /* Send packet burst on each slave device */ + for (i = 0; i < num_of_slaves; i++) { + if (slave_nb_pkts[i] > 0) { + num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id, + slave_bufs[i], slave_nb_pkts[i]); + + /* if tx burst fails move packets to end of bufs */ + if (unlikely(num_tx_slave < slave_nb_pkts[i])) { + int slave_tx_fail_count = slave_nb_pkts[i] - num_tx_slave; + + tx_fail_total += slave_tx_fail_count; + memcpy(&bufs[nb_pkts - tx_fail_total], + &slave_bufs[i][num_tx_slave], + slave_tx_fail_count * sizeof(bufs[0])); + } + + num_tx_total += num_tx_slave; + } + } + + return num_tx_total; +} + +static uint16_t +bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, + uint16_t nb_pkts) +{ + struct bond_dev_private *internals; + struct bond_tx_queue *bd_tx_q; + + uint8_t num_of_slaves; + uint8_t slaves[RTE_MAX_ETHPORTS]; + /* positions in slaves, not ID */ + uint8_t distributing_offsets[RTE_MAX_ETHPORTS]; + uint8_t distributing_count; + + uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0; + uint16_t i, j, op_slave_idx; + const uint16_t buffs_size = nb_pkts + BOND_MODE_8023AX_SLAVE_TX_PKTS + 1; + + /* Allocate additional packets in case 8023AD mode. 
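+ Each slave may first have to transmit up to BOND_MODE_8023AX_SLAVE_TX_PKTS
+ slow protocol (LACPDU/marker) frames dequeued from its tx_ring, so
+ buffs_size reserves BOND_MODE_8023AX_SLAVE_TX_PKTS + 1 extra slots per
+ slave on top of nb_pkts.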
*/ + struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][buffs_size]; + void *slow_pkts[BOND_MODE_8023AX_SLAVE_TX_PKTS] = { NULL }; + + /* Total amount of packets in slave_bufs */ + uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 }; + /* Slow packets placed in each slave */ + uint8_t slave_slow_nb_pkts[RTE_MAX_ETHPORTS] = { 0 }; + + bd_tx_q = (struct bond_tx_queue *)queue; + internals = bd_tx_q->dev_private; + + /* Copy slave list to protect against slave up/down changes during tx + * bursting */ + num_of_slaves = internals->active_slave_count; + if (num_of_slaves < 1) + return num_tx_total; + + memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves); + + distributing_count = 0; + for (i = 0; i < num_of_slaves; i++) { + struct port *port = &mode_8023ad_ports[slaves[i]]; + + slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring, + slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS); + slave_nb_pkts[i] = slave_slow_nb_pkts[i]; + + for (j = 0; j < slave_slow_nb_pkts[i]; j++) + slave_bufs[i][j] = slow_pkts[j]; + + if (ACTOR_STATE(port, DISTRIBUTING)) + distributing_offsets[distributing_count++] = i; + } + + if (likely(distributing_count > 0)) { + /* Populate slaves mbuf with the packets which are to be sent on it */ + for (i = 0; i < nb_pkts; i++) { + /* Select output slave using hash based on xmit policy */ + op_slave_idx = internals->xmit_hash(bufs[i], distributing_count); + + /* Populate slave mbuf arrays with mbufs for that slave. Use only + * slaves that are currently distributing. */ + uint8_t slave_offset = distributing_offsets[op_slave_idx]; + slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] = bufs[i]; + slave_nb_pkts[slave_offset]++; + } + } + + /* Send packet burst on each slave device */ + for (i = 0; i < num_of_slaves; i++) { + if (slave_nb_pkts[i] == 0) + continue; + + num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id, + slave_bufs[i], slave_nb_pkts[i]); + + /* If tx burst fails drop slow packets */ + for ( ; num_tx_slave < slave_slow_nb_pkts[i]; num_tx_slave++) + rte_pktmbuf_free(slave_bufs[i][num_tx_slave]); + + num_tx_total += num_tx_slave - slave_slow_nb_pkts[i]; + num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave; + + /* If tx burst fails move packets to end of bufs */ + if (unlikely(num_tx_slave < slave_nb_pkts[i])) { + uint16_t j = nb_pkts - num_tx_fail_total; + for ( ; num_tx_slave < slave_nb_pkts[i]; j++, num_tx_slave++) + bufs[j] = slave_bufs[i][num_tx_slave]; + } + } + + return num_tx_total; +} + +static uint16_t +bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs, + uint16_t nb_pkts) +{ + struct bond_dev_private *internals; + struct bond_tx_queue *bd_tx_q; + + uint8_t tx_failed_flag = 0, num_of_slaves; + uint8_t slaves[RTE_MAX_ETHPORTS]; + + uint16_t max_nb_of_tx_pkts = 0; + + int slave_tx_total[RTE_MAX_ETHPORTS]; + int i, most_successful_tx_slave = -1; + + bd_tx_q = (struct bond_tx_queue *)queue; + internals = bd_tx_q->dev_private; + + /* Copy slave list to protect against slave up/down changes during tx + * bursting */ + num_of_slaves = internals->active_slave_count; + memcpy(slaves, internals->active_slaves, + sizeof(internals->active_slaves[0]) * num_of_slaves); + + if (num_of_slaves < 1) + return 0; + + /* Increment reference count on mbufs */ + for (i = 0; i < nb_pkts; i++) + rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1); + + /* Transmit burst on each active slave */ + for (i = 0; i < num_of_slaves; i++) { + slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id, + bufs, nb_pkts); + + if 
(unlikely(slave_tx_total[i] < nb_pkts)) + tx_failed_flag = 1; + + /* record the value and slave index for the slave which transmits the + * maximum number of packets */ + if (slave_tx_total[i] > max_nb_of_tx_pkts) { + max_nb_of_tx_pkts = slave_tx_total[i]; + most_successful_tx_slave = i; + } + } + + /* if slaves fail to transmit packets from burst, the calling application + * is not expected to know about multiple references to packets so we must + * handle failures of all packets except those of the most successful slave + */ + if (unlikely(tx_failed_flag)) + for (i = 0; i < num_of_slaves; i++) + if (i != most_successful_tx_slave) + while (slave_tx_total[i] < nb_pkts) + rte_pktmbuf_free(bufs[slave_tx_total[i]++]); + + return max_nb_of_tx_pkts; +} + +void +link_properties_set(struct rte_eth_dev *bonded_eth_dev, + struct rte_eth_link *slave_dev_link) +{ + struct rte_eth_link *bonded_dev_link = &bonded_eth_dev->data->dev_link; + struct bond_dev_private *internals = bonded_eth_dev->data->dev_private; + + if (slave_dev_link->link_status && + bonded_eth_dev->data->dev_started) { + bonded_dev_link->link_duplex = slave_dev_link->link_duplex; + bonded_dev_link->link_speed = slave_dev_link->link_speed; + + internals->link_props_set = 1; + } +} + +void +link_properties_reset(struct rte_eth_dev *bonded_eth_dev) +{ + struct bond_dev_private *internals = bonded_eth_dev->data->dev_private; + + memset(&(bonded_eth_dev->data->dev_link), 0, + sizeof(bonded_eth_dev->data->dev_link)); + + internals->link_props_set = 0; +} + +int +link_properties_valid(struct rte_eth_link *bonded_dev_link, + struct rte_eth_link *slave_dev_link) +{ + if (bonded_dev_link->link_duplex != slave_dev_link->link_duplex || + bonded_dev_link->link_speed != slave_dev_link->link_speed) + return -1; + + return 0; +} + +int +mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr) +{ + struct ether_addr *mac_addr; + + if (eth_dev == NULL) { + RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__); + return -1; + } + + if (dst_mac_addr == NULL) { + RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__); + return -1; + } + + mac_addr = eth_dev->data->mac_addrs; + + ether_addr_copy(mac_addr, dst_mac_addr); + return 0; +} + +int +mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr) +{ + struct ether_addr *mac_addr; + + if (eth_dev == NULL) { + RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified"); + return -1; + } + + if (new_mac_addr == NULL) { + RTE_BOND_LOG(ERR, "NULL pointer MAC specified"); + return -1; + } + + mac_addr = eth_dev->data->mac_addrs; + + /* If new MAC is different to current MAC then update */ + if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0) + memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr)); + + return 0; +} + +int +mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev) +{ + struct bond_dev_private *internals = bonded_eth_dev->data->dev_private; + int i; + + /* Update slave devices MAC addresses */ + if (internals->slave_count < 1) + return -1; + + switch (internals->mode) { + case BONDING_MODE_ROUND_ROBIN: + case BONDING_MODE_BALANCE: + case BONDING_MODE_BROADCAST: + for (i = 0; i < internals->slave_count; i++) { + if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id], + bonded_eth_dev->data->mac_addrs)) { + RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address", + internals->slaves[i].port_id); + return -1; + } + } + break; + case BONDING_MODE_8023AD: + bond_mode_8023ad_mac_address_update(bonded_eth_dev); + break; + 
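+ /* In ACTIVE_BACKUP, TLB and ALB modes only the current primary slave
+  * carries the bonded MAC address; every other slave keeps its original
+  * (persisted) MAC address, as handled below. */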
case BONDING_MODE_ACTIVE_BACKUP: + case BONDING_MODE_TLB: + case BONDING_MODE_ALB: + default: + for (i = 0; i < internals->slave_count; i++) { + if (internals->slaves[i].port_id == + internals->current_primary_port) { + if (mac_address_set(&rte_eth_devices[internals->primary_port], + bonded_eth_dev->data->mac_addrs)) { + RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address", + internals->current_primary_port); + return -1; + } + } else { + if (mac_address_set( + &rte_eth_devices[internals->slaves[i].port_id], + &internals->slaves[i].persisted_mac_addr)) { + RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address", + internals->slaves[i].port_id); + return -1; + } + } + } + } + + return 0; +} + +int +bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode) +{ + struct bond_dev_private *internals; + + internals = eth_dev->data->dev_private; + + switch (mode) { + case BONDING_MODE_ROUND_ROBIN: + eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin; + eth_dev->rx_pkt_burst = bond_ethdev_rx_burst; + break; + case BONDING_MODE_ACTIVE_BACKUP: + eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup; + eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup; + break; + case BONDING_MODE_BALANCE: + eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance; + eth_dev->rx_pkt_burst = bond_ethdev_rx_burst; + break; + case BONDING_MODE_BROADCAST: + eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast; + eth_dev->rx_pkt_burst = bond_ethdev_rx_burst; + break; + case BONDING_MODE_8023AD: + if (bond_mode_8023ad_enable(eth_dev) != 0) + return -1; + + eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad; + eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad; + RTE_LOG(WARNING, PMD, + "Using mode 4, it is necessary to do TX burst and RX burst " + "at least every 100ms.\n"); + break; + case BONDING_MODE_TLB: + eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb; + eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup; + break; + case BONDING_MODE_ALB: + if (bond_mode_alb_enable(eth_dev) != 0) + return -1; + + eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb; + eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb; + break; + default: + return -1; + } + + internals->mode = mode; + + return 0; +} + +int +slave_configure(struct rte_eth_dev *bonded_eth_dev, + struct rte_eth_dev *slave_eth_dev) +{ + struct bond_rx_queue *bd_rx_q; + struct bond_tx_queue *bd_tx_q; + + uint16_t old_nb_tx_queues = slave_eth_dev->data->nb_tx_queues; + uint16_t old_nb_rx_queues = slave_eth_dev->data->nb_rx_queues; + int errval; + uint16_t q_id; + + /* Stop slave */ + rte_eth_dev_stop(slave_eth_dev->data->port_id); + + /* Enable interrupts on slave device if supported */ + if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) + slave_eth_dev->data->dev_conf.intr_conf.lsc = 1; + + /* If RSS is enabled for bonding, try to enable it for slaves */ + if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { + if (bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len + != 0) { + slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = + bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len; + slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = + bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key; + } else { + slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; + } + + slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = + bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf; + slave_eth_dev->data->dev_conf.rxmode.mq_mode = + 
bonded_eth_dev->data->dev_conf.rxmode.mq_mode; + } + + /* Configure device */ + errval = rte_eth_dev_configure(slave_eth_dev->data->port_id, + bonded_eth_dev->data->nb_rx_queues, + bonded_eth_dev->data->nb_tx_queues, + &(slave_eth_dev->data->dev_conf)); + if (errval != 0) { + RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u , err (%d)", + slave_eth_dev->data->port_id, errval); + return errval; + } + + /* Setup Rx Queues */ + /* Use existing queues, if any */ + for (q_id = old_nb_rx_queues; + q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) { + bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id]; + + errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id, + bd_rx_q->nb_rx_desc, + rte_eth_dev_socket_id(slave_eth_dev->data->port_id), + &(bd_rx_q->rx_conf), bd_rx_q->mb_pool); + if (errval != 0) { + RTE_BOND_LOG(ERR, + "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)", + slave_eth_dev->data->port_id, q_id, errval); + return errval; + } + } + + /* Setup Tx Queues */ + /* Use existing queues, if any */ + for (q_id = old_nb_tx_queues; + q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) { + bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id]; + + errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id, + bd_tx_q->nb_tx_desc, + rte_eth_dev_socket_id(slave_eth_dev->data->port_id), + &bd_tx_q->tx_conf); + if (errval != 0) { + RTE_BOND_LOG(ERR, + "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)", + slave_eth_dev->data->port_id, q_id, errval); + return errval; + } + } + + /* Start device */ + errval = rte_eth_dev_start(slave_eth_dev->data->port_id); + if (errval != 0) { + RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)", + slave_eth_dev->data->port_id, errval); + return -1; + } + + /* If RSS is enabled for bonding, synchronize RETA */ + if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) { + int i; + struct bond_dev_private *internals; + + internals = bonded_eth_dev->data->dev_private; + + for (i = 0; i < internals->slave_count; i++) { + if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) { + errval = rte_eth_dev_rss_reta_update( + slave_eth_dev->data->port_id, + &internals->reta_conf[0], + internals->slaves[i].reta_size); + if (errval != 0) { + RTE_LOG(WARNING, PMD, + "rte_eth_dev_rss_reta_update on slave port %d fails (err %d)." 
+ " RSS Configuration for bonding may be inconsistent.\n", + slave_eth_dev->data->port_id, errval); + } + break; + } + } + } + + /* If lsc interrupt is set, check initial slave's link status */ + if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) + bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id, + RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id); + + return 0; +} + +void +slave_remove(struct bond_dev_private *internals, + struct rte_eth_dev *slave_eth_dev) +{ + uint8_t i; + + for (i = 0; i < internals->slave_count; i++) + if (internals->slaves[i].port_id == + slave_eth_dev->data->port_id) + break; + + if (i < (internals->slave_count - 1)) + memmove(&internals->slaves[i], &internals->slaves[i + 1], + sizeof(internals->slaves[0]) * + (internals->slave_count - i - 1)); + + internals->slave_count--; +} + +static void +bond_ethdev_slave_link_status_change_monitor(void *cb_arg); + +void +slave_add(struct bond_dev_private *internals, + struct rte_eth_dev *slave_eth_dev) +{ + struct bond_slave_details *slave_details = + &internals->slaves[internals->slave_count]; + + slave_details->port_id = slave_eth_dev->data->port_id; + slave_details->last_link_status = 0; + + /* Mark slave devices that don't support interrupts so we can + * compensate when we start the bond + */ + if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) { + slave_details->link_status_poll_enabled = 1; + } + + slave_details->link_status_wait_to_complete = 0; + /* clean tlb_last_obytes when adding port for bonding device */ + memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs, + sizeof(struct ether_addr)); +} + +void +bond_ethdev_primary_set(struct bond_dev_private *internals, + uint8_t slave_port_id) +{ + int i; + + if (internals->active_slave_count < 1) + internals->current_primary_port = slave_port_id; + else + /* Search bonded device slave ports for new proposed primary port */ + for (i = 0; i < internals->active_slave_count; i++) { + if (internals->active_slaves[i] == slave_port_id) + internals->current_primary_port = slave_port_id; + } +} + +static void +bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev); + +static int +bond_ethdev_start(struct rte_eth_dev *eth_dev) +{ + struct bond_dev_private *internals; + int i; + + /* slave eth dev will be started by bonded device */ + if (check_for_bonded_ethdev(eth_dev)) { + RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)", + eth_dev->data->port_id); + return -1; + } + + eth_dev->data->dev_link.link_status = ETH_LINK_DOWN; + eth_dev->data->dev_started = 1; + + internals = eth_dev->data->dev_private; + + if (internals->slave_count == 0) { + RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices"); + return -1; + } + + if (internals->user_defined_mac == 0) { + struct ether_addr *new_mac_addr = NULL; + + for (i = 0; i < internals->slave_count; i++) + if (internals->slaves[i].port_id == internals->primary_port) + new_mac_addr = &internals->slaves[i].persisted_mac_addr; + + if (new_mac_addr == NULL) + return -1; + + if (mac_address_set(eth_dev, new_mac_addr) != 0) { + RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address", + eth_dev->data->port_id); + return -1; + } + } + + /* Update all slave devices MACs*/ + if (mac_address_slaves_update(eth_dev) != 0) + return -1; + + /* If bonded device is configure in promiscuous mode then re-apply config */ + if (internals->promiscuous_en) + bond_ethdev_promiscuous_enable(eth_dev); + + /* Reconfigure each slave device if starting 
bonded device */ + for (i = 0; i < internals->slave_count; i++) { + if (slave_configure(eth_dev, + &(rte_eth_devices[internals->slaves[i].port_id])) != 0) { + RTE_BOND_LOG(ERR, + "bonded port (%d) failed to reconfigure slave device (%d)", + eth_dev->data->port_id, internals->slaves[i].port_id); + return -1; + } + /* We will need to poll for link status if any slave doesn't + * support interrupts + */ + if (internals->slaves[i].link_status_poll_enabled) + internals->link_status_polling_enabled = 1; + } + /* start polling if needed */ + if (internals->link_status_polling_enabled) { + rte_eal_alarm_set( + internals->link_status_polling_interval_ms * 1000, + bond_ethdev_slave_link_status_change_monitor, + (void *)&rte_eth_devices[internals->port_id]); + } + + if (internals->user_defined_primary_port) + bond_ethdev_primary_set(internals, internals->primary_port); + + if (internals->mode == BONDING_MODE_8023AD) + bond_mode_8023ad_start(eth_dev); + + if (internals->mode == BONDING_MODE_TLB || + internals->mode == BONDING_MODE_ALB) + bond_tlb_enable(internals); + + return 0; +} + +static void +bond_ethdev_free_queues(struct rte_eth_dev *dev) +{ + uint8_t i; + + if (dev->data->rx_queues != NULL) { + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rte_free(dev->data->rx_queues[i]); + dev->data->rx_queues[i] = NULL; + } + dev->data->nb_rx_queues = 0; + } + + if (dev->data->tx_queues != NULL) { + for (i = 0; i < dev->data->nb_tx_queues; i++) { + rte_free(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = NULL; + } + dev->data->nb_tx_queues = 0; + } +} + +void +bond_ethdev_stop(struct rte_eth_dev *eth_dev) +{ + struct bond_dev_private *internals = eth_dev->data->dev_private; + uint8_t i; + + if (internals->mode == BONDING_MODE_8023AD) { + struct port *port; + void *pkt = NULL; + + bond_mode_8023ad_stop(eth_dev); + + /* Discard all messages to/from mode 4 state machines */ + for (i = 0; i < internals->active_slave_count; i++) { + port = &mode_8023ad_ports[internals->active_slaves[i]]; + + RTE_VERIFY(port->rx_ring != NULL); + while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT) + rte_pktmbuf_free(pkt); + + RTE_VERIFY(port->tx_ring != NULL); + while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT) + rte_pktmbuf_free(pkt); + } + } + + if (internals->mode == BONDING_MODE_TLB || + internals->mode == BONDING_MODE_ALB) { + bond_tlb_disable(internals); + for (i = 0; i < internals->active_slave_count; i++) + tlb_last_obytets[internals->active_slaves[i]] = 0; + } + + internals->active_slave_count = 0; + internals->link_status_polling_enabled = 0; + for (i = 0; i < internals->slave_count; i++) + internals->slaves[i].last_link_status = 0; + + eth_dev->data->dev_link.link_status = ETH_LINK_DOWN; + eth_dev->data->dev_started = 0; +} + +void +bond_ethdev_close(struct rte_eth_dev *dev) +{ + bond_ethdev_free_queues(dev); +} + +/* forward declaration */ +static int bond_ethdev_configure(struct rte_eth_dev *dev); + +static void +bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct bond_dev_private *internals = dev->data->dev_private; + + dev_info->max_mac_addrs = 1; + + dev_info->max_rx_pktlen = (uint32_t)2048; + + dev_info->max_rx_queues = (uint16_t)128; + dev_info->max_tx_queues = (uint16_t)512; + + dev_info->min_rx_bufsize = 0; + dev_info->pci_dev = NULL; + + dev_info->rx_offload_capa = internals->rx_offload_capa; + dev_info->tx_offload_capa = internals->tx_offload_capa; + dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads; + + dev_info->reta_size = 
internals->reta_size; +} + +static int +bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool) +{ + struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *) + rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue), + 0, dev->data->numa_node); + if (bd_rx_q == NULL) + return -1; + + bd_rx_q->queue_id = rx_queue_id; + bd_rx_q->dev_private = dev->data->dev_private; + + bd_rx_q->nb_rx_desc = nb_rx_desc; + + memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf)); + bd_rx_q->mb_pool = mb_pool; + + dev->data->rx_queues[rx_queue_id] = bd_rx_q; + + return 0; +} + +static int +bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc, unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf) +{ + struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *) + rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue), + 0, dev->data->numa_node); + + if (bd_tx_q == NULL) + return -1; + + bd_tx_q->queue_id = tx_queue_id; + bd_tx_q->dev_private = dev->data->dev_private; + + bd_tx_q->nb_tx_desc = nb_tx_desc; + memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf)); + + dev->data->tx_queues[tx_queue_id] = bd_tx_q; + + return 0; +} + +static void +bond_ethdev_rx_queue_release(void *queue) +{ + if (queue == NULL) + return; + + rte_free(queue); +} + +static void +bond_ethdev_tx_queue_release(void *queue) +{ + if (queue == NULL) + return; + + rte_free(queue); +} + +static void +bond_ethdev_slave_link_status_change_monitor(void *cb_arg) +{ + struct rte_eth_dev *bonded_ethdev, *slave_ethdev; + struct bond_dev_private *internals; + + /* Default value for polling slave found is true as we don't want to + * disable the polling thread if we cannot get the lock */ + int i, polling_slave_found = 1; + + if (cb_arg == NULL) + return; + + bonded_ethdev = (struct rte_eth_dev *)cb_arg; + internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private; + + if (!bonded_ethdev->data->dev_started || + !internals->link_status_polling_enabled) + return; + + /* If device is currently being configured then don't check slaves link + * status, wait until next period */ + if (rte_spinlock_trylock(&internals->lock)) { + if (internals->slave_count > 0) + polling_slave_found = 0; + + for (i = 0; i < internals->slave_count; i++) { + if (!internals->slaves[i].link_status_poll_enabled) + continue; + + slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id]; + polling_slave_found = 1; + + /* Update slave link status */ + (*slave_ethdev->dev_ops->link_update)(slave_ethdev, + internals->slaves[i].link_status_wait_to_complete); + + /* if link status has changed since last checked then call lsc + * event callback */ + if (slave_ethdev->data->dev_link.link_status != + internals->slaves[i].last_link_status) { + internals->slaves[i].last_link_status = + slave_ethdev->data->dev_link.link_status; + + bond_ethdev_lsc_event_callback(internals->slaves[i].port_id, + RTE_ETH_EVENT_INTR_LSC, + &bonded_ethdev->data->port_id); + } + } + rte_spinlock_unlock(&internals->lock); + } + + if (polling_slave_found) + /* Set alarm to continue monitoring link status of slave ethdev's */ + rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000, + bond_ethdev_slave_link_status_change_monitor, cb_arg); +} + +static int +bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev, + int wait_to_complete) +{ + struct bond_dev_private 
*internals = bonded_eth_dev->data->dev_private; + + if (!bonded_eth_dev->data->dev_started || + internals->active_slave_count == 0) { + bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN; + return 0; + } else { + struct rte_eth_dev *slave_eth_dev; + int i, link_up = 0; + + for (i = 0; i < internals->active_slave_count; i++) { + slave_eth_dev = &rte_eth_devices[internals->active_slaves[i]]; + + (*slave_eth_dev->dev_ops->link_update)(slave_eth_dev, + wait_to_complete); + if (slave_eth_dev->data->dev_link.link_status == ETH_LINK_UP) { + link_up = 1; + break; + } + } + + bonded_eth_dev->data->dev_link.link_status = link_up; + } + + return 0; +} + +static void +bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct bond_dev_private *internals = dev->data->dev_private; + struct rte_eth_stats slave_stats; + int i, j; + + for (i = 0; i < internals->slave_count; i++) { + rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats); + + stats->ipackets += slave_stats.ipackets; + stats->opackets += slave_stats.opackets; + stats->ibytes += slave_stats.ibytes; + stats->obytes += slave_stats.obytes; + stats->imissed += slave_stats.imissed; + stats->ierrors += slave_stats.ierrors; + stats->oerrors += slave_stats.oerrors; + stats->imcasts += slave_stats.imcasts; + stats->rx_nombuf += slave_stats.rx_nombuf; + + for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) { + stats->q_ipackets[j] += slave_stats.q_ipackets[j]; + stats->q_opackets[j] += slave_stats.q_opackets[j]; + stats->q_ibytes[j] += slave_stats.q_ibytes[j]; + stats->q_obytes[j] += slave_stats.q_obytes[j]; + stats->q_errors[j] += slave_stats.q_errors[j]; + } + + } +} + +static void +bond_ethdev_stats_reset(struct rte_eth_dev *dev) +{ + struct bond_dev_private *internals = dev->data->dev_private; + int i; + + for (i = 0; i < internals->slave_count; i++) + rte_eth_stats_reset(internals->slaves[i].port_id); +} + +static void +bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev) +{ + struct bond_dev_private *internals = eth_dev->data->dev_private; + int i; + + internals->promiscuous_en = 1; + + switch (internals->mode) { + /* Promiscuous mode is propagated to all slaves */ + case BONDING_MODE_ROUND_ROBIN: + case BONDING_MODE_BALANCE: + case BONDING_MODE_BROADCAST: + for (i = 0; i < internals->slave_count; i++) + rte_eth_promiscuous_enable(internals->slaves[i].port_id); + break; + /* In mode4 promiscus mode is managed when slave is added/removed */ + case BONDING_MODE_8023AD: + break; + /* Promiscuous mode is propagated only to primary slave */ + case BONDING_MODE_ACTIVE_BACKUP: + case BONDING_MODE_TLB: + case BONDING_MODE_ALB: + default: + rte_eth_promiscuous_enable(internals->current_primary_port); + } +} + +static void +bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct bond_dev_private *internals = dev->data->dev_private; + int i; + + internals->promiscuous_en = 0; + + switch (internals->mode) { + /* Promiscuous mode is propagated to all slaves */ + case BONDING_MODE_ROUND_ROBIN: + case BONDING_MODE_BALANCE: + case BONDING_MODE_BROADCAST: + for (i = 0; i < internals->slave_count; i++) + rte_eth_promiscuous_disable(internals->slaves[i].port_id); + break; + /* In mode4 promiscus mode is set managed when slave is added/removed */ + case BONDING_MODE_8023AD: + break; + /* Promiscuous mode is propagated only to primary slave */ + case BONDING_MODE_ACTIVE_BACKUP: + case BONDING_MODE_TLB: + case BONDING_MODE_ALB: + default: + rte_eth_promiscuous_disable(internals->current_primary_port); + } 
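+ /* Applications toggle promiscuous mode on the bonded port only; the
+  * enable/disable handlers above fan the setting out to the slaves
+  * according to the active bonding mode. */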
+} + +static void +bond_ethdev_delayed_lsc_propagation(void *arg) +{ + if (arg == NULL) + return; + + _rte_eth_dev_callback_process((struct rte_eth_dev *)arg, + RTE_ETH_EVENT_INTR_LSC); +} + +void +bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type, + void *param) +{ + struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev; + struct bond_dev_private *internals; + struct rte_eth_link link; + + int i, valid_slave = 0; + uint8_t active_pos; + uint8_t lsc_flag = 0; + + if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL) + return; + + bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param]; + slave_eth_dev = &rte_eth_devices[port_id]; + + if (check_for_bonded_ethdev(bonded_eth_dev)) + return; + + internals = bonded_eth_dev->data->dev_private; + + /* If the device isn't started don't handle interrupts */ + if (!bonded_eth_dev->data->dev_started) + return; + + /* verify that port_id is a valid slave of bonded port */ + for (i = 0; i < internals->slave_count; i++) { + if (internals->slaves[i].port_id == port_id) { + valid_slave = 1; + break; + } + } + + if (!valid_slave) + return; + + /* Search for port in active port list */ + active_pos = find_slave_by_id(internals->active_slaves, + internals->active_slave_count, port_id); + + rte_eth_link_get_nowait(port_id, &link); + if (link.link_status) { + if (active_pos < internals->active_slave_count) + return; + + /* if no active slave ports then set this port to be primary port */ + if (internals->active_slave_count < 1) { + /* If first active slave, then change link status */ + bonded_eth_dev->data->dev_link.link_status = ETH_LINK_UP; + internals->current_primary_port = port_id; + lsc_flag = 1; + + mac_address_slaves_update(bonded_eth_dev); + + /* Inherit eth dev link properties from first active slave */ + link_properties_set(bonded_eth_dev, + &(slave_eth_dev->data->dev_link)); + } + + activate_slave(bonded_eth_dev, port_id); + + /* If user has defined the primary port then default to using it */ + if (internals->user_defined_primary_port && + internals->primary_port == port_id) + bond_ethdev_primary_set(internals, port_id); + } else { + if (active_pos == internals->active_slave_count) + return; + + /* Remove from active slave list */ + deactivate_slave(bonded_eth_dev, port_id); + + /* No active slaves, change link status to down and reset other + * link properties */ + if (internals->active_slave_count < 1) { + lsc_flag = 1; + bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN; + + link_properties_reset(bonded_eth_dev); + } + + /* Update primary id, take first active slave from list or if none + * available set to -1 */ + if (port_id == internals->current_primary_port) { + if (internals->active_slave_count > 0) + bond_ethdev_primary_set(internals, + internals->active_slaves[0]); + else + internals->current_primary_port = internals->primary_port; + } + } + + if (lsc_flag) { + /* Cancel any possible outstanding interrupts if delays are enabled */ + if (internals->link_up_delay_ms > 0 || + internals->link_down_delay_ms > 0) + rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation, + bonded_eth_dev); + + if (bonded_eth_dev->data->dev_link.link_status) { + if (internals->link_up_delay_ms > 0) + rte_eal_alarm_set(internals->link_up_delay_ms * 1000, + bond_ethdev_delayed_lsc_propagation, + (void *)bonded_eth_dev); + else + _rte_eth_dev_callback_process(bonded_eth_dev, + RTE_ETH_EVENT_INTR_LSC); + + } else { + if (internals->link_down_delay_ms > 0) + rte_eal_alarm_set(internals->link_down_delay_ms * 1000, + 
bond_ethdev_delayed_lsc_propagation, + (void *)bonded_eth_dev); + else + _rte_eth_dev_callback_process(bonded_eth_dev, + RTE_ETH_EVENT_INTR_LSC); + } + } +} + +static int +bond_ethdev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) +{ + unsigned i, j; + int result = 0; + int slave_reta_size; + unsigned reta_count; + struct bond_dev_private *internals = dev->data->dev_private; + + if (reta_size != internals->reta_size) + return -EINVAL; + + /* Copy RETA table */ + reta_count = reta_size / RTE_RETA_GROUP_SIZE; + + for (i = 0; i < reta_count; i++) { + internals->reta_conf[i].mask = reta_conf[i].mask; + for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) + if ((reta_conf[i].mask >> j) & 0x01) + internals->reta_conf[i].reta[j] = reta_conf[i].reta[j]; + } + + /* Fill rest of array */ + for (; i < RTE_DIM(internals->reta_conf); i += reta_count) + memcpy(&internals->reta_conf[i], &internals->reta_conf[0], + sizeof(internals->reta_conf[0]) * reta_count); + + /* Propagate RETA over slaves */ + for (i = 0; i < internals->slave_count; i++) { + slave_reta_size = internals->slaves[i].reta_size; + result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id, + &internals->reta_conf[0], slave_reta_size); + if (result < 0) + return result; + } + + return 0; +} + +static int +bond_ethdev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) +{ + int i, j; + struct bond_dev_private *internals = dev->data->dev_private; + + if (reta_size != internals->reta_size) + return -EINVAL; + + /* Copy RETA table */ + for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++) + for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) + if ((reta_conf[i].mask >> j) & 0x01) + reta_conf[i].reta[j] = internals->reta_conf[i].reta[j]; + + return 0; +} + +static int +bond_ethdev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + int i, result = 0; + struct bond_dev_private *internals = dev->data->dev_private; + struct rte_eth_rss_conf bond_rss_conf; + + memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf)); + + bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads; + + if (bond_rss_conf.rss_hf != 0) + dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf; + + if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len < + sizeof(internals->rss_key)) { + if (bond_rss_conf.rss_key_len == 0) + bond_rss_conf.rss_key_len = 40; + internals->rss_key_len = bond_rss_conf.rss_key_len; + memcpy(internals->rss_key, bond_rss_conf.rss_key, + internals->rss_key_len); + } + + for (i = 0; i < internals->slave_count; i++) { + result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id, + &bond_rss_conf); + if (result < 0) + return result; + } + + return 0; +} + +static int +bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct bond_dev_private *internals = dev->data->dev_private; + + rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf; + rss_conf->rss_key_len = internals->rss_key_len; + if (rss_conf->rss_key) + memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len); + + return 0; +} + +const struct eth_dev_ops default_dev_ops = { + .dev_start = bond_ethdev_start, + .dev_stop = bond_ethdev_stop, + .dev_close = bond_ethdev_close, + .dev_configure = bond_ethdev_configure, + .dev_infos_get = bond_ethdev_info, + .rx_queue_setup = bond_ethdev_rx_queue_setup, + .tx_queue_setup = bond_ethdev_tx_queue_setup, + 
.rx_queue_release = bond_ethdev_rx_queue_release, + .tx_queue_release = bond_ethdev_tx_queue_release, + .link_update = bond_ethdev_link_update, + .stats_get = bond_ethdev_stats_get, + .stats_reset = bond_ethdev_stats_reset, + .promiscuous_enable = bond_ethdev_promiscuous_enable, + .promiscuous_disable = bond_ethdev_promiscuous_disable, + .reta_update = bond_ethdev_rss_reta_update, + .reta_query = bond_ethdev_rss_reta_query, + .rss_hash_update = bond_ethdev_rss_hash_update, + .rss_hash_conf_get = bond_ethdev_rss_hash_conf_get +}; + +static int +bond_init(const char *name, const char *params) +{ + struct bond_dev_private *internals; + struct rte_kvargs *kvlist; + uint8_t bonding_mode, socket_id; + int arg_count, port_id; + + RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name); + + kvlist = rte_kvargs_parse(params, pmd_bond_init_valid_arguments); + if (kvlist == NULL) + return -1; + + /* Parse link bonding mode */ + if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) { + if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG, + &bond_ethdev_parse_slave_mode_kvarg, + &bonding_mode) != 0) { + RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n", + name); + goto parse_error; + } + } else { + RTE_LOG(ERR, EAL, "Mode must be specified only once for bonded " + "device %s\n", name); + goto parse_error; + } + + /* Parse socket id to create bonding device on */ + arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG); + if (arg_count == 1) { + if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG, + &bond_ethdev_parse_socket_id_kvarg, &socket_id) + != 0) { + RTE_LOG(ERR, EAL, "Invalid socket Id specified for " + "bonded device %s\n", name); + goto parse_error; + } + } else if (arg_count > 1) { + RTE_LOG(ERR, EAL, "Socket Id can be specified only once for " + "bonded device %s\n", name); + goto parse_error; + } else { + socket_id = rte_socket_id(); + } + + /* Create link bonding eth device */ + port_id = rte_eth_bond_create(name, bonding_mode, socket_id); + if (port_id < 0) { + RTE_LOG(ERR, EAL, "Failed to create bonded device %s in mode %u on " + "socket %u.\n", name, bonding_mode, socket_id); + goto parse_error; + } + internals = rte_eth_devices[port_id].data->dev_private; + internals->kvlist = kvlist; + + RTE_LOG(INFO, EAL, "Created bonded device %s on port %d in mode %u on " + "socket %u.\n", name, port_id, bonding_mode, socket_id); + return 0; + +parse_error: + rte_kvargs_free(kvlist); + + return -1; +} + +static int +bond_uninit(const char *name) +{ + int ret; + + if (name == NULL) + return -EINVAL; + + RTE_LOG(INFO, EAL, "Uninitializing pmd_bond for %s\n", name); + + /* free link bonding eth device */ + ret = rte_eth_bond_free(name); + if (ret < 0) + RTE_LOG(ERR, EAL, "Failed to free %s\n", name); + + return ret; +} + +/* This part will resolve the slave port ids after all the other pdevs and + * vdevs have been allocated */ +static int +bond_ethdev_configure(struct rte_eth_dev *dev) +{ + char *name = dev->data->name; + struct bond_dev_private *internals = dev->data->dev_private; + struct rte_kvargs *kvlist = internals->kvlist; + int arg_count; + uint8_t port_id = dev - rte_eth_devices; + + static const uint8_t default_rss_key[40] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D, + 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B, + 0xBE, 0xAC, 0x01, 0xFA + }; + + unsigned i, j; + + /* If RSS is enabled, fill table and key with default values */ + if
(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) { + dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = internals->rss_key; + dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = 0; + memcpy(internals->rss_key, default_rss_key, 40); + + for (i = 0; i < RTE_DIM(internals->reta_conf); i++) { + internals->reta_conf[i].mask = ~0LL; + for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) + internals->reta_conf[i].reta[j] = j % dev->data->nb_rx_queues; + } + } + + /* + * if no kvlist, it means that this bonded device has been created + * through the bonding api. + */ + if (!kvlist) + return 0; + + /* Parse MAC address for bonded device */ + arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG); + if (arg_count == 1) { + struct ether_addr bond_mac; + + if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG, + &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) { + RTE_LOG(INFO, EAL, "Invalid mac address for bonded device %s\n", + name); + return -1; + } + + /* Set MAC address */ + if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) { + RTE_LOG(ERR, EAL, + "Failed to set mac address on bonded device %s\n", + name); + return -1; + } + } else if (arg_count > 1) { + RTE_LOG(ERR, EAL, + "MAC address can be specified only once for bonded device %s\n", + name); + return -1; + } + + /* Parse/set balance mode transmit policy */ + arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG); + if (arg_count == 1) { + uint8_t xmit_policy; + + if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG, + &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) != + 0) { + RTE_LOG(INFO, EAL, + "Invalid xmit policy specified for bonded device %s\n", + name); + return -1; + } + + /* Set balance mode transmit policy */ + if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) { + RTE_LOG(ERR, EAL, + "Failed to set balance xmit policy on bonded device %s\n", + name); + return -1; + } + } else if (arg_count > 1) { + RTE_LOG(ERR, EAL, + "Transmit policy can be specified only once for bonded device" + " %s\n", name); + return -1; + } + + /* Parse/add slave ports to bonded device */ + if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) { + struct bond_ethdev_slave_ports slave_ports; + unsigned i; + + memset(&slave_ports, 0, sizeof(slave_ports)); + + if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG, + &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) { + RTE_LOG(ERR, EAL, + "Failed to parse slave ports for bonded device %s\n", + name); + return -1; + } + + for (i = 0; i < slave_ports.slave_count; i++) { + if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) { + RTE_LOG(ERR, EAL, + "Failed to add port %d as slave to bonded device %s\n", + slave_ports.slaves[i], name); + } + } + + } else { + RTE_LOG(INFO, EAL, "No slaves specified for bonded device %s\n", name); + return -1; + } + + /* Parse/set primary slave port id */ + arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG); + if (arg_count == 1) { + uint8_t primary_slave_port_id; + + if (rte_kvargs_process(kvlist, + PMD_BOND_PRIMARY_SLAVE_KVARG, + &bond_ethdev_parse_primary_slave_port_id_kvarg, + &primary_slave_port_id) < 0) { + RTE_LOG(INFO, EAL, + "Invalid primary slave port id specified for bonded device" + " %s\n", name); + return -1; + } + + /* Set primary slave port id */ + if (rte_eth_bond_primary_set(port_id, (uint8_t)primary_slave_port_id) + != 0) { + RTE_LOG(ERR, EAL, + "Failed to set primary slave port %d on bonded device %s\n", + primary_slave_port_id, name); + return -1; + } + }
else if (arg_count > 1) { + RTE_LOG(INFO, EAL, + "Primary slave can be specified only once for bonded device" + " %s\n", name); + return -1; + } + + /* Parse link status monitor polling interval */ + arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG); + if (arg_count == 1) { + uint32_t lsc_poll_interval_ms; + + if (rte_kvargs_process(kvlist, + PMD_BOND_LSC_POLL_PERIOD_KVARG, + &bond_ethdev_parse_time_ms_kvarg, + &lsc_poll_interval_ms) < 0) { + RTE_LOG(INFO, EAL, + "Invalid lsc polling interval value specified for bonded" + " device %s\n", name); + return -1; + } + + if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms) + != 0) { + RTE_LOG(ERR, EAL, + "Failed to set lsc monitor polling interval (%u ms) on" + " bonded device %s\n", lsc_poll_interval_ms, name); + return -1; + } + } else if (arg_count > 1) { + RTE_LOG(INFO, EAL, + "LSC polling interval can be specified only once for bonded" + " device %s\n", name); + return -1; + } + + /* Parse link up interrupt propagation delay */ + arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG); + if (arg_count == 1) { + uint32_t link_up_delay_ms; + + if (rte_kvargs_process(kvlist, + PMD_BOND_LINK_UP_PROP_DELAY_KVARG, + &bond_ethdev_parse_time_ms_kvarg, + &link_up_delay_ms) < 0) { + RTE_LOG(INFO, EAL, + "Invalid link up propagation delay value specified for" + " bonded device %s\n", name); + return -1; + } + + /* Set link up propagation delay */ + if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms) + != 0) { + RTE_LOG(ERR, EAL, + "Failed to set link up propagation delay (%u ms) on bonded" + " device %s\n", link_up_delay_ms, name); + return -1; + } + } else if (arg_count > 1) { + RTE_LOG(INFO, EAL, + "Link up propagation delay can be specified only once for" + " bonded device %s\n", name); + return -1; + } + + /* Parse link down interrupt propagation delay */ + arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG); + if (arg_count == 1) { + uint32_t link_down_delay_ms; + + if (rte_kvargs_process(kvlist, + PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG, + &bond_ethdev_parse_time_ms_kvarg, + &link_down_delay_ms) < 0) { + RTE_LOG(INFO, EAL, + "Invalid link down propagation delay value specified for" + " bonded device %s\n", name); + return -1; + } + + /* Set link down propagation delay */ + if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms) + != 0) { + RTE_LOG(ERR, EAL, + "Failed to set link down propagation delay (%u ms) on" + " bonded device %s\n", link_down_delay_ms, name); + return -1; + } + } else if (arg_count > 1) { + RTE_LOG(INFO, EAL, + "Link down propagation delay can be specified only once for" + " bonded device %s\n", name); + return -1; + } + + return 0; +} + +static struct rte_driver bond_drv = { + .name = "eth_bond", + .type = PMD_VDEV, + .init = bond_init, + .uninit = bond_uninit, +}; + +PMD_REGISTER_DRIVER(bond_drv); diff --git a/drivers/net/bonding/rte_eth_bond_private.h b/drivers/net/bonding/rte_eth_bond_private.h new file mode 100644 index 00000000..83123978 --- /dev/null +++ b/drivers/net/bonding/rte_eth_bond_private.h @@ -0,0 +1,305 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _RTE_ETH_BOND_PRIVATE_H_ +#define _RTE_ETH_BOND_PRIVATE_H_ + +#include <rte_ethdev.h> +#include <rte_spinlock.h> + +#include "rte_eth_bond.h" +#include "rte_eth_bond_8023ad_private.h" +#include "rte_eth_bond_alb.h" + +#define PMD_BOND_SLAVE_PORT_KVARG ("slave") +#define PMD_BOND_PRIMARY_SLAVE_KVARG ("primary") +#define PMD_BOND_MODE_KVARG ("mode") +#define PMD_BOND_XMIT_POLICY_KVARG ("xmit_policy") +#define PMD_BOND_SOCKET_ID_KVARG ("socket_id") +#define PMD_BOND_MAC_ADDR_KVARG ("mac") +#define PMD_BOND_LSC_POLL_PERIOD_KVARG ("lsc_poll_period_ms") +#define PMD_BOND_LINK_UP_PROP_DELAY_KVARG ("up_delay") +#define PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG ("down_delay") + +#define PMD_BOND_XMIT_POLICY_LAYER2_KVARG ("l2") +#define PMD_BOND_XMIT_POLICY_LAYER23_KVARG ("l23") +#define PMD_BOND_XMIT_POLICY_LAYER34_KVARG ("l34") + +#define RTE_BOND_LOG(lvl, msg, ...) 
\ + RTE_LOG(lvl, PMD, "%s(%d) - " msg "\n", __func__, __LINE__, ##__VA_ARGS__) + +#define BONDING_MODE_INVALID 0xFF + +extern const char *pmd_bond_init_valid_arguments[]; + +extern const char pmd_bond_driver_name[]; + +/** Port Queue Mapping Structure */ +struct bond_rx_queue { + uint16_t queue_id; + /**< Queue Id */ + struct bond_dev_private *dev_private; + /**< Reference to eth_dev private structure */ + uint16_t nb_rx_desc; + /**< Number of RX descriptors available for the queue */ + struct rte_eth_rxconf rx_conf; + /**< Copy of RX configuration structure for queue */ + struct rte_mempool *mb_pool; + /**< Reference to mbuf pool to use for RX queue */ +}; + +struct bond_tx_queue { + uint16_t queue_id; + /**< Queue Id */ + struct bond_dev_private *dev_private; + /**< Reference to dev private structure */ + uint16_t nb_tx_desc; + /**< Number of TX descriptors available for the queue */ + struct rte_eth_txconf tx_conf; + /**< Copy of TX configuration structure for queue */ +}; + +/** Bonded slave devices structure */ +struct bond_ethdev_slave_ports { + uint8_t slaves[RTE_MAX_ETHPORTS]; /**< Slave port id array */ + uint8_t slave_count; /**< Number of slaves */ +}; + +struct bond_slave_details { + uint8_t port_id; + /**< Port Id of slave eth_dev */ + + uint8_t link_status_poll_enabled; + uint8_t link_status_wait_to_complete; + uint8_t last_link_status; + + struct ether_addr persisted_mac_addr; + + uint16_t reta_size; +}; + + +typedef uint16_t (*xmit_hash_t)(const struct rte_mbuf *buf, uint8_t slave_count); + +/** Link Bonding PMD device private configuration Structure */ +struct bond_dev_private { + uint8_t port_id; /**< Port Id of Bonded Port */ + uint8_t mode; /**< Link Bonding Mode */ + + rte_spinlock_t lock; + + uint8_t primary_port; /**< Primary Slave Port */ + uint8_t current_primary_port; /**< Current primary slave port */ + uint8_t user_defined_primary_port; + /**< Flag for whether primary port is user defined or not */ + + uint8_t balance_xmit_policy; + /**< Transmit policy - l2 / l23 / l34 for operation in balance mode */ + xmit_hash_t xmit_hash; + /**< Transmit policy hash function */ + + uint8_t user_defined_mac; + /**< Flag for whether MAC address is user defined or not */ + uint8_t promiscuous_en; + /**< Enable/disable promiscuous mode on bonding device */ + uint8_t link_props_set; + /**< flag to denote if the link properties are set */ + + uint8_t link_status_polling_enabled; + uint32_t link_status_polling_interval_ms; + + uint32_t link_down_delay_ms; + uint32_t link_up_delay_ms; + + uint16_t nb_rx_queues; /**< Total number of rx queues */ + uint16_t nb_tx_queues; /**< Total number of tx queues */ + + uint8_t active_slave_count; /**< Number of active slaves */ + uint8_t active_slaves[RTE_MAX_ETHPORTS]; /**< Active slave list */ + + uint8_t slave_count; /**< Number of bonded slaves */ + struct bond_slave_details slaves[RTE_MAX_ETHPORTS]; + /**< Array of bonded slave details */ + + struct mode8023ad_private mode4; + uint8_t tlb_slaves_order[RTE_MAX_ETHPORTS]; /* TLB active slaves send order */ + struct mode_alb_private mode6; + + uint32_t rx_offload_capa; /**< Rx offload capability */ + uint32_t tx_offload_capa; /**< Tx offload capability */ + + /** Bit mask of RSS offloads, the bit offset also means flow type */ + uint64_t flow_type_rss_offloads; + + uint16_t reta_size; + struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 / + RTE_RETA_GROUP_SIZE]; + + uint8_t rss_key[52]; /**< 52-byte hash key buffer. */ + uint8_t rss_key_len; /**< hash key length in bytes.
*/ + + struct rte_kvargs *kvlist; + uint8_t slave_update_idx; +}; + +extern const struct eth_dev_ops default_dev_ops; + +int +check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev); + +/* Search given slave array to find position of given id. + * Return the slave position, or slaves_count if not found. */ +static inline uint8_t +find_slave_by_id(uint8_t *slaves, uint8_t slaves_count, uint8_t slave_id) { + + uint8_t pos; + for (pos = 0; pos < slaves_count; pos++) { + if (slave_id == slaves[pos]) + break; + } + + return pos; +} + +int +valid_port_id(uint8_t port_id); + +int +valid_bonded_port_id(uint8_t port_id); + +int +valid_slave_port_id(uint8_t port_id); + +void +deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id); + +void +activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id); + +void +link_properties_set(struct rte_eth_dev *bonded_eth_dev, + struct rte_eth_link *slave_dev_link); +void +link_properties_reset(struct rte_eth_dev *bonded_eth_dev); + +int +link_properties_valid(struct rte_eth_link *bonded_dev_link, + struct rte_eth_link *slave_dev_link); + +int +mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr); + +int +mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr); + +int +mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev); + +uint8_t +number_of_sockets(void); + +int +bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode); + +int +slave_configure(struct rte_eth_dev *bonded_eth_dev, + struct rte_eth_dev *slave_eth_dev); + +void +slave_remove(struct bond_dev_private *internals, + struct rte_eth_dev *slave_eth_dev); + +void +slave_add(struct bond_dev_private *internals, + struct rte_eth_dev *slave_eth_dev); + +uint16_t +xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count); + +uint16_t +xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count); + +uint16_t +xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count); + +void +bond_ethdev_primary_set(struct bond_dev_private *internals, + uint8_t slave_port_id); + +void +bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type, + void *param); + +int +bond_ethdev_parse_slave_port_kvarg(const char *key __rte_unused, + const char *value, void *extra_args); + +int +bond_ethdev_parse_slave_mode_kvarg(const char *key __rte_unused, + const char *value, void *extra_args); + +int +bond_ethdev_parse_socket_id_kvarg(const char *key __rte_unused, + const char *value, void *extra_args); + +int +bond_ethdev_parse_primary_slave_port_id_kvarg(const char *key __rte_unused, + const char *value, void *extra_args); + +int +bond_ethdev_parse_balance_xmit_policy_kvarg(const char *key __rte_unused, + const char *value, void *extra_args); + +int +bond_ethdev_parse_bond_mac_addr_kvarg(const char *key __rte_unused, + const char *value, void *extra_args); + +int +bond_ethdev_parse_time_ms_kvarg(const char *key __rte_unused, + const char *value, void *extra_args); + +void +bond_tlb_disable(struct bond_dev_private *internals); + +void +bond_tlb_enable(struct bond_dev_private *internals); + +void +bond_tlb_activate_slave(struct bond_dev_private *internals); + +void +bond_ethdev_stop(struct rte_eth_dev *eth_dev); + +void +bond_ethdev_close(struct rte_eth_dev *dev); + +#endif diff --git a/drivers/net/bonding/rte_eth_bond_version.map b/drivers/net/bonding/rte_eth_bond_version.map new file mode 100644 index 00000000..22bd9200 --- /dev/null +++ b/drivers/net/bonding/rte_eth_bond_version.map @@ -0,0 +1,29 @@ +DPDK_2.0 { + global: + +
rte_eth_bond_8023ad_conf_get; + rte_eth_bond_8023ad_setup; + rte_eth_bond_active_slaves_get; + rte_eth_bond_create; + rte_eth_bond_link_monitoring_set; + rte_eth_bond_mac_address_reset; + rte_eth_bond_mac_address_set; + rte_eth_bond_mode_get; + rte_eth_bond_mode_set; + rte_eth_bond_primary_get; + rte_eth_bond_primary_set; + rte_eth_bond_slave_add; + rte_eth_bond_slave_remove; + rte_eth_bond_slaves_get; + rte_eth_bond_xmit_policy_get; + rte_eth_bond_xmit_policy_set; + + local: *; +}; + +DPDK_2.1 { + global: + + rte_eth_bond_free; + +} DPDK_2.0; diff --git a/drivers/net/cxgbe/Makefile b/drivers/net/cxgbe/Makefile new file mode 100644 index 00000000..07119764 --- /dev/null +++ b/drivers/net/cxgbe/Makefile @@ -0,0 +1,87 @@ +# BSD LICENSE +# +# Copyright(c) 2014-2015 Chelsio Communications. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Chelsio Communications nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_cxgbe.a + +CFLAGS += -I$(SRCDIR)/base/ +CFLAGS += -I$(SRCDIR) +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +EXPORT_MAP := rte_pmd_cxgbe_version.map + +LIBABIVER := 1 + +ifeq ($(CC), icc) +# +# CFLAGS for icc +# +CFLAGS_BASE_DRIVER = -wd188 +else +# +# CFLAGS for gcc/clang +# +ifeq ($(shell test $(CC) = gcc && test $(GCC_VERSION) -ge 44 && echo 1), 1) +CFLAGS += -Wno-deprecated +endif +CFLAGS_BASE_DRIVER = + +endif + +# +# Add extra flags for base driver files (also known as shared code) +# to disable warnings in them +# +BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))) +$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER))) + +VPATH += $(SRCDIR)/base + + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_main.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += sge.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4_hw.c + +# this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += lib/librte_eal lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += lib/librte_mempool lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += lib/librte_net lib/librte_malloc + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h new file mode 100644 index 00000000..a5225c0e --- /dev/null +++ b/drivers/net/cxgbe/base/adapter.h @@ -0,0 +1,576 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2015 Chelsio Communications. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Chelsio Communications nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* This file should not be included directly. Include common.h instead. 
*/ + +#ifndef __T4_ADAPTER_H__ +#define __T4_ADAPTER_H__ + +#include <rte_mbuf.h> + +#include "cxgbe_compat.h" +#include "t4_regs_values.h" + +enum { + MAX_ETH_QSETS = 64, /* # of Ethernet Tx/Rx queue sets */ +}; + +struct adapter; +struct sge_rspq; + +enum { + PORT_RSS_DONE = (1 << 0), +}; + +struct port_info { + struct adapter *adapter; /* adapter that this port belongs to */ + struct rte_eth_dev *eth_dev; /* associated rte eth device */ + struct port_stats stats_base; /* port statistics base */ + struct link_config link_cfg; /* link configuration info */ + + unsigned long flags; /* port related flags */ + short int xact_addr_filt; /* index of exact MAC address filter */ + + u16 viid; /* associated virtual interface id */ + s8 mdio_addr; /* address of the PHY */ + u8 port_type; /* firmware port type */ + u8 mod_type; /* firmware module type */ + u8 port_id; /* physical port ID */ + u8 tx_chan; /* associated channel */ + + u8 n_rx_qsets; /* # of rx qsets */ + u8 n_tx_qsets; /* # of tx qsets */ + u8 first_qset; /* index of first qset */ + + u16 *rss; /* rss table */ + u8 rss_mode; /* rss mode */ + u16 rss_size; /* size of VI's RSS table slice */ +}; + +/* Enable or disable autonegotiation. If this is set to enable, + * the forced link modes above are completely ignored. + */ +#define AUTONEG_DISABLE 0x00 +#define AUTONEG_ENABLE 0x01 + +enum { /* adapter flags */ + FULL_INIT_DONE = (1 << 0), + USING_MSI = (1 << 1), + USING_MSIX = (1 << 2), + FW_QUEUE_BOUND = (1 << 3), + FW_OK = (1 << 4), + CFG_QUEUES = (1 << 5), + MASTER_PF = (1 << 6), +}; + +struct rx_sw_desc { /* SW state per Rx descriptor */ + void *buf; /* struct page or mbuf */ + dma_addr_t dma_addr; +}; + +struct sge_fl { /* SGE free-buffer queue state */ + /* RO fields */ + struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ + + dma_addr_t addr; /* bus address of HW ring start */ + __be64 *desc; /* address of HW Rx descriptor ring */ + + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ + + unsigned int cntxt_id; /* SGE relative QID for the free list */ + unsigned int size; /* capacity of free list */ + + unsigned int avail; /* # of available Rx buffers */ + unsigned int pend_cred; /* new buffers since last FL DB ring */ + unsigned int cidx; /* consumer index */ + unsigned int pidx; /* producer index */ + + unsigned long alloc_failed; /* # of times buffer allocation failed */ + unsigned long low; /* # of times momentarily starving */ +}; + +#define MAX_MBUF_FRAGS (16384 / 512 + 2) + +/* A packet gather list */ +struct pkt_gl { + union { + struct rte_mbuf *mbufs[MAX_MBUF_FRAGS]; + } /* UNNAMED */; + void *va; /* virtual address of first byte */ + unsigned int nfrags; /* # of fragments */ + unsigned int tot_len; /* total length of fragments */ + bool usembufs; /* use mbufs for fragments */ +}; + +typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl); + +struct sge_rspq { /* state for an SGE response queue */ + struct adapter *adapter; /* adapter that this queue belongs to */ + struct rte_eth_dev *eth_dev; /* associated rte eth device */ + struct rte_mempool *mb_pool; /* associated mempool */ + + dma_addr_t phys_addr; /* physical address of the ring */ + __be64 *desc; /* address of HW response ring */ + const __be64 *cur_desc; /* current descriptor in queue */ + + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ + + unsigned int 
cidx; /* consumer index */ + unsigned int gts_idx; /* last gts write sent */ + unsigned int iqe_len; /* entry size */ + unsigned int size; /* capacity of response queue */ + int offset; /* offset into current Rx buffer */ + + u8 gen; /* current generation bit */ + u8 intr_params; /* interrupt holdoff parameters */ + u8 next_intr_params; /* holdoff params for next interrupt */ + u8 pktcnt_idx; /* interrupt packet threshold */ + u8 port_id; /* associated port-id */ + u8 idx; /* queue index within its group */ + u16 cntxt_id; /* SGE relative QID for the response Q */ + u16 abs_id; /* absolute SGE id for the response q */ + + rspq_handler_t handler; /* associated handler for this response q */ +}; + +struct sge_eth_rx_stats { /* Ethernet rx queue statistics */ + u64 pkts; /* # of ethernet packets */ + u64 rx_bytes; /* # of ethernet bytes */ + u64 rx_cso; /* # of Rx checksum offloads */ + u64 vlan_ex; /* # of Rx VLAN extractions */ + u64 rx_drops; /* # of packets dropped due to no mem */ +}; + +struct sge_eth_rxq { /* a SW Ethernet Rx queue */ + struct sge_rspq rspq; + struct sge_fl fl; + struct sge_eth_rx_stats stats; + bool usembufs; /* one ingress packet per mbuf FL buffer */ +} __rte_cache_aligned; + +/* + * Currently there are two types of coalesce WR. Type 0 needs 48 bytes per + * packet (if one sgl is present) and type 1 needs 32 bytes. This means + * that type 0 can fit a maximum of 10 packets per WR and type 1 can fit + * 15 packets. We need to keep track of the mbuf pointers in a coalesce WR + * to be able to free those mbufs when we get completions back from the FW. + * Allocating the maximum number of pointers in every tx desc is a waste + * of memory resources so we only store 2 pointers per tx desc which should + * be enough since a tx desc can only fit 2 packets in the best case + * scenario where a packet needs 32 bytes. 
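 + * + * For illustration, both limits work out to the same per-WR budget quoted + * above: 10 packets * 48 bytes == 15 packets * 32 bytes == 480 bytes of + * coalesce space per work request.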
+ */ +#define ETH_COALESCE_PKT_NUM 15 +#define ETH_COALESCE_PKT_PER_DESC 2 + +struct tx_eth_coal_desc { + struct rte_mbuf *mbuf[ETH_COALESCE_PKT_PER_DESC]; + struct ulptx_sgl *sgl[ETH_COALESCE_PKT_PER_DESC]; + int idx; +}; + +struct tx_desc { + __be64 flit[8]; +}; + +struct tx_sw_desc { /* SW state per Tx descriptor */ + struct rte_mbuf *mbuf; + struct ulptx_sgl *sgl; + struct tx_eth_coal_desc coalesce; +}; + +enum { + EQ_STOPPED = (1 << 0), +}; + +struct eth_coalesce { + unsigned char *ptr; + unsigned char type; + unsigned int idx; + unsigned int len; + unsigned int flits; + unsigned int max; +}; + +struct sge_txq { + struct tx_desc *desc; /* address of HW Tx descriptor ring */ + struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */ + struct sge_qstat *stat; /* queue status entry */ + struct eth_coalesce coalesce; /* coalesce info */ + + uint64_t phys_addr; /* physical address of the ring */ + + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ + + unsigned int cntxt_id; /* SGE relative QID for the Tx Q */ + unsigned int in_use; /* # of in-use Tx descriptors */ + unsigned int size; /* # of descriptors */ + unsigned int cidx; /* SW consumer index */ + unsigned int pidx; /* producer index */ + unsigned int dbidx; /* last idx when db ring was done */ + unsigned int equeidx; /* last sent credit request */ + unsigned int last_pidx; /* last pidx recorded by tx monitor */ + unsigned int last_coal_idx;/* last coal-idx recorded by tx monitor */ + + int db_disabled; /* doorbell state */ + unsigned short db_pidx; /* doorbell producer index */ + unsigned short db_pidx_inc; /* doorbell producer increment */ +}; + +struct sge_eth_tx_stats { /* Ethernet tx queue statistics */ + u64 pkts; /* # of ethernet packets */ + u64 tx_bytes; /* # of ethernet bytes */ + u64 tso; /* # of TSO requests */ + u64 tx_cso; /* # of Tx checksum offloads */ + u64 vlan_ins; /* # of Tx VLAN insertions */ + u64 mapping_err; /* # of I/O MMU packet mapping errors */ + u64 coal_wr; /* # of coalesced wr */ + u64 coal_pkts; /* # of coalesced packets */ +}; + +struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ + struct sge_txq q; + struct rte_eth_dev *eth_dev; /* port that this queue belongs to */ + struct sge_eth_tx_stats stats; /* queue statistics */ + rte_spinlock_t txq_lock; + + unsigned int flags; /* flags for state of the queue */ +} __rte_cache_aligned; + +struct sge { + struct sge_eth_txq ethtxq[MAX_ETH_QSETS]; + struct sge_eth_rxq ethrxq[MAX_ETH_QSETS]; + struct sge_rspq fw_evtq __rte_cache_aligned; + + u16 max_ethqsets; /* # of available Ethernet queue sets */ + u32 stat_len; /* length of status page at ring end */ + u32 pktshift; /* padding between CPL & packet data */ + + /* response queue interrupt parameters */ + u16 timer_val[SGE_NTIMERS]; + u8 counter_val[SGE_NCOUNTERS]; + + u32 fl_align; /* response queue message alignment */ + u32 fl_pg_order; /* large page allocation size */ + u32 fl_starve_thres; /* Free List starvation threshold */ +}; + +#define T4_OS_NEEDS_MBOX_LOCKING 1 + +/* + * OS Lock/List primitives for those interfaces in the Common Code which + * need this. 
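 + * + * For illustration only (the real callers live in the Common Code), a mailbox + * user is expected to serialise itself roughly as follows, using the helpers + * defined further down in this file: + * + *	struct mbox_entry entry; + * + *	t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock); + *	(spin until t4_os_list_first_entry(&adap->mbox_list) == &entry, + *	 then access the mailbox registers) + *	t4_os_atomic_list_del(&entry, &adap->mbox_list, &adap->mbox_lock);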
+ */ + +struct mbox_entry { + TAILQ_ENTRY(mbox_entry) next; +}; + +TAILQ_HEAD(mbox_list, mbox_entry); + +struct adapter { + struct rte_pci_device *pdev; /* associated rte pci device */ + struct rte_eth_dev *eth_dev; /* first port's rte eth device */ + struct adapter_params params; /* adapter parameters */ + struct port_info port[MAX_NPORTS]; /* ports belonging to this adapter */ + struct sge sge; /* associated SGE */ + + /* support for single-threading access to adapter mailbox registers */ + struct mbox_list mbox_list; + rte_spinlock_t mbox_lock; + + u8 *regs; /* pointer to registers region */ + u8 *bar2; /* pointer to bar2 region */ + unsigned long flags; /* adapter flags */ + unsigned int mbox; /* associated mailbox */ + unsigned int pf; /* associated physical function id */ + + int use_unpacked_mode; /* unpacked rx mode state */ +}; + +#define CXGBE_PCI_REG(reg) (*((volatile uint32_t *)(reg))) + +static inline uint64_t cxgbe_read_addr64(volatile void *addr) +{ + uint64_t val = CXGBE_PCI_REG(addr); + uint64_t val2 = CXGBE_PCI_REG(((volatile uint8_t *)(addr) + 4)); + + val2 = (uint64_t)(val2 << 32); + val += val2; + return val; +} + +static inline uint32_t cxgbe_read_addr(volatile void *addr) +{ + return CXGBE_PCI_REG(addr); +} + +#define CXGBE_PCI_REG_ADDR(adap, reg) \ + ((volatile uint32_t *)((char *)(adap)->regs + (reg))) + +#define CXGBE_READ_REG(adap, reg) \ + cxgbe_read_addr(CXGBE_PCI_REG_ADDR((adap), (reg))) + +#define CXGBE_READ_REG64(adap, reg) \ + cxgbe_read_addr64(CXGBE_PCI_REG_ADDR((adap), (reg))) + +#define CXGBE_PCI_REG_WRITE(reg, value) ({ \ + CXGBE_PCI_REG((reg)) = (value); }) + +#define CXGBE_WRITE_REG(adap, reg, value) \ + CXGBE_PCI_REG_WRITE(CXGBE_PCI_REG_ADDR((adap), (reg)), (value)) + +static inline uint64_t cxgbe_write_addr64(volatile void *addr, uint64_t val) +{ + CXGBE_PCI_REG(addr) = val; + CXGBE_PCI_REG(((volatile uint8_t *)(addr) + 4)) = (val >> 32); + return val; +} + +#define CXGBE_WRITE_REG64(adap, reg, value) \ + cxgbe_write_addr64(CXGBE_PCI_REG_ADDR((adap), (reg)), (value)) + +/** + * t4_read_reg - read a HW register + * @adapter: the adapter + * @reg_addr: the register address + * + * Returns the 32-bit value of the given HW register. + */ +static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr) +{ + u32 val = CXGBE_READ_REG(adapter, reg_addr); + + CXGBE_DEBUG_REG(adapter, "read register 0x%x value 0x%x\n", reg_addr, + val); + return val; +} + +/** + * t4_write_reg - write a HW register + * @adapter: the adapter + * @reg_addr: the register address + * @val: the value to write + * + * Write a 32-bit value into the given HW register. + */ +static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val) +{ + CXGBE_DEBUG_REG(adapter, "setting register 0x%x to 0x%x\n", reg_addr, + val); + CXGBE_WRITE_REG(adapter, reg_addr, val); +} + +/** + * t4_read_reg64 - read a 64-bit HW register + * @adapter: the adapter + * @reg_addr: the register address + * + * Returns the 64-bit value of the given HW register. + */ +static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr) +{ + u64 val = CXGBE_READ_REG64(adapter, reg_addr); + + CXGBE_DEBUG_REG(adapter, "64-bit read register %#x value %#llx\n", + reg_addr, (unsigned long long)val); + return val; +} + +/** + * t4_write_reg64 - write a 64-bit HW register + * @adapter: the adapter + * @reg_addr: the register address + * @val: the value to write + * + * Write a 64-bit value into the given HW register. 
*/ +static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr, + u64 val) +{ + CXGBE_DEBUG_REG(adapter, "setting register %#x to %#llx\n", reg_addr, + (unsigned long long)val); + + CXGBE_WRITE_REG64(adapter, reg_addr, val); +} + +/** + * t4_os_set_hw_addr - store a port's MAC address in SW + * @adapter: the adapter + * @port_idx: the port index + * @hw_addr: the Ethernet address + * + * Store the Ethernet address of the given port in SW. Called by the + * common code when it retrieves a port's Ethernet address from EEPROM. + */ +static inline void t4_os_set_hw_addr(struct adapter *adapter, int port_idx, + u8 hw_addr[]) +{ + struct port_info *pi = &adapter->port[port_idx]; + + ether_addr_copy((struct ether_addr *)hw_addr, + &pi->eth_dev->data->mac_addrs[0]); +} + +/** + * t4_os_lock_init - initialize spinlock + * @lock: the spinlock + */ +static inline void t4_os_lock_init(rte_spinlock_t *lock) +{ + rte_spinlock_init(lock); +} + +/** + * t4_os_lock - spin until lock is acquired + * @lock: the spinlock + */ +static inline void t4_os_lock(rte_spinlock_t *lock) +{ + rte_spinlock_lock(lock); +} + +/** + * t4_os_unlock - unlock a spinlock + * @lock: the spinlock + */ +static inline void t4_os_unlock(rte_spinlock_t *lock) +{ + rte_spinlock_unlock(lock); +} + +/** + * t4_os_trylock - try to get a lock + * @lock: the spinlock + */ +static inline int t4_os_trylock(rte_spinlock_t *lock) +{ + return rte_spinlock_trylock(lock); +} + +/** + * t4_os_init_list_head - initialize a mailbox list head + * @head: head of list to initialize [to empty] + */ +static inline void t4_os_init_list_head(struct mbox_list *head) +{ + TAILQ_INIT(head); +} + +static inline struct mbox_entry *t4_os_list_first_entry(struct mbox_list *head) +{ + return TAILQ_FIRST(head); +} + +/** + * t4_os_atomic_add_tail - Enqueue list element atomically onto list + * @entry: the entry to be added to the queue + * @head: current head of the linked list + * @lock: lock to use to guarantee atomicity + */ +static inline void t4_os_atomic_add_tail(struct mbox_entry *entry, + struct mbox_list *head, + rte_spinlock_t *lock) +{ + t4_os_lock(lock); + TAILQ_INSERT_TAIL(head, entry, next); + t4_os_unlock(lock); +} + +/** + * t4_os_atomic_list_del - Dequeue list element atomically from list + * @entry: the entry to be removed/dequeued from the list. + * @head: the list to remove the entry from + * @lock: the spinlock + */ +static inline void t4_os_atomic_list_del(struct mbox_entry *entry, + struct mbox_list *head, + rte_spinlock_t *lock) +{ + t4_os_lock(lock); + TAILQ_REMOVE(head, entry, next); + t4_os_unlock(lock); +} + +/** + * adap2pinfo - return the port_info of a port + * @adap: the adapter + * @idx: the port index + * + * Return the port_info structure for the port of the given index.
+ */ +static inline struct port_info *adap2pinfo(struct adapter *adap, int idx) +{ + return &adap->port[idx]; +} + +void *t4_alloc_mem(size_t size); +void t4_free_mem(void *addr); +#define t4_os_alloc(_size) t4_alloc_mem((_size)) +#define t4_os_free(_ptr) t4_free_mem((_ptr)) + +void t4_os_portmod_changed(const struct adapter *adap, int port_id); +void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); + +void reclaim_completed_tx(struct sge_txq *q); +void t4_free_sge_resources(struct adapter *adap); +void t4_sge_tx_monitor_start(struct adapter *adap); +void t4_sge_tx_monitor_stop(struct adapter *adap); +int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf); +int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl); +int t4_sge_init(struct adapter *adap); +int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, + struct rte_eth_dev *eth_dev, uint16_t queue_id, + unsigned int iqid, int socket_id); +int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *rspq, bool fwevtq, + struct rte_eth_dev *eth_dev, int intr_idx, + struct sge_fl *fl, rspq_handler_t handler, + int cong, struct rte_mempool *mp, int queue_id, + int socket_id); +int t4_sge_eth_txq_start(struct sge_eth_txq *txq); +int t4_sge_eth_txq_stop(struct sge_eth_txq *txq); +void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq); +int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq); +int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq); +void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq); +void t4_sge_eth_clear_queues(struct port_info *pi); +int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us, + unsigned int cnt); +int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts, + unsigned int budget, unsigned int *work_done); +int cxgb4_write_rss(const struct port_info *pi, const u16 *queues); + +#endif /* __T4_ADAPTER_H__ */ diff --git a/drivers/net/cxgbe/base/common.h b/drivers/net/cxgbe/base/common.h new file mode 100644 index 00000000..cf2e82dd --- /dev/null +++ b/drivers/net/cxgbe/base/common.h @@ -0,0 +1,401 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2015 Chelsio Communications. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Chelsio Communications nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __CHELSIO_COMMON_H +#define __CHELSIO_COMMON_H + +#include "cxgbe_compat.h" +#include "t4_hw.h" +#include "t4_chip_type.h" +#include "t4fw_interface.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define CXGBE_PAGE_SIZE RTE_PGSIZE_4K + +enum { + MAX_NPORTS = 4, /* max # of ports */ +}; + +enum { + MEMWIN0_APERTURE = 2048, + MEMWIN0_BASE = 0x1b800, +}; + +enum dev_master { MASTER_CANT, MASTER_MAY, MASTER_MUST }; + +enum dev_state { DEV_STATE_UNINIT, DEV_STATE_INIT, DEV_STATE_ERR }; + +enum { + PAUSE_RX = 1 << 0, + PAUSE_TX = 1 << 1, + PAUSE_AUTONEG = 1 << 2 +}; + +struct port_stats { + u64 tx_octets; /* total # of octets in good frames */ + u64 tx_frames; /* all good frames */ + u64 tx_bcast_frames; /* all broadcast frames */ + u64 tx_mcast_frames; /* all multicast frames */ + u64 tx_ucast_frames; /* all unicast frames */ + u64 tx_error_frames; /* all error frames */ + + u64 tx_frames_64; /* # of Tx frames in a particular range */ + u64 tx_frames_65_127; + u64 tx_frames_128_255; + u64 tx_frames_256_511; + u64 tx_frames_512_1023; + u64 tx_frames_1024_1518; + u64 tx_frames_1519_max; + + u64 tx_drop; /* # of dropped Tx frames */ + u64 tx_pause; /* # of transmitted pause frames */ + u64 tx_ppp0; /* # of transmitted PPP prio 0 frames */ + u64 tx_ppp1; /* # of transmitted PPP prio 1 frames */ + u64 tx_ppp2; /* # of transmitted PPP prio 2 frames */ + u64 tx_ppp3; /* # of transmitted PPP prio 3 frames */ + u64 tx_ppp4; /* # of transmitted PPP prio 4 frames */ + u64 tx_ppp5; /* # of transmitted PPP prio 5 frames */ + u64 tx_ppp6; /* # of transmitted PPP prio 6 frames */ + u64 tx_ppp7; /* # of transmitted PPP prio 7 frames */ + + u64 rx_octets; /* total # of octets in good frames */ + u64 rx_frames; /* all good frames */ + u64 rx_bcast_frames; /* all broadcast frames */ + u64 rx_mcast_frames; /* all multicast frames */ + u64 rx_ucast_frames; /* all unicast frames */ + u64 rx_too_long; /* # of frames exceeding MTU */ + u64 rx_jabber; /* # of jabber frames */ + u64 rx_fcs_err; /* # of received frames with bad FCS */ + u64 rx_len_err; /* # of received frames with length error */ + u64 rx_symbol_err; /* symbol errors */ + u64 rx_runt; /* # of short frames */ + + u64 rx_frames_64; /* # of Rx frames in a particular range */ + u64 rx_frames_65_127; + u64 rx_frames_128_255; + u64 rx_frames_256_511; + u64 rx_frames_512_1023; + u64 rx_frames_1024_1518; + u64 rx_frames_1519_max; + + u64 rx_pause; /* # of received pause frames */ + u64 rx_ppp0; /* # of received PPP prio 0 frames */ + u64 rx_ppp1; /* # of received PPP prio 1 frames */ + u64 rx_ppp2; /* # of received PPP prio 2 frames */ + u64 rx_ppp3; /* # of received PPP prio 3 frames */ + u64 rx_ppp4; /* # of received PPP prio 4 frames */ + u64 rx_ppp5; /* # of received PPP prio 5 frames */ + u64 rx_ppp6; /* # of received PPP prio 6 frames */ + u64 rx_ppp7; /* # of received PPP prio 7 frames */ + + u64 rx_ovflow0; /* drops due to buffer-group 0 overflows */ + u64 rx_ovflow1; /* drops due to buffer-group 1 
overflows */ + u64 rx_ovflow2; /* drops due to buffer-group 2 overflows */ + u64 rx_ovflow3; /* drops due to buffer-group 3 overflows */ + u64 rx_trunc0; /* buffer-group 0 truncated packets */ + u64 rx_trunc1; /* buffer-group 1 truncated packets */ + u64 rx_trunc2; /* buffer-group 2 truncated packets */ + u64 rx_trunc3; /* buffer-group 3 truncated packets */ +}; + +struct sge_params { + u32 hps; /* host page size for our PF/VF */ + u32 eq_qpp; /* egress queues/page for our PF/VF */ + u32 iq_qpp; /* ingress queues/page for our PF/VF */ +}; + +struct tp_params { + unsigned int ntxchan; /* # of Tx channels */ + unsigned int tre; /* log2 of core clocks per TP tick */ + unsigned int dack_re; /* DACK timer resolution */ + unsigned int la_mask; /* what events are recorded by TP LA */ + unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */ + + u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */ + u32 ingress_config; /* cached TP_INGRESS_CONFIG */ + + /* + * TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a + * subset of the set of fields which may be present in the Compressed + * Filter Tuple portion of filters and TCP TCB connections. The + * fields which are present are controlled by the TP_VLAN_PRI_MAP. + * Since a variable number of fields may or may not be present, their + * shifted field positions within the Compressed Filter Tuple may + * vary, or not even be present if the field isn't selected in + * TP_VLAN_PRI_MAP. Since some of these fields are needed in various + * places we store their offsets here, or a -1 if the field isn't + * present. + */ + int vlan_shift; + int vnic_shift; + int port_shift; + int protocol_shift; +}; + +struct vpd_params { + unsigned int cclk; +}; + +struct pci_params { + uint16_t vendor_id; + uint16_t device_id; + uint32_t vpd_cap_addr; + uint16_t speed; + uint8_t width; +}; + +/* + * Firmware device log. + */ +struct devlog_params { + u32 memtype; /* which memory (EDC0, EDC1, MC) */ + u32 start; /* start of log in firmware memory */ + u32 size; /* size of log */ +}; + +struct arch_specific_params { + u8 nchan; + u16 mps_rplc_size; + u16 vfcount; + u32 sge_fl_db; + u16 mps_tcam_size; +}; + +struct adapter_params { + struct sge_params sge; + struct tp_params tp; + struct vpd_params vpd; + struct pci_params pci; + struct devlog_params devlog; + enum pcie_memwin drv_memwin; + + unsigned int sf_size; /* serial flash size in bytes */ + unsigned int sf_nsec; /* # of flash sectors */ + + unsigned int fw_vers; + unsigned int tp_vers; + + unsigned short mtus[NMTUS]; + unsigned short a_wnd[NCCTRL_WIN]; + unsigned short b_wnd[NCCTRL_WIN]; + + unsigned int mc_size; /* MC memory size */ + unsigned int cim_la_size; + + unsigned char nports; /* # of ethernet ports */ + unsigned char portvec; + + enum chip_type chip; /* chip code */ + struct arch_specific_params arch; /* chip specific params */ + + bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */ +}; + +struct link_config { + unsigned short supported; /* link capabilities */ + unsigned short advertising; /* advertised capabilities */ + unsigned short requested_speed; /* speed user has requested */ + unsigned short speed; /* actual link speed */ + unsigned char requested_fc; /* flow control user has requested */ + unsigned char fc; /* actual link flow control */ + unsigned char autoneg; /* autonegotiating? */ + unsigned char link_ok; /* link up?
*/ +}; + +#include "adapter.h" + +void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, + u32 val); +int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, + int polarity, + int attempts, int delay, u32 *valp); + +static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, + int polarity, int attempts, int delay) +{ + return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts, + delay, NULL); +} + +#define for_each_port(adapter, iter) \ + for (iter = 0; iter < (adapter)->params.nports; ++iter) + +void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); +void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, + unsigned int mask, unsigned int val); +void t4_intr_enable(struct adapter *adapter); +void t4_intr_disable(struct adapter *adapter); +int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, + struct link_config *lc); +void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, + const unsigned short *alpha, const unsigned short *beta); +int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, + enum dev_master master, enum dev_state *state); +int t4_fw_bye(struct adapter *adap, unsigned int mbox); +int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset); +int t4_fw_halt(struct adapter *adap, unsigned int mbox, int reset); +int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset); +int t4_fixup_host_params_compat(struct adapter *adap, unsigned int page_size, + unsigned int cache_line_size, + enum chip_type chip_compat); +int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, + unsigned int cache_line_size); +int t4_fw_initialize(struct adapter *adap, unsigned int mbox); +int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + u32 *val); +int t4_set_params_timeout(struct adapter *adap, unsigned int mbox, + unsigned int pf, unsigned int vf, + unsigned int nparams, const u32 *params, + const u32 *val, int timeout); +int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + const u32 *val); +int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox, + unsigned int port, unsigned int pf, unsigned int vf, + unsigned int nmac, u8 *mac, unsigned int *rss_size, + unsigned int portfunc, unsigned int idstype); +int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, + unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, + unsigned int *rss_size); +int t4_free_vi(struct adapter *adap, unsigned int mbox, + unsigned int pf, unsigned int vf, + unsigned int viid); +int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, + int mtu, int promisc, int all_multi, int bcast, int vlanex, + bool sleep_ok); +int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, + int idx, const u8 *addr, bool persist, bool add_smt); +int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, + unsigned int viid, bool rx_en, bool tx_en, bool dcb_en); +int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool rx_en, bool tx_en); +int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start, + unsigned int pf, unsigned int vf, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id); +int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int 
vf, unsigned int iqtype, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id); +int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid); + +static inline unsigned int core_ticks_per_usec(const struct adapter *adap) +{ + return adap->params.vpd.cclk / 1000; +} + +static inline unsigned int us_to_core_ticks(const struct adapter *adap, + unsigned int us) +{ + return (us * adap->params.vpd.cclk) / 1000; +} + +static inline unsigned int core_ticks_to_us(const struct adapter *adapter, + unsigned int ticks) +{ + /* add Core Clock / 2 to round ticks to nearest uS */ + return ((ticks * 1000 + adapter->params.vpd.cclk / 2) / + adapter->params.vpd.cclk); +} + +int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, + int size, void *rpl, bool sleep_ok, int timeout); +int t4_wr_mbox_meat(struct adapter *adap, int mbox, + const void __attribute__((__may_alias__)) *cmd, int size, + void *rpl, bool sleep_ok); + +static inline int t4_wr_mbox_timeout(struct adapter *adap, int mbox, + const void *cmd, int size, void *rpl, + int timeout) +{ + return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true, + timeout); +} + +int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p); + +static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd, + int size, void *rpl) +{ + return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true); +} + +static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd, + int size, void *rpl) +{ + return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false); +} + +void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, u32 *vals, unsigned int nregs, + unsigned int start_idx); +void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, const u32 *vals, + unsigned int nregs, unsigned int start_idx); + +int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p); +int t4_read_flash(struct adapter *adapter, unsigned int addr, + unsigned int nwords, u32 *data, int byte_oriented); +int t4_flash_cfg_addr(struct adapter *adapter); +unsigned int t4_get_mps_bg_map(struct adapter *adapter, int idx); +const char *t4_get_port_type_description(enum fw_port_type port_type); +void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); +void t4_get_port_stats_offset(struct adapter *adap, int idx, + struct port_stats *stats, + struct port_stats *offset); +void t4_clr_port_stats(struct adapter *adap, int idx); +void t4_reset_link_config(struct adapter *adap, int idx); +int t4_get_fw_version(struct adapter *adapter, u32 *vers); +int t4_get_tp_version(struct adapter *adapter, u32 *vers); +int t4_get_flash_params(struct adapter *adapter); +int t4_prep_adapter(struct adapter *adapter); +int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); +int t4_init_rss_mode(struct adapter *adap, int mbox); +int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, + int start, int n, const u16 *rspq, unsigned int nrspq); +int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, + unsigned int flags, unsigned int defq); + +enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; +int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid, + unsigned int qtype, u64 *pbar2_qoffset, + unsigned int *pbar2_qid); + +int t4_init_sge_params(struct adapter *adapter); +int t4_init_tp_params(struct adapter *adap); +int 
t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel); +int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); +#endif /* __CHELSIO_COMMON_H */ diff --git a/drivers/net/cxgbe/base/t4_chip_type.h b/drivers/net/cxgbe/base/t4_chip_type.h new file mode 100644 index 00000000..1ca68039 --- /dev/null +++ b/drivers/net/cxgbe/base/t4_chip_type.h @@ -0,0 +1,79 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2015 Chelsio Communications. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Chelsio Communications nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __T4_CHIP_TYPE_H__ +#define __T4_CHIP_TYPE_H__ + +/* + * All T4 and later chips have their PCI-E Device IDs encoded as 0xVFPP where: + * + * V = "4" for T4; "5" for T5, etc. or + * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs + * PP = adapter product designation + * + * We use the "version" (V) of the adpater to code the Chip Version above. 
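+ *
+ * As a worked illustration of that layout (the device ID below is a
+ * made-up example, not a value taken from this patch):
+ *
+ *	devid 0x5410  ->  V = 0x5 (T5), F = 0x4 (PF4), PP = 0x10
+ *
+ * and correspondingly CHELSIO_CHIP_CODE(CHELSIO_T5, 1) == 0x51 == T5_A1,
+ * which CHELSIO_CHIP_VERSION() decodes back to 0x5.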
+ */ +#define CHELSIO_PCI_ID_VER(devid) ((devid) >> 12) +#define CHELSIO_PCI_ID_FUNC(devid) (((devid) >> 8) & 0xf) +#define CHELSIO_PCI_ID_PROD(devid) ((devid) & 0xff) + +#define CHELSIO_T4 0x4 +#define CHELSIO_T5 0x5 + +#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) +#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf) +#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) + +enum chip_type { + T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), + T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), + T4_FIRST_REV = T4_A1, + T4_LAST_REV = T4_A2, + + T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), + T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1), + T5_FIRST_REV = T5_A0, + T5_LAST_REV = T5_A1, +}; + +static inline int is_t4(enum chip_type chip) +{ + return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4); +} + +static inline int is_t5(enum chip_type chip) +{ + return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5); +} + +#endif /* __T4_CHIP_TYPE_H__ */ diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c new file mode 100644 index 00000000..79af8067 --- /dev/null +++ b/drivers/net/cxgbe/base/t4_hw.c @@ -0,0 +1,2686 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2015 Chelsio Communications. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Chelsio Communications nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <netinet/in.h> + +#include <rte_interrupts.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_pci.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_memory.h> +#include <rte_memzone.h> +#include <rte_tailq.h> +#include <rte_eal.h> +#include <rte_alarm.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_atomic.h> +#include <rte_malloc.h> +#include <rte_random.h> +#include <rte_dev.h> +#include <rte_byteorder.h> + +#include "common.h" +#include "t4_regs.h" +#include "t4_regs_values.h" +#include "t4fw_interface.h" + +static void init_link_config(struct link_config *lc, unsigned int caps); + +/** + * t4_read_mtu_tbl - returns the values in the HW path MTU table + * @adap: the adapter + * @mtus: where to store the MTU values + * @mtu_log: where to store the MTU base-2 log (may be %NULL) + * + * Reads the HW path MTU table. + */ +void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) +{ + u32 v; + int i; + + for (i = 0; i < NMTUS; ++i) { + t4_write_reg(adap, A_TP_MTU_TABLE, + V_MTUINDEX(0xff) | V_MTUVALUE(i)); + v = t4_read_reg(adap, A_TP_MTU_TABLE); + mtus[i] = G_MTUVALUE(v); + if (mtu_log) + mtu_log[i] = G_MTUWIDTH(v); + } +} + +/** + * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register + * @adap: the adapter + * @addr: the indirect TP register address + * @mask: specifies the field within the register to modify + * @val: new value for the field + * + * Sets a field of an indirect TP register to the given value. + */ +void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, + unsigned int mask, unsigned int val) +{ + t4_write_reg(adap, A_TP_PIO_ADDR, addr); + val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask; + t4_write_reg(adap, A_TP_PIO_DATA, val); +} + +/* The minimum additive increment value for the congestion control table */ +#define CC_MIN_INCR 2U + +/** + * t4_load_mtus - write the MTU and congestion control HW tables + * @adap: the adapter + * @mtus: the values for the MTU table + * @alpha: the values for the congestion control alpha parameter + * @beta: the values for the congestion control beta parameter + * + * Write the HW MTU table with the supplied MTUs and the high-speed + * congestion control table with the supplied alpha, beta, and MTUs. + * We write the two tables together because the additive increments + * depend on the MTUs. 
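+ *
+ * A minimal usage sketch (the MTU values below are arbitrary ascending
+ * examples rather than the driver's defaults; init_cong_ctrl() is the
+ * helper defined later in this file):
+ *
+ *	unsigned short mtus[NMTUS] = {
+ *		88, 256, 512, 576, 808, 1024, 1280, 1488,
+ *		1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
+ *	};
+ *	unsigned short alpha[NCCTRL_WIN], beta[NCCTRL_WIN];
+ *
+ *	init_cong_ctrl(alpha, beta);
+ *	t4_load_mtus(adap, mtus, alpha, beta);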
+ */ +void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, + const unsigned short *alpha, const unsigned short *beta) +{ + static const unsigned int avg_pkts[NCCTRL_WIN] = { + 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, + 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, + 28672, 40960, 57344, 81920, 114688, 163840, 229376 + }; + + unsigned int i, w; + + for (i = 0; i < NMTUS; ++i) { + unsigned int mtu = mtus[i]; + unsigned int log2 = cxgbe_fls(mtu); + + if (!(mtu & ((1 << log2) >> 2))) /* round */ + log2--; + t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) | + V_MTUWIDTH(log2) | V_MTUVALUE(mtu)); + + for (w = 0; w < NCCTRL_WIN; ++w) { + unsigned int inc; + + inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], + CC_MIN_INCR); + + t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) | + (w << 16) | (beta[w] << 13) | inc); + } + } +} + +/** + * t4_wait_op_done_val - wait until an operation is completed + * @adapter: the adapter performing the operation + * @reg: the register to check for completion + * @mask: a single-bit field within @reg that indicates completion + * @polarity: the value of the field when the operation is completed + * @attempts: number of check iterations + * @delay: delay in usecs between iterations + * @valp: where to store the value of the register at completion time + * + * Wait until an operation is completed by checking a bit in a register + * up to @attempts times. If @valp is not NULL the value of the register + * at the time it indicated completion is stored there. Returns 0 if the + * operation completes and -EAGAIN otherwise. + */ +int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, + int polarity, int attempts, int delay, u32 *valp) +{ + while (1) { + u32 val = t4_read_reg(adapter, reg); + + if (!!(val & mask) == polarity) { + if (valp) + *valp = val; + return 0; + } + if (--attempts == 0) + return -EAGAIN; + if (delay) + udelay(delay); + } +} + +/** + * t4_set_reg_field - set a register field to a value + * @adapter: the adapter to program + * @addr: the register address + * @mask: specifies the portion of the register to modify + * @val: the new value for the register field + * + * Sets a register field specified by the supplied mask to the + * given value. + */ +void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, + u32 val) +{ + u32 v = t4_read_reg(adapter, addr) & ~mask; + + t4_write_reg(adapter, addr, v | val); + (void)t4_read_reg(adapter, addr); /* flush */ +} + +/** + * t4_read_indirect - read indirectly addressed registers + * @adap: the adapter + * @addr_reg: register holding the indirect address + * @data_reg: register holding the value of the indirect register + * @vals: where the read register values are stored + * @nregs: how many indirect registers to read + * @start_idx: index of first indirect register to read + * + * Reads registers that are accessed indirectly through an address/data + * register pair. 
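+ *
+ * For example, a sketch of pulling four consecutive values out of the
+ * TP PIO address/data register pair used elsewhere in this file (the
+ * start index 0 is purely illustrative):
+ *
+ *	u32 vals[4];
+ *
+ *	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, vals, 4, 0);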
+ */ +void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, u32 *vals, unsigned int nregs, + unsigned int start_idx) +{ + while (nregs--) { + t4_write_reg(adap, addr_reg, start_idx); + *vals++ = t4_read_reg(adap, data_reg); + start_idx++; + } +} + +/** + * t4_write_indirect - write indirectly addressed registers + * @adap: the adapter + * @addr_reg: register holding the indirect addresses + * @data_reg: register holding the value for the indirect registers + * @vals: values to write + * @nregs: how many indirect registers to write + * @start_idx: address of first indirect register to write + * + * Writes a sequential block of registers that are accessed indirectly + * through an address/data register pair. + */ +void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, const u32 *vals, + unsigned int nregs, unsigned int start_idx) +{ + while (nregs--) { + t4_write_reg(adap, addr_reg, start_idx++); + t4_write_reg(adap, data_reg, *vals++); + } +} + +/** + * t4_report_fw_error - report firmware error + * @adap: the adapter + * + * The adapter firmware can indicate error conditions to the host. + * If the firmware has indicated an error, print out the reason for + * the firmware error. + */ +static void t4_report_fw_error(struct adapter *adap) +{ + static const char * const reason[] = { + "Crash", /* PCIE_FW_EVAL_CRASH */ + "During Device Preparation", /* PCIE_FW_EVAL_PREP */ + "During Device Configuration", /* PCIE_FW_EVAL_CONF */ + "During Device Initialization", /* PCIE_FW_EVAL_INIT */ + "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */ + "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */ + "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */ + "Reserved", /* reserved */ + }; + u32 pcie_fw; + + pcie_fw = t4_read_reg(adap, A_PCIE_FW); + if (pcie_fw & F_PCIE_FW_ERR) + pr_err("%s: Firmware reports adapter error: %s\n", + __func__, reason[G_PCIE_FW_EVAL(pcie_fw)]); +} + +/* + * Get the reply to a mailbox command and store it in @rpl in big-endian order. + */ +static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, + u32 mbox_addr) +{ + for ( ; nflit; nflit--, mbox_addr += 8) + *rpl++ = htobe64(t4_read_reg64(adap, mbox_addr)); +} + +/* + * Handle a FW assertion reported in a mailbox. + */ +static void fw_asrt(struct adapter *adap, u32 mbox_addr) +{ + struct fw_debug_cmd asrt; + + get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr); + pr_warn("FW assertion at %.16s:%u, val0 %#x, val1 %#x\n", + asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line), + be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y)); +} + +#define X_CIM_PF_NOACCESS 0xeeeeeeee + +/* + * If the Host OS Driver needs locking arround accesses to the mailbox, this + * can be turned on via the T4_OS_NEEDS_MBOX_LOCKING CPP define ... + */ +/* makes single-statement usage a bit cleaner ... 
*/ +#ifdef T4_OS_NEEDS_MBOX_LOCKING +#define T4_OS_MBOX_LOCKING(x) x +#else +#define T4_OS_MBOX_LOCKING(x) do {} while (0) +#endif + +/** + * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox + * @adap: the adapter + * @mbox: index of the mailbox to use + * @cmd: the command to write + * @size: command length in bytes + * @rpl: where to optionally store the reply + * @sleep_ok: if true we may sleep while awaiting command completion + * @timeout: time to wait for command to finish before timing out + * (negative implies @sleep_ok=false) + * + * Sends the given command to FW through the selected mailbox and waits + * for the FW to execute the command. If @rpl is not %NULL it is used to + * store the FW's reply to the command. The command and its optional + * reply are of the same length. Some FW commands like RESET and + * INITIALIZE can take a considerable amount of time to execute. + * @sleep_ok determines whether we may sleep while awaiting the response. + * If sleeping is allowed we use progressive backoff otherwise we spin. + * Note that passing in a negative @timeout is an alternate mechanism + * for specifying @sleep_ok=false. This is useful when a higher level + * interface allows for specification of @timeout but not @sleep_ok ... + * + * Returns 0 on success or a negative errno on failure. A + * failure can happen either because we are not able to execute the + * command or FW executes it but signals an error. In the latter case + * the return value is the error code indicated by FW (negated). + */ +int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, + const void __attribute__((__may_alias__)) *cmd, + int size, void *rpl, bool sleep_ok, int timeout) +{ + /* + * We delay in small increments at first in an effort to maintain + * responsiveness for simple, fast executing commands but then back + * off to larger delays to a maximum retry delay. + */ + static const int delay[] = { + 1, 1, 3, 5, 10, 10, 20, 50, 100 + }; + + u32 v; + u64 res; + int i, ms; + unsigned int delay_idx; + __be64 *temp = (__be64 *)malloc(size * sizeof(char)); + __be64 *p = temp; + u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA); + u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL); + u32 ctl; + struct mbox_entry entry; + u32 pcie_fw = 0; + + if ((size & 15) || size > MBOX_LEN) { + free(temp); + return -EINVAL; + } + + bzero(p, size); + memcpy(p, (const __be64 *)cmd, size); + + /* + * If we have a negative timeout, that implies that we can't sleep. + */ + if (timeout < 0) { + sleep_ok = false; + timeout = -timeout; + } + +#ifdef T4_OS_NEEDS_MBOX_LOCKING + /* + * Queue ourselves onto the mailbox access list. When our entry is at + * the front of the list, we have rights to access the mailbox. So we + * wait [for a while] till we're at the front [or bail out with an + * EBUSY] ... + */ + t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock); + + delay_idx = 0; + ms = delay[0]; + + for (i = 0; ; i += ms) { + /* + * If we've waited too long, return a busy indication. This + * really ought to be based on our initial position in the + * mailbox access list but this is a start. We very rarely + * contend on access to the mailbox ... Also check for a + * firmware error which we'll report as a device error. + */ + pcie_fw = t4_read_reg(adap, A_PCIE_FW); + if (i > 4 * timeout || (pcie_fw & F_PCIE_FW_ERR)) { + t4_os_atomic_list_del(&entry, &adap->mbox_list, + &adap->mbox_lock); + t4_report_fw_error(adap); + return (pcie_fw & F_PCIE_FW_ERR) ? 
-ENXIO : -EBUSY; + } + + /* + * If we're at the head, break out and start the mailbox + * protocol. + */ + if (t4_os_list_first_entry(&adap->mbox_list) == &entry) + break; + + /* + * Delay for a bit before checking again ... + */ + if (sleep_ok) { + ms = delay[delay_idx]; /* last element may repeat */ + if (delay_idx < ARRAY_SIZE(delay) - 1) + delay_idx++; + msleep(ms); + } else { + rte_delay_ms(ms); + } + } +#endif /* T4_OS_NEEDS_MBOX_LOCKING */ + + /* + * Attempt to gain access to the mailbox. + */ + for (i = 0; i < 4; i++) { + ctl = t4_read_reg(adap, ctl_reg); + v = G_MBOWNER(ctl); + if (v != X_MBOWNER_NONE) + break; + } + + /* + * If we were unable to gain access, dequeue ourselves from the + * mailbox atomic access list and report the error to our caller. + */ + if (v != X_MBOWNER_PL) { + T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry, + &adap->mbox_list, + &adap->mbox_lock)); + t4_report_fw_error(adap); + return (v == X_MBOWNER_FW ? -EBUSY : -ETIMEDOUT); + } + + /* + * If we gain ownership of the mailbox and there's a "valid" message + * in it, this is likely an asynchronous error message from the + * firmware. So we'll report that and then proceed on with attempting + * to issue our own command ... which may well fail if the error + * presaged the firmware crashing ... + */ + if (ctl & F_MBMSGVALID) { + dev_err(adap, "found VALID command in mbox %u: " + "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox, + (unsigned long long)t4_read_reg64(adap, data_reg), + (unsigned long long)t4_read_reg64(adap, data_reg + 8), + (unsigned long long)t4_read_reg64(adap, data_reg + 16), + (unsigned long long)t4_read_reg64(adap, data_reg + 24), + (unsigned long long)t4_read_reg64(adap, data_reg + 32), + (unsigned long long)t4_read_reg64(adap, data_reg + 40), + (unsigned long long)t4_read_reg64(adap, data_reg + 48), + (unsigned long long)t4_read_reg64(adap, data_reg + 56)); + } + + /* + * Copy in the new mailbox command and send it on its way ... + */ + for (i = 0; i < size; i += 8, p++) + t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p)); + + CXGBE_DEBUG_MBOX(adap, "%s: mbox %u: %016llx %016llx %016llx %016llx " + "%016llx %016llx %016llx %016llx\n", __func__, (mbox), + (unsigned long long)t4_read_reg64(adap, data_reg), + (unsigned long long)t4_read_reg64(adap, data_reg + 8), + (unsigned long long)t4_read_reg64(adap, data_reg + 16), + (unsigned long long)t4_read_reg64(adap, data_reg + 24), + (unsigned long long)t4_read_reg64(adap, data_reg + 32), + (unsigned long long)t4_read_reg64(adap, data_reg + 40), + (unsigned long long)t4_read_reg64(adap, data_reg + 48), + (unsigned long long)t4_read_reg64(adap, data_reg + 56)); + + t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW)); + t4_read_reg(adap, ctl_reg); /* flush write */ + + delay_idx = 0; + ms = delay[0]; + + /* + * Loop waiting for the reply; bail out if we time out or the firmware + * reports an error. 
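+ *
+ * With the delay[] table above and @sleep_ok set, successive waits run
+ * 1, 1, 3, 5, 10, 10, 20, 50 ms and then stay at 100 ms per iteration
+ * until the accumulated time reaches @timeout.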
+ */ + pcie_fw = t4_read_reg(adap, A_PCIE_FW); + for (i = 0; i < timeout && !(pcie_fw & F_PCIE_FW_ERR); i += ms) { + if (sleep_ok) { + ms = delay[delay_idx]; /* last element may repeat */ + if (delay_idx < ARRAY_SIZE(delay) - 1) + delay_idx++; + msleep(ms); + } else { + msleep(ms); + } + + pcie_fw = t4_read_reg(adap, A_PCIE_FW); + v = t4_read_reg(adap, ctl_reg); + if (v == X_CIM_PF_NOACCESS) + continue; + if (G_MBOWNER(v) == X_MBOWNER_PL) { + if (!(v & F_MBMSGVALID)) { + t4_write_reg(adap, ctl_reg, + V_MBOWNER(X_MBOWNER_NONE)); + continue; + } + + CXGBE_DEBUG_MBOX(adap, + "%s: mbox %u: %016llx %016llx %016llx %016llx " + "%016llx %016llx %016llx %016llx\n", __func__, (mbox), + (unsigned long long)t4_read_reg64(adap, data_reg), + (unsigned long long)t4_read_reg64(adap, data_reg + 8), + (unsigned long long)t4_read_reg64(adap, data_reg + 16), + (unsigned long long)t4_read_reg64(adap, data_reg + 24), + (unsigned long long)t4_read_reg64(adap, data_reg + 32), + (unsigned long long)t4_read_reg64(adap, data_reg + 40), + (unsigned long long)t4_read_reg64(adap, data_reg + 48), + (unsigned long long)t4_read_reg64(adap, data_reg + 56)); + + CXGBE_DEBUG_MBOX(adap, + "command %#x completed in %d ms (%ssleeping)\n", + *(const u8 *)cmd, + i + ms, sleep_ok ? "" : "non-"); + + res = t4_read_reg64(adap, data_reg); + if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) { + fw_asrt(adap, data_reg); + res = V_FW_CMD_RETVAL(EIO); + } else if (rpl) { + get_mbox_rpl(adap, rpl, size / 8, data_reg); + } + t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE)); + T4_OS_MBOX_LOCKING( + t4_os_atomic_list_del(&entry, &adap->mbox_list, + &adap->mbox_lock)); + return -G_FW_CMD_RETVAL((int)res); + } + } + + /* + * We timed out waiting for a reply to our mailbox command. Report + * the error and also check to see if the firmware reported any + * errors ... + */ + dev_err(adap, "command %#x in mailbox %d timed out\n", + *(const u8 *)cmd, mbox); + T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry, + &adap->mbox_list, + &adap->mbox_lock)); + t4_report_fw_error(adap); + free(temp); + return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT; +} + +int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, + void *rpl, bool sleep_ok) +{ + return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok, + FW_CMD_MAX_TIMEOUT); +} + +/** + * t4_config_rss_range - configure a portion of the RSS mapping table + * @adapter: the adapter + * @mbox: mbox to use for the FW command + * @viid: virtual interface whose RSS subtable is to be written + * @start: start entry in the table to write + * @n: how many table entries to write + * @rspq: values for the "response queue" (Ingress Queue) lookup table + * @nrspq: number of values in @rspq + * + * Programs the selected part of the VI's RSS mapping table with the + * provided values. If @nrspq < @n the supplied values are used repeatedly + * until the full table range is populated. + * + * The caller must ensure the values in @rspq are in the range allowed for + * @viid. 
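+ *
+ * For illustration (the queue IDs and @viid are placeholders): writing
+ * a 64-entry slice from four response queues,
+ *
+ *	u16 rspq[4] = { 16, 17, 18, 19 };
+ *
+ *	t4_config_rss_range(adapter, mbox, viid, 0, 64, rspq, 4);
+ *
+ * fills entries 0..63 with 16, 17, 18, 19, 16, 17, ... since the
+ * supplied values are reused once exhausted.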
+ */ +int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, + int start, int n, const u16 *rspq, unsigned int nrspq) +{ + int ret; + const u16 *rsp = rspq; + const u16 *rsp_end = rspq + nrspq; + struct fw_rss_ind_tbl_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_WRITE | + V_FW_RSS_IND_TBL_CMD_VIID(viid)); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + + /* + * Each firmware RSS command can accommodate up to 32 RSS Ingress + * Queue Identifiers. These Ingress Queue IDs are packed three to + * a 32-bit word as 10-bit values with the upper remaining 2 bits + * reserved. + */ + while (n > 0) { + int nq = min(n, 32); + int nq_packed = 0; + __be32 *qp = &cmd.iq0_to_iq2; + + /* + * Set up the firmware RSS command header to send the next + * "nq" Ingress Queue IDs to the firmware. + */ + cmd.niqid = cpu_to_be16(nq); + cmd.startidx = cpu_to_be16(start); + + /* + * "nq" more done for the start of the next loop. + */ + start += nq; + n -= nq; + + /* + * While there are still Ingress Queue IDs to stuff into the + * current firmware RSS command, retrieve them from the + * Ingress Queue ID array and insert them into the command. + */ + while (nq > 0) { + /* + * Grab up to the next 3 Ingress Queue IDs (wrapping + * around the Ingress Queue ID array if necessary) and + * insert them into the firmware RSS command at the + * current 3-tuple position within the commad. + */ + u16 qbuf[3]; + u16 *qbp = qbuf; + int nqbuf = min(3, nq); + + nq -= nqbuf; + qbuf[0] = 0; + qbuf[1] = 0; + qbuf[2] = 0; + while (nqbuf && nq_packed < 32) { + nqbuf--; + nq_packed++; + *qbp++ = *rsp++; + if (rsp >= rsp_end) + rsp = rspq; + } + *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) | + V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) | + V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2])); + } + + /* + * Send this portion of the RRS table update to the firmware; + * bail out on any errors. + */ + ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); + if (ret) + return ret; + } + + return 0; +} + +/** + * t4_config_vi_rss - configure per VI RSS settings + * @adapter: the adapter + * @mbox: mbox to use for the FW command + * @viid: the VI id + * @flags: RSS flags + * @defq: id of the default RSS queue for the VI. + * + * Configures VI-specific RSS properties. + */ +int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, + unsigned int flags, unsigned int defq) +{ + struct fw_rss_vi_config_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_WRITE | + V_FW_RSS_VI_CONFIG_CMD_VIID(viid)); + c.retval_len16 = cpu_to_be32(FW_LEN16(c)); + c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags | + V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq)); + return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); +} + +/** + * init_cong_ctrl - initialize congestion control parameters + * @a: the alpha values for congestion control + * @b: the beta values for congestion control + * + * Initialize the congestion control parameters. 
+ */ +static void init_cong_ctrl(unsigned short *a, unsigned short *b) +{ + int i; + + for (i = 0; i < 9; i++) { + a[i] = 1; + b[i] = 0; + } + + a[9] = 2; + a[10] = 3; + a[11] = 4; + a[12] = 5; + a[13] = 6; + a[14] = 7; + a[15] = 8; + a[16] = 9; + a[17] = 10; + a[18] = 14; + a[19] = 17; + a[20] = 21; + a[21] = 25; + a[22] = 30; + a[23] = 35; + a[24] = 45; + a[25] = 60; + a[26] = 80; + a[27] = 100; + a[28] = 200; + a[29] = 300; + a[30] = 400; + a[31] = 500; + + b[9] = 1; + b[10] = 1; + b[11] = 2; + b[12] = 2; + b[13] = 3; + b[14] = 3; + b[15] = 3; + b[16] = 3; + b[17] = 4; + b[18] = 4; + b[19] = 4; + b[20] = 4; + b[21] = 4; + b[22] = 5; + b[23] = 5; + b[24] = 5; + b[25] = 5; + b[26] = 5; + b[27] = 5; + b[28] = 6; + b[29] = 6; + b[30] = 7; + b[31] = 7; +} + +#define INIT_CMD(var, cmd, rd_wr) do { \ + (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \ + F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \ + (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \ +} while (0) + +int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p) +{ + u32 cclk_param, cclk_val; + int ret; + + /* + * Ask firmware for the Core Clock since it knows how to translate the + * Reference Clock ('V2') VPD field into a Core Clock value ... + */ + cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK)); + ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, + 1, &cclk_param, &cclk_val); + if (ret) { + dev_err(adapter, "%s: error in fetching from coreclock - %d\n", + __func__, ret); + return ret; + } + + p->cclk = cclk_val; + dev_debug(adapter, "%s: p->cclk = %u\n", __func__, p->cclk); + return 0; +} + +/* serial flash and firmware constants and flash config file constants */ +enum { + SF_ATTEMPTS = 10, /* max retries for SF operations */ + + /* flash command opcodes */ + SF_PROG_PAGE = 2, /* program page */ + SF_WR_DISABLE = 4, /* disable writes */ + SF_RD_STATUS = 5, /* read status register */ + SF_WR_ENABLE = 6, /* enable writes */ + SF_RD_DATA_FAST = 0xb, /* read flash */ + SF_RD_ID = 0x9f, /* read ID */ + SF_ERASE_SECTOR = 0xd8, /* erase sector */ +}; + +/** + * sf1_read - read data from the serial flash + * @adapter: the adapter + * @byte_cnt: number of bytes to read + * @cont: whether another operation will be chained + * @lock: whether to lock SF for PL access only + * @valp: where to store the read data + * + * Reads up to 4 bytes of data from the serial flash. The location of + * the read needs to be specified prior to calling this by issuing the + * appropriate commands to the serial flash. + */ +static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, + int lock, u32 *valp) +{ + int ret; + + if (!byte_cnt || byte_cnt > 4) + return -EINVAL; + if (t4_read_reg(adapter, A_SF_OP) & F_BUSY) + return -EBUSY; + t4_write_reg(adapter, A_SF_OP, + V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1)); + ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5); + if (!ret) + *valp = t4_read_reg(adapter, A_SF_DATA); + return ret; +} + +/** + * sf1_write - write data to the serial flash + * @adapter: the adapter + * @byte_cnt: number of bytes to write + * @cont: whether another operation will be chained + * @lock: whether to lock SF for PL access only + * @val: value to write + * + * Writes up to 4 bytes of data to the serial flash. The location of + * the write needs to be specified prior to calling this by issuing the + * appropriate commands to the serial flash. 
+ */ +static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, + int lock, u32 val) +{ + if (!byte_cnt || byte_cnt > 4) + return -EINVAL; + if (t4_read_reg(adapter, A_SF_OP) & F_BUSY) + return -EBUSY; + t4_write_reg(adapter, A_SF_DATA, val); + t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) | + V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1)); + return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5); +} + +/** + * t4_read_flash - read words from serial flash + * @adapter: the adapter + * @addr: the start address for the read + * @nwords: how many 32-bit words to read + * @data: where to store the read data + * @byte_oriented: whether to store data as bytes or as words + * + * Read the specified number of 32-bit words from the serial flash. + * If @byte_oriented is set the read data is stored as a byte array + * (i.e., big-endian), otherwise as 32-bit words in the platform's + * natural endianness. + */ +int t4_read_flash(struct adapter *adapter, unsigned int addr, + unsigned int nwords, u32 *data, int byte_oriented) +{ + int ret; + + if (((addr + nwords * sizeof(u32)) > adapter->params.sf_size) || + (addr & 3)) + return -EINVAL; + + addr = rte_constant_bswap32(addr) | SF_RD_DATA_FAST; + + ret = sf1_write(adapter, 4, 1, 0, addr); + if (ret != 0) + return ret; + + ret = sf1_read(adapter, 1, 1, 0, data); + if (ret != 0) + return ret; + + for ( ; nwords; nwords--, data++) { + ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data); + if (nwords == 1) + t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ + if (ret) + return ret; + if (byte_oriented) + *data = cpu_to_be32(*data); + } + return 0; +} + +/** + * t4_get_fw_version - read the firmware version + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the FW version from flash. + */ +int t4_get_fw_version(struct adapter *adapter, u32 *vers) +{ + return t4_read_flash(adapter, FLASH_FW_START + + offsetof(struct fw_hdr, fw_ver), 1, vers, 0); +} + +/** + * t4_get_tp_version - read the TP microcode version + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the TP microcode version from flash. + */ +int t4_get_tp_version(struct adapter *adapter, u32 *vers) +{ + return t4_read_flash(adapter, FLASH_FW_START + + offsetof(struct fw_hdr, tp_microcode_ver), + 1, vers, 0); +} + +#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ + FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \ + FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG) + +/** + * t4_link_l1cfg - apply link configuration to MAC/PHY + * @phy: the PHY to setup + * @mac: the MAC to setup + * @lc: the requested link configuration + * + * Set up a port's MAC and PHY according to a desired link configuration. + * - If the PHY can auto-negotiate first decide what to advertise, then + * enable/disable auto-negotiation as desired, and reset. + * - If the PHY does not auto-negotiate just reset it. + * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, + * otherwise do it later based on the outcome of auto-negotiation. 
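+ *
+ * A minimal usage sketch (assumes @lc was previously filled in by
+ * init_link_config() and @port is the physical port index):
+ *
+ *	lc->requested_fc = PAUSE_RX | PAUSE_TX;
+ *	ret = t4_link_l1cfg(adap, adap->mbox, port, lc);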
+ */ +int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, + struct link_config *lc) +{ + struct fw_port_cmd c; + unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO); + + lc->link_ok = 0; + if (lc->requested_fc & PAUSE_RX) + fc |= FW_PORT_CAP_FC_RX; + if (lc->requested_fc & PAUSE_TX) + fc |= FW_PORT_CAP_FC_TX; + + memset(&c, 0, sizeof(c)); + c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_EXEC | + V_FW_PORT_CMD_PORTID(port)); + c.action_to_len16 = + cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | + FW_LEN16(c)); + + if (!(lc->supported & FW_PORT_CAP_ANEG)) { + c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) | + fc); + lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + } else if (lc->autoneg == AUTONEG_DISABLE) { + c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi); + lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + } else { + c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi); + } + + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_flash_cfg_addr - return the address of the flash configuration file + * @adapter: the adapter + * + * Return the address within the flash where the Firmware Configuration + * File is stored, or an error if the device FLASH is too small to contain + * a Firmware Configuration File. + */ +int t4_flash_cfg_addr(struct adapter *adapter) +{ + /* + * If the device FLASH isn't large enough to hold a Firmware + * Configuration File, return an error. + */ + if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE) + return -ENOSPC; + + return FLASH_CFG_START; +} + +#define PF_INTR_MASK (F_PFSW | F_PFCIM) + +/** + * t4_intr_enable - enable interrupts + * @adapter: the adapter whose interrupts should be enabled + * + * Enable PF-specific interrupts for the calling function and the top-level + * interrupt concentrator for global interrupts. Interrupts are already + * enabled at each module, here we just enable the roots of the interrupt + * hierarchies. + * + * Note: this function should be called only when the driver manages + * non PF-specific interrupts from the various HW modules. Only one PCI + * function at a time should be doing this. + */ +void t4_intr_enable(struct adapter *adapter) +{ + u32 val = 0; + u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI)); + + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT; + t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE | + F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 | + F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR | + F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 | + F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 | + F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | + F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val); + t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK); + t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf); +} + +/** + * t4_intr_disable - disable interrupts + * @adapter: the adapter whose interrupts should be disabled + * + * Disable interrupts. We only disable the top-level interrupt + * concentrators. The caller must be a PCI function managing global + * interrupts. 
+ */ +void t4_intr_disable(struct adapter *adapter) +{ + u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI)); + + t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0); + t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0); +} + +/** + * t4_get_port_type_description - return Port Type string description + * @port_type: firmware Port Type enumeration + */ +const char *t4_get_port_type_description(enum fw_port_type port_type) +{ + static const char * const port_type_description[] = { + "Fiber_XFI", + "Fiber_XAUI", + "BT_SGMII", + "BT_XFI", + "BT_XAUI", + "KX4", + "CX4", + "KX", + "KR", + "SFP", + "BP_AP", + "BP4_AP", + "QSFP_10G", + "QSA", + "QSFP", + "BP40_BA", + }; + + if (port_type < ARRAY_SIZE(port_type_description)) + return port_type_description[port_type]; + return "UNKNOWN"; +} + +/** + * t4_get_mps_bg_map - return the buffer groups associated with a port + * @adap: the adapter + * @idx: the port index + * + * Returns a bitmap indicating which MPS buffer groups are associated + * with the given port. Bit i is set if buffer group i is used by the + * port. + */ +unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx) +{ + u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL)); + + if (n == 0) + return idx == 0 ? 0xf : 0; + if (n == 1) + return idx < 2 ? (3 << (2 * idx)) : 0; + return 1 << idx; +} + +/** + * t4_get_port_stats - collect port statistics + * @adap: the adapter + * @idx: the port index + * @p: the stats structure to fill + * + * Collect statistics related to the given port from HW. + */ +void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) +{ + u32 bgmap = t4_get_mps_bg_map(adap, idx); + +#define GET_STAT(name) \ + t4_read_reg64(adap, \ + (is_t4(adap->params.chip) ? \ + PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) :\ + T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L))) +#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L) + + p->tx_octets = GET_STAT(TX_PORT_BYTES); + p->tx_frames = GET_STAT(TX_PORT_FRAMES); + p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST); + p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST); + p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST); + p->tx_error_frames = GET_STAT(TX_PORT_ERROR); + p->tx_frames_64 = GET_STAT(TX_PORT_64B); + p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B); + p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B); + p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B); + p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B); + p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B); + p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX); + p->tx_drop = GET_STAT(TX_PORT_DROP); + p->tx_pause = GET_STAT(TX_PORT_PAUSE); + p->tx_ppp0 = GET_STAT(TX_PORT_PPP0); + p->tx_ppp1 = GET_STAT(TX_PORT_PPP1); + p->tx_ppp2 = GET_STAT(TX_PORT_PPP2); + p->tx_ppp3 = GET_STAT(TX_PORT_PPP3); + p->tx_ppp4 = GET_STAT(TX_PORT_PPP4); + p->tx_ppp5 = GET_STAT(TX_PORT_PPP5); + p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); + p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); + + p->rx_octets = GET_STAT(RX_PORT_BYTES); + p->rx_frames = GET_STAT(RX_PORT_FRAMES); + p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); + p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST); + p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST); + p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR); + p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR); + p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR); + p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR); + p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR); + p->rx_runt = GET_STAT(RX_PORT_LESS_64B); + p->rx_frames_64 = GET_STAT(RX_PORT_64B); + p->rx_frames_65_127 = 
GET_STAT(RX_PORT_65B_127B); + p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B); + p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B); + p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B); + p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B); + p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX); + p->rx_pause = GET_STAT(RX_PORT_PAUSE); + p->rx_ppp0 = GET_STAT(RX_PORT_PPP0); + p->rx_ppp1 = GET_STAT(RX_PORT_PPP1); + p->rx_ppp2 = GET_STAT(RX_PORT_PPP2); + p->rx_ppp3 = GET_STAT(RX_PORT_PPP3); + p->rx_ppp4 = GET_STAT(RX_PORT_PPP4); + p->rx_ppp5 = GET_STAT(RX_PORT_PPP5); + p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); + p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); + p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; + p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; + p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; + p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0; + p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0; + p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0; + p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0; + p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0; + +#undef GET_STAT +#undef GET_STAT_COM +} + +/** + * t4_get_port_stats_offset - collect port stats relative to a previous snapshot + * @adap: The adapter + * @idx: The port + * @stats: Current stats to fill + * @offset: Previous stats snapshot + */ +void t4_get_port_stats_offset(struct adapter *adap, int idx, + struct port_stats *stats, + struct port_stats *offset) +{ + u64 *s, *o; + unsigned int i; + + t4_get_port_stats(adap, idx, stats); + for (i = 0, s = (u64 *)stats, o = (u64 *)offset; + i < (sizeof(struct port_stats) / sizeof(u64)); + i++, s++, o++) + *s -= *o; +} + +/** + * t4_clr_port_stats - clear port statistics + * @adap: the adapter + * @idx: the port index + * + * Clear HW statistics for the given port. + */ +void t4_clr_port_stats(struct adapter *adap, int idx) +{ + unsigned int i; + u32 bgmap = t4_get_mps_bg_map(adap, idx); + u32 port_base_addr; + + if (is_t4(adap->params.chip)) + port_base_addr = PORT_BASE(idx); + else + port_base_addr = T5_PORT_BASE(idx); + + for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L; + i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8) + t4_write_reg(adap, port_base_addr + i, 0); + for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L; + i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8) + t4_write_reg(adap, port_base_addr + i, 0); + for (i = 0; i < 4; i++) + if (bgmap & (1 << i)) { + t4_write_reg(adap, + A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + + i * 8, 0); + t4_write_reg(adap, + A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + + i * 8, 0); + } +} + +/** + * t4_fw_hello - establish communication with FW + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @evt_mbox: mailbox to receive async FW events + * @master: specifies the caller's willingness to be the device master + * @state: returns the current device state (if non-NULL) + * + * Issues a command to establish communication with FW. Returns either + * an error (negative integer) or the mailbox of the Master PF. 
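+ *
+ * A sketch of the typical attach/detach pairing built from calls in
+ * this file (error handling trimmed; MASTER_CANT is used purely for
+ * illustration):
+ *
+ *	enum dev_state state;
+ *	int master_mbox;
+ *
+ *	master_mbox = t4_fw_hello(adap, adap->mbox, adap->mbox,
+ *				  MASTER_CANT, &state);
+ *	if (master_mbox < 0)
+ *		return master_mbox;
+ *	...
+ *	t4_fw_bye(adap, adap->mbox);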
+ */ +int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, + enum dev_master master, enum dev_state *state) +{ + int ret; + struct fw_hello_cmd c; + u32 v; + unsigned int master_mbox; + int retries = FW_CMD_HELLO_RETRIES; + +retry: + memset(&c, 0, sizeof(c)); + INIT_CMD(c, HELLO, WRITE); + c.err_to_clearinit = cpu_to_be32( + V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) | + V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) | + V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : + M_FW_HELLO_CMD_MBMASTER) | + V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) | + V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) | + F_FW_HELLO_CMD_CLEARINIT); + + /* + * Issue the HELLO command to the firmware. If it's not successful + * but indicates that we got a "busy" or "timeout" condition, retry + * the HELLO until we exhaust our retry limit. If we do exceed our + * retry limit, check to see if the firmware left us any error + * information and report that if so ... + */ + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret != FW_SUCCESS) { + if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) + goto retry; + if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR) + t4_report_fw_error(adap); + return ret; + } + + v = be32_to_cpu(c.err_to_clearinit); + master_mbox = G_FW_HELLO_CMD_MBMASTER(v); + if (state) { + if (v & F_FW_HELLO_CMD_ERR) + *state = DEV_STATE_ERR; + else if (v & F_FW_HELLO_CMD_INIT) + *state = DEV_STATE_INIT; + else + *state = DEV_STATE_UNINIT; + } + + /* + * If we're not the Master PF then we need to wait around for the + * Master PF Driver to finish setting up the adapter. + * + * Note that we also do this wait if we're a non-Master-capable PF and + * there is no current Master PF; a Master PF may show up momentarily + * and we wouldn't want to fail pointlessly. (This can happen when an + * OS loads lots of different drivers rapidly at the same time). In + * this case, the Master PF returned by the firmware will be + * M_PCIE_FW_MASTER so the test below will work ... + */ + if ((v & (F_FW_HELLO_CMD_ERR | F_FW_HELLO_CMD_INIT)) == 0 && + master_mbox != mbox) { + int waiting = FW_CMD_HELLO_TIMEOUT; + + /* + * Wait for the firmware to either indicate an error or + * initialized state. If we see either of these we bail out + * and report the issue to the caller. If we exhaust the + * "hello timeout" and we haven't exhausted our retries, try + * again. Otherwise bail with a timeout error. + */ + for (;;) { + u32 pcie_fw; + + msleep(50); + waiting -= 50; + + /* + * If neither Error nor Initialialized are indicated + * by the firmware keep waiting till we exaust our + * timeout ... and then retry if we haven't exhausted + * our retries ... + */ + pcie_fw = t4_read_reg(adap, A_PCIE_FW); + if (!(pcie_fw & (F_PCIE_FW_ERR | F_PCIE_FW_INIT))) { + if (waiting <= 0) { + if (retries-- > 0) + goto retry; + + return -ETIMEDOUT; + } + continue; + } + + /* + * We either have an Error or Initialized condition + * report errors preferentially. + */ + if (state) { + if (pcie_fw & F_PCIE_FW_ERR) + *state = DEV_STATE_ERR; + else if (pcie_fw & F_PCIE_FW_INIT) + *state = DEV_STATE_INIT; + } + + /* + * If we arrived before a Master PF was selected and + * there's not a valid Master PF, grab its identity + * for our caller. 
+ */ + if (master_mbox == M_PCIE_FW_MASTER && + (pcie_fw & F_PCIE_FW_MASTER_VLD)) + master_mbox = G_PCIE_FW_MASTER(pcie_fw); + break; + } + } + + return master_mbox; +} + +/** + * t4_fw_bye - end communication with FW + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * + * Issues a command to terminate communication with FW. + */ +int t4_fw_bye(struct adapter *adap, unsigned int mbox) +{ + struct fw_bye_cmd c; + + memset(&c, 0, sizeof(c)); + INIT_CMD(c, BYE, WRITE); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_fw_reset - issue a reset to FW + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @reset: specifies the type of reset to perform + * + * Issues a reset command of the specified type to FW. + */ +int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) +{ + struct fw_reset_cmd c; + + memset(&c, 0, sizeof(c)); + INIT_CMD(c, RESET, WRITE); + c.val = cpu_to_be32(reset); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_fw_halt - issue a reset/halt to FW and put uP into RESET + * @adap: the adapter + * @mbox: mailbox to use for the FW RESET command (if desired) + * @force: force uP into RESET even if FW RESET command fails + * + * Issues a RESET command to firmware (if desired) with a HALT indication + * and then puts the microprocessor into RESET state. The RESET command + * will only be issued if a legitimate mailbox is provided (mbox <= + * M_PCIE_FW_MASTER). + * + * This is generally used in order for the host to safely manipulate the + * adapter without fear of conflicting with whatever the firmware might + * be doing. The only way out of this state is to RESTART the firmware + * ... + */ +int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force) +{ + int ret = 0; + + /* + * If a legitimate mailbox is provided, issue a RESET command + * with a HALT indication. + */ + if (mbox <= M_PCIE_FW_MASTER) { + struct fw_reset_cmd c; + + memset(&c, 0, sizeof(c)); + INIT_CMD(c, RESET, WRITE); + c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE); + c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT); + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); + } + + /* + * Normally we won't complete the operation if the firmware RESET + * command fails but if our caller insists we'll go ahead and put the + * uP into RESET. This can be useful if the firmware is hung or even + * missing ... We'll have to take the risk of putting the uP into + * RESET without the cooperation of firmware in that case. + * + * We also force the firmware's HALT flag to be on in case we bypassed + * the firmware RESET command above or we're dealing with old firmware + * which doesn't have the HALT capability. This will serve as a flag + * for the incoming firmware to know that it's coming out of a HALT + * rather than a RESET ... if it's new enough to understand that ... + */ + if (ret == 0 || force) { + t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST); + t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, + F_PCIE_FW_HALT); + } + + /* + * And we always return the result of the firmware RESET command + * even when we force the uP into RESET ... + */ + return ret; +} + +/** + * t4_fw_restart - restart the firmware by taking the uP out of RESET + * @adap: the adapter + * @mbox: mailbox to use for the FW RESET command (if desired) + * @reset: if we want to do a RESET to restart things + * + * Restart firmware previously halted by t4_fw_halt(). 
On successful + * return the previous PF Master remains as the new PF Master and there + * is no need to issue a new HELLO command, etc. + * + * We do this in two ways: + * + * 1. If we're dealing with newer firmware we'll simply want to take + * the chip's microprocessor out of RESET. This will cause the + * firmware to start up from its start vector. And then we'll loop + * until the firmware indicates it's started again (PCIE_FW.HALT + * reset to 0) or we timeout. + * + * 2. If we're dealing with older firmware then we'll need to RESET + * the chip since older firmware won't recognize the PCIE_FW.HALT + * flag and automatically RESET itself on startup. + */ +int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset) +{ + if (reset) { + /* + * Since we're directing the RESET instead of the firmware + * doing it automatically, we need to clear the PCIE_FW.HALT + * bit. + */ + t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0); + + /* + * If we've been given a valid mailbox, first try to get the + * firmware to do the RESET. If that works, great and we can + * return success. Otherwise, if we haven't been given a + * valid mailbox or the RESET command failed, fall back to + * hitting the chip with a hammer. + */ + if (mbox <= M_PCIE_FW_MASTER) { + t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0); + msleep(100); + if (t4_fw_reset(adap, mbox, + F_PIORST | F_PIORSTMODE) == 0) + return 0; + } + + t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE); + msleep(2000); + } else { + int ms; + + t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0); + for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { + if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT)) + return FW_SUCCESS; + msleep(100); + ms += 100; + } + return -ETIMEDOUT; + } + return 0; +} + +/** + * t4_fixup_host_params_compat - fix up host-dependent parameters + * @adap: the adapter + * @page_size: the host's Base Page Size + * @cache_line_size: the host's Cache Line Size + * @chip_compat: maintain compatibility with designated chip + * + * Various registers in the chip contain values which are dependent on the + * host's Base Page and Cache Line Sizes. This function will fix all of + * those registers with the appropriate values as passed in ... + * + * @chip_compat is used to limit the set of changes that are made + * to be compatible with the indicated chip release. This is used by + * drivers to maintain compatibility with chip register settings when + * the drivers haven't [yet] been updated with new chip support. + */ +int t4_fixup_host_params_compat(struct adapter *adap, + unsigned int page_size, + unsigned int cache_line_size, + enum chip_type chip_compat) +{ + unsigned int page_shift = cxgbe_fls(page_size) - 1; + unsigned int sge_hps = page_shift - 10; + unsigned int stat_len = cache_line_size > 64 ? 128 : 64; + unsigned int fl_align = cache_line_size < 32 ? 
32 : cache_line_size; + unsigned int fl_align_log = cxgbe_fls(fl_align) - 1; + + t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE, + V_HOSTPAGESIZEPF0(sge_hps) | + V_HOSTPAGESIZEPF1(sge_hps) | + V_HOSTPAGESIZEPF2(sge_hps) | + V_HOSTPAGESIZEPF3(sge_hps) | + V_HOSTPAGESIZEPF4(sge_hps) | + V_HOSTPAGESIZEPF5(sge_hps) | + V_HOSTPAGESIZEPF6(sge_hps) | + V_HOSTPAGESIZEPF7(sge_hps)); + + if (is_t4(adap->params.chip) || is_t4(chip_compat)) + t4_set_reg_field(adap, A_SGE_CONTROL, + V_INGPADBOUNDARY(M_INGPADBOUNDARY) | + F_EGRSTATUSPAGESIZE, + V_INGPADBOUNDARY(fl_align_log - + X_INGPADBOUNDARY_SHIFT) | + V_EGRSTATUSPAGESIZE(stat_len != 64)); + else { + /* + * T5 introduced the separation of the Free List Padding and + * Packing Boundaries. Thus, we can select a smaller Padding + * Boundary to avoid uselessly chewing up PCIe Link and Memory + * Bandwidth, and use a Packing Boundary which is large enough + * to avoid false sharing between CPUs, etc. + * + * For the PCI Link, the smaller the Padding Boundary the + * better. For the Memory Controller, a smaller Padding + * Boundary is better until we cross under the Memory Line + * Size (the minimum unit of transfer to/from Memory). If we + * have a Padding Boundary which is smaller than the Memory + * Line Size, that'll involve a Read-Modify-Write cycle on the + * Memory Controller which is never good. For T5 the smallest + * Padding Boundary which we can select is 32 bytes which is + * larger than any known Memory Controller Line Size so we'll + * use that. + */ + + /* + * N.B. T5 has a different interpretation of the "0" value for + * the Packing Boundary. This corresponds to 16 bytes instead + * of the expected 32 bytes. We never have a Packing Boundary + * less than 32 bytes so we can't use that special value but + * on the other hand, if we wanted 32 bytes, the best we can + * really do is 64 bytes ... + */ + if (fl_align <= 32) { + fl_align = 64; + fl_align_log = 6; + } + t4_set_reg_field(adap, A_SGE_CONTROL, + V_INGPADBOUNDARY(M_INGPADBOUNDARY) | + F_EGRSTATUSPAGESIZE, + V_INGPADBOUNDARY(X_INGPCIEBOUNDARY_32B) | + V_EGRSTATUSPAGESIZE(stat_len != 64)); + t4_set_reg_field(adap, A_SGE_CONTROL2, + V_INGPACKBOUNDARY(M_INGPACKBOUNDARY), + V_INGPACKBOUNDARY(fl_align_log - + X_INGPACKBOUNDARY_SHIFT)); + } + + /* + * Adjust various SGE Free List Host Buffer Sizes. + * + * The first four entries are: + * + * 0: Host Page Size + * 1: 64KB + * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode) + * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode) + * + * For the single-MTU buffers in unpacked mode we need to include + * space for the SGE Control Packet Shift, 14 byte Ethernet header, + * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet + * Padding boundary. All of these are accommodated in the Factory + * Default Firmware Configuration File but we need to adjust it for + * this host's cache line size. 
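+ *
+ * The writes below use the usual power-of-two round-up; for example,
+ * with a 64-byte cache line (fl_align = 64) a 1514-byte buffer size
+ * becomes (1514 + 63) & ~63 = 1536.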
+ */ + t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size); + t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2, + (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align - 1) + & ~(fl_align - 1)); + t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3, + (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align - 1) + & ~(fl_align - 1)); + + t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12)); + + return 0; +} + +/** + * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible) + * @adap: the adapter + * @page_size: the host's Base Page Size + * @cache_line_size: the host's Cache Line Size + * + * Various registers in T4 contain values which are dependent on the + * host's Base Page and Cache Line Sizes. This function will fix all of + * those registers with the appropriate values as passed in ... + * + * This routine makes changes which are compatible with T4 chips. + */ +int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, + unsigned int cache_line_size) +{ + return t4_fixup_host_params_compat(adap, page_size, cache_line_size, + T4_LAST_REV); +} + +/** + * t4_fw_initialize - ask FW to initialize the device + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * + * Issues a command to FW to partially initialize the device. This + * performs initialization that generally doesn't depend on user input. + */ +int t4_fw_initialize(struct adapter *adap, unsigned int mbox) +{ + struct fw_initialize_cmd c; + + memset(&c, 0, sizeof(c)); + INIT_CMD(c, INITIALIZE, WRITE); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_query_params_rw - query FW or device parameters + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF + * @vf: the VF + * @nparams: the number of parameters + * @params: the parameter names + * @val: the parameter values + * @rw: Write and read flag + * + * Reads the value of FW or device parameters. Up to 7 parameters can be + * queried at once. + */ +static int t4_query_params_rw(struct adapter *adap, unsigned int mbox, + unsigned int pf, unsigned int vf, + unsigned int nparams, const u32 *params, + u32 *val, int rw) +{ + unsigned int i; + int ret; + struct fw_params_cmd c; + __be32 *p = &c.param[0].mnem; + + if (nparams > 7) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_READ | + V_FW_PARAMS_CMD_PFN(pf) | + V_FW_PARAMS_CMD_VFN(vf)); + c.retval_len16 = cpu_to_be32(FW_LEN16(c)); + + for (i = 0; i < nparams; i++) { + *p++ = cpu_to_be32(*params++); + if (rw) + *p = cpu_to_be32(*(val + i)); + p++; + } + + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret == 0) + for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) + *val++ = be32_to_cpu(*p); + return ret; +} + +int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + u32 *val) +{ + return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0); +} + +/** + * t4_set_params_timeout - sets FW or device parameters + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF + * @vf: the VF + * @nparams: the number of parameters + * @params: the parameter names + * @val: the parameter values + * @timeout: the timeout time + * + * Sets the value of FW or device parameters. Up to 7 parameters can be + * specified at once. 
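+ *
+ * A usage sketch for a single parameter (how the 32-bit parameter
+ * mnemonic is composed with V_FW_PARAMS_MNEM()/V_FW_PARAMS_PARAM_X()
+ * can be seen in t4_get_core_clock() above; @param and @val here are
+ * placeholders):
+ *
+ *	ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);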
+ */ +int t4_set_params_timeout(struct adapter *adap, unsigned int mbox, + unsigned int pf, unsigned int vf, + unsigned int nparams, const u32 *params, + const u32 *val, int timeout) +{ + struct fw_params_cmd c; + __be32 *p = &c.param[0].mnem; + + if (nparams > 7) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_WRITE | + V_FW_PARAMS_CMD_PFN(pf) | + V_FW_PARAMS_CMD_VFN(vf)); + c.retval_len16 = cpu_to_be32(FW_LEN16(c)); + + while (nparams--) { + *p++ = cpu_to_be32(*params++); + *p++ = cpu_to_be32(*val++); + } + + return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout); +} + +int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + const u32 *val) +{ + return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val, + FW_CMD_MAX_TIMEOUT); +} + +/** + * t4_alloc_vi_func - allocate a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @port: physical port associated with the VI + * @pf: the PF owning the VI + * @vf: the VF owning the VI + * @nmac: number of MAC addresses needed (1 to 5) + * @mac: the MAC addresses of the VI + * @rss_size: size of RSS table slice associated with this VI + * @portfunc: which Port Application Function MAC Address is desired + * @idstype: Intrusion Detection Type + * + * Allocates a virtual interface for the given physical port. If @mac is + * not %NULL it contains the MAC addresses of the VI as assigned by FW. + * @mac should be large enough to hold @nmac Ethernet addresses, they are + * stored consecutively so the space needed is @nmac * 6 bytes. + * Returns a negative error number or the non-negative VI id. 
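+ *
+ * For instance, with @nmac = 2 the primary address is returned in
+ * @mac[0..5] and the second address in @mac[6..11], matching the
+ * consecutive 6-byte layout described above (illustrative note).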
+ */
+int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
+		     unsigned int port, unsigned int pf, unsigned int vf,
+		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
+		     unsigned int portfunc, unsigned int idstype)
+{
+	int ret;
+	struct fw_vi_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
+				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
+				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
+	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
+	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
+				     V_FW_VI_CMD_FUNC(portfunc));
+	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
+	c.nmac = nmac - 1;
+
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+	if (ret)
+		return ret;
+
+	if (mac) {
+		memcpy(mac, c.mac, sizeof(c.mac));
+		switch (nmac) {
+		case 5:
+			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
+			/* FALLTHROUGH */
+		case 4:
+			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
+			/* FALLTHROUGH */
+		case 3:
+			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
+			/* FALLTHROUGH */
+		case 2:
+			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
+			/* FALLTHROUGH */
+		}
+	}
+	if (rss_size)
+		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
+	return G_FW_VI_CMD_VIID(cpu_to_be16(c.type_to_viid));
+}
+
+/**
+ * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @port: physical port associated with the VI
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @nmac: number of MAC addresses needed (1 to 5)
+ * @mac: the MAC addresses of the VI
+ * @rss_size: size of RSS table slice associated with this VI
+ *
+ * Backwards compatible and convenience routine to allocate a Virtual
+ * Interface with an Ethernet Port Application Function and Intrusion
+ * Detection System disabled.
+ */
+int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
+		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
+		unsigned int *rss_size)
+{
+	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
+				FW_VI_FUNC_ETH, 0);
+}
+
+/**
+ * t4_free_vi - free a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @viid: virtual interface identifier
+ *
+ * Free a previously allocated virtual interface.
+ */
+int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	       unsigned int vf, unsigned int viid)
+{
+	struct fw_vi_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
+				  F_FW_CMD_EXEC | V_FW_VI_CMD_PFN(pf) |
+				  V_FW_VI_CMD_VFN(vf));
+	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
+	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
+
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+}
+
+/**
+ * t4_set_rxmode - set Rx properties of a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @mtu: the new MTU or -1
+ * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
+ * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
+ * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
+ * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
+ *          -1 no change
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Sets Rx properties of a virtual interface.
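+ *
+ * Illustrative usage: a call such as
+ * t4_set_rxmode(adap, mbox, viid, 1500, -1, -1, -1, -1, true) changes
+ * only the MTU; each -1 is converted below to the corresponding
+ * all-ones mask value, which leaves that setting unchanged.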
+ */ +int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, + int mtu, int promisc, int all_multi, int bcast, int vlanex, + bool sleep_ok) +{ + struct fw_vi_rxmode_cmd c; + + /* convert to FW values */ + if (mtu < 0) + mtu = M_FW_VI_RXMODE_CMD_MTU; + if (promisc < 0) + promisc = M_FW_VI_RXMODE_CMD_PROMISCEN; + if (all_multi < 0) + all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN; + if (bcast < 0) + bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN; + if (vlanex < 0) + vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_WRITE | + V_FW_VI_RXMODE_CMD_VIID(viid)); + c.retval_len16 = cpu_to_be32(FW_LEN16(c)); + c.mtu_to_vlanexen = cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) | + V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) | + V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | + V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | + V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); + return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); +} + +/** + * t4_change_mac - modifies the exact-match filter for a MAC address + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @idx: index of existing filter for old value of MAC address, or -1 + * @addr: the new MAC address value + * @persist: whether a new MAC allocation should be persistent + * @add_smt: if true also add the address to the HW SMT + * + * Modifies an exact-match filter and sets it to the new MAC address if + * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the + * latter case the address is added persistently if @persist is %true. + * + * Note that in general it is not possible to modify the value of a given + * filter so the generic way to modify an address filter is to free the one + * being used by the old address value and allocate a new filter for the + * new address value. + * + * Returns a negative error number or the index of the filter with the new + * MAC value. Note that this index may differ from @idx. + */ +int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, + int idx, const u8 *addr, bool persist, bool add_smt) +{ + int ret, mode; + struct fw_vi_mac_cmd c; + struct fw_vi_mac_exact *p = c.u.exact; + int max_mac_addr = adap->params.arch.mps_tcam_size; + + if (idx < 0) /* new allocation */ + idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; + mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_WRITE | + V_FW_VI_MAC_CMD_VIID(viid)); + c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1)); + p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID | + V_FW_VI_MAC_CMD_SMAC_RESULT(mode) | + V_FW_VI_MAC_CMD_IDX(idx)); + memcpy(p->macaddr, addr, sizeof(p->macaddr)); + + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret == 0) { + ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx)); + if (ret >= max_mac_addr) + ret = -ENOMEM; + } + return ret; +} + +/** + * t4_enable_vi_params - enable/disable a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @rx_en: 1=enable Rx, 0=disable Rx + * @tx_en: 1=enable Tx, 0=disable Tx + * @dcb_en: 1=enable delivery of Data Center Bridging messages. + * + * Enables/disables a virtual interface. Note that setting DCB Enable + * only makes sense when enabling a Virtual Interface ... 
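+ *
+ * For example, t4_enable_vi_params(adap, mbox, viid, 1, 1, 0) brings a
+ * VI up for both Rx and Tx with DCB message delivery left off; the
+ * t4_enable_vi() wrapper below simply fixes @dcb_en to 0 (illustrative
+ * note).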
+ */ +int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, + unsigned int viid, bool rx_en, bool tx_en, bool dcb_en) +{ + struct fw_vi_enable_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_EXEC | + V_FW_VI_ENABLE_CMD_VIID(viid)); + c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) | + V_FW_VI_ENABLE_CMD_EEN(tx_en) | + V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) | + FW_LEN16(c)); + return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_enable_vi - enable/disable a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @rx_en: 1=enable Rx, 0=disable Rx + * @tx_en: 1=enable Tx, 0=disable Tx + * + * Enables/disables a virtual interface. Note that setting DCB Enable + * only makes sense when enabling a Virtual Interface ... + */ +int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool rx_en, bool tx_en) +{ + return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0); +} + +/** + * t4_iq_start_stop - enable/disable an ingress queue and its FLs + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @start: %true to enable the queues, %false to disable them + * @pf: the PF owning the queues + * @vf: the VF owning the queues + * @iqid: ingress queue id + * @fl0id: FL0 queue id or 0xffff if no attached FL0 + * @fl1id: FL1 queue id or 0xffff if no attached FL1 + * + * Starts or stops an ingress queue and its associated FLs, if any. + */ +int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start, + unsigned int pf, unsigned int vf, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id) +{ + struct fw_iq_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | + F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | + V_FW_IQ_CMD_VFN(vf)); + c.alloc_to_len16 = cpu_to_be32(V_FW_IQ_CMD_IQSTART(start) | + V_FW_IQ_CMD_IQSTOP(!start) | + FW_LEN16(c)); + c.iqid = cpu_to_be16(iqid); + c.fl0id = cpu_to_be16(fl0id); + c.fl1id = cpu_to_be16(fl1id); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_iq_free - free an ingress queue and its FLs + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF owning the queues + * @vf: the VF owning the queues + * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) + * @iqid: ingress queue id + * @fl0id: FL0 queue id or 0xffff if no attached FL0 + * @fl1id: FL1 queue id or 0xffff if no attached FL1 + * + * Frees an ingress queue and its associated FLs, if any. 
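+ *
+ * Illustrative usage: an ingress queue with a single attached Free List
+ * could be released with
+ * t4_iq_free(adap, mbox, pf, vf, FW_IQ_TYPE_FL_INT_CAP, iqid, fl0id, 0xffff),
+ * passing 0xffff for the unused FL1 slot (iqid and fl0id stand for the
+ * caller's queue ids).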
+ */ +int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int iqtype, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id) +{ + struct fw_iq_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | + F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | + V_FW_IQ_CMD_VFN(vf)); + c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c)); + c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype)); + c.iqid = cpu_to_be16(iqid); + c.fl0id = cpu_to_be16(fl0id); + c.fl1id = cpu_to_be16(fl1id); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_eth_eq_free - free an Ethernet egress queue + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF owning the queue + * @vf: the VF owning the queue + * @eqid: egress queue id + * + * Frees an Ethernet egress queue. + */ +int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid) +{ + struct fw_eq_eth_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_EXEC | + V_FW_EQ_ETH_CMD_PFN(pf) | + V_FW_EQ_ETH_CMD_VFN(vf)); + c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); + c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid)); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_handle_fw_rpl - process a FW reply message + * @adap: the adapter + * @rpl: start of the FW message + * + * Processes a FW message, such as link state change messages. + */ +int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) +{ + u8 opcode = *(const u8 *)rpl; + + /* + * This might be a port command ... this simplifies the following + * conditionals ... We can get away with pre-dereferencing + * action_to_len16 because it's in the first 16 bytes and all messages + * will be at least that long. 
+	 */
+	const struct fw_port_cmd *p = (const void *)rpl;
+	unsigned int action =
+		G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
+
+	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
+		/* link/module state change message */
+		int speed = 0, fc = 0, i;
+		int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
+		struct port_info *pi = NULL;
+		struct link_config *lc;
+		u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
+		int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
+		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
+
+		if (stat & F_FW_PORT_CMD_RXPAUSE)
+			fc |= PAUSE_RX;
+		if (stat & F_FW_PORT_CMD_TXPAUSE)
+			fc |= PAUSE_TX;
+		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
+			speed = ETH_SPEED_NUM_100M;
+		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
+			speed = ETH_SPEED_NUM_1G;
+		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
+			speed = ETH_SPEED_NUM_10G;
+		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
+			speed = ETH_SPEED_NUM_40G;
+
+		for_each_port(adap, i) {
+			pi = adap2pinfo(adap, i);
+			if (pi->tx_chan == chan)
+				break;
+		}
+		lc = &pi->link_cfg;
+
+		if (mod != pi->mod_type) {
+			pi->mod_type = mod;
+			t4_os_portmod_changed(adap, i);
+		}
+		if (link_ok != lc->link_ok || speed != lc->speed ||
+		    fc != lc->fc) {	/* something changed */
+			if (!link_ok && lc->link_ok) {
+				static const char * const reason[] = {
+					"Link Down",
+					"Remote Fault",
+					"Auto-negotiation Failure",
+					"Reserved",
+					"Insufficient Airflow",
+					"Unable To Determine Reason",
+					"No RX Signal Detected",
+					"Reserved",
+				};
+				unsigned int rc = G_FW_PORT_CMD_LINKDNRC(stat);
+
+				dev_warn(adap, "Port %d link down, reason: %s\n",
+					 chan, reason[rc]);
+			}
+			lc->link_ok = link_ok;
+			lc->speed = speed;
+			lc->fc = fc;
+			lc->supported = be16_to_cpu(p->u.info.pcap);
+		}
+	} else {
+		dev_warn(adap, "Unknown firmware reply %d\n", opcode);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+void t4_reset_link_config(struct adapter *adap, int idx)
+{
+	struct port_info *pi = adap2pinfo(adap, idx);
+	struct link_config *lc = &pi->link_cfg;
+
+	lc->link_ok = 0;
+	lc->requested_speed = 0;
+	lc->requested_fc = 0;
+	lc->speed = 0;
+	lc->fc = 0;
+}
+
+/**
+ * init_link_config - initialize a link's SW state
+ * @lc: structure holding the link state
+ * @caps: link capabilities
+ *
+ * Initializes the SW state maintained for each link, including the link's
+ * capabilities and default speed/flow-control/autonegotiation settings.
+ */
+static void init_link_config(struct link_config *lc,
+			     unsigned int caps)
+{
+	lc->supported = caps;
+	lc->requested_speed = 0;
+	lc->speed = 0;
+	lc->requested_fc = 0;
+	lc->fc = 0;
+	if (lc->supported & FW_PORT_CAP_ANEG) {
+		lc->advertising = lc->supported & ADVERT_MASK;
+		lc->autoneg = AUTONEG_ENABLE;
+	} else {
+		lc->advertising = 0;
+		lc->autoneg = AUTONEG_DISABLE;
+	}
+}
+
+/**
+ * t4_wait_dev_ready - wait until reads of registers work
+ *
+ * Right after the device is RESET it can take a small amount of time
+ * for it to respond to register reads.  Until then, all reads will
+ * return either 0xff...ff or 0xee...ee.  Return an error if reads
+ * don't work within a reasonable time frame.
+ */
+static int t4_wait_dev_ready(struct adapter *adapter)
+{
+	u32 whoami;
+
+	whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+
+	if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
+		return 0;
+
+	msleep(500);
+	whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+	return (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS
+		?
0 : -EIO); +} + +struct flash_desc { + u32 vendor_and_model_id; + u32 size_mb; +}; + +int t4_get_flash_params(struct adapter *adapter) +{ + /* + * Table for non-Numonix supported flash parts. Numonix parts are left + * to the preexisting well-tested code. All flash parts have 64KB + * sectors. + */ + static struct flash_desc supported_flash[] = { + { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */ + }; + + int ret; + unsigned int i; + u32 info = 0; + + ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID); + if (!ret) + ret = sf1_read(adapter, 3, 0, 1, &info); + t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ + if (ret < 0) + return ret; + + for (i = 0; i < ARRAY_SIZE(supported_flash); ++i) + if (supported_flash[i].vendor_and_model_id == info) { + adapter->params.sf_size = supported_flash[i].size_mb; + adapter->params.sf_nsec = + adapter->params.sf_size / SF_SEC_SIZE; + return 0; + } + + if ((info & 0xff) != 0x20) /* not a Numonix flash */ + return -EINVAL; + info >>= 16; /* log2 of size */ + if (info >= 0x14 && info < 0x18) + adapter->params.sf_nsec = 1 << (info - 16); + else if (info == 0x18) + adapter->params.sf_nsec = 64; + else + return -EINVAL; + adapter->params.sf_size = 1 << info; + + /* + * We should reject adapters with FLASHes which are too small. So, emit + * a warning. + */ + if (adapter->params.sf_size < FLASH_MIN_SIZE) { + dev_warn(adapter, "WARNING!!! FLASH size %#x < %#x!!!\n", + adapter->params.sf_size, FLASH_MIN_SIZE); + } + + return 0; +} + +/** + * t4_prep_adapter - prepare SW and HW for operation + * @adapter: the adapter + * + * Initialize adapter SW state for the various HW modules, set initial + * values for some adapter tunables, take PHYs out of reset, and + * initialize the MDIO interface. + */ +int t4_prep_adapter(struct adapter *adapter) +{ + int ret, ver; + u32 pl_rev; + + ret = t4_wait_dev_ready(adapter); + if (ret < 0) + return ret; + + pl_rev = G_REV(t4_read_reg(adapter, A_PL_REV)); + adapter->params.pci.device_id = adapter->pdev->id.device_id; + adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id; + + /* + * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS + * ADAPTER (VERSION << 4 | REVISION) + */ + ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id); + adapter->params.chip = 0; + switch (ver) { + case CHELSIO_T5: + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev); + adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE; + adapter->params.arch.mps_tcam_size = + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + adapter->params.arch.mps_rplc_size = 128; + adapter->params.arch.nchan = NCHAN; + adapter->params.arch.vfcount = 128; + break; + default: + dev_err(adapter, "%s: Device %d is not supported\n", + __func__, adapter->params.pci.device_id); + return -EINVAL; + } + + ret = t4_get_flash_params(adapter); + if (ret < 0) + return ret; + + adapter->params.cim_la_size = CIMLA_SIZE; + + init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); + + /* + * Default port and clock for debugging in case we can't reach FW. + */ + adapter->params.nports = 1; + adapter->params.portvec = 1; + adapter->params.vpd.cclk = 50000; + + return 0; +} + +/** + * t4_bar2_sge_qregs - return BAR2 SGE Queue register information + * @adapter: the adapter + * @qid: the Queue ID + * @qtype: the Ingress or Egress type for @qid + * @pbar2_qoffset: BAR2 Queue Offset + * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues + * + * Returns the BAR2 SGE Queue Registers information associated with the + * indicated Absolute Queue ID. 
These are passed back in return value
+ *	pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
+ *	and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
+ *
+ *	This may return an error which indicates that BAR2 SGE Queue
+ *	registers aren't available.  If an error is not returned, then the
+ *	following values are returned:
+ *
+ *	  *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
+ *	  *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
+ *
+ *	If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
+ *	require the "Inferred Queue ID" ability may be used.  E.g. the
+ *	Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
+ *	then these "Inferred Queue ID" registers may not be used.
+ */
+int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
+		      enum t4_bar2_qtype qtype, u64 *pbar2_qoffset,
+		      unsigned int *pbar2_qid)
+{
+	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
+	u64 bar2_page_offset, bar2_qoffset;
+	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
+
+	/*
+	 * T4 doesn't support BAR2 SGE Queue registers.
+	 */
+	if (is_t4(adapter->params.chip))
+		return -EINVAL;
+
+	/*
+	 * Get our SGE Page Size parameters.
+	 */
+	page_shift = adapter->params.sge.hps + 10;
+	page_size = 1 << page_shift;
+
+	/*
+	 * Get the right Queues per Page parameters for our Queue.
+	 */
+	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS ?
+		     adapter->params.sge.eq_qpp :
+		     adapter->params.sge.iq_qpp);
+	qpp_mask = (1 << qpp_shift) - 1;
+
+	/*
+	 * Calculate the basics of the BAR2 SGE Queue register area:
+	 *  o The BAR2 page the Queue registers will be in.
+	 *  o The BAR2 Queue ID.
+	 *  o The BAR2 Queue ID Offset into the BAR2 page.
+	 */
+	bar2_page_offset = ((qid >> qpp_shift) << page_shift);
+	bar2_qid = qid & qpp_mask;
+	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
+
+	/*
+	 * If the BAR2 Queue ID Offset is less than the Page Size, then the
+	 * hardware will infer the Absolute Queue ID simply from the writes to
+	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
+	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
+	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
+	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
+	 * from the BAR2 Page and BAR2 Queue ID.
+	 *
+	 * One important consequence of this is that some BAR2 SGE registers
+	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
+	 * there.  But other registers synthesize the SGE Queue ID purely
+	 * from the writes to the registers -- the Write Combined Doorbell
+	 * Buffer is a good example.  These BAR2 SGE Registers are only
+	 * available for those BAR2 SGE Register areas where the SGE Absolute
+	 * Queue ID can be inferred from simple writes.
+	 */
+	bar2_qoffset = bar2_page_offset;
+	bar2_qinferred = (bar2_qid_offset < page_size);
+	if (bar2_qinferred) {
+		bar2_qoffset += bar2_qid_offset;
+		bar2_qid = 0;
+	}
+
+	*pbar2_qoffset = bar2_qoffset;
+	*pbar2_qid = bar2_qid;
+	return 0;
+}
+
+/**
+ * t4_init_sge_params - initialize adap->params.sge
+ * @adapter: the adapter
+ *
+ * Initialize various fields of the adapter's SGE Parameters structure.
+ */
+int t4_init_sge_params(struct adapter *adapter)
+{
+	struct sge_params *sge_params = &adapter->params.sge;
+	u32 hps, qpp;
+	unsigned int s_hps, s_qpp;
+
+	/*
+	 * Extract the SGE Page Size for our PF.
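+	 *
+	 * As a worked example (illustrative): the per-PF field stride is
+	 * S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0 = 4 bits, so for PF 4 the
+	 * shift computed below is 0 + 4 * 4 = 16, i.e. S_HOSTPAGESIZEPF4.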
+	 */
+	hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
+	s_hps = (S_HOSTPAGESIZEPF0 +
+		 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf);
+	sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0);
+
+	/*
+	 * Extract the SGE Egress and Ingress Queues Per Page for our PF.
+	 */
+	s_qpp = (S_QUEUESPERPAGEPF0 +
+		 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
+	qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
+	sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
+	qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
+	sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
+
+	return 0;
+}
+
+/**
+ * t4_init_tp_params - initialize adap->params.tp
+ * @adap: the adapter
+ *
+ * Initialize various fields of the adapter's TP Parameters structure.
+ */
+int t4_init_tp_params(struct adapter *adap)
+{
+	int chan;
+	u32 v;
+
+	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
+	adap->params.tp.tre = G_TIMERRESOLUTION(v);
+	adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
+
+	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
+	for (chan = 0; chan < NCHAN; chan++)
+		adap->params.tp.tx_modq[chan] = chan;
+
+	/*
+	 * Cache the adapter's Compressed Filter Mode and global Ingress
+	 * Configuration.
+	 */
+	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+			 &adap->params.tp.vlan_pri_map, 1, A_TP_VLAN_PRI_MAP);
+	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+			 &adap->params.tp.ingress_config, 1,
+			 A_TP_INGRESS_CONFIG);
+
+	/*
+	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
+	 * shift positions of several elements of the Compressed Filter Tuple
+	 * for this adapter which we need frequently ...
+	 */
+	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
+	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
+	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
+	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
+							       F_PROTOCOL);
+
+	/*
+	 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
+	 * represents the presence of an Outer VLAN instead of a VNIC ID.
+	 */
+	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
+		adap->params.tp.vnic_shift = -1;
+
+	return 0;
+}
+
+/**
+ * t4_filter_field_shift - calculate filter field shift
+ * @adap: the adapter
+ * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
+ *
+ *	Return the shift position of a filter field within the Compressed
+ *	Filter Tuple.  The filter field is specified via its selection bit
+ *	within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
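+ *
+ *	As an illustration (symbolic widths only): if the cached filter
+ *	mode has F_PORT, F_VLAN and F_PROTOCOL set, the shift returned for
+ *	F_PROTOCOL is W_FT_PORT + W_FT_VLAN, i.e. the summed widths of the
+ *	enabled fields whose selection bits lie below F_PROTOCOL.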
+ */ +int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel) +{ + unsigned int filter_mode = adap->params.tp.vlan_pri_map; + unsigned int sel; + int field_shift; + + if ((filter_mode & filter_sel) == 0) + return -1; + + for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) { + switch (filter_mode & sel) { + case F_FCOE: + field_shift += W_FT_FCOE; + break; + case F_PORT: + field_shift += W_FT_PORT; + break; + case F_VNIC_ID: + field_shift += W_FT_VNIC_ID; + break; + case F_VLAN: + field_shift += W_FT_VLAN; + break; + case F_TOS: + field_shift += W_FT_TOS; + break; + case F_PROTOCOL: + field_shift += W_FT_PROTOCOL; + break; + case F_ETHERTYPE: + field_shift += W_FT_ETHERTYPE; + break; + case F_MACMATCH: + field_shift += W_FT_MACMATCH; + break; + case F_MPSHITTYPE: + field_shift += W_FT_MPSHITTYPE; + break; + case F_FRAGMENTATION: + field_shift += W_FT_FRAGMENTATION; + break; + } + } + return field_shift; +} + +int t4_init_rss_mode(struct adapter *adap, int mbox) +{ + int i, ret; + struct fw_rss_vi_config_cmd rvc; + + memset(&rvc, 0, sizeof(rvc)); + + for_each_port(adap, i) { + struct port_info *p = adap2pinfo(adap, i); + + rvc.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_READ | + V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid)); + rvc.retval_len16 = htonl(FW_LEN16(rvc)); + ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc); + if (ret) + return ret; + p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen); + } + return 0; +} + +int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) +{ + u8 addr[6]; + int ret, i, j = 0; + struct fw_port_cmd c; + + memset(&c, 0, sizeof(c)); + + for_each_port(adap, i) { + unsigned int rss_size = 0; + struct port_info *p = adap2pinfo(adap, i); + + while ((adap->params.portvec & (1 << j)) == 0) + j++; + + c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_READ | + V_FW_PORT_CMD_PORTID(j)); + c.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION( + FW_PORT_ACTION_GET_PORT_INFO) | + FW_LEN16(c)); + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret) + return ret; + + ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size); + if (ret < 0) + return ret; + + p->viid = ret; + p->tx_chan = j; + p->rss_size = rss_size; + t4_os_set_hw_addr(adap, i, addr); + + ret = be32_to_cpu(c.u.info.lstatus_to_modtype); + p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ? + G_FW_PORT_CMD_MDIOADDR(ret) : -1; + p->port_type = G_FW_PORT_CMD_PTYPE(ret); + p->mod_type = FW_PORT_MOD_TYPE_NA; + + init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap)); + j++; + } + return 0; +} diff --git a/drivers/net/cxgbe/base/t4_hw.h b/drivers/net/cxgbe/base/t4_hw.h new file mode 100644 index 00000000..bf623cf4 --- /dev/null +++ b/drivers/net/cxgbe/base/t4_hw.h @@ -0,0 +1,149 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2015 Chelsio Communications. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Chelsio Communications nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __T4_HW_H +#define __T4_HW_H + +enum { + NCHAN = 4, /* # of HW channels */ + NMTUS = 16, /* size of MTU table */ + NCCTRL_WIN = 32, /* # of congestion control windows */ + MBOX_LEN = 64, /* mailbox size in bytes */ + UDBS_SEG_SIZE = 128, /* segment size for BAR2 user doorbells */ +}; + +enum { + CIMLA_SIZE = 2048, /* # of 32-bit words in CIM LA */ +}; + +enum { + SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */ +}; + +enum { + SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */ + SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */ +}; + +/* PCI-e memory window access */ +enum pcie_memwin { + MEMWIN_NIC = 0, +}; + +enum { + SGE_MAX_WR_LEN = 512, /* max WR size in bytes */ + SGE_EQ_IDXSIZE = 64, /* egress queue pidx/cidx unit size */ + /* max no. of desc allowed in WR */ + SGE_MAX_WR_NDESC = SGE_MAX_WR_LEN / SGE_EQ_IDXSIZE, +}; + +struct sge_qstat { /* data written to SGE queue status entries */ + __be32 qid; + __be16 cidx; + __be16 pidx; +}; + +/* + * Structure for last 128 bits of response descriptors + */ +struct rsp_ctrl { + __be32 hdrbuflen_pidx; + __be32 pldbuflen_qid; + union { + u8 type_gen; + __be64 last_flit; + } u; +}; + +#define S_RSPD_NEWBUF 31 +#define V_RSPD_NEWBUF(x) ((x) << S_RSPD_NEWBUF) +#define F_RSPD_NEWBUF V_RSPD_NEWBUF(1U) + +#define S_RSPD_LEN 0 +#define M_RSPD_LEN 0x7fffffff +#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN) +#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN) + +#define S_RSPD_GEN 7 +#define V_RSPD_GEN(x) ((x) << S_RSPD_GEN) +#define F_RSPD_GEN V_RSPD_GEN(1U) + +#define S_RSPD_TYPE 4 +#define M_RSPD_TYPE 0x3 +#define V_RSPD_TYPE(x) ((x) << S_RSPD_TYPE) +#define G_RSPD_TYPE(x) (((x) >> S_RSPD_TYPE) & M_RSPD_TYPE) + +/* Rx queue interrupt deferral field: timer index */ +#define S_QINTR_CNT_EN 0 +#define V_QINTR_CNT_EN(x) ((x) << S_QINTR_CNT_EN) +#define F_QINTR_CNT_EN V_QINTR_CNT_EN(1U) + +#define S_QINTR_TIMER_IDX 1 +#define M_QINTR_TIMER_IDX 0x7 +#define V_QINTR_TIMER_IDX(x) ((x) << S_QINTR_TIMER_IDX) +#define G_QINTR_TIMER_IDX(x) (((x) >> S_QINTR_TIMER_IDX) & M_QINTR_TIMER_IDX) + +/* + * Flash layout. + */ +#define FLASH_START(start) ((start) * SF_SEC_SIZE) +#define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE) + +enum { + /* + * Location of firmware image in FLASH. + */ + FLASH_FW_START_SEC = 8, + FLASH_FW_NSECS = 16, + FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC), + FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS), + + /* + * Location of Firmware Configuration File in FLASH. 
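+	 *
+	 * Illustrative arithmetic: with 64KB sectors, sector 31 places the
+	 * Configuration File at byte offset 0x1f0000; together with its
+	 * single-sector maximum size, this makes FLASH_MIN_SIZE below equal
+	 * to 0x200000 (a 2MB part).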
+ */ + FLASH_CFG_START_SEC = 31, + FLASH_CFG_NSECS = 1, + FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC), + FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS), + + /* + * We don't support FLASH devices which can't support the full + * standard set of sections which we need for normal operations. + */ + FLASH_MIN_SIZE = FLASH_CFG_START + FLASH_CFG_MAX_SIZE, +}; + +#undef FLASH_START +#undef FLASH_MAX_SIZE + +#endif /* __T4_HW_H */ diff --git a/drivers/net/cxgbe/base/t4_msg.h b/drivers/net/cxgbe/base/t4_msg.h new file mode 100644 index 00000000..4b04cd0d --- /dev/null +++ b/drivers/net/cxgbe/base/t4_msg.h @@ -0,0 +1,345 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2015 Chelsio Communications. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Chelsio Communications nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef T4_MSG_H +#define T4_MSG_H + +enum { + CPL_SGE_EGR_UPDATE = 0xA5, + CPL_FW4_MSG = 0xC0, + CPL_FW6_MSG = 0xE0, + CPL_TX_PKT_LSO = 0xED, + CPL_TX_PKT_XT = 0xEE, +}; + +enum { /* TX_PKT_XT checksum types */ + TX_CSUM_TCPIP = 8, + TX_CSUM_UDPIP = 9, + TX_CSUM_TCPIP6 = 10, +}; + +union opcode_tid { + __be32 opcode_tid; + __u8 opcode; +}; + +struct rss_header { + __u8 opcode; +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + __u8 channel:2; + __u8 filter_hit:1; + __u8 filter_tid:1; + __u8 hash_type:2; + __u8 ipv6:1; + __u8 send2fw:1; +#else + __u8 send2fw:1; + __u8 ipv6:1; + __u8 hash_type:2; + __u8 filter_tid:1; + __u8 filter_hit:1; + __u8 channel:2; +#endif + __be16 qid; + __be32 hash_val; +}; + +#if defined(RSS_HDR_VLD) || defined(CHELSIO_FW) +#define RSS_HDR struct rss_header rss_hdr +#else +#define RSS_HDR +#endif + +#ifndef CHELSIO_FW +struct work_request_hdr { + __be32 wr_hi; + __be32 wr_mid; + __be64 wr_lo; +}; + +#define WR_HDR struct work_request_hdr wr +#define WR_HDR_SIZE sizeof(struct work_request_hdr) +#else +#define WR_HDR +#define WR_HDR_SIZE 0 +#endif + +struct cpl_tx_data { + union opcode_tid ot; + __be32 len; + __be32 rsvd; + __be32 flags; +}; + +struct cpl_tx_pkt_core { + __be32 ctrl0; + __be16 pack; + __be16 len; + __be64 ctrl1; +}; + +struct cpl_tx_pkt { + WR_HDR; + struct cpl_tx_pkt_core c; +}; + +/* cpl_tx_pkt_core.ctrl0 fields */ +#define S_TXPKT_PF 8 +#define M_TXPKT_PF 0x7 +#define V_TXPKT_PF(x) ((x) << S_TXPKT_PF) +#define G_TXPKT_PF(x) (((x) >> S_TXPKT_PF) & M_TXPKT_PF) + +#define S_TXPKT_INTF 16 +#define M_TXPKT_INTF 0xF +#define V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF) +#define G_TXPKT_INTF(x) (((x) >> S_TXPKT_INTF) & M_TXPKT_INTF) + +#define S_TXPKT_OPCODE 24 +#define M_TXPKT_OPCODE 0xFF +#define V_TXPKT_OPCODE(x) ((x) << S_TXPKT_OPCODE) +#define G_TXPKT_OPCODE(x) (((x) >> S_TXPKT_OPCODE) & M_TXPKT_OPCODE) + +/* cpl_tx_pkt_core.ctrl1 fields */ +#define S_TXPKT_IPHDR_LEN 20 +#define M_TXPKT_IPHDR_LEN 0x3FFF +#define V_TXPKT_IPHDR_LEN(x) ((__u64)(x) << S_TXPKT_IPHDR_LEN) +#define G_TXPKT_IPHDR_LEN(x) (((x) >> S_TXPKT_IPHDR_LEN) & M_TXPKT_IPHDR_LEN) + +#define S_TXPKT_ETHHDR_LEN 34 +#define M_TXPKT_ETHHDR_LEN 0x3F +#define V_TXPKT_ETHHDR_LEN(x) ((__u64)(x) << S_TXPKT_ETHHDR_LEN) +#define G_TXPKT_ETHHDR_LEN(x) (((x) >> S_TXPKT_ETHHDR_LEN) & M_TXPKT_ETHHDR_LEN) + +#define S_T6_TXPKT_ETHHDR_LEN 32 +#define M_T6_TXPKT_ETHHDR_LEN 0xFF +#define V_T6_TXPKT_ETHHDR_LEN(x) ((__u64)(x) << S_T6_TXPKT_ETHHDR_LEN) +#define G_T6_TXPKT_ETHHDR_LEN(x) \ + (((x) >> S_T6_TXPKT_ETHHDR_LEN) & M_T6_TXPKT_ETHHDR_LEN) + +#define S_TXPKT_CSUM_TYPE 40 +#define M_TXPKT_CSUM_TYPE 0xF +#define V_TXPKT_CSUM_TYPE(x) ((__u64)(x) << S_TXPKT_CSUM_TYPE) +#define G_TXPKT_CSUM_TYPE(x) (((x) >> S_TXPKT_CSUM_TYPE) & M_TXPKT_CSUM_TYPE) + +#define S_TXPKT_VLAN 44 +#define M_TXPKT_VLAN 0xFFFF +#define V_TXPKT_VLAN(x) ((__u64)(x) << S_TXPKT_VLAN) +#define G_TXPKT_VLAN(x) (((x) >> S_TXPKT_VLAN) & M_TXPKT_VLAN) + +#define S_TXPKT_VLAN_VLD 60 +#define V_TXPKT_VLAN_VLD(x) ((__u64)(x) << S_TXPKT_VLAN_VLD) +#define F_TXPKT_VLAN_VLD V_TXPKT_VLAN_VLD(1ULL) + +#define S_TXPKT_IPCSUM_DIS 62 +#define V_TXPKT_IPCSUM_DIS(x) ((__u64)(x) << S_TXPKT_IPCSUM_DIS) +#define F_TXPKT_IPCSUM_DIS V_TXPKT_IPCSUM_DIS(1ULL) + +#define S_TXPKT_L4CSUM_DIS 63 +#define V_TXPKT_L4CSUM_DIS(x) ((__u64)(x) << S_TXPKT_L4CSUM_DIS) +#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1ULL) + +struct cpl_tx_pkt_lso_core { + __be32 lso_ctrl; + __be16 ipid_ofst; + __be16 mss; + __be32 seqno_offset; + __be32 len; + /* encapsulated CPL (TX_PKT, 
TX_PKT_XT or TX_DATA) follows here */ +}; + +struct cpl_tx_pkt_lso { + WR_HDR; + struct cpl_tx_pkt_lso_core c; + /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */ +}; + +/* cpl_tx_pkt_lso_core.lso_ctrl fields */ +#define S_LSO_TCPHDR_LEN 0 +#define M_LSO_TCPHDR_LEN 0xF +#define V_LSO_TCPHDR_LEN(x) ((x) << S_LSO_TCPHDR_LEN) +#define G_LSO_TCPHDR_LEN(x) (((x) >> S_LSO_TCPHDR_LEN) & M_LSO_TCPHDR_LEN) + +#define S_LSO_IPHDR_LEN 4 +#define M_LSO_IPHDR_LEN 0xFFF +#define V_LSO_IPHDR_LEN(x) ((x) << S_LSO_IPHDR_LEN) +#define G_LSO_IPHDR_LEN(x) (((x) >> S_LSO_IPHDR_LEN) & M_LSO_IPHDR_LEN) + +#define S_LSO_ETHHDR_LEN 16 +#define M_LSO_ETHHDR_LEN 0xF +#define V_LSO_ETHHDR_LEN(x) ((x) << S_LSO_ETHHDR_LEN) +#define G_LSO_ETHHDR_LEN(x) (((x) >> S_LSO_ETHHDR_LEN) & M_LSO_ETHHDR_LEN) + +#define S_LSO_IPV6 20 +#define V_LSO_IPV6(x) ((x) << S_LSO_IPV6) +#define F_LSO_IPV6 V_LSO_IPV6(1U) + +#define S_LSO_LAST_SLICE 22 +#define V_LSO_LAST_SLICE(x) ((x) << S_LSO_LAST_SLICE) +#define F_LSO_LAST_SLICE V_LSO_LAST_SLICE(1U) + +#define S_LSO_FIRST_SLICE 23 +#define V_LSO_FIRST_SLICE(x) ((x) << S_LSO_FIRST_SLICE) +#define F_LSO_FIRST_SLICE V_LSO_FIRST_SLICE(1U) + +#define S_LSO_OPCODE 24 +#define M_LSO_OPCODE 0xFF +#define V_LSO_OPCODE(x) ((x) << S_LSO_OPCODE) +#define G_LSO_OPCODE(x) (((x) >> S_LSO_OPCODE) & M_LSO_OPCODE) + +#define S_LSO_T5_XFER_SIZE 0 +#define M_LSO_T5_XFER_SIZE 0xFFFFFFF +#define V_LSO_T5_XFER_SIZE(x) ((x) << S_LSO_T5_XFER_SIZE) +#define G_LSO_T5_XFER_SIZE(x) (((x) >> S_LSO_T5_XFER_SIZE) & M_LSO_T5_XFER_SIZE) + +struct cpl_rx_pkt { + RSS_HDR; + __u8 opcode; +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + __u8 iff:4; + __u8 csum_calc:1; + __u8 ipmi_pkt:1; + __u8 vlan_ex:1; + __u8 ip_frag:1; +#else + __u8 ip_frag:1; + __u8 vlan_ex:1; + __u8 ipmi_pkt:1; + __u8 csum_calc:1; + __u8 iff:4; +#endif + __be16 csum; + __be16 vlan; + __be16 len; + __be32 l2info; + __be16 hdr_len; + __be16 err_vec; +}; + +/* rx_pkt.l2info fields */ +#define S_RXF_UDP 22 +#define V_RXF_UDP(x) ((x) << S_RXF_UDP) +#define F_RXF_UDP V_RXF_UDP(1U) + +#define S_RXF_TCP 23 +#define V_RXF_TCP(x) ((x) << S_RXF_TCP) +#define F_RXF_TCP V_RXF_TCP(1U) + +#define S_RXF_IP 24 +#define V_RXF_IP(x) ((x) << S_RXF_IP) +#define F_RXF_IP V_RXF_IP(1U) + +#define S_RXF_IP6 25 +#define V_RXF_IP6(x) ((x) << S_RXF_IP6) +#define F_RXF_IP6 V_RXF_IP6(1U) + +/* cpl_fw*.type values */ +enum { + FW_TYPE_RSSCPL = 4, +}; + +struct cpl_fw4_msg { + RSS_HDR; + u8 opcode; + u8 type; + __be16 rsvd0; + __be32 rsvd1; + __be64 data[2]; +}; + +struct cpl_fw6_msg { + RSS_HDR; + u8 opcode; + u8 type; + __be16 rsvd0; + __be32 rsvd1; + __be64 data[4]; +}; + +enum { + ULP_TX_SC_IMM = 0x81, + ULP_TX_SC_DSGL = 0x82, + ULP_TX_SC_ISGL = 0x83 +}; + +#define S_ULPTX_CMD 24 +#define M_ULPTX_CMD 0xFF +#define V_ULPTX_CMD(x) ((x) << S_ULPTX_CMD) + +#define S_ULP_TX_SC_MORE 23 +#define V_ULP_TX_SC_MORE(x) ((x) << S_ULP_TX_SC_MORE) +#define F_ULP_TX_SC_MORE V_ULP_TX_SC_MORE(1U) + +struct ulptx_sge_pair { + __be32 len[2]; + __be64 addr[2]; +}; + +struct ulptx_sgl { + __be32 cmd_nsge; + __be32 len0; + __be64 addr0; + +#if !(defined C99_NOT_SUPPORTED) + struct ulptx_sge_pair sge[0]; +#endif + +}; + +struct ulptx_idata { + __be32 cmd_more; + __be32 len; +}; + +#define S_ULPTX_NSGE 0 +#define M_ULPTX_NSGE 0xFFFF +#define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE) + +struct ulp_txpkt { + __be32 cmd_dest; + __be32 len; +}; + +/* ulp_txpkt.cmd_dest fields */ +#define S_ULP_TXPKT_DEST 16 +#define M_ULP_TXPKT_DEST 0x3 +#define V_ULP_TXPKT_DEST(x) ((x) << S_ULP_TXPKT_DEST) + +#define 
S_ULP_TXPKT_FID 4
+#define M_ULP_TXPKT_FID 0x7ff
+#define V_ULP_TXPKT_FID(x) ((x) << S_ULP_TXPKT_FID)
+
+#define S_ULP_TXPKT_RO 3
+#define V_ULP_TXPKT_RO(x) ((x) << S_ULP_TXPKT_RO)
+#define F_ULP_TXPKT_RO V_ULP_TXPKT_RO(1U)
+
+#endif /* T4_MSG_H */
diff --git a/drivers/net/cxgbe/base/t4_pci_id_tbl.h b/drivers/net/cxgbe/base/t4_pci_id_tbl.h
new file mode 100644
index 00000000..110fadb0
--- /dev/null
+++ b/drivers/net/cxgbe/base/t4_pci_id_tbl.h
@@ -0,0 +1,151 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014-2015 Chelsio Communications.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Chelsio Communications nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __T4_PCI_ID_TBL_H__
+#define __T4_PCI_ID_TBL_H__
+
+/*
+ * The OS-dependent code can define cpp macros for creating a PCI Device ID
+ * Table.  This is useful because it allows the PCI ID Table to be maintained
+ * in a single place and all supporting OSes to get new PCI Device IDs
+ * automatically.
+ *
+ * The macros are:
+ *
+ * CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+ *   -- Used to start the definition of the PCI ID Table.
+ *
+ * CH_PCI_DEVICE_ID_FUNCTION
+ *   -- The PCI Function Number to use in the PCI Device ID Table.  "0"
+ *   -- for drivers attaching to PF0-3, "4" for drivers attaching to PF4,
+ *   -- "8" for drivers attaching to SR-IOV Virtual Functions, etc.
+ *
+ * CH_PCI_DEVICE_ID_FUNCTION2 [optional]
+ *   -- If defined, create a PCI Device ID Table with both
+ *   -- CH_PCI_DEVICE_ID_FUNCTION and CH_PCI_DEVICE_ID_FUNCTION2 populated.
+ *
+ * CH_PCI_ID_TABLE_ENTRY(DeviceID)
+ *   -- Used for the individual PCI Device ID entries.  Note that we will
+ *   -- be adding a trailing comma (",") after all of the entries (and
+ *   -- between the pairs of entries if CH_PCI_DEVICE_ID_FUNCTION2 is defined).
+ *
+ * CH_PCI_DEVICE_ID_TABLE_DEFINE_END
+ *   -- Used to finish the definition of the PCI ID Table.  Note that we
+ *   -- will be adding a trailing semi-colon (";") here.
+ *
+ * CH_PCI_DEVICE_ID_BYPASS_SUPPORTED [optional]
+ *   -- If defined, indicates that the OS Driver has support for Bypass
+ *   -- Adapters.
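+ *
+ * As a worked example (illustrative): a driver attaching to PF4 defines
+ * CH_PCI_DEVICE_ID_FUNCTION as 4, so CH_PCI_ID_TABLE_FENTRY(0x5001) below
+ * expands to an entry for Device ID 0x5401, following the 0xVFPP scheme
+ * described after this comment.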
+ */ +#ifdef CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN + +/* + * Some sanity checks ... + */ +#ifndef CH_PCI_DEVICE_ID_FUNCTION +#error CH_PCI_DEVICE_ID_FUNCTION not defined! +#endif +#ifndef CH_PCI_ID_TABLE_ENTRY +#error CH_PCI_ID_TABLE_ENTRY not defined! +#endif +#ifndef CH_PCI_DEVICE_ID_TABLE_DEFINE_END +#error CH_PCI_DEVICE_ID_TABLE_DEFINE_END not defined! +#endif + +/* + * T4 and later ASICs use a PCI Device ID scheme of 0xVFPP where: + * + * V = "4" for T4; "5" for T5, etc. + * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs + * PP = adapter product designation + * + * We use this consistency in order to create the proper PCI Device IDs + * for the specified CH_PCI_DEVICE_ID_FUNCTION. + */ +#ifndef CH_PCI_DEVICE_ID_FUNCTION2 +#define CH_PCI_ID_TABLE_FENTRY(devid) \ + CH_PCI_ID_TABLE_ENTRY((devid) | \ + ((CH_PCI_DEVICE_ID_FUNCTION) << 8)) +#else +#define CH_PCI_ID_TABLE_FENTRY(devid) \ + CH_PCI_ID_TABLE_ENTRY((devid) | \ + ((CH_PCI_DEVICE_ID_FUNCTION) << 8)), \ + CH_PCI_ID_TABLE_ENTRY((devid) | \ + ((CH_PCI_DEVICE_ID_FUNCTION2) << 8)) +#endif + +CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN + /* + * T5 adapters: + */ + CH_PCI_ID_TABLE_FENTRY(0x5000), /* T580-dbg */ + CH_PCI_ID_TABLE_FENTRY(0x5001), /* T520-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5002), /* T522-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5003), /* T540-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5004), /* T520-bch */ + CH_PCI_ID_TABLE_FENTRY(0x5005), /* T540-bch */ + CH_PCI_ID_TABLE_FENTRY(0x5006), /* T540-ch */ + CH_PCI_ID_TABLE_FENTRY(0x5007), /* T520-so */ + CH_PCI_ID_TABLE_FENTRY(0x5008), /* T520-cx */ + CH_PCI_ID_TABLE_FENTRY(0x5009), /* T520-bt */ + CH_PCI_ID_TABLE_FENTRY(0x500a), /* T504-bt */ +#ifdef CH_PCI_DEVICE_ID_BYPASS_SUPPORTED + CH_PCI_ID_TABLE_FENTRY(0x500b), /* B520-sr */ + CH_PCI_ID_TABLE_FENTRY(0x500c), /* B504-bt */ +#endif + CH_PCI_ID_TABLE_FENTRY(0x500d), /* T580-cr */ + CH_PCI_ID_TABLE_FENTRY(0x500e), /* T540-LP-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5010), /* T580-LP-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5011), /* T520-LL-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5012), /* T560-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */ + CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */ + CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */ + CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5083), /* Custom T540-LP-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5084), /* Custom T580-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5085), /* Custom 3x T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5086), /* Custom 2x T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5089), /* Custom T520-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */ +CH_PCI_DEVICE_ID_TABLE_DEFINE_END; + +#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */ + +#endif /* __T4_PCI_ID_TBL_H__ */ diff --git a/drivers/net/cxgbe/base/t4_regs.h b/drivers/net/cxgbe/base/t4_regs.h new file mode 100644 index 00000000..9057e409 --- /dev/null +++ b/drivers/net/cxgbe/base/t4_regs.h @@ -0,0 +1,795 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2015 Chelsio Communications. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Chelsio Communications nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#define MYPF_BASE 0x1b000 +#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr)) + +#define PF0_BASE 0x1e000 +#define PF0_REG(reg_addr) (PF0_BASE + (reg_addr)) + +#define PF_STRIDE 0x400 +#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE) +#define PF_REG(idx, reg) (PF_BASE(idx) + (reg)) + +#define MYPORT_BASE 0x1c000 +#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr)) + +#define PORT0_BASE 0x20000 +#define PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr)) + +#define PORT_STRIDE 0x2000 +#define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE) +#define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg)) + +#define PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8) +#define NUM_PCIE_MEM_ACCESS_INSTANCES 8 + +#define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) +#define NUM_PCIE_FW_INSTANCES 8 + +#define T5_MYPORT_BASE 0x2c000 +#define T5_MYPORT_REG(reg_addr) (T5_MYPORT_BASE + (reg_addr)) + +#define T5_PORT0_BASE 0x30000 +#define T5_PORT0_REG(reg_addr) (T5_PORT0_BASE + (reg_addr)) + +#define T5_PORT_STRIDE 0x4000 +#define T5_PORT_BASE(idx) (T5_PORT0_BASE + (idx) * T5_PORT_STRIDE) +#define T5_PORT_REG(idx, reg) (T5_PORT_BASE(idx) + (reg)) + +#define MPS_T5_CLS_SRAM_L(idx) (A_MPS_T5_CLS_SRAM_L + (idx) * 8) +#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512 + +#define MPS_T5_CLS_SRAM_H(idx) (A_MPS_T5_CLS_SRAM_H + (idx) * 8) +#define NUM_MPS_T5_CLS_SRAM_H_INSTANCES 512 + +/* registers for module SGE */ +#define SGE_BASE_ADDR 0x1000 + +#define A_SGE_PF_KDOORBELL 0x0 + +#define S_QID 15 +#define M_QID 0x1ffffU +#define V_QID(x) ((x) << S_QID) +#define G_QID(x) (((x) >> S_QID) & M_QID) + +#define S_DBPRIO 14 +#define V_DBPRIO(x) ((x) << S_DBPRIO) +#define F_DBPRIO V_DBPRIO(1U) + +#define S_PIDX 0 +#define M_PIDX 0x3fffU +#define V_PIDX(x) ((x) << S_PIDX) +#define G_PIDX(x) (((x) >> S_PIDX) & M_PIDX) + +#define S_DBTYPE 13 +#define V_DBTYPE(x) ((x) << S_DBTYPE) +#define F_DBTYPE V_DBTYPE(1U) + +#define S_PIDX_T5 0 +#define M_PIDX_T5 0x1fffU +#define V_PIDX_T5(x) ((x) << S_PIDX_T5) +#define 
G_PIDX_T5(x) (((x) >> S_PIDX_T5) & M_PIDX_T5) + +#define A_SGE_PF_GTS 0x4 + +#define S_INGRESSQID 16 +#define M_INGRESSQID 0xffffU +#define V_INGRESSQID(x) ((x) << S_INGRESSQID) +#define G_INGRESSQID(x) (((x) >> S_INGRESSQID) & M_INGRESSQID) + +#define S_SEINTARM 12 +#define V_SEINTARM(x) ((x) << S_SEINTARM) +#define F_SEINTARM V_SEINTARM(1U) + +#define S_CIDXINC 0 +#define M_CIDXINC 0xfffU +#define V_CIDXINC(x) ((x) << S_CIDXINC) +#define G_CIDXINC(x) (((x) >> S_CIDXINC) & M_CIDXINC) + +#define A_SGE_CONTROL 0x1008 + +#define S_RXPKTCPLMODE 18 +#define V_RXPKTCPLMODE(x) ((x) << S_RXPKTCPLMODE) +#define F_RXPKTCPLMODE V_RXPKTCPLMODE(1U) + +#define S_EGRSTATUSPAGESIZE 17 +#define V_EGRSTATUSPAGESIZE(x) ((x) << S_EGRSTATUSPAGESIZE) +#define F_EGRSTATUSPAGESIZE V_EGRSTATUSPAGESIZE(1U) + +#define S_PKTSHIFT 10 +#define M_PKTSHIFT 0x7U +#define V_PKTSHIFT(x) ((x) << S_PKTSHIFT) +#define G_PKTSHIFT(x) (((x) >> S_PKTSHIFT) & M_PKTSHIFT) + +#define S_INGPADBOUNDARY 4 +#define M_INGPADBOUNDARY 0x7U +#define V_INGPADBOUNDARY(x) ((x) << S_INGPADBOUNDARY) +#define G_INGPADBOUNDARY(x) (((x) >> S_INGPADBOUNDARY) & M_INGPADBOUNDARY) + +#define A_SGE_HOST_PAGE_SIZE 0x100c + +#define S_HOSTPAGESIZEPF7 28 +#define M_HOSTPAGESIZEPF7 0xfU +#define V_HOSTPAGESIZEPF7(x) ((x) << S_HOSTPAGESIZEPF7) +#define G_HOSTPAGESIZEPF7(x) (((x) >> S_HOSTPAGESIZEPF7) & M_HOSTPAGESIZEPF7) + +#define S_HOSTPAGESIZEPF6 24 +#define M_HOSTPAGESIZEPF6 0xfU +#define V_HOSTPAGESIZEPF6(x) ((x) << S_HOSTPAGESIZEPF6) +#define G_HOSTPAGESIZEPF6(x) (((x) >> S_HOSTPAGESIZEPF6) & M_HOSTPAGESIZEPF6) + +#define S_HOSTPAGESIZEPF5 20 +#define M_HOSTPAGESIZEPF5 0xfU +#define V_HOSTPAGESIZEPF5(x) ((x) << S_HOSTPAGESIZEPF5) +#define G_HOSTPAGESIZEPF5(x) (((x) >> S_HOSTPAGESIZEPF5) & M_HOSTPAGESIZEPF5) + +#define S_HOSTPAGESIZEPF4 16 +#define M_HOSTPAGESIZEPF4 0xfU +#define V_HOSTPAGESIZEPF4(x) ((x) << S_HOSTPAGESIZEPF4) +#define G_HOSTPAGESIZEPF4(x) (((x) >> S_HOSTPAGESIZEPF4) & M_HOSTPAGESIZEPF4) + +#define S_HOSTPAGESIZEPF3 12 +#define M_HOSTPAGESIZEPF3 0xfU +#define V_HOSTPAGESIZEPF3(x) ((x) << S_HOSTPAGESIZEPF3) +#define G_HOSTPAGESIZEPF3(x) (((x) >> S_HOSTPAGESIZEPF3) & M_HOSTPAGESIZEPF3) + +#define S_HOSTPAGESIZEPF2 8 +#define M_HOSTPAGESIZEPF2 0xfU +#define V_HOSTPAGESIZEPF2(x) ((x) << S_HOSTPAGESIZEPF2) +#define G_HOSTPAGESIZEPF2(x) (((x) >> S_HOSTPAGESIZEPF2) & M_HOSTPAGESIZEPF2) + +#define S_HOSTPAGESIZEPF1 4 +#define M_HOSTPAGESIZEPF1 0xfU +#define V_HOSTPAGESIZEPF1(x) ((x) << S_HOSTPAGESIZEPF1) +#define G_HOSTPAGESIZEPF1(x) (((x) >> S_HOSTPAGESIZEPF1) & M_HOSTPAGESIZEPF1) + +#define S_HOSTPAGESIZEPF0 0 +#define M_HOSTPAGESIZEPF0 0xfU +#define V_HOSTPAGESIZEPF0(x) ((x) << S_HOSTPAGESIZEPF0) +#define G_HOSTPAGESIZEPF0(x) (((x) >> S_HOSTPAGESIZEPF0) & M_HOSTPAGESIZEPF0) + +#define A_SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010 + +#define S_QUEUESPERPAGEPF1 4 +#define M_QUEUESPERPAGEPF1 0xfU +#define V_QUEUESPERPAGEPF1(x) ((x) << S_QUEUESPERPAGEPF1) +#define G_QUEUESPERPAGEPF1(x) (((x) >> S_QUEUESPERPAGEPF1) & M_QUEUESPERPAGEPF1) + +#define S_QUEUESPERPAGEPF0 0 +#define M_QUEUESPERPAGEPF0 0xfU +#define V_QUEUESPERPAGEPF0(x) ((x) << S_QUEUESPERPAGEPF0) +#define G_QUEUESPERPAGEPF0(x) (((x) >> S_QUEUESPERPAGEPF0) & M_QUEUESPERPAGEPF0) + +#define S_ERR_CPL_EXCEED_IQE_SIZE 22 +#define V_ERR_CPL_EXCEED_IQE_SIZE(x) ((x) << S_ERR_CPL_EXCEED_IQE_SIZE) +#define F_ERR_CPL_EXCEED_IQE_SIZE V_ERR_CPL_EXCEED_IQE_SIZE(1U) + +#define S_ERR_INVALID_CIDX_INC 21 +#define V_ERR_INVALID_CIDX_INC(x) ((x) << S_ERR_INVALID_CIDX_INC) +#define F_ERR_INVALID_CIDX_INC 
V_ERR_INVALID_CIDX_INC(1U) + +#define S_ERR_CPL_OPCODE_0 19 +#define V_ERR_CPL_OPCODE_0(x) ((x) << S_ERR_CPL_OPCODE_0) +#define F_ERR_CPL_OPCODE_0 V_ERR_CPL_OPCODE_0(1U) + +#define S_ERR_DROPPED_DB 18 +#define V_ERR_DROPPED_DB(x) ((x) << S_ERR_DROPPED_DB) +#define F_ERR_DROPPED_DB V_ERR_DROPPED_DB(1U) + +#define S_ERR_DATA_CPL_ON_HIGH_QID1 17 +#define V_ERR_DATA_CPL_ON_HIGH_QID1(x) ((x) << S_ERR_DATA_CPL_ON_HIGH_QID1) +#define F_ERR_DATA_CPL_ON_HIGH_QID1 V_ERR_DATA_CPL_ON_HIGH_QID1(1U) + +#define S_ERR_DATA_CPL_ON_HIGH_QID0 16 +#define V_ERR_DATA_CPL_ON_HIGH_QID0(x) ((x) << S_ERR_DATA_CPL_ON_HIGH_QID0) +#define F_ERR_DATA_CPL_ON_HIGH_QID0 V_ERR_DATA_CPL_ON_HIGH_QID0(1U) + +#define S_ERR_BAD_DB_PIDX3 15 +#define V_ERR_BAD_DB_PIDX3(x) ((x) << S_ERR_BAD_DB_PIDX3) +#define F_ERR_BAD_DB_PIDX3 V_ERR_BAD_DB_PIDX3(1U) + +#define S_ERR_BAD_DB_PIDX2 14 +#define V_ERR_BAD_DB_PIDX2(x) ((x) << S_ERR_BAD_DB_PIDX2) +#define F_ERR_BAD_DB_PIDX2 V_ERR_BAD_DB_PIDX2(1U) + +#define S_ERR_BAD_DB_PIDX1 13 +#define V_ERR_BAD_DB_PIDX1(x) ((x) << S_ERR_BAD_DB_PIDX1) +#define F_ERR_BAD_DB_PIDX1 V_ERR_BAD_DB_PIDX1(1U) + +#define S_ERR_BAD_DB_PIDX0 12 +#define V_ERR_BAD_DB_PIDX0(x) ((x) << S_ERR_BAD_DB_PIDX0) +#define F_ERR_BAD_DB_PIDX0 V_ERR_BAD_DB_PIDX0(1U) + +#define S_ERR_ING_PCIE_CHAN 11 +#define V_ERR_ING_PCIE_CHAN(x) ((x) << S_ERR_ING_PCIE_CHAN) +#define F_ERR_ING_PCIE_CHAN V_ERR_ING_PCIE_CHAN(1U) + +#define S_ERR_ING_CTXT_PRIO 10 +#define V_ERR_ING_CTXT_PRIO(x) ((x) << S_ERR_ING_CTXT_PRIO) +#define F_ERR_ING_CTXT_PRIO V_ERR_ING_CTXT_PRIO(1U) + +#define S_ERR_EGR_CTXT_PRIO 9 +#define V_ERR_EGR_CTXT_PRIO(x) ((x) << S_ERR_EGR_CTXT_PRIO) +#define F_ERR_EGR_CTXT_PRIO V_ERR_EGR_CTXT_PRIO(1U) + +#define S_DBFIFO_HP_INT 8 +#define V_DBFIFO_HP_INT(x) ((x) << S_DBFIFO_HP_INT) +#define F_DBFIFO_HP_INT V_DBFIFO_HP_INT(1U) + +#define S_DBFIFO_LP_INT 7 +#define V_DBFIFO_LP_INT(x) ((x) << S_DBFIFO_LP_INT) +#define F_DBFIFO_LP_INT V_DBFIFO_LP_INT(1U) + +#define S_INGRESS_SIZE_ERR 5 +#define V_INGRESS_SIZE_ERR(x) ((x) << S_INGRESS_SIZE_ERR) +#define F_INGRESS_SIZE_ERR V_INGRESS_SIZE_ERR(1U) + +#define S_EGRESS_SIZE_ERR 4 +#define V_EGRESS_SIZE_ERR(x) ((x) << S_EGRESS_SIZE_ERR) +#define F_EGRESS_SIZE_ERR V_EGRESS_SIZE_ERR(1U) + +#define A_SGE_INT_ENABLE3 0x1040 + +#define A_SGE_FL_BUFFER_SIZE0 0x1044 +#define A_SGE_FL_BUFFER_SIZE1 0x1048 +#define A_SGE_FL_BUFFER_SIZE2 0x104c +#define A_SGE_FL_BUFFER_SIZE3 0x1050 + +#define A_SGE_FLM_CFG 0x1090 + +#define S_CREDITCNT 4 +#define M_CREDITCNT 0x3U +#define V_CREDITCNT(x) ((x) << S_CREDITCNT) +#define G_CREDITCNT(x) (((x) >> S_CREDITCNT) & M_CREDITCNT) + +#define S_CREDITCNTPACKING 2 +#define M_CREDITCNTPACKING 0x3U +#define V_CREDITCNTPACKING(x) ((x) << S_CREDITCNTPACKING) +#define G_CREDITCNTPACKING(x) (((x) >> S_CREDITCNTPACKING) & M_CREDITCNTPACKING) + +#define A_SGE_CONM_CTRL 0x1094 + +#define S_EGRTHRESHOLD 8 +#define M_EGRTHRESHOLD 0x3fU +#define V_EGRTHRESHOLD(x) ((x) << S_EGRTHRESHOLD) +#define G_EGRTHRESHOLD(x) (((x) >> S_EGRTHRESHOLD) & M_EGRTHRESHOLD) + +#define S_EGRTHRESHOLDPACKING 14 +#define M_EGRTHRESHOLDPACKING 0x3fU +#define V_EGRTHRESHOLDPACKING(x) ((x) << S_EGRTHRESHOLDPACKING) +#define G_EGRTHRESHOLDPACKING(x) (((x) >> S_EGRTHRESHOLDPACKING) & \ + M_EGRTHRESHOLDPACKING) + +#define S_INGTHRESHOLD 2 +#define M_INGTHRESHOLD 0x3fU +#define V_INGTHRESHOLD(x) ((x) << S_INGTHRESHOLD) +#define G_INGTHRESHOLD(x) (((x) >> S_INGTHRESHOLD) & M_INGTHRESHOLD) + +#define A_SGE_INGRESS_RX_THRESHOLD 0x10a0 + +#define S_THRESHOLD_0 24 +#define M_THRESHOLD_0 0x3fU +#define 
V_THRESHOLD_0(x) ((x) << S_THRESHOLD_0) +#define G_THRESHOLD_0(x) (((x) >> S_THRESHOLD_0) & M_THRESHOLD_0) + +#define S_THRESHOLD_1 16 +#define M_THRESHOLD_1 0x3fU +#define V_THRESHOLD_1(x) ((x) << S_THRESHOLD_1) +#define G_THRESHOLD_1(x) (((x) >> S_THRESHOLD_1) & M_THRESHOLD_1) + +#define S_THRESHOLD_2 8 +#define M_THRESHOLD_2 0x3fU +#define V_THRESHOLD_2(x) ((x) << S_THRESHOLD_2) +#define G_THRESHOLD_2(x) (((x) >> S_THRESHOLD_2) & M_THRESHOLD_2) + +#define S_THRESHOLD_3 0 +#define M_THRESHOLD_3 0x3fU +#define V_THRESHOLD_3(x) ((x) << S_THRESHOLD_3) +#define G_THRESHOLD_3(x) (((x) >> S_THRESHOLD_3) & M_THRESHOLD_3) + +#define A_SGE_TIMER_VALUE_0_AND_1 0x10b8 + +#define S_TIMERVALUE0 16 +#define M_TIMERVALUE0 0xffffU +#define V_TIMERVALUE0(x) ((x) << S_TIMERVALUE0) +#define G_TIMERVALUE0(x) (((x) >> S_TIMERVALUE0) & M_TIMERVALUE0) + +#define S_TIMERVALUE1 0 +#define M_TIMERVALUE1 0xffffU +#define V_TIMERVALUE1(x) ((x) << S_TIMERVALUE1) +#define G_TIMERVALUE1(x) (((x) >> S_TIMERVALUE1) & M_TIMERVALUE1) + +#define A_SGE_TIMER_VALUE_2_AND_3 0x10bc + +#define S_TIMERVALUE2 16 +#define M_TIMERVALUE2 0xffffU +#define V_TIMERVALUE2(x) ((x) << S_TIMERVALUE2) +#define G_TIMERVALUE2(x) (((x) >> S_TIMERVALUE2) & M_TIMERVALUE2) + +#define S_TIMERVALUE3 0 +#define M_TIMERVALUE3 0xffffU +#define V_TIMERVALUE3(x) ((x) << S_TIMERVALUE3) +#define G_TIMERVALUE3(x) (((x) >> S_TIMERVALUE3) & M_TIMERVALUE3) + +#define A_SGE_TIMER_VALUE_4_AND_5 0x10c0 + +#define S_TIMERVALUE4 16 +#define M_TIMERVALUE4 0xffffU +#define V_TIMERVALUE4(x) ((x) << S_TIMERVALUE4) +#define G_TIMERVALUE4(x) (((x) >> S_TIMERVALUE4) & M_TIMERVALUE4) + +#define S_TIMERVALUE5 0 +#define M_TIMERVALUE5 0xffffU +#define V_TIMERVALUE5(x) ((x) << S_TIMERVALUE5) +#define G_TIMERVALUE5(x) (((x) >> S_TIMERVALUE5) & M_TIMERVALUE5) + +#define A_SGE_DEBUG_INDEX 0x10cc +#define A_SGE_DEBUG_DATA_HIGH 0x10d0 +#define A_SGE_DEBUG_DATA_LOW 0x10d4 +#define A_SGE_STAT_CFG 0x10ec + +#define S_STATMODE 2 +#define M_STATMODE 0x3U +#define V_STATMODE(x) ((x) << S_STATMODE) +#define G_STATMODE(x) (((x) >> S_STATMODE) & M_STATMODE) + +#define S_STATSOURCE_T5 9 +#define M_STATSOURCE_T5 0xfU +#define V_STATSOURCE_T5(x) ((x) << S_STATSOURCE_T5) +#define G_STATSOURCE_T5(x) (((x) >> S_STATSOURCE_T5) & M_STATSOURCE_T5) + +#define A_SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 + +#define A_SGE_CONTROL2 0x1124 + +#define S_IDMAARBROUNDROBIN 19 +#define V_IDMAARBROUNDROBIN(x) ((x) << S_IDMAARBROUNDROBIN) +#define F_IDMAARBROUNDROBIN V_IDMAARBROUNDROBIN(1U) + +#define S_INGPACKBOUNDARY 16 +#define M_INGPACKBOUNDARY 0x7U +#define V_INGPACKBOUNDARY(x) ((x) << S_INGPACKBOUNDARY) +#define G_INGPACKBOUNDARY(x) (((x) >> S_INGPACKBOUNDARY) & M_INGPACKBOUNDARY) + +#define S_BUSY 31 +#define V_BUSY(x) ((x) << S_BUSY) +#define F_BUSY V_BUSY(1U) + +#define A_SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8 +#define A_SGE_DEBUG_DATA_LOW_INDEX_2 0x12c8 +#define A_SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc + +/* registers for module PCIE */ +#define PCIE_BASE_ADDR 0x3000 + +#define A_PCIE_MEM_ACCESS_BASE_WIN 0x3068 + +#define S_PCIEOFST 10 +#define M_PCIEOFST 0x3fffffU +#define V_PCIEOFST(x) ((x) << S_PCIEOFST) +#define G_PCIEOFST(x) (((x) >> S_PCIEOFST) & M_PCIEOFST) + +#define S_BIR 8 +#define M_BIR 0x3U +#define V_BIR(x) ((x) << S_BIR) +#define G_BIR(x) (((x) >> S_BIR) & M_BIR) + +#define S_WINDOW 0 +#define M_WINDOW 0xffU +#define V_WINDOW(x) ((x) << S_WINDOW) +#define G_WINDOW(x) (((x) >> S_WINDOW) & M_WINDOW) + +#define A_PCIE_MEM_ACCESS_OFFSET 0x306c + +#define S_PFNUM 0 +#define M_PFNUM 0x7U +#define 
V_PFNUM(x) ((x) << S_PFNUM) +#define G_PFNUM(x) (((x) >> S_PFNUM) & M_PFNUM) + +#define A_PCIE_FW 0x30b8 +#define A_PCIE_FW_PF 0x30bc + +/* registers for module CIM */ +#define CIM_BASE_ADDR 0x7b00 + +#define A_CIM_PF_MAILBOX_DATA 0x240 +#define A_CIM_PF_MAILBOX_CTRL 0x280 + +#define S_MBMSGVALID 3 +#define V_MBMSGVALID(x) ((x) << S_MBMSGVALID) +#define F_MBMSGVALID V_MBMSGVALID(1U) + +#define S_MBOWNER 0 +#define M_MBOWNER 0x3U +#define V_MBOWNER(x) ((x) << S_MBOWNER) +#define G_MBOWNER(x) (((x) >> S_MBOWNER) & M_MBOWNER) + +#define A_CIM_PF_MAILBOX_CTRL_SHADOW_COPY 0x290 +#define A_CIM_BOOT_CFG 0x7b00 + +#define S_UPCRST 0 +#define V_UPCRST(x) ((x) << S_UPCRST) +#define F_UPCRST V_UPCRST(1U) + +/* registers for module TP */ +#define TP_BASE_ADDR 0x7d00 + +#define A_TP_TIMER_RESOLUTION 0x7d90 + +#define S_TIMERRESOLUTION 16 +#define M_TIMERRESOLUTION 0xffU +#define V_TIMERRESOLUTION(x) ((x) << S_TIMERRESOLUTION) +#define G_TIMERRESOLUTION(x) (((x) >> S_TIMERRESOLUTION) & M_TIMERRESOLUTION) + +#define S_DELAYEDACKRESOLUTION 0 +#define M_DELAYEDACKRESOLUTION 0xffU +#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION) +#define G_DELAYEDACKRESOLUTION(x) (((x) >> S_DELAYEDACKRESOLUTION) & \ + M_DELAYEDACKRESOLUTION) + +#define A_TP_CCTRL_TABLE 0x7ddc + +#define A_TP_MTU_TABLE 0x7de4 + +#define S_MTUINDEX 24 +#define M_MTUINDEX 0xffU +#define V_MTUINDEX(x) ((x) << S_MTUINDEX) +#define G_MTUINDEX(x) (((x) >> S_MTUINDEX) & M_MTUINDEX) + +#define S_MTUWIDTH 16 +#define M_MTUWIDTH 0xfU +#define V_MTUWIDTH(x) ((x) << S_MTUWIDTH) +#define G_MTUWIDTH(x) (((x) >> S_MTUWIDTH) & M_MTUWIDTH) + +#define S_MTUVALUE 0 +#define M_MTUVALUE 0x3fffU +#define V_MTUVALUE(x) ((x) << S_MTUVALUE) +#define G_MTUVALUE(x) (((x) >> S_MTUVALUE) & M_MTUVALUE) + +#define A_TP_PIO_ADDR 0x7e40 +#define A_TP_PIO_DATA 0x7e44 + +#define A_TP_VLAN_PRI_MAP 0x140 + +#define S_FRAGMENTATION 9 +#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION) +#define F_FRAGMENTATION V_FRAGMENTATION(1U) + +#define S_MPSHITTYPE 8 +#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE) +#define F_MPSHITTYPE V_MPSHITTYPE(1U) + +#define S_MACMATCH 7 +#define V_MACMATCH(x) ((x) << S_MACMATCH) +#define F_MACMATCH V_MACMATCH(1U) + +#define S_ETHERTYPE 6 +#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE) +#define F_ETHERTYPE V_ETHERTYPE(1U) + +#define S_PROTOCOL 5 +#define V_PROTOCOL(x) ((x) << S_PROTOCOL) +#define F_PROTOCOL V_PROTOCOL(1U) + +#define S_TOS 4 +#define V_TOS(x) ((x) << S_TOS) +#define F_TOS V_TOS(1U) + +#define S_VLAN 3 +#define V_VLAN(x) ((x) << S_VLAN) +#define F_VLAN V_VLAN(1U) + +#define S_VNIC_ID 2 +#define V_VNIC_ID(x) ((x) << S_VNIC_ID) +#define F_VNIC_ID V_VNIC_ID(1U) + +#define S_PORT 1 +#define V_PORT(x) ((x) << S_PORT) +#define F_PORT V_PORT(1U) + +#define S_FCOE 0 +#define V_FCOE(x) ((x) << S_FCOE) +#define F_FCOE V_FCOE(1U) + +#define A_TP_INGRESS_CONFIG 0x141 + +#define S_VNIC 11 +#define V_VNIC(x) ((x) << S_VNIC) +#define F_VNIC V_VNIC(1U) + +#define S_CSUM_HAS_PSEUDO_HDR 10 +#define V_CSUM_HAS_PSEUDO_HDR(x) ((x) << S_CSUM_HAS_PSEUDO_HDR) +#define F_CSUM_HAS_PSEUDO_HDR V_CSUM_HAS_PSEUDO_HDR(1U) + +/* registers for module MPS */ +#define MPS_BASE_ADDR 0x9000 + +#define S_REPLICATE 11 +#define V_REPLICATE(x) ((x) << S_REPLICATE) +#define F_REPLICATE V_REPLICATE(1U) + +#define S_PF 8 +#define M_PF 0x7U +#define V_PF(x) ((x) << S_PF) +#define G_PF(x) (((x) >> S_PF) & M_PF) + +#define S_VF_VALID 7 +#define V_VF_VALID(x) ((x) << S_VF_VALID) +#define F_VF_VALID V_VF_VALID(1U) + +#define S_VF 0 +#define M_VF 0x7fU +#define V_VF(x) 
((x) << S_VF) +#define G_VF(x) (((x) >> S_VF) & M_VF) + +#define A_MPS_PORT_STAT_TX_PORT_BYTES_L 0x400 +#define A_MPS_PORT_STAT_TX_PORT_BYTES_H 0x404 +#define A_MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408 +#define A_MPS_PORT_STAT_TX_PORT_FRAMES_H 0x40c +#define A_MPS_PORT_STAT_TX_PORT_BCAST_L 0x410 +#define A_MPS_PORT_STAT_TX_PORT_BCAST_H 0x414 +#define A_MPS_PORT_STAT_TX_PORT_MCAST_L 0x418 +#define A_MPS_PORT_STAT_TX_PORT_MCAST_H 0x41c +#define A_MPS_PORT_STAT_TX_PORT_UCAST_L 0x420 +#define A_MPS_PORT_STAT_TX_PORT_UCAST_H 0x424 +#define A_MPS_PORT_STAT_TX_PORT_ERROR_L 0x428 +#define A_MPS_PORT_STAT_TX_PORT_ERROR_H 0x42c +#define A_MPS_PORT_STAT_TX_PORT_64B_L 0x430 +#define A_MPS_PORT_STAT_TX_PORT_64B_H 0x434 +#define A_MPS_PORT_STAT_TX_PORT_65B_127B_L 0x438 +#define A_MPS_PORT_STAT_TX_PORT_65B_127B_H 0x43c +#define A_MPS_PORT_STAT_TX_PORT_128B_255B_L 0x440 +#define A_MPS_PORT_STAT_TX_PORT_128B_255B_H 0x444 +#define A_MPS_PORT_STAT_TX_PORT_256B_511B_L 0x448 +#define A_MPS_PORT_STAT_TX_PORT_256B_511B_H 0x44c +#define A_MPS_PORT_STAT_TX_PORT_512B_1023B_L 0x450 +#define A_MPS_PORT_STAT_TX_PORT_512B_1023B_H 0x454 +#define A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L 0x458 +#define A_MPS_PORT_STAT_TX_PORT_1024B_1518B_H 0x45c +#define A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L 0x460 +#define A_MPS_PORT_STAT_TX_PORT_1519B_MAX_H 0x464 +#define A_MPS_PORT_STAT_TX_PORT_DROP_L 0x468 +#define A_MPS_PORT_STAT_TX_PORT_DROP_H 0x46c +#define A_MPS_PORT_STAT_TX_PORT_PAUSE_L 0x470 +#define A_MPS_PORT_STAT_TX_PORT_PAUSE_H 0x474 +#define A_MPS_PORT_STAT_TX_PORT_PPP0_L 0x478 +#define A_MPS_PORT_STAT_TX_PORT_PPP0_H 0x47c +#define A_MPS_PORT_STAT_TX_PORT_PPP1_L 0x480 +#define A_MPS_PORT_STAT_TX_PORT_PPP1_H 0x484 +#define A_MPS_PORT_STAT_TX_PORT_PPP2_L 0x488 +#define A_MPS_PORT_STAT_TX_PORT_PPP2_H 0x48c +#define A_MPS_PORT_STAT_TX_PORT_PPP3_L 0x490 +#define A_MPS_PORT_STAT_TX_PORT_PPP3_H 0x494 +#define A_MPS_PORT_STAT_TX_PORT_PPP4_L 0x498 +#define A_MPS_PORT_STAT_TX_PORT_PPP4_H 0x49c +#define A_MPS_PORT_STAT_TX_PORT_PPP5_L 0x4a0 +#define A_MPS_PORT_STAT_TX_PORT_PPP5_H 0x4a4 +#define A_MPS_PORT_STAT_TX_PORT_PPP6_L 0x4a8 +#define A_MPS_PORT_STAT_TX_PORT_PPP6_H 0x4ac +#define A_MPS_PORT_STAT_TX_PORT_PPP7_L 0x4b0 +#define A_MPS_PORT_STAT_TX_PORT_PPP7_H 0x4b4 +#define A_MPS_PORT_STAT_LB_PORT_BYTES_L 0x4c0 +#define A_MPS_PORT_STAT_LB_PORT_BYTES_H 0x4c4 +#define A_MPS_PORT_STAT_LB_PORT_FRAMES_L 0x4c8 +#define A_MPS_PORT_STAT_LB_PORT_FRAMES_H 0x4cc +#define A_MPS_PORT_STAT_LB_PORT_BCAST_L 0x4d0 +#define A_MPS_PORT_STAT_LB_PORT_BCAST_H 0x4d4 +#define A_MPS_PORT_STAT_LB_PORT_MCAST_L 0x4d8 +#define A_MPS_PORT_STAT_LB_PORT_MCAST_H 0x4dc +#define A_MPS_PORT_STAT_LB_PORT_UCAST_L 0x4e0 +#define A_MPS_PORT_STAT_LB_PORT_UCAST_H 0x4e4 +#define A_MPS_PORT_STAT_LB_PORT_ERROR_L 0x4e8 +#define A_MPS_PORT_STAT_LB_PORT_ERROR_H 0x4ec +#define A_MPS_PORT_STAT_LB_PORT_64B_L 0x4f0 +#define A_MPS_PORT_STAT_LB_PORT_64B_H 0x4f4 +#define A_MPS_PORT_STAT_LB_PORT_65B_127B_L 0x4f8 +#define A_MPS_PORT_STAT_LB_PORT_65B_127B_H 0x4fc +#define A_MPS_PORT_STAT_LB_PORT_128B_255B_L 0x500 +#define A_MPS_PORT_STAT_LB_PORT_128B_255B_H 0x504 +#define A_MPS_PORT_STAT_LB_PORT_256B_511B_L 0x508 +#define A_MPS_PORT_STAT_LB_PORT_256B_511B_H 0x50c +#define A_MPS_PORT_STAT_LB_PORT_512B_1023B_L 0x510 +#define A_MPS_PORT_STAT_LB_PORT_512B_1023B_H 0x514 +#define A_MPS_PORT_STAT_LB_PORT_1024B_1518B_L 0x518 +#define A_MPS_PORT_STAT_LB_PORT_1024B_1518B_H 0x51c +#define A_MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520 +#define A_MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524 +#define 
A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528 +#define A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES_L 0x528 +#define A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES_H 0x52c +#define A_MPS_PORT_STAT_RX_PORT_BYTES_L 0x540 +#define A_MPS_PORT_STAT_RX_PORT_BYTES_H 0x544 +#define A_MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548 +#define A_MPS_PORT_STAT_RX_PORT_FRAMES_H 0x54c +#define A_MPS_PORT_STAT_RX_PORT_BCAST_L 0x550 +#define A_MPS_PORT_STAT_RX_PORT_BCAST_H 0x554 +#define A_MPS_PORT_STAT_RX_PORT_MCAST_L 0x558 +#define A_MPS_PORT_STAT_RX_PORT_MCAST_H 0x55c +#define A_MPS_PORT_STAT_RX_PORT_UCAST_L 0x560 +#define A_MPS_PORT_STAT_RX_PORT_UCAST_H 0x564 +#define A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L 0x568 +#define A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_H 0x56c +#define A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L 0x570 +#define A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_H 0x574 +#define A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L 0x578 +#define A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_H 0x57c +#define A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L 0x580 +#define A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_H 0x584 +#define A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L 0x588 +#define A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_H 0x58c +#define A_MPS_PORT_STAT_RX_PORT_64B_L 0x590 +#define A_MPS_PORT_STAT_RX_PORT_64B_H 0x594 +#define A_MPS_PORT_STAT_RX_PORT_65B_127B_L 0x598 +#define A_MPS_PORT_STAT_RX_PORT_65B_127B_H 0x59c +#define A_MPS_PORT_STAT_RX_PORT_128B_255B_L 0x5a0 +#define A_MPS_PORT_STAT_RX_PORT_128B_255B_H 0x5a4 +#define A_MPS_PORT_STAT_RX_PORT_256B_511B_L 0x5a8 +#define A_MPS_PORT_STAT_RX_PORT_256B_511B_H 0x5ac +#define A_MPS_PORT_STAT_RX_PORT_512B_1023B_L 0x5b0 +#define A_MPS_PORT_STAT_RX_PORT_512B_1023B_H 0x5b4 +#define A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L 0x5b8 +#define A_MPS_PORT_STAT_RX_PORT_1024B_1518B_H 0x5bc +#define A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L 0x5c0 +#define A_MPS_PORT_STAT_RX_PORT_1519B_MAX_H 0x5c4 +#define A_MPS_PORT_STAT_RX_PORT_PAUSE_L 0x5c8 +#define A_MPS_PORT_STAT_RX_PORT_PAUSE_H 0x5cc +#define A_MPS_PORT_STAT_RX_PORT_PPP0_L 0x5d0 +#define A_MPS_PORT_STAT_RX_PORT_PPP0_H 0x5d4 +#define A_MPS_PORT_STAT_RX_PORT_PPP1_L 0x5d8 +#define A_MPS_PORT_STAT_RX_PORT_PPP1_H 0x5dc +#define A_MPS_PORT_STAT_RX_PORT_PPP2_L 0x5e0 +#define A_MPS_PORT_STAT_RX_PORT_PPP2_H 0x5e4 +#define A_MPS_PORT_STAT_RX_PORT_PPP3_L 0x5e8 +#define A_MPS_PORT_STAT_RX_PORT_PPP3_H 0x5ec +#define A_MPS_PORT_STAT_RX_PORT_PPP4_L 0x5f0 +#define A_MPS_PORT_STAT_RX_PORT_PPP4_H 0x5f4 +#define A_MPS_PORT_STAT_RX_PORT_PPP5_L 0x5f8 +#define A_MPS_PORT_STAT_RX_PORT_PPP5_H 0x5fc +#define A_MPS_PORT_STAT_RX_PORT_PPP6_L 0x600 +#define A_MPS_PORT_STAT_RX_PORT_PPP6_H 0x604 +#define A_MPS_PORT_STAT_RX_PORT_PPP7_L 0x608 +#define A_MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c +#define A_MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610 +#define A_MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614 +#define A_MPS_CMN_CTL 0x9000 + +#define S_NUMPORTS 0 +#define M_NUMPORTS 0x3U +#define V_NUMPORTS(x) ((x) << S_NUMPORTS) +#define G_NUMPORTS(x) (((x) >> S_NUMPORTS) & M_NUMPORTS) + +#define A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640 +#define A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644 +#define A_MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648 +#define A_MPS_STAT_RX_BG_1_MAC_DROP_FRAME_H 0x964c +#define A_MPS_STAT_RX_BG_2_MAC_DROP_FRAME_L 0x9650 +#define A_MPS_STAT_RX_BG_2_MAC_DROP_FRAME_H 0x9654 +#define A_MPS_STAT_RX_BG_3_MAC_DROP_FRAME_L 0x9658 +#define A_MPS_STAT_RX_BG_3_MAC_DROP_FRAME_H 0x965c +#define A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_L 0x9660 +#define A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_H 0x9664 +#define A_MPS_STAT_RX_BG_1_LB_DROP_FRAME_L 0x9668 +#define 
A_MPS_STAT_RX_BG_1_LB_DROP_FRAME_H 0x966c +#define A_MPS_STAT_RX_BG_2_LB_DROP_FRAME_L 0x9670 +#define A_MPS_STAT_RX_BG_2_LB_DROP_FRAME_H 0x9674 +#define A_MPS_STAT_RX_BG_3_LB_DROP_FRAME_L 0x9678 +#define A_MPS_STAT_RX_BG_3_LB_DROP_FRAME_H 0x967c +#define A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L 0x9680 +#define A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_H 0x9684 +#define A_MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_L 0x9688 +#define A_MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_H 0x968c +#define A_MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_L 0x9690 +#define A_MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_H 0x9694 +#define A_MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_L 0x9698 +#define A_MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_H 0x969c +#define A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L 0x96a0 +#define A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_H 0x96a4 +#define A_MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_L 0x96a8 +#define A_MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_H 0x96ac +#define A_MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_L 0x96b0 +#define A_MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4 +#define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8 +#define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc + +/* registers for module ULP_RX */ +#define ULP_RX_BASE_ADDR 0x19150 + +#define S_HPZ0 0 +#define M_HPZ0 0xfU +#define V_HPZ0(x) ((x) << S_HPZ0) +#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0) + +#define A_ULP_RX_TDDP_PSZ 0x19178 + +/* registers for module SF */ +#define SF_BASE_ADDR 0x193f8 + +#define A_SF_DATA 0x193f8 +#define A_SF_OP 0x193fc + +#define S_SF_LOCK 4 +#define V_SF_LOCK(x) ((x) << S_SF_LOCK) +#define F_SF_LOCK V_SF_LOCK(1U) + +#define S_CONT 3 +#define V_CONT(x) ((x) << S_CONT) +#define F_CONT V_CONT(1U) + +#define S_BYTECNT 1 +#define M_BYTECNT 0x3U +#define V_BYTECNT(x) ((x) << S_BYTECNT) +#define G_BYTECNT(x) (((x) >> S_BYTECNT) & M_BYTECNT) + +#define S_OP 0 +#define V_OP(x) ((x) << S_OP) +#define F_OP V_OP(1U) + +/* registers for module PL */ +#define PL_BASE_ADDR 0x19400 + +#define S_SOURCEPF 8 +#define M_SOURCEPF 0x7U +#define V_SOURCEPF(x) ((x) << S_SOURCEPF) +#define G_SOURCEPF(x) (((x) >> S_SOURCEPF) & M_SOURCEPF) + +#define A_PL_PF_INT_ENABLE 0x3c4 + +#define S_PFSW 3 +#define V_PFSW(x) ((x) << S_PFSW) +#define F_PFSW V_PFSW(1U) + +#define S_PFCIM 1 +#define V_PFCIM(x) ((x) << S_PFCIM) +#define F_PFCIM V_PFCIM(1U) + +#define A_PL_WHOAMI 0x19400 + +#define A_PL_RST 0x19428 + +#define A_PL_INT_MAP0 0x19414 + +#define S_PIORST 1 +#define V_PIORST(x) ((x) << S_PIORST) +#define F_PIORST V_PIORST(1U) + +#define S_PIORSTMODE 0 +#define V_PIORSTMODE(x) ((x) << S_PIORSTMODE) +#define F_PIORSTMODE V_PIORSTMODE(1U) + +#define A_PL_REV 0x1943c + +#define S_REV 0 +#define M_REV 0xfU +#define V_REV(x) ((x) << S_REV) +#define G_REV(x) (((x) >> S_REV) & M_REV) diff --git a/drivers/net/cxgbe/base/t4_regs_values.h b/drivers/net/cxgbe/base/t4_regs_values.h new file mode 100644 index 00000000..d7d3144c --- /dev/null +++ b/drivers/net/cxgbe/base/t4_regs_values.h @@ -0,0 +1,169 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2015 Chelsio Communications. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Chelsio Communications nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __T4_REGS_VALUES_H__
+#define __T4_REGS_VALUES_H__
+
+/*
+ * This file contains definitions for various T4 register value hardware
+ * constants. The types of values encoded here are predominantly those for
+ * register fields which control "modal" behavior. For the most part, we do
+ * not include definitions for register fields which are simple numeric
+ * metrics, etc.
+ */
+
+/*
+ * SGE definitions.
+ * ================
+ */
+
+/*
+ * SGE register field values.
+ */
+
+/* CONTROL register */
+#define X_RXPKTCPLMODE_SPLIT 1
+#define X_INGPCIEBOUNDARY_32B 0
+#define X_INGPADBOUNDARY_SHIFT 5
+
+/* CONTROL2 register */
+#define X_INGPACKBOUNDARY_SHIFT 5
+#define X_INGPACKBOUNDARY_16B 0
+
+/* GTS register */
+#define X_TIMERREG_RESTART_COUNTER 6
+#define X_TIMERREG_UPDATE_CIDX 7
+
+/*
+ * Egress Context field values
+ */
+#define X_FETCHBURSTMIN_64B 2
+#define X_FETCHBURSTMIN_128B 3
+#define X_FETCHBURSTMAX_256B 2
+#define X_FETCHBURSTMAX_512B 3
+
+#define X_HOSTFCMODE_NONE 0
+
+/*
+ * Ingress Context field values
+ */
+#define X_UPDATEDELIVERY_INTERRUPT 1
+
+#define X_RSPD_TYPE_FLBUF 0
+#define X_RSPD_TYPE_CPL 1
+
+/*
+ * Context field definitions. This is by no means a complete list of SGE
+ * Context fields. In the vast majority of cases the firmware initializes
+ * things the way they need to be set up. But in a few small cases, we need
+ * to compute new values and ship them off to the firmware to be applied to
+ * the SGE Contexts ...
+ */
+
+/*
+ * Congestion Manager Definitions.
+ */
+#define S_CONMCTXT_CNGTPMODE 19
+#define M_CONMCTXT_CNGTPMODE 0x3
+#define V_CONMCTXT_CNGTPMODE(x) ((x) << S_CONMCTXT_CNGTPMODE)
+#define G_CONMCTXT_CNGTPMODE(x) \
+	(((x) >> S_CONMCTXT_CNGTPMODE) & M_CONMCTXT_CNGTPMODE)
+#define S_CONMCTXT_CNGCHMAP 0
+#define M_CONMCTXT_CNGCHMAP 0xffff
+#define V_CONMCTXT_CNGCHMAP(x) ((x) << S_CONMCTXT_CNGCHMAP)
+#define G_CONMCTXT_CNGCHMAP(x) \
+	(((x) >> S_CONMCTXT_CNGCHMAP) & M_CONMCTXT_CNGCHMAP)
+
+#define X_CONMCTXT_CNGTPMODE_QUEUE 1
+#define X_CONMCTXT_CNGTPMODE_CHANNEL 2
+
+/*
+ * T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
+ * The User Doorbells are each 128 bytes in length with a Simple Doorbell at
+ * offsets 8x and a Write Combining single 64-byte Egress Queue Unit
+ * (X_IDXSIZE_UNIT) Gather Buffer interface at offset 64. For Ingress Queues,
+ * we have a Going To Sleep register at offsets 8x+4.
+ * + * As noted above, we have many instances of the Simple Doorbell and Going To + * Sleep registers at offsets 8x and 8x+4, respectively. We want to use a + * non-64-byte aligned offset for the Simple Doorbell in order to attempt to + * avoid buffering of the writes to the Simple Doorbell and we want to use a + * non-contiguous offset for the Going To Sleep writes in order to avoid + * possible combining between them. + */ +#define SGE_UDB_SIZE 128 +#define SGE_UDB_KDOORBELL 8 +#define SGE_UDB_GTS 20 + +/* + * CIM definitions. + * ================ + */ + +/* + * CIM register field values. + */ +#define X_MBOWNER_NONE 0 +#define X_MBOWNER_FW 1 +#define X_MBOWNER_PL 2 + +/* + * PCI-E definitions. + * ================== + */ +#define X_WINDOW_SHIFT 10 +#define X_PCIEOFST_SHIFT 10 + +/* + * TP definitions. + * =============== + */ + +/* + * TP_VLAN_PRI_MAP controls which subset of fields will be present in the + * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP + * selects for a particular field being present. These fields, when present + * in the Compressed Filter Tuple, have the following widths in bits. + */ +#define W_FT_FCOE 1 +#define W_FT_PORT 3 +#define W_FT_VNIC_ID 17 +#define W_FT_VLAN 17 +#define W_FT_TOS 8 +#define W_FT_PROTOCOL 8 +#define W_FT_ETHERTYPE 16 +#define W_FT_MACMATCH 9 +#define W_FT_MPSHITTYPE 3 +#define W_FT_FRAGMENTATION 1 + +#endif /* __T4_REGS_VALUES_H__ */ diff --git a/drivers/net/cxgbe/base/t4fw_interface.h b/drivers/net/cxgbe/base/t4fw_interface.h new file mode 100644 index 00000000..74f19fe7 --- /dev/null +++ b/drivers/net/cxgbe/base/t4fw_interface.h @@ -0,0 +1,1730 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2015 Chelsio Communications. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Chelsio Communications nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef _T4FW_INTERFACE_H_
+#define _T4FW_INTERFACE_H_
+
+/******************************************************************************
+ * R E T U R N   V A L U E S
+ ********************************/
+
+enum fw_retval {
+	FW_SUCCESS = 0,		/* completed successfully */
+	FW_EPERM = 1,		/* operation not permitted */
+	FW_ENOENT = 2,		/* no such file or directory */
+	FW_EIO = 5,		/* input/output error; hw bad */
+	FW_ENOEXEC = 8,		/* exec format error; inv microcode */
+	FW_EAGAIN = 11,		/* try again */
+	FW_ENOMEM = 12,		/* out of memory */
+	FW_EFAULT = 14,		/* bad address; fw bad */
+	FW_EBUSY = 16,		/* resource busy */
+	FW_EEXIST = 17,		/* file exists */
+	FW_ENODEV = 19,		/* no such device */
+	FW_EINVAL = 22,		/* invalid argument */
+	FW_ENOSPC = 28,		/* no space left on device */
+	FW_ENOSYS = 38,		/* functionality not implemented */
+	FW_ENODATA = 61,	/* no data available */
+	FW_EPROTO = 71,		/* protocol error */
+	FW_EADDRINUSE = 98,	/* address already in use */
+	FW_EADDRNOTAVAIL = 99,	/* cannot assign requested address */
+	FW_ENETDOWN = 100,	/* network is down */
+	FW_ENETUNREACH = 101,	/* network is unreachable */
+	FW_ENOBUFS = 105,	/* no buffer space available */
+	FW_ETIMEDOUT = 110,	/* timeout */
+	FW_EINPROGRESS = 115,	/* fw internal */
+};
+
+/******************************************************************************
+ * M E M O R Y   T Y P E s
+ ******************************/
+
+enum fw_memtype {
+	FW_MEMTYPE_EDC0 = 0x0,
+	FW_MEMTYPE_EDC1 = 0x1,
+	FW_MEMTYPE_EXTMEM = 0x2,
+	FW_MEMTYPE_FLASH = 0x4,
+	FW_MEMTYPE_INTERNAL = 0x5,
+	FW_MEMTYPE_EXTMEM1 = 0x6,
+};
+
+/******************************************************************************
+ * W O R K   R E Q U E S T s
+ ********************************/
+
+enum fw_wr_opcodes {
+	FW_ETH_TX_PKT_WR = 0x08,
+	FW_ETH_TX_PKTS_WR = 0x09,
+};
+
+/*
+ * Generic work request header flit0
+ */
+struct fw_wr_hdr {
+	__be32 hi;
+	__be32 lo;
+};
+
+/* work request opcode (hi)
+ */
+#define S_FW_WR_OP 24
+#define M_FW_WR_OP 0xff
+#define V_FW_WR_OP(x) ((x) << S_FW_WR_OP)
+#define G_FW_WR_OP(x) (((x) >> S_FW_WR_OP) & M_FW_WR_OP)
+
+/* work request immediate data length (hi)
+ */
+#define S_FW_WR_IMMDLEN 0
+#define M_FW_WR_IMMDLEN 0xff
+#define V_FW_WR_IMMDLEN(x) ((x) << S_FW_WR_IMMDLEN)
+#define G_FW_WR_IMMDLEN(x) \
+	(((x) >> S_FW_WR_IMMDLEN) & M_FW_WR_IMMDLEN)
+
+/* egress queue status update to egress queue status entry (lo)
+ */
+#define S_FW_WR_EQUEQ 30
+#define M_FW_WR_EQUEQ 0x1
+#define V_FW_WR_EQUEQ(x) ((x) << S_FW_WR_EQUEQ)
+#define G_FW_WR_EQUEQ(x) (((x) >> S_FW_WR_EQUEQ) & M_FW_WR_EQUEQ)
+#define F_FW_WR_EQUEQ V_FW_WR_EQUEQ(1U)
+
+/* length in units of 16-bytes (lo)
+ */
+#define S_FW_WR_LEN16 0
+#define M_FW_WR_LEN16 0xff
+#define V_FW_WR_LEN16(x) ((x) << S_FW_WR_LEN16)
+#define G_FW_WR_LEN16(x) (((x) >> S_FW_WR_LEN16) & M_FW_WR_LEN16)
+
+struct fw_eth_tx_pkt_wr {
+	__be32 op_immdlen;
+	__be32 equiq_to_len16;
+	__be64 r3;
+};
+
+#define S_FW_ETH_TX_PKT_WR_IMMDLEN 0
+#define M_FW_ETH_TX_PKT_WR_IMMDLEN 0x1ff
+#define V_FW_ETH_TX_PKT_WR_IMMDLEN(x) ((x) << S_FW_ETH_TX_PKT_WR_IMMDLEN)
+#define G_FW_ETH_TX_PKT_WR_IMMDLEN(x) \
+	(((x) >> S_FW_ETH_TX_PKT_WR_IMMDLEN) & M_FW_ETH_TX_PKT_WR_IMMDLEN)
+
+struct fw_eth_tx_pkts_wr {
+	__be32 op_pkd;
+	__be32 equiq_to_len16;
+	__be32 r3;
+	__be16 plen;
+	__u8 npkt;
+	__u8 type;
+};
+
+/******************************************************************************
+ * C O M M A N D s
+ *********************/
+
+/*
+ * The maximum length of time, in milliseconds, that we expect any firmware
+ * command to take to execute and return a reply to the host. The RESET
+ * and INITIALIZE commands can take a fair amount of time to execute but
+ * most execute in far less time than this maximum. This constant is used
+ * by host software to determine how long to wait for a firmware command
+ * reply before declaring the firmware as dead/unreachable ...
+ */
+#define FW_CMD_MAX_TIMEOUT 10000
+
+/*
+ * If a host driver does a HELLO and discovers that there's already a MASTER
+ * selected, we may have to wait for that MASTER to finish issuing RESET,
+ * configuration and INITIALIZE commands. Also, there's a possibility that
+ * our own HELLO may get lost if it happens right as the MASTER is issuing a
+ * RESET command, so we need to be willing to make a few retries of our HELLO.
+ */
+#define FW_CMD_HELLO_TIMEOUT (3 * FW_CMD_MAX_TIMEOUT)
+#define FW_CMD_HELLO_RETRIES 3
+
+enum fw_cmd_opcodes {
+	FW_RESET_CMD = 0x03,
+	FW_HELLO_CMD = 0x04,
+	FW_BYE_CMD = 0x05,
+	FW_INITIALIZE_CMD = 0x06,
+	FW_CAPS_CONFIG_CMD = 0x07,
+	FW_PARAMS_CMD = 0x08,
+	FW_IQ_CMD = 0x10,
+	FW_EQ_ETH_CMD = 0x12,
+	FW_VI_CMD = 0x14,
+	FW_VI_MAC_CMD = 0x15,
+	FW_VI_RXMODE_CMD = 0x16,
+	FW_VI_ENABLE_CMD = 0x17,
+	FW_PORT_CMD = 0x1b,
+	FW_RSS_IND_TBL_CMD = 0x20,
+	FW_RSS_VI_CONFIG_CMD = 0x23,
+	FW_DEBUG_CMD = 0x81,
+};
+
+/*
+ * Generic command header flit0
+ */
+struct fw_cmd_hdr {
+	__be32 hi;
+	__be32 lo;
+};
+
+#define S_FW_CMD_OP 24
+#define M_FW_CMD_OP 0xff
+#define V_FW_CMD_OP(x) ((x) << S_FW_CMD_OP)
+#define G_FW_CMD_OP(x) (((x) >> S_FW_CMD_OP) & M_FW_CMD_OP)
+
+#define S_FW_CMD_REQUEST 23
+#define M_FW_CMD_REQUEST 0x1
+#define V_FW_CMD_REQUEST(x) ((x) << S_FW_CMD_REQUEST)
+#define G_FW_CMD_REQUEST(x) (((x) >> S_FW_CMD_REQUEST) & M_FW_CMD_REQUEST)
+#define F_FW_CMD_REQUEST V_FW_CMD_REQUEST(1U)
+
+#define S_FW_CMD_READ 22
+#define M_FW_CMD_READ 0x1
+#define V_FW_CMD_READ(x) ((x) << S_FW_CMD_READ)
+#define G_FW_CMD_READ(x) (((x) >> S_FW_CMD_READ) & M_FW_CMD_READ)
+#define F_FW_CMD_READ V_FW_CMD_READ(1U)
+
+#define S_FW_CMD_WRITE 21
+#define M_FW_CMD_WRITE 0x1
+#define V_FW_CMD_WRITE(x) ((x) << S_FW_CMD_WRITE)
+#define G_FW_CMD_WRITE(x) (((x) >> S_FW_CMD_WRITE) & M_FW_CMD_WRITE)
+#define F_FW_CMD_WRITE V_FW_CMD_WRITE(1U)
+
+#define S_FW_CMD_EXEC 20
+#define M_FW_CMD_EXEC 0x1
+#define V_FW_CMD_EXEC(x) ((x) << S_FW_CMD_EXEC)
+#define G_FW_CMD_EXEC(x) (((x) >> S_FW_CMD_EXEC) & M_FW_CMD_EXEC)
+#define F_FW_CMD_EXEC V_FW_CMD_EXEC(1U)
+
+#define S_FW_CMD_RETVAL 8
+#define M_FW_CMD_RETVAL 0xff
+#define V_FW_CMD_RETVAL(x) ((x) << S_FW_CMD_RETVAL)
+#define G_FW_CMD_RETVAL(x) (((x) >> S_FW_CMD_RETVAL) & M_FW_CMD_RETVAL)
+
+#define S_FW_CMD_LEN16 0
+#define M_FW_CMD_LEN16 0xff
+#define V_FW_CMD_LEN16(x) ((x) << S_FW_CMD_LEN16)
+#define G_FW_CMD_LEN16(x) (((x) >> S_FW_CMD_LEN16) & M_FW_CMD_LEN16)
+
+#define FW_LEN16(fw_struct) V_FW_CMD_LEN16(sizeof(fw_struct) / 16)
+
+struct fw_reset_cmd {
+	__be32 op_to_write;
+	__be32 retval_len16;
+	__be32 val;
+	__be32 halt_pkd;
+};
+
+#define S_FW_RESET_CMD_HALT 31
+#define M_FW_RESET_CMD_HALT 0x1
+#define V_FW_RESET_CMD_HALT(x) ((x) << S_FW_RESET_CMD_HALT)
+#define G_FW_RESET_CMD_HALT(x) \
+	(((x) >> S_FW_RESET_CMD_HALT) & M_FW_RESET_CMD_HALT)
+#define F_FW_RESET_CMD_HALT V_FW_RESET_CMD_HALT(1U)
+
+enum {
+	FW_HELLO_CMD_STAGE_OS = 0,
+};
+
+struct fw_hello_cmd {
+	__be32 op_to_write;
+	__be32 retval_len16;
+	__be32 err_to_clearinit;
+	__be32 fwrev;
+};
+
+#define S_FW_HELLO_CMD_ERR 31
+#define M_FW_HELLO_CMD_ERR 0x1
+#define V_FW_HELLO_CMD_ERR(x) ((x) << S_FW_HELLO_CMD_ERR)
+#define 
G_FW_HELLO_CMD_ERR(x) \ + (((x) >> S_FW_HELLO_CMD_ERR) & M_FW_HELLO_CMD_ERR) +#define F_FW_HELLO_CMD_ERR V_FW_HELLO_CMD_ERR(1U) + +#define S_FW_HELLO_CMD_INIT 30 +#define M_FW_HELLO_CMD_INIT 0x1 +#define V_FW_HELLO_CMD_INIT(x) ((x) << S_FW_HELLO_CMD_INIT) +#define G_FW_HELLO_CMD_INIT(x) \ + (((x) >> S_FW_HELLO_CMD_INIT) & M_FW_HELLO_CMD_INIT) +#define F_FW_HELLO_CMD_INIT V_FW_HELLO_CMD_INIT(1U) + +#define S_FW_HELLO_CMD_MASTERDIS 29 +#define M_FW_HELLO_CMD_MASTERDIS 0x1 +#define V_FW_HELLO_CMD_MASTERDIS(x) ((x) << S_FW_HELLO_CMD_MASTERDIS) +#define G_FW_HELLO_CMD_MASTERDIS(x) \ + (((x) >> S_FW_HELLO_CMD_MASTERDIS) & M_FW_HELLO_CMD_MASTERDIS) +#define F_FW_HELLO_CMD_MASTERDIS V_FW_HELLO_CMD_MASTERDIS(1U) + +#define S_FW_HELLO_CMD_MASTERFORCE 28 +#define M_FW_HELLO_CMD_MASTERFORCE 0x1 +#define V_FW_HELLO_CMD_MASTERFORCE(x) ((x) << S_FW_HELLO_CMD_MASTERFORCE) +#define G_FW_HELLO_CMD_MASTERFORCE(x) \ + (((x) >> S_FW_HELLO_CMD_MASTERFORCE) & M_FW_HELLO_CMD_MASTERFORCE) +#define F_FW_HELLO_CMD_MASTERFORCE V_FW_HELLO_CMD_MASTERFORCE(1U) + +#define S_FW_HELLO_CMD_MBMASTER 24 +#define M_FW_HELLO_CMD_MBMASTER 0xf +#define V_FW_HELLO_CMD_MBMASTER(x) ((x) << S_FW_HELLO_CMD_MBMASTER) +#define G_FW_HELLO_CMD_MBMASTER(x) \ + (((x) >> S_FW_HELLO_CMD_MBMASTER) & M_FW_HELLO_CMD_MBMASTER) + +#define S_FW_HELLO_CMD_MBASYNCNOT 20 +#define M_FW_HELLO_CMD_MBASYNCNOT 0x7 +#define V_FW_HELLO_CMD_MBASYNCNOT(x) ((x) << S_FW_HELLO_CMD_MBASYNCNOT) +#define G_FW_HELLO_CMD_MBASYNCNOT(x) \ + (((x) >> S_FW_HELLO_CMD_MBASYNCNOT) & M_FW_HELLO_CMD_MBASYNCNOT) + +#define S_FW_HELLO_CMD_STAGE 17 +#define M_FW_HELLO_CMD_STAGE 0x7 +#define V_FW_HELLO_CMD_STAGE(x) ((x) << S_FW_HELLO_CMD_STAGE) +#define G_FW_HELLO_CMD_STAGE(x) \ + (((x) >> S_FW_HELLO_CMD_STAGE) & M_FW_HELLO_CMD_STAGE) + +#define S_FW_HELLO_CMD_CLEARINIT 16 +#define M_FW_HELLO_CMD_CLEARINIT 0x1 +#define V_FW_HELLO_CMD_CLEARINIT(x) ((x) << S_FW_HELLO_CMD_CLEARINIT) +#define G_FW_HELLO_CMD_CLEARINIT(x) \ + (((x) >> S_FW_HELLO_CMD_CLEARINIT) & M_FW_HELLO_CMD_CLEARINIT) +#define F_FW_HELLO_CMD_CLEARINIT V_FW_HELLO_CMD_CLEARINIT(1U) + +struct fw_bye_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be64 r3; +}; + +struct fw_initialize_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be64 r3; +}; + +enum fw_caps_config_nic { + FW_CAPS_CONFIG_NIC_HASHFILTER = 0x00000020, + FW_CAPS_CONFIG_NIC_ETHOFLD = 0x00000040, +}; + +enum fw_memtype_cf { + FW_MEMTYPE_CF_FLASH = FW_MEMTYPE_FLASH, +}; + +struct fw_caps_config_cmd { + __be32 op_to_write; + __be32 cfvalid_to_len16; + __be32 r2; + __be32 hwmbitmap; + __be16 nbmcaps; + __be16 linkcaps; + __be16 switchcaps; + __be16 r3; + __be16 niccaps; + __be16 toecaps; + __be16 rdmacaps; + __be16 r4; + __be16 iscsicaps; + __be16 fcoecaps; + __be32 cfcsum; + __be32 finiver; + __be32 finicsum; +}; + +#define S_FW_CAPS_CONFIG_CMD_CFVALID 27 +#define M_FW_CAPS_CONFIG_CMD_CFVALID 0x1 +#define V_FW_CAPS_CONFIG_CMD_CFVALID(x) ((x) << S_FW_CAPS_CONFIG_CMD_CFVALID) +#define G_FW_CAPS_CONFIG_CMD_CFVALID(x) \ + (((x) >> S_FW_CAPS_CONFIG_CMD_CFVALID) & M_FW_CAPS_CONFIG_CMD_CFVALID) +#define F_FW_CAPS_CONFIG_CMD_CFVALID V_FW_CAPS_CONFIG_CMD_CFVALID(1U) + +#define S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF 24 +#define M_FW_CAPS_CONFIG_CMD_MEMTYPE_CF 0x7 +#define V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) \ + ((x) << S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF) +#define G_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) \ + (((x) >> S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF) & \ + M_FW_CAPS_CONFIG_CMD_MEMTYPE_CF) + +#define S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF 16 +#define 
M_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF 0xff +#define V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) \ + ((x) << S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF) +#define G_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) \ + (((x) >> S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF) & \ + M_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF) + +/* + * params command mnemonics + */ +enum fw_params_mnem { + FW_PARAMS_MNEM_DEV = 1, /* device params */ + FW_PARAMS_MNEM_PFVF = 2, /* function params */ + FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ +}; + +/* + * device parameters + */ +enum fw_params_param_dev { + FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */ + FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */ + FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17, +}; + +/* + * physical and virtual function parameters + */ +enum fw_params_param_pfvf { + FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31 +}; + +/* + * dma queue parameters + */ +enum fw_params_param_dmaq { + FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01, + FW_PARAMS_PARAM_DMAQ_CONM_CTXT = 0x20, +}; + +#define S_FW_PARAMS_MNEM 24 +#define M_FW_PARAMS_MNEM 0xff +#define V_FW_PARAMS_MNEM(x) ((x) << S_FW_PARAMS_MNEM) +#define G_FW_PARAMS_MNEM(x) \ + (((x) >> S_FW_PARAMS_MNEM) & M_FW_PARAMS_MNEM) + +#define S_FW_PARAMS_PARAM_X 16 +#define M_FW_PARAMS_PARAM_X 0xff +#define V_FW_PARAMS_PARAM_X(x) ((x) << S_FW_PARAMS_PARAM_X) +#define G_FW_PARAMS_PARAM_X(x) \ + (((x) >> S_FW_PARAMS_PARAM_X) & M_FW_PARAMS_PARAM_X) + +#define S_FW_PARAMS_PARAM_Y 8 +#define M_FW_PARAMS_PARAM_Y 0xff +#define V_FW_PARAMS_PARAM_Y(x) ((x) << S_FW_PARAMS_PARAM_Y) +#define G_FW_PARAMS_PARAM_Y(x) \ + (((x) >> S_FW_PARAMS_PARAM_Y) & M_FW_PARAMS_PARAM_Y) + +#define S_FW_PARAMS_PARAM_Z 0 +#define M_FW_PARAMS_PARAM_Z 0xff +#define V_FW_PARAMS_PARAM_Z(x) ((x) << S_FW_PARAMS_PARAM_Z) +#define G_FW_PARAMS_PARAM_Z(x) \ + (((x) >> S_FW_PARAMS_PARAM_Z) & M_FW_PARAMS_PARAM_Z) + +#define S_FW_PARAMS_PARAM_YZ 0 +#define M_FW_PARAMS_PARAM_YZ 0xffff +#define V_FW_PARAMS_PARAM_YZ(x) ((x) << S_FW_PARAMS_PARAM_YZ) +#define G_FW_PARAMS_PARAM_YZ(x) \ + (((x) >> S_FW_PARAMS_PARAM_YZ) & M_FW_PARAMS_PARAM_YZ) + +struct fw_params_cmd { + __be32 op_to_vfn; + __be32 retval_len16; + struct fw_params_param { + __be32 mnem; + __be32 val; + } param[7]; +}; + +#define S_FW_PARAMS_CMD_PFN 8 +#define M_FW_PARAMS_CMD_PFN 0x7 +#define V_FW_PARAMS_CMD_PFN(x) ((x) << S_FW_PARAMS_CMD_PFN) +#define G_FW_PARAMS_CMD_PFN(x) \ + (((x) >> S_FW_PARAMS_CMD_PFN) & M_FW_PARAMS_CMD_PFN) + +#define S_FW_PARAMS_CMD_VFN 0 +#define M_FW_PARAMS_CMD_VFN 0xff +#define V_FW_PARAMS_CMD_VFN(x) ((x) << S_FW_PARAMS_CMD_VFN) +#define G_FW_PARAMS_CMD_VFN(x) \ + (((x) >> S_FW_PARAMS_CMD_VFN) & M_FW_PARAMS_CMD_VFN) + +/* + * ingress queue type; the first 1K ingress queues can have associated 0, + * 1 or 2 free lists and an interrupt, all other ingress queues lack these + * capabilities + */ +enum fw_iq_type { + FW_IQ_TYPE_FL_INT_CAP, +}; + +struct fw_iq_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be16 physiqid; + __be16 iqid; + __be16 fl0id; + __be16 fl1id; + __be32 type_to_iqandstindex; + __be16 iqdroprss_to_iqesize; + __be16 iqsize; + __be64 iqaddr; + __be32 iqns_to_fl0congen; + __be16 fl0dcaen_to_fl0cidxfthresh; + __be16 fl0size; + __be64 fl0addr; + __be32 fl1cngchmap_to_fl1congen; + __be16 fl1dcaen_to_fl1cidxfthresh; + __be16 fl1size; + __be64 fl1addr; +}; + +#define S_FW_IQ_CMD_PFN 8 +#define M_FW_IQ_CMD_PFN 0x7 +#define V_FW_IQ_CMD_PFN(x) ((x) << S_FW_IQ_CMD_PFN) +#define G_FW_IQ_CMD_PFN(x) (((x) >> S_FW_IQ_CMD_PFN) & M_FW_IQ_CMD_PFN) + +#define S_FW_IQ_CMD_VFN 0 
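+/*
+ * Illustrative note (not part of the original header): the S_/M_/V_/G_
+ * macros throughout this file follow one pattern -- S_* is a field's bit
+ * offset, M_* its mask, V_*(x) shifts a value into place and G_*(x)
+ * extracts it. A driver would typically OR the pieces together when
+ * building the first flit of a command; for example, a hypothetical
+ * FW_IQ_CMD might be composed roughly as below, where 'pf' and 'vf' are
+ * placeholder variables and cpu_to_be32() stands in for whatever
+ * host-to-big-endian helper the driver uses:
+ *
+ *	cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) |
+ *				    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+ *				    F_FW_CMD_EXEC |
+ *				    V_FW_IQ_CMD_PFN(pf) |
+ *				    V_FW_IQ_CMD_VFN(vf));
+ */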
+#define M_FW_IQ_CMD_VFN 0xff +#define V_FW_IQ_CMD_VFN(x) ((x) << S_FW_IQ_CMD_VFN) +#define G_FW_IQ_CMD_VFN(x) (((x) >> S_FW_IQ_CMD_VFN) & M_FW_IQ_CMD_VFN) + +#define S_FW_IQ_CMD_ALLOC 31 +#define M_FW_IQ_CMD_ALLOC 0x1 +#define V_FW_IQ_CMD_ALLOC(x) ((x) << S_FW_IQ_CMD_ALLOC) +#define G_FW_IQ_CMD_ALLOC(x) \ + (((x) >> S_FW_IQ_CMD_ALLOC) & M_FW_IQ_CMD_ALLOC) +#define F_FW_IQ_CMD_ALLOC V_FW_IQ_CMD_ALLOC(1U) + +#define S_FW_IQ_CMD_FREE 30 +#define M_FW_IQ_CMD_FREE 0x1 +#define V_FW_IQ_CMD_FREE(x) ((x) << S_FW_IQ_CMD_FREE) +#define G_FW_IQ_CMD_FREE(x) (((x) >> S_FW_IQ_CMD_FREE) & M_FW_IQ_CMD_FREE) +#define F_FW_IQ_CMD_FREE V_FW_IQ_CMD_FREE(1U) + +#define S_FW_IQ_CMD_IQSTART 28 +#define M_FW_IQ_CMD_IQSTART 0x1 +#define V_FW_IQ_CMD_IQSTART(x) ((x) << S_FW_IQ_CMD_IQSTART) +#define G_FW_IQ_CMD_IQSTART(x) \ + (((x) >> S_FW_IQ_CMD_IQSTART) & M_FW_IQ_CMD_IQSTART) +#define F_FW_IQ_CMD_IQSTART V_FW_IQ_CMD_IQSTART(1U) + +#define S_FW_IQ_CMD_IQSTOP 27 +#define M_FW_IQ_CMD_IQSTOP 0x1 +#define V_FW_IQ_CMD_IQSTOP(x) ((x) << S_FW_IQ_CMD_IQSTOP) +#define G_FW_IQ_CMD_IQSTOP(x) \ + (((x) >> S_FW_IQ_CMD_IQSTOP) & M_FW_IQ_CMD_IQSTOP) +#define F_FW_IQ_CMD_IQSTOP V_FW_IQ_CMD_IQSTOP(1U) + +#define S_FW_IQ_CMD_TYPE 29 +#define M_FW_IQ_CMD_TYPE 0x7 +#define V_FW_IQ_CMD_TYPE(x) ((x) << S_FW_IQ_CMD_TYPE) +#define G_FW_IQ_CMD_TYPE(x) (((x) >> S_FW_IQ_CMD_TYPE) & M_FW_IQ_CMD_TYPE) + +#define S_FW_IQ_CMD_IQASYNCH 28 +#define M_FW_IQ_CMD_IQASYNCH 0x1 +#define V_FW_IQ_CMD_IQASYNCH(x) ((x) << S_FW_IQ_CMD_IQASYNCH) +#define G_FW_IQ_CMD_IQASYNCH(x) \ + (((x) >> S_FW_IQ_CMD_IQASYNCH) & M_FW_IQ_CMD_IQASYNCH) +#define F_FW_IQ_CMD_IQASYNCH V_FW_IQ_CMD_IQASYNCH(1U) + +#define S_FW_IQ_CMD_VIID 16 +#define M_FW_IQ_CMD_VIID 0xfff +#define V_FW_IQ_CMD_VIID(x) ((x) << S_FW_IQ_CMD_VIID) +#define G_FW_IQ_CMD_VIID(x) (((x) >> S_FW_IQ_CMD_VIID) & M_FW_IQ_CMD_VIID) + +#define S_FW_IQ_CMD_IQANDST 15 +#define M_FW_IQ_CMD_IQANDST 0x1 +#define V_FW_IQ_CMD_IQANDST(x) ((x) << S_FW_IQ_CMD_IQANDST) +#define G_FW_IQ_CMD_IQANDST(x) \ + (((x) >> S_FW_IQ_CMD_IQANDST) & M_FW_IQ_CMD_IQANDST) +#define F_FW_IQ_CMD_IQANDST V_FW_IQ_CMD_IQANDST(1U) + +#define S_FW_IQ_CMD_IQANUD 12 +#define M_FW_IQ_CMD_IQANUD 0x3 +#define V_FW_IQ_CMD_IQANUD(x) ((x) << S_FW_IQ_CMD_IQANUD) +#define G_FW_IQ_CMD_IQANUD(x) \ + (((x) >> S_FW_IQ_CMD_IQANUD) & M_FW_IQ_CMD_IQANUD) + +#define S_FW_IQ_CMD_IQANDSTINDEX 0 +#define M_FW_IQ_CMD_IQANDSTINDEX 0xfff +#define V_FW_IQ_CMD_IQANDSTINDEX(x) ((x) << S_FW_IQ_CMD_IQANDSTINDEX) +#define G_FW_IQ_CMD_IQANDSTINDEX(x) \ + (((x) >> S_FW_IQ_CMD_IQANDSTINDEX) & M_FW_IQ_CMD_IQANDSTINDEX) + +#define S_FW_IQ_CMD_IQGTSMODE 14 +#define M_FW_IQ_CMD_IQGTSMODE 0x1 +#define V_FW_IQ_CMD_IQGTSMODE(x) ((x) << S_FW_IQ_CMD_IQGTSMODE) +#define G_FW_IQ_CMD_IQGTSMODE(x) \ + (((x) >> S_FW_IQ_CMD_IQGTSMODE) & M_FW_IQ_CMD_IQGTSMODE) +#define F_FW_IQ_CMD_IQGTSMODE V_FW_IQ_CMD_IQGTSMODE(1U) + +#define S_FW_IQ_CMD_IQPCIECH 12 +#define M_FW_IQ_CMD_IQPCIECH 0x3 +#define V_FW_IQ_CMD_IQPCIECH(x) ((x) << S_FW_IQ_CMD_IQPCIECH) +#define G_FW_IQ_CMD_IQPCIECH(x) \ + (((x) >> S_FW_IQ_CMD_IQPCIECH) & M_FW_IQ_CMD_IQPCIECH) + +#define S_FW_IQ_CMD_IQINTCNTTHRESH 4 +#define M_FW_IQ_CMD_IQINTCNTTHRESH 0x3 +#define V_FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << S_FW_IQ_CMD_IQINTCNTTHRESH) +#define G_FW_IQ_CMD_IQINTCNTTHRESH(x) \ + (((x) >> S_FW_IQ_CMD_IQINTCNTTHRESH) & M_FW_IQ_CMD_IQINTCNTTHRESH) + +#define S_FW_IQ_CMD_IQESIZE 0 +#define M_FW_IQ_CMD_IQESIZE 0x3 +#define V_FW_IQ_CMD_IQESIZE(x) ((x) << S_FW_IQ_CMD_IQESIZE) +#define G_FW_IQ_CMD_IQESIZE(x) \ + (((x) >> S_FW_IQ_CMD_IQESIZE) & 
M_FW_IQ_CMD_IQESIZE) + +#define S_FW_IQ_CMD_IQFLINTCONGEN 27 +#define M_FW_IQ_CMD_IQFLINTCONGEN 0x1 +#define V_FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << S_FW_IQ_CMD_IQFLINTCONGEN) +#define G_FW_IQ_CMD_IQFLINTCONGEN(x) \ + (((x) >> S_FW_IQ_CMD_IQFLINTCONGEN) & M_FW_IQ_CMD_IQFLINTCONGEN) +#define F_FW_IQ_CMD_IQFLINTCONGEN V_FW_IQ_CMD_IQFLINTCONGEN(1U) + +#define S_FW_IQ_CMD_FL0CNGCHMAP 20 +#define M_FW_IQ_CMD_FL0CNGCHMAP 0xf +#define V_FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << S_FW_IQ_CMD_FL0CNGCHMAP) +#define G_FW_IQ_CMD_FL0CNGCHMAP(x) \ + (((x) >> S_FW_IQ_CMD_FL0CNGCHMAP) & M_FW_IQ_CMD_FL0CNGCHMAP) + +#define S_FW_IQ_CMD_FL0DATARO 12 +#define M_FW_IQ_CMD_FL0DATARO 0x1 +#define V_FW_IQ_CMD_FL0DATARO(x) ((x) << S_FW_IQ_CMD_FL0DATARO) +#define G_FW_IQ_CMD_FL0DATARO(x) \ + (((x) >> S_FW_IQ_CMD_FL0DATARO) & M_FW_IQ_CMD_FL0DATARO) +#define F_FW_IQ_CMD_FL0DATARO V_FW_IQ_CMD_FL0DATARO(1U) + +#define S_FW_IQ_CMD_FL0CONGCIF 11 +#define M_FW_IQ_CMD_FL0CONGCIF 0x1 +#define V_FW_IQ_CMD_FL0CONGCIF(x) ((x) << S_FW_IQ_CMD_FL0CONGCIF) +#define G_FW_IQ_CMD_FL0CONGCIF(x) \ + (((x) >> S_FW_IQ_CMD_FL0CONGCIF) & M_FW_IQ_CMD_FL0CONGCIF) +#define F_FW_IQ_CMD_FL0CONGCIF V_FW_IQ_CMD_FL0CONGCIF(1U) + +#define S_FW_IQ_CMD_FL0FETCHRO 6 +#define M_FW_IQ_CMD_FL0FETCHRO 0x1 +#define V_FW_IQ_CMD_FL0FETCHRO(x) ((x) << S_FW_IQ_CMD_FL0FETCHRO) +#define G_FW_IQ_CMD_FL0FETCHRO(x) \ + (((x) >> S_FW_IQ_CMD_FL0FETCHRO) & M_FW_IQ_CMD_FL0FETCHRO) +#define F_FW_IQ_CMD_FL0FETCHRO V_FW_IQ_CMD_FL0FETCHRO(1U) + +#define S_FW_IQ_CMD_FL0HOSTFCMODE 4 +#define M_FW_IQ_CMD_FL0HOSTFCMODE 0x3 +#define V_FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << S_FW_IQ_CMD_FL0HOSTFCMODE) +#define G_FW_IQ_CMD_FL0HOSTFCMODE(x) \ + (((x) >> S_FW_IQ_CMD_FL0HOSTFCMODE) & M_FW_IQ_CMD_FL0HOSTFCMODE) + +#define S_FW_IQ_CMD_FL0PADEN 2 +#define M_FW_IQ_CMD_FL0PADEN 0x1 +#define V_FW_IQ_CMD_FL0PADEN(x) ((x) << S_FW_IQ_CMD_FL0PADEN) +#define G_FW_IQ_CMD_FL0PADEN(x) \ + (((x) >> S_FW_IQ_CMD_FL0PADEN) & M_FW_IQ_CMD_FL0PADEN) +#define F_FW_IQ_CMD_FL0PADEN V_FW_IQ_CMD_FL0PADEN(1U) + +#define S_FW_IQ_CMD_FL0PACKEN 1 +#define M_FW_IQ_CMD_FL0PACKEN 0x1 +#define V_FW_IQ_CMD_FL0PACKEN(x) ((x) << S_FW_IQ_CMD_FL0PACKEN) +#define G_FW_IQ_CMD_FL0PACKEN(x) \ + (((x) >> S_FW_IQ_CMD_FL0PACKEN) & M_FW_IQ_CMD_FL0PACKEN) +#define F_FW_IQ_CMD_FL0PACKEN V_FW_IQ_CMD_FL0PACKEN(1U) + +#define S_FW_IQ_CMD_FL0CONGEN 0 +#define M_FW_IQ_CMD_FL0CONGEN 0x1 +#define V_FW_IQ_CMD_FL0CONGEN(x) ((x) << S_FW_IQ_CMD_FL0CONGEN) +#define G_FW_IQ_CMD_FL0CONGEN(x) \ + (((x) >> S_FW_IQ_CMD_FL0CONGEN) & M_FW_IQ_CMD_FL0CONGEN) +#define F_FW_IQ_CMD_FL0CONGEN V_FW_IQ_CMD_FL0CONGEN(1U) + +#define S_FW_IQ_CMD_FL0FBMIN 7 +#define M_FW_IQ_CMD_FL0FBMIN 0x7 +#define V_FW_IQ_CMD_FL0FBMIN(x) ((x) << S_FW_IQ_CMD_FL0FBMIN) +#define G_FW_IQ_CMD_FL0FBMIN(x) \ + (((x) >> S_FW_IQ_CMD_FL0FBMIN) & M_FW_IQ_CMD_FL0FBMIN) + +#define S_FW_IQ_CMD_FL0FBMAX 4 +#define M_FW_IQ_CMD_FL0FBMAX 0x7 +#define V_FW_IQ_CMD_FL0FBMAX(x) ((x) << S_FW_IQ_CMD_FL0FBMAX) +#define G_FW_IQ_CMD_FL0FBMAX(x) \ + (((x) >> S_FW_IQ_CMD_FL0FBMAX) & M_FW_IQ_CMD_FL0FBMAX) + +struct fw_eq_eth_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be32 eqid_pkd; + __be32 physeqid_pkd; + __be32 fetchszm_to_iqid; + __be32 dcaen_to_eqsize; + __be64 eqaddr; + __be32 autoequiqe_to_viid; + __be32 r8_lo; + __be64 r9; +}; + +#define S_FW_EQ_ETH_CMD_PFN 8 +#define M_FW_EQ_ETH_CMD_PFN 0x7 +#define V_FW_EQ_ETH_CMD_PFN(x) ((x) << S_FW_EQ_ETH_CMD_PFN) +#define G_FW_EQ_ETH_CMD_PFN(x) \ + (((x) >> S_FW_EQ_ETH_CMD_PFN) & M_FW_EQ_ETH_CMD_PFN) + +#define S_FW_EQ_ETH_CMD_VFN 0 +#define M_FW_EQ_ETH_CMD_VFN 0xff 
+#define V_FW_EQ_ETH_CMD_VFN(x) ((x) << S_FW_EQ_ETH_CMD_VFN) +#define G_FW_EQ_ETH_CMD_VFN(x) \ + (((x) >> S_FW_EQ_ETH_CMD_VFN) & M_FW_EQ_ETH_CMD_VFN) + +#define S_FW_EQ_ETH_CMD_ALLOC 31 +#define M_FW_EQ_ETH_CMD_ALLOC 0x1 +#define V_FW_EQ_ETH_CMD_ALLOC(x) ((x) << S_FW_EQ_ETH_CMD_ALLOC) +#define G_FW_EQ_ETH_CMD_ALLOC(x) \ + (((x) >> S_FW_EQ_ETH_CMD_ALLOC) & M_FW_EQ_ETH_CMD_ALLOC) +#define F_FW_EQ_ETH_CMD_ALLOC V_FW_EQ_ETH_CMD_ALLOC(1U) + +#define S_FW_EQ_ETH_CMD_FREE 30 +#define M_FW_EQ_ETH_CMD_FREE 0x1 +#define V_FW_EQ_ETH_CMD_FREE(x) ((x) << S_FW_EQ_ETH_CMD_FREE) +#define G_FW_EQ_ETH_CMD_FREE(x) \ + (((x) >> S_FW_EQ_ETH_CMD_FREE) & M_FW_EQ_ETH_CMD_FREE) +#define F_FW_EQ_ETH_CMD_FREE V_FW_EQ_ETH_CMD_FREE(1U) + +#define S_FW_EQ_ETH_CMD_EQSTART 28 +#define M_FW_EQ_ETH_CMD_EQSTART 0x1 +#define V_FW_EQ_ETH_CMD_EQSTART(x) ((x) << S_FW_EQ_ETH_CMD_EQSTART) +#define G_FW_EQ_ETH_CMD_EQSTART(x) \ + (((x) >> S_FW_EQ_ETH_CMD_EQSTART) & M_FW_EQ_ETH_CMD_EQSTART) +#define F_FW_EQ_ETH_CMD_EQSTART V_FW_EQ_ETH_CMD_EQSTART(1U) + +#define S_FW_EQ_ETH_CMD_EQID 0 +#define M_FW_EQ_ETH_CMD_EQID 0xfffff +#define V_FW_EQ_ETH_CMD_EQID(x) ((x) << S_FW_EQ_ETH_CMD_EQID) +#define G_FW_EQ_ETH_CMD_EQID(x) \ + (((x) >> S_FW_EQ_ETH_CMD_EQID) & M_FW_EQ_ETH_CMD_EQID) + +#define S_FW_EQ_ETH_CMD_FETCHRO 22 +#define M_FW_EQ_ETH_CMD_FETCHRO 0x1 +#define V_FW_EQ_ETH_CMD_FETCHRO(x) ((x) << S_FW_EQ_ETH_CMD_FETCHRO) +#define G_FW_EQ_ETH_CMD_FETCHRO(x) \ + (((x) >> S_FW_EQ_ETH_CMD_FETCHRO) & M_FW_EQ_ETH_CMD_FETCHRO) +#define F_FW_EQ_ETH_CMD_FETCHRO V_FW_EQ_ETH_CMD_FETCHRO(1U) + +#define S_FW_EQ_ETH_CMD_HOSTFCMODE 20 +#define M_FW_EQ_ETH_CMD_HOSTFCMODE 0x3 +#define V_FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_ETH_CMD_HOSTFCMODE) +#define G_FW_EQ_ETH_CMD_HOSTFCMODE(x) \ + (((x) >> S_FW_EQ_ETH_CMD_HOSTFCMODE) & M_FW_EQ_ETH_CMD_HOSTFCMODE) + +#define S_FW_EQ_ETH_CMD_PCIECHN 16 +#define M_FW_EQ_ETH_CMD_PCIECHN 0x3 +#define V_FW_EQ_ETH_CMD_PCIECHN(x) ((x) << S_FW_EQ_ETH_CMD_PCIECHN) +#define G_FW_EQ_ETH_CMD_PCIECHN(x) \ + (((x) >> S_FW_EQ_ETH_CMD_PCIECHN) & M_FW_EQ_ETH_CMD_PCIECHN) + +#define S_FW_EQ_ETH_CMD_IQID 0 +#define M_FW_EQ_ETH_CMD_IQID 0xffff +#define V_FW_EQ_ETH_CMD_IQID(x) ((x) << S_FW_EQ_ETH_CMD_IQID) +#define G_FW_EQ_ETH_CMD_IQID(x) \ + (((x) >> S_FW_EQ_ETH_CMD_IQID) & M_FW_EQ_ETH_CMD_IQID) + +#define S_FW_EQ_ETH_CMD_FBMIN 23 +#define M_FW_EQ_ETH_CMD_FBMIN 0x7 +#define V_FW_EQ_ETH_CMD_FBMIN(x) ((x) << S_FW_EQ_ETH_CMD_FBMIN) +#define G_FW_EQ_ETH_CMD_FBMIN(x) \ + (((x) >> S_FW_EQ_ETH_CMD_FBMIN) & M_FW_EQ_ETH_CMD_FBMIN) + +#define S_FW_EQ_ETH_CMD_FBMAX 20 +#define M_FW_EQ_ETH_CMD_FBMAX 0x7 +#define V_FW_EQ_ETH_CMD_FBMAX(x) ((x) << S_FW_EQ_ETH_CMD_FBMAX) +#define G_FW_EQ_ETH_CMD_FBMAX(x) \ + (((x) >> S_FW_EQ_ETH_CMD_FBMAX) & M_FW_EQ_ETH_CMD_FBMAX) + +#define S_FW_EQ_ETH_CMD_CIDXFTHRESH 16 +#define M_FW_EQ_ETH_CMD_CIDXFTHRESH 0x7 +#define V_FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_ETH_CMD_CIDXFTHRESH) +#define G_FW_EQ_ETH_CMD_CIDXFTHRESH(x) \ + (((x) >> S_FW_EQ_ETH_CMD_CIDXFTHRESH) & M_FW_EQ_ETH_CMD_CIDXFTHRESH) + +#define S_FW_EQ_ETH_CMD_EQSIZE 0 +#define M_FW_EQ_ETH_CMD_EQSIZE 0xffff +#define V_FW_EQ_ETH_CMD_EQSIZE(x) ((x) << S_FW_EQ_ETH_CMD_EQSIZE) +#define G_FW_EQ_ETH_CMD_EQSIZE(x) \ + (((x) >> S_FW_EQ_ETH_CMD_EQSIZE) & M_FW_EQ_ETH_CMD_EQSIZE) + +#define S_FW_EQ_ETH_CMD_AUTOEQUEQE 30 +#define M_FW_EQ_ETH_CMD_AUTOEQUEQE 0x1 +#define V_FW_EQ_ETH_CMD_AUTOEQUEQE(x) ((x) << S_FW_EQ_ETH_CMD_AUTOEQUEQE) +#define G_FW_EQ_ETH_CMD_AUTOEQUEQE(x) \ + (((x) >> S_FW_EQ_ETH_CMD_AUTOEQUEQE) & M_FW_EQ_ETH_CMD_AUTOEQUEQE) +#define 
F_FW_EQ_ETH_CMD_AUTOEQUEQE V_FW_EQ_ETH_CMD_AUTOEQUEQE(1U) + +#define S_FW_EQ_ETH_CMD_VIID 16 +#define M_FW_EQ_ETH_CMD_VIID 0xfff +#define V_FW_EQ_ETH_CMD_VIID(x) ((x) << S_FW_EQ_ETH_CMD_VIID) +#define G_FW_EQ_ETH_CMD_VIID(x) \ + (((x) >> S_FW_EQ_ETH_CMD_VIID) & M_FW_EQ_ETH_CMD_VIID) + +enum fw_vi_func { + FW_VI_FUNC_ETH, +}; + +struct fw_vi_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be16 type_to_viid; + __u8 mac[6]; + __u8 portid_pkd; + __u8 nmac; + __u8 nmac0[6]; + __be16 norss_rsssize; + __u8 nmac1[6]; + __be16 idsiiq_pkd; + __u8 nmac2[6]; + __be16 idseiq_pkd; + __u8 nmac3[6]; + __be64 r9; + __be64 r10; +}; + +#define S_FW_VI_CMD_PFN 8 +#define M_FW_VI_CMD_PFN 0x7 +#define V_FW_VI_CMD_PFN(x) ((x) << S_FW_VI_CMD_PFN) +#define G_FW_VI_CMD_PFN(x) (((x) >> S_FW_VI_CMD_PFN) & M_FW_VI_CMD_PFN) + +#define S_FW_VI_CMD_VFN 0 +#define M_FW_VI_CMD_VFN 0xff +#define V_FW_VI_CMD_VFN(x) ((x) << S_FW_VI_CMD_VFN) +#define G_FW_VI_CMD_VFN(x) (((x) >> S_FW_VI_CMD_VFN) & M_FW_VI_CMD_VFN) + +#define S_FW_VI_CMD_ALLOC 31 +#define M_FW_VI_CMD_ALLOC 0x1 +#define V_FW_VI_CMD_ALLOC(x) ((x) << S_FW_VI_CMD_ALLOC) +#define G_FW_VI_CMD_ALLOC(x) \ + (((x) >> S_FW_VI_CMD_ALLOC) & M_FW_VI_CMD_ALLOC) +#define F_FW_VI_CMD_ALLOC V_FW_VI_CMD_ALLOC(1U) + +#define S_FW_VI_CMD_FREE 30 +#define M_FW_VI_CMD_FREE 0x1 +#define V_FW_VI_CMD_FREE(x) ((x) << S_FW_VI_CMD_FREE) +#define G_FW_VI_CMD_FREE(x) (((x) >> S_FW_VI_CMD_FREE) & M_FW_VI_CMD_FREE) +#define F_FW_VI_CMD_FREE V_FW_VI_CMD_FREE(1U) + +#define S_FW_VI_CMD_TYPE 15 +#define M_FW_VI_CMD_TYPE 0x1 +#define V_FW_VI_CMD_TYPE(x) ((x) << S_FW_VI_CMD_TYPE) +#define G_FW_VI_CMD_TYPE(x) (((x) >> S_FW_VI_CMD_TYPE) & M_FW_VI_CMD_TYPE) +#define F_FW_VI_CMD_TYPE V_FW_VI_CMD_TYPE(1U) + +#define S_FW_VI_CMD_FUNC 12 +#define M_FW_VI_CMD_FUNC 0x7 +#define V_FW_VI_CMD_FUNC(x) ((x) << S_FW_VI_CMD_FUNC) +#define G_FW_VI_CMD_FUNC(x) (((x) >> S_FW_VI_CMD_FUNC) & M_FW_VI_CMD_FUNC) + +#define S_FW_VI_CMD_VIID 0 +#define M_FW_VI_CMD_VIID 0xfff +#define V_FW_VI_CMD_VIID(x) ((x) << S_FW_VI_CMD_VIID) +#define G_FW_VI_CMD_VIID(x) (((x) >> S_FW_VI_CMD_VIID) & M_FW_VI_CMD_VIID) + +#define S_FW_VI_CMD_PORTID 4 +#define M_FW_VI_CMD_PORTID 0xf +#define V_FW_VI_CMD_PORTID(x) ((x) << S_FW_VI_CMD_PORTID) +#define G_FW_VI_CMD_PORTID(x) \ + (((x) >> S_FW_VI_CMD_PORTID) & M_FW_VI_CMD_PORTID) + +#define S_FW_VI_CMD_RSSSIZE 0 +#define M_FW_VI_CMD_RSSSIZE 0x7ff +#define V_FW_VI_CMD_RSSSIZE(x) ((x) << S_FW_VI_CMD_RSSSIZE) +#define G_FW_VI_CMD_RSSSIZE(x) \ + (((x) >> S_FW_VI_CMD_RSSSIZE) & M_FW_VI_CMD_RSSSIZE) + +/* Special VI_MAC command index ids */ +#define FW_VI_MAC_ADD_MAC 0x3FF +#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE + +enum fw_vi_mac_smac { + FW_VI_MAC_MPS_TCAM_ENTRY, + FW_VI_MAC_SMT_AND_MPSTCAM +}; + +struct fw_vi_mac_cmd { + __be32 op_to_viid; + __be32 freemacs_to_len16; + union fw_vi_mac { + struct fw_vi_mac_exact { + __be16 valid_to_idx; + __u8 macaddr[6]; + } exact[7]; + struct fw_vi_mac_hash { + __be64 hashvec; + } hash; + } u; +}; + +#define S_FW_VI_MAC_CMD_VIID 0 +#define M_FW_VI_MAC_CMD_VIID 0xfff +#define V_FW_VI_MAC_CMD_VIID(x) ((x) << S_FW_VI_MAC_CMD_VIID) +#define G_FW_VI_MAC_CMD_VIID(x) \ + (((x) >> S_FW_VI_MAC_CMD_VIID) & M_FW_VI_MAC_CMD_VIID) + +#define S_FW_VI_MAC_CMD_VALID 15 +#define M_FW_VI_MAC_CMD_VALID 0x1 +#define V_FW_VI_MAC_CMD_VALID(x) ((x) << S_FW_VI_MAC_CMD_VALID) +#define G_FW_VI_MAC_CMD_VALID(x) \ + (((x) >> S_FW_VI_MAC_CMD_VALID) & M_FW_VI_MAC_CMD_VALID) +#define F_FW_VI_MAC_CMD_VALID V_FW_VI_MAC_CMD_VALID(1U) + +#define S_FW_VI_MAC_CMD_SMAC_RESULT 10 +#define 
M_FW_VI_MAC_CMD_SMAC_RESULT 0x3 +#define V_FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << S_FW_VI_MAC_CMD_SMAC_RESULT) +#define G_FW_VI_MAC_CMD_SMAC_RESULT(x) \ + (((x) >> S_FW_VI_MAC_CMD_SMAC_RESULT) & M_FW_VI_MAC_CMD_SMAC_RESULT) + +#define S_FW_VI_MAC_CMD_IDX 0 +#define M_FW_VI_MAC_CMD_IDX 0x3ff +#define V_FW_VI_MAC_CMD_IDX(x) ((x) << S_FW_VI_MAC_CMD_IDX) +#define G_FW_VI_MAC_CMD_IDX(x) \ + (((x) >> S_FW_VI_MAC_CMD_IDX) & M_FW_VI_MAC_CMD_IDX) + +struct fw_vi_rxmode_cmd { + __be32 op_to_viid; + __be32 retval_len16; + __be32 mtu_to_vlanexen; + __be32 r4_lo; +}; + +#define S_FW_VI_RXMODE_CMD_VIID 0 +#define M_FW_VI_RXMODE_CMD_VIID 0xfff +#define V_FW_VI_RXMODE_CMD_VIID(x) ((x) << S_FW_VI_RXMODE_CMD_VIID) +#define G_FW_VI_RXMODE_CMD_VIID(x) \ + (((x) >> S_FW_VI_RXMODE_CMD_VIID) & M_FW_VI_RXMODE_CMD_VIID) + +#define S_FW_VI_RXMODE_CMD_MTU 16 +#define M_FW_VI_RXMODE_CMD_MTU 0xffff +#define V_FW_VI_RXMODE_CMD_MTU(x) ((x) << S_FW_VI_RXMODE_CMD_MTU) +#define G_FW_VI_RXMODE_CMD_MTU(x) \ + (((x) >> S_FW_VI_RXMODE_CMD_MTU) & M_FW_VI_RXMODE_CMD_MTU) + +#define S_FW_VI_RXMODE_CMD_PROMISCEN 14 +#define M_FW_VI_RXMODE_CMD_PROMISCEN 0x3 +#define V_FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << S_FW_VI_RXMODE_CMD_PROMISCEN) +#define G_FW_VI_RXMODE_CMD_PROMISCEN(x) \ + (((x) >> S_FW_VI_RXMODE_CMD_PROMISCEN) & M_FW_VI_RXMODE_CMD_PROMISCEN) + +#define S_FW_VI_RXMODE_CMD_ALLMULTIEN 12 +#define M_FW_VI_RXMODE_CMD_ALLMULTIEN 0x3 +#define V_FW_VI_RXMODE_CMD_ALLMULTIEN(x) \ + ((x) << S_FW_VI_RXMODE_CMD_ALLMULTIEN) +#define G_FW_VI_RXMODE_CMD_ALLMULTIEN(x) \ + (((x) >> S_FW_VI_RXMODE_CMD_ALLMULTIEN) & M_FW_VI_RXMODE_CMD_ALLMULTIEN) + +#define S_FW_VI_RXMODE_CMD_BROADCASTEN 10 +#define M_FW_VI_RXMODE_CMD_BROADCASTEN 0x3 +#define V_FW_VI_RXMODE_CMD_BROADCASTEN(x) \ + ((x) << S_FW_VI_RXMODE_CMD_BROADCASTEN) +#define G_FW_VI_RXMODE_CMD_BROADCASTEN(x) \ + (((x) >> S_FW_VI_RXMODE_CMD_BROADCASTEN) & \ + M_FW_VI_RXMODE_CMD_BROADCASTEN) + +#define S_FW_VI_RXMODE_CMD_VLANEXEN 8 +#define M_FW_VI_RXMODE_CMD_VLANEXEN 0x3 +#define V_FW_VI_RXMODE_CMD_VLANEXEN(x) ((x) << S_FW_VI_RXMODE_CMD_VLANEXEN) +#define G_FW_VI_RXMODE_CMD_VLANEXEN(x) \ + (((x) >> S_FW_VI_RXMODE_CMD_VLANEXEN) & M_FW_VI_RXMODE_CMD_VLANEXEN) + +struct fw_vi_enable_cmd { + __be32 op_to_viid; + __be32 ien_to_len16; + __be16 blinkdur; + __be16 r3; + __be32 r4; +}; + +#define S_FW_VI_ENABLE_CMD_VIID 0 +#define M_FW_VI_ENABLE_CMD_VIID 0xfff +#define V_FW_VI_ENABLE_CMD_VIID(x) ((x) << S_FW_VI_ENABLE_CMD_VIID) +#define G_FW_VI_ENABLE_CMD_VIID(x) \ + (((x) >> S_FW_VI_ENABLE_CMD_VIID) & M_FW_VI_ENABLE_CMD_VIID) + +#define S_FW_VI_ENABLE_CMD_IEN 31 +#define M_FW_VI_ENABLE_CMD_IEN 0x1 +#define V_FW_VI_ENABLE_CMD_IEN(x) ((x) << S_FW_VI_ENABLE_CMD_IEN) +#define G_FW_VI_ENABLE_CMD_IEN(x) \ + (((x) >> S_FW_VI_ENABLE_CMD_IEN) & M_FW_VI_ENABLE_CMD_IEN) +#define F_FW_VI_ENABLE_CMD_IEN V_FW_VI_ENABLE_CMD_IEN(1U) + +#define S_FW_VI_ENABLE_CMD_EEN 30 +#define M_FW_VI_ENABLE_CMD_EEN 0x1 +#define V_FW_VI_ENABLE_CMD_EEN(x) ((x) << S_FW_VI_ENABLE_CMD_EEN) +#define G_FW_VI_ENABLE_CMD_EEN(x) \ + (((x) >> S_FW_VI_ENABLE_CMD_EEN) & M_FW_VI_ENABLE_CMD_EEN) +#define F_FW_VI_ENABLE_CMD_EEN V_FW_VI_ENABLE_CMD_EEN(1U) + +#define S_FW_VI_ENABLE_CMD_DCB_INFO 28 +#define M_FW_VI_ENABLE_CMD_DCB_INFO 0x1 +#define V_FW_VI_ENABLE_CMD_DCB_INFO(x) ((x) << S_FW_VI_ENABLE_CMD_DCB_INFO) +#define G_FW_VI_ENABLE_CMD_DCB_INFO(x) \ + (((x) >> S_FW_VI_ENABLE_CMD_DCB_INFO) & M_FW_VI_ENABLE_CMD_DCB_INFO) +#define F_FW_VI_ENABLE_CMD_DCB_INFO V_FW_VI_ENABLE_CMD_DCB_INFO(1U) + +/* VI PF stats offset definitions */ +#define 
VI_PF_NUM_STATS 17 +enum fw_vi_stats_pf_index { + FW_VI_PF_STAT_TX_BCAST_BYTES_IX, + FW_VI_PF_STAT_TX_BCAST_FRAMES_IX, + FW_VI_PF_STAT_TX_MCAST_BYTES_IX, + FW_VI_PF_STAT_TX_MCAST_FRAMES_IX, + FW_VI_PF_STAT_TX_UCAST_BYTES_IX, + FW_VI_PF_STAT_TX_UCAST_FRAMES_IX, + FW_VI_PF_STAT_TX_OFLD_BYTES_IX, + FW_VI_PF_STAT_TX_OFLD_FRAMES_IX, + FW_VI_PF_STAT_RX_BYTES_IX, + FW_VI_PF_STAT_RX_FRAMES_IX, + FW_VI_PF_STAT_RX_BCAST_BYTES_IX, + FW_VI_PF_STAT_RX_BCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_MCAST_BYTES_IX, + FW_VI_PF_STAT_RX_MCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_UCAST_BYTES_IX, + FW_VI_PF_STAT_RX_UCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_ERR_FRAMES_IX +}; + +struct fw_vi_stats_cmd { + __be32 op_to_viid; + __be32 retval_len16; + union fw_vi_stats { + struct fw_vi_stats_ctl { + __be16 nstats_ix; + __be16 r6; + __be32 r7; + __be64 stat0; + __be64 stat1; + __be64 stat2; + __be64 stat3; + __be64 stat4; + __be64 stat5; + } ctl; + struct fw_vi_stats_pf { + __be64 tx_bcast_bytes; + __be64 tx_bcast_frames; + __be64 tx_mcast_bytes; + __be64 tx_mcast_frames; + __be64 tx_ucast_bytes; + __be64 tx_ucast_frames; + __be64 tx_offload_bytes; + __be64 tx_offload_frames; + __be64 rx_pf_bytes; + __be64 rx_pf_frames; + __be64 rx_bcast_bytes; + __be64 rx_bcast_frames; + __be64 rx_mcast_bytes; + __be64 rx_mcast_frames; + __be64 rx_ucast_bytes; + __be64 rx_ucast_frames; + __be64 rx_err_frames; + } pf; + struct fw_vi_stats_vf { + __be64 tx_bcast_bytes; + __be64 tx_bcast_frames; + __be64 tx_mcast_bytes; + __be64 tx_mcast_frames; + __be64 tx_ucast_bytes; + __be64 tx_ucast_frames; + __be64 tx_drop_frames; + __be64 tx_offload_bytes; + __be64 tx_offload_frames; + __be64 rx_bcast_bytes; + __be64 rx_bcast_frames; + __be64 rx_mcast_bytes; + __be64 rx_mcast_frames; + __be64 rx_ucast_bytes; + __be64 rx_ucast_frames; + __be64 rx_err_frames; + } vf; + } u; +}; + +/* port capabilities bitmap */ +enum fw_port_cap { + FW_PORT_CAP_SPEED_100M = 0x0001, + FW_PORT_CAP_SPEED_1G = 0x0002, + FW_PORT_CAP_SPEED_2_5G = 0x0004, + FW_PORT_CAP_SPEED_10G = 0x0008, + FW_PORT_CAP_SPEED_40G = 0x0010, + FW_PORT_CAP_SPEED_100G = 0x0020, + FW_PORT_CAP_FC_RX = 0x0040, + FW_PORT_CAP_FC_TX = 0x0080, + FW_PORT_CAP_ANEG = 0x0100, + FW_PORT_CAP_MDIX = 0x0200, + FW_PORT_CAP_MDIAUTO = 0x0400, + FW_PORT_CAP_FEC = 0x0800, + FW_PORT_CAP_TECHKR = 0x1000, + FW_PORT_CAP_TECHKX4 = 0x2000, + FW_PORT_CAP_802_3_PAUSE = 0x4000, + FW_PORT_CAP_802_3_ASM_DIR = 0x8000, +}; + +enum fw_port_mdi { + FW_PORT_CAP_MDI_AUTO, +}; + +#define S_FW_PORT_CAP_MDI 9 +#define M_FW_PORT_CAP_MDI 3 +#define V_FW_PORT_CAP_MDI(x) ((x) << S_FW_PORT_CAP_MDI) +#define G_FW_PORT_CAP_MDI(x) (((x) >> S_FW_PORT_CAP_MDI) & M_FW_PORT_CAP_MDI) + +enum fw_port_action { + FW_PORT_ACTION_L1_CFG = 0x0001, + FW_PORT_ACTION_GET_PORT_INFO = 0x0003, +}; + +struct fw_port_cmd { + __be32 op_to_portid; + __be32 action_to_len16; + union fw_port { + struct fw_port_l1cfg { + __be32 rcap; + __be32 r; + } l1cfg; + struct fw_port_l2cfg { + __u8 ctlbf; + __u8 ovlan3_to_ivlan0; + __be16 ivlantype; + __be16 txipg_force_pinfo; + __be16 mtu; + __be16 ovlan0mask; + __be16 ovlan0type; + __be16 ovlan1mask; + __be16 ovlan1type; + __be16 ovlan2mask; + __be16 ovlan2type; + __be16 ovlan3mask; + __be16 ovlan3type; + } l2cfg; + struct fw_port_info { + __be32 lstatus_to_modtype; + __be16 pcap; + __be16 acap; + __be16 mtu; + __u8 cbllen; + __u8 auxlinfo; + __u8 dcbxdis_pkd; + __u8 r8_lo; + __be16 lpacap; + __be64 r9; + } info; + struct fw_port_diags { + __u8 diagop; + __u8 r[3]; + __be32 diagval; + } diags; + union fw_port_dcb { + struct fw_port_dcb_pgid { + 
__u8 type; + __u8 apply_pkd; + __u8 r10_lo[2]; + __be32 pgid; + __be64 r11; + } pgid; + struct fw_port_dcb_pgrate { + __u8 type; + __u8 apply_pkd; + __u8 r10_lo[5]; + __u8 num_tcs_supported; + __u8 pgrate[8]; + __u8 tsa[8]; + } pgrate; + struct fw_port_dcb_priorate { + __u8 type; + __u8 apply_pkd; + __u8 r10_lo[6]; + __u8 strict_priorate[8]; + } priorate; + struct fw_port_dcb_pfc { + __u8 type; + __u8 pfcen; + __u8 r10[5]; + __u8 max_pfc_tcs; + __be64 r11; + } pfc; + struct fw_port_app_priority { + __u8 type; + __u8 r10[2]; + __u8 idx; + __u8 user_prio_map; + __u8 sel_field; + __be16 protocolid; + __be64 r12; + } app_priority; + struct fw_port_dcb_control { + __u8 type; + __u8 all_syncd_pkd; + __be16 dcb_version_to_app_state; + __be32 r11; + __be64 r12; + } control; + } dcb; + } u; +}; + +#define S_FW_PORT_CMD_PORTID 0 +#define M_FW_PORT_CMD_PORTID 0xf +#define V_FW_PORT_CMD_PORTID(x) ((x) << S_FW_PORT_CMD_PORTID) +#define G_FW_PORT_CMD_PORTID(x) \ + (((x) >> S_FW_PORT_CMD_PORTID) & M_FW_PORT_CMD_PORTID) + +#define S_FW_PORT_CMD_ACTION 16 +#define M_FW_PORT_CMD_ACTION 0xffff +#define V_FW_PORT_CMD_ACTION(x) ((x) << S_FW_PORT_CMD_ACTION) +#define G_FW_PORT_CMD_ACTION(x) \ + (((x) >> S_FW_PORT_CMD_ACTION) & M_FW_PORT_CMD_ACTION) + +#define S_FW_PORT_CMD_LSTATUS 31 +#define M_FW_PORT_CMD_LSTATUS 0x1 +#define V_FW_PORT_CMD_LSTATUS(x) ((x) << S_FW_PORT_CMD_LSTATUS) +#define G_FW_PORT_CMD_LSTATUS(x) \ + (((x) >> S_FW_PORT_CMD_LSTATUS) & M_FW_PORT_CMD_LSTATUS) +#define F_FW_PORT_CMD_LSTATUS V_FW_PORT_CMD_LSTATUS(1U) + +#define S_FW_PORT_CMD_LSPEED 24 +#define M_FW_PORT_CMD_LSPEED 0x3f +#define V_FW_PORT_CMD_LSPEED(x) ((x) << S_FW_PORT_CMD_LSPEED) +#define G_FW_PORT_CMD_LSPEED(x) \ + (((x) >> S_FW_PORT_CMD_LSPEED) & M_FW_PORT_CMD_LSPEED) + +#define S_FW_PORT_CMD_TXPAUSE 23 +#define M_FW_PORT_CMD_TXPAUSE 0x1 +#define V_FW_PORT_CMD_TXPAUSE(x) ((x) << S_FW_PORT_CMD_TXPAUSE) +#define G_FW_PORT_CMD_TXPAUSE(x) \ + (((x) >> S_FW_PORT_CMD_TXPAUSE) & M_FW_PORT_CMD_TXPAUSE) +#define F_FW_PORT_CMD_TXPAUSE V_FW_PORT_CMD_TXPAUSE(1U) + +#define S_FW_PORT_CMD_RXPAUSE 22 +#define M_FW_PORT_CMD_RXPAUSE 0x1 +#define V_FW_PORT_CMD_RXPAUSE(x) ((x) << S_FW_PORT_CMD_RXPAUSE) +#define G_FW_PORT_CMD_RXPAUSE(x) \ + (((x) >> S_FW_PORT_CMD_RXPAUSE) & M_FW_PORT_CMD_RXPAUSE) +#define F_FW_PORT_CMD_RXPAUSE V_FW_PORT_CMD_RXPAUSE(1U) + +#define S_FW_PORT_CMD_MDIOCAP 21 +#define M_FW_PORT_CMD_MDIOCAP 0x1 +#define V_FW_PORT_CMD_MDIOCAP(x) ((x) << S_FW_PORT_CMD_MDIOCAP) +#define G_FW_PORT_CMD_MDIOCAP(x) \ + (((x) >> S_FW_PORT_CMD_MDIOCAP) & M_FW_PORT_CMD_MDIOCAP) +#define F_FW_PORT_CMD_MDIOCAP V_FW_PORT_CMD_MDIOCAP(1U) + +#define S_FW_PORT_CMD_MDIOADDR 16 +#define M_FW_PORT_CMD_MDIOADDR 0x1f +#define V_FW_PORT_CMD_MDIOADDR(x) ((x) << S_FW_PORT_CMD_MDIOADDR) +#define G_FW_PORT_CMD_MDIOADDR(x) \ + (((x) >> S_FW_PORT_CMD_MDIOADDR) & M_FW_PORT_CMD_MDIOADDR) + +#define S_FW_PORT_CMD_PTYPE 8 +#define M_FW_PORT_CMD_PTYPE 0x1f +#define V_FW_PORT_CMD_PTYPE(x) ((x) << S_FW_PORT_CMD_PTYPE) +#define G_FW_PORT_CMD_PTYPE(x) \ + (((x) >> S_FW_PORT_CMD_PTYPE) & M_FW_PORT_CMD_PTYPE) + +#define S_FW_PORT_CMD_LINKDNRC 5 +#define M_FW_PORT_CMD_LINKDNRC 0x7 +#define V_FW_PORT_CMD_LINKDNRC(x) ((x) << S_FW_PORT_CMD_LINKDNRC) +#define G_FW_PORT_CMD_LINKDNRC(x) \ + (((x) >> S_FW_PORT_CMD_LINKDNRC) & M_FW_PORT_CMD_LINKDNRC) + +#define S_FW_PORT_CMD_MODTYPE 0 +#define M_FW_PORT_CMD_MODTYPE 0x1f +#define V_FW_PORT_CMD_MODTYPE(x) ((x) << S_FW_PORT_CMD_MODTYPE) +#define G_FW_PORT_CMD_MODTYPE(x) \ + (((x) >> S_FW_PORT_CMD_MODTYPE) & M_FW_PORT_CMD_MODTYPE) + +/* 
+ * These are configured into the VPD and hence tools that generate + * VPD may use this enumeration. + * extPHY #lanes T4_I2C extI2C BP_Eq BP_ANEG Speed + * + * REMEMBER: + * Update the Common Code t4_hw.c:t4_get_port_type_description() + * with any new Firmware Port Technology Types! + */ +enum fw_port_type { + FW_PORT_TYPE_FIBER_XFI = 0, /* Y, 1, N, Y, N, N, 10G */ + FW_PORT_TYPE_FIBER_XAUI = 1, /* Y, 4, N, Y, N, N, 10G */ + FW_PORT_TYPE_BT_SGMII = 2, /* Y, 1, No, No, No, No, 1G/100M */ + FW_PORT_TYPE_BT_XFI = 3, /* Y, 1, No, No, No, No, 10G */ + FW_PORT_TYPE_BT_XAUI = 4, /* Y, 4, No, No, No, No, 10G/1G/100M? */ + FW_PORT_TYPE_KX4 = 5, /* No, 4, No, No, Yes, Yes, 10G */ + FW_PORT_TYPE_CX4 = 6, /* No, 4, No, No, No, No, 10G */ + FW_PORT_TYPE_KX = 7, /* No, 1, No, No, Yes, No, 1G */ + FW_PORT_TYPE_KR = 8, /* No, 1, No, No, Yes, Yes, 10G */ + FW_PORT_TYPE_SFP = 9, /* No, 1, Yes, No, No, No, 10G */ + FW_PORT_TYPE_BP_AP = 10, + /* No, 1, No, No, Yes, Yes, 10G, BP ANGE */ + FW_PORT_TYPE_BP4_AP = 11, + /* No, 4, No, No, Yes, Yes, 10G, BP ANGE */ + FW_PORT_TYPE_QSFP_10G = 12, /* No, 1, Yes, No, No, No, 10G */ + FW_PORT_TYPE_QSA = 13, /* No, 1, Yes, No, No, No, 10G */ + FW_PORT_TYPE_QSFP = 14, /* No, 4, Yes, No, No, No, 40G */ + FW_PORT_TYPE_BP40_BA = 15, + /* No, 4, No, No, Yes, Yes, 40G/10G/1G, BP ANGE */ + + FW_PORT_TYPE_NONE = M_FW_PORT_CMD_PTYPE +}; + +/* These are read from module's EEPROM and determined once the + * module is inserted. + */ +enum fw_port_module_type { + FW_PORT_MOD_TYPE_NA = 0x0, + FW_PORT_MOD_TYPE_LR = 0x1, + FW_PORT_MOD_TYPE_SR = 0x2, + FW_PORT_MOD_TYPE_ER = 0x3, + FW_PORT_MOD_TYPE_TWINAX_PASSIVE = 0x4, + FW_PORT_MOD_TYPE_TWINAX_ACTIVE = 0x5, + FW_PORT_MOD_TYPE_LRM = 0x6, + FW_PORT_MOD_TYPE_ERROR = M_FW_PORT_CMD_MODTYPE - 3, + FW_PORT_MOD_TYPE_UNKNOWN = M_FW_PORT_CMD_MODTYPE - 2, + FW_PORT_MOD_TYPE_NOTSUPPORTED = M_FW_PORT_CMD_MODTYPE - 1, + FW_PORT_MOD_TYPE_NONE = M_FW_PORT_CMD_MODTYPE +}; + +/* used by FW and tools may use this to generate VPD */ +enum fw_port_mod_sub_type { + FW_PORT_MOD_SUB_TYPE_NA, + FW_PORT_MOD_SUB_TYPE_MV88E114X = 0x1, + FW_PORT_MOD_SUB_TYPE_TN8022 = 0x2, + FW_PORT_MOD_SUB_TYPE_AQ1202 = 0x3, + FW_PORT_MOD_SUB_TYPE_88x3120 = 0x4, + FW_PORT_MOD_SUB_TYPE_BCM84834 = 0x5, + FW_PORT_MOD_SUB_TYPE_BCM5482 = 0x6, + FW_PORT_MOD_SUB_TYPE_BCM84856 = 0x7, + FW_PORT_MOD_SUB_TYPE_BT_VSC8634 = 0x8, + + /* + * The following will never been in the VPD. They are TWINAX cable + * lengths decoded from SFP+ module i2c PROMs. These should almost + * certainly go somewhere else ... 
+ */ + FW_PORT_MOD_SUB_TYPE_TWINAX_1 = 0x9, + FW_PORT_MOD_SUB_TYPE_TWINAX_3 = 0xA, + FW_PORT_MOD_SUB_TYPE_TWINAX_5 = 0xB, + FW_PORT_MOD_SUB_TYPE_TWINAX_7 = 0xC, +}; + +/* link down reason codes (3b) */ +enum fw_port_link_dn_rc { + FW_PORT_LINK_DN_RC_NONE, + FW_PORT_LINK_DN_RC_REMFLT, /* Remote fault detected */ + FW_PORT_LINK_DN_ANEG_F, /* Auto-negotiation fault */ + FW_PORT_LINK_DN_RESERVED3, + FW_PORT_LINK_DN_OVERHEAT, /* Port overheated */ + FW_PORT_LINK_DN_UNKNOWN, /* Unable to determine reason */ + FW_PORT_LINK_DN_RX_LOS, /* No RX signal detected */ + FW_PORT_LINK_DN_RESERVED7 +}; + +/* port stats */ +#define FW_NUM_PORT_STATS 50 +#define FW_NUM_PORT_TX_STATS 23 +#define FW_NUM_PORT_RX_STATS 27 + +enum fw_port_stats_tx_index { + FW_STAT_TX_PORT_BYTES_IX, + FW_STAT_TX_PORT_FRAMES_IX, + FW_STAT_TX_PORT_BCAST_IX, + FW_STAT_TX_PORT_MCAST_IX, + FW_STAT_TX_PORT_UCAST_IX, + FW_STAT_TX_PORT_ERROR_IX, + FW_STAT_TX_PORT_64B_IX, + FW_STAT_TX_PORT_65B_127B_IX, + FW_STAT_TX_PORT_128B_255B_IX, + FW_STAT_TX_PORT_256B_511B_IX, + FW_STAT_TX_PORT_512B_1023B_IX, + FW_STAT_TX_PORT_1024B_1518B_IX, + FW_STAT_TX_PORT_1519B_MAX_IX, + FW_STAT_TX_PORT_DROP_IX, + FW_STAT_TX_PORT_PAUSE_IX, + FW_STAT_TX_PORT_PPP0_IX, + FW_STAT_TX_PORT_PPP1_IX, + FW_STAT_TX_PORT_PPP2_IX, + FW_STAT_TX_PORT_PPP3_IX, + FW_STAT_TX_PORT_PPP4_IX, + FW_STAT_TX_PORT_PPP5_IX, + FW_STAT_TX_PORT_PPP6_IX, + FW_STAT_TX_PORT_PPP7_IX +}; + +enum fw_port_stat_rx_index { + FW_STAT_RX_PORT_BYTES_IX, + FW_STAT_RX_PORT_FRAMES_IX, + FW_STAT_RX_PORT_BCAST_IX, + FW_STAT_RX_PORT_MCAST_IX, + FW_STAT_RX_PORT_UCAST_IX, + FW_STAT_RX_PORT_MTU_ERROR_IX, + FW_STAT_RX_PORT_MTU_CRC_ERROR_IX, + FW_STAT_RX_PORT_CRC_ERROR_IX, + FW_STAT_RX_PORT_LEN_ERROR_IX, + FW_STAT_RX_PORT_SYM_ERROR_IX, + FW_STAT_RX_PORT_64B_IX, + FW_STAT_RX_PORT_65B_127B_IX, + FW_STAT_RX_PORT_128B_255B_IX, + FW_STAT_RX_PORT_256B_511B_IX, + FW_STAT_RX_PORT_512B_1023B_IX, + FW_STAT_RX_PORT_1024B_1518B_IX, + FW_STAT_RX_PORT_1519B_MAX_IX, + FW_STAT_RX_PORT_PAUSE_IX, + FW_STAT_RX_PORT_PPP0_IX, + FW_STAT_RX_PORT_PPP1_IX, + FW_STAT_RX_PORT_PPP2_IX, + FW_STAT_RX_PORT_PPP3_IX, + FW_STAT_RX_PORT_PPP4_IX, + FW_STAT_RX_PORT_PPP5_IX, + FW_STAT_RX_PORT_PPP6_IX, + FW_STAT_RX_PORT_PPP7_IX, + FW_STAT_RX_PORT_LESS_64B_IX +}; + +struct fw_port_stats_cmd { + __be32 op_to_portid; + __be32 retval_len16; + union fw_port_stats { + struct fw_port_stats_ctl { + __u8 nstats_bg_bm; + __u8 tx_ix; + __be16 r6; + __be32 r7; + __be64 stat0; + __be64 stat1; + __be64 stat2; + __be64 stat3; + __be64 stat4; + __be64 stat5; + } ctl; + struct fw_port_stats_all { + __be64 tx_bytes; + __be64 tx_frames; + __be64 tx_bcast; + __be64 tx_mcast; + __be64 tx_ucast; + __be64 tx_error; + __be64 tx_64b; + __be64 tx_65b_127b; + __be64 tx_128b_255b; + __be64 tx_256b_511b; + __be64 tx_512b_1023b; + __be64 tx_1024b_1518b; + __be64 tx_1519b_max; + __be64 tx_drop; + __be64 tx_pause; + __be64 tx_ppp0; + __be64 tx_ppp1; + __be64 tx_ppp2; + __be64 tx_ppp3; + __be64 tx_ppp4; + __be64 tx_ppp5; + __be64 tx_ppp6; + __be64 tx_ppp7; + __be64 rx_bytes; + __be64 rx_frames; + __be64 rx_bcast; + __be64 rx_mcast; + __be64 rx_ucast; + __be64 rx_mtu_error; + __be64 rx_mtu_crc_error; + __be64 rx_crc_error; + __be64 rx_len_error; + __be64 rx_sym_error; + __be64 rx_64b; + __be64 rx_65b_127b; + __be64 rx_128b_255b; + __be64 rx_256b_511b; + __be64 rx_512b_1023b; + __be64 rx_1024b_1518b; + __be64 rx_1519b_max; + __be64 rx_pause; + __be64 rx_ppp0; + __be64 rx_ppp1; + __be64 rx_ppp2; + __be64 rx_ppp3; + __be64 rx_ppp4; + __be64 rx_ppp5; + __be64 rx_ppp6; + __be64 rx_ppp7; + 
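The FW_STAT_TX_PORT_*_IX and FW_STAT_RX_PORT_*_IX enums above index the 23 TX and 27 RX per-port counters (FW_NUM_PORT_STATS = 23 + 27 = 50), and each counter travels as a big-endian 64-bit value, so the host byte-swaps on read. A small standalone sketch of that decoding, using the C library's htobe64/be64toh in place of the driver's own byte-order helpers and made-up counter values:

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

#define FW_NUM_PORT_TX_STATS      23   /* mirrors the definition above */
#define FW_STAT_TX_PORT_BYTES_IX  0
#define FW_STAT_TX_PORT_FRAMES_IX 1

int main(void)
{
        /* Hypothetical firmware reply: counters arrive big-endian. */
        uint64_t wire[FW_NUM_PORT_TX_STATS] = {
                [FW_STAT_TX_PORT_BYTES_IX]  = htobe64(123456789ULL),
                [FW_STAT_TX_PORT_FRAMES_IX] = htobe64(1000ULL),
        };

        printf("tx bytes:  %llu\n",
               (unsigned long long)be64toh(wire[FW_STAT_TX_PORT_BYTES_IX]));
        printf("tx frames: %llu\n",
               (unsigned long long)be64toh(wire[FW_STAT_TX_PORT_FRAMES_IX]));
        return 0;
}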
__be64 rx_less_64b; + __be64 rx_bg_drop; + __be64 rx_bg_trunc; + } all; + } u; +}; + +struct fw_rss_ind_tbl_cmd { + __be32 op_to_viid; + __be32 retval_len16; + __be16 niqid; + __be16 startidx; + __be32 r3; + __be32 iq0_to_iq2; + __be32 iq3_to_iq5; + __be32 iq6_to_iq8; + __be32 iq9_to_iq11; + __be32 iq12_to_iq14; + __be32 iq15_to_iq17; + __be32 iq18_to_iq20; + __be32 iq21_to_iq23; + __be32 iq24_to_iq26; + __be32 iq27_to_iq29; + __be32 iq30_iq31; + __be32 r15_lo; +}; + +#define S_FW_RSS_IND_TBL_CMD_VIID 0 +#define M_FW_RSS_IND_TBL_CMD_VIID 0xfff +#define V_FW_RSS_IND_TBL_CMD_VIID(x) ((x) << S_FW_RSS_IND_TBL_CMD_VIID) +#define G_FW_RSS_IND_TBL_CMD_VIID(x) \ + (((x) >> S_FW_RSS_IND_TBL_CMD_VIID) & M_FW_RSS_IND_TBL_CMD_VIID) + +#define S_FW_RSS_IND_TBL_CMD_IQ0 20 +#define M_FW_RSS_IND_TBL_CMD_IQ0 0x3ff +#define V_FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ0) +#define G_FW_RSS_IND_TBL_CMD_IQ0(x) \ + (((x) >> S_FW_RSS_IND_TBL_CMD_IQ0) & M_FW_RSS_IND_TBL_CMD_IQ0) + +#define S_FW_RSS_IND_TBL_CMD_IQ1 10 +#define M_FW_RSS_IND_TBL_CMD_IQ1 0x3ff +#define V_FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ1) +#define G_FW_RSS_IND_TBL_CMD_IQ1(x) \ + (((x) >> S_FW_RSS_IND_TBL_CMD_IQ1) & M_FW_RSS_IND_TBL_CMD_IQ1) + +#define S_FW_RSS_IND_TBL_CMD_IQ2 0 +#define M_FW_RSS_IND_TBL_CMD_IQ2 0x3ff +#define V_FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ2) +#define G_FW_RSS_IND_TBL_CMD_IQ2(x) \ + (((x) >> S_FW_RSS_IND_TBL_CMD_IQ2) & M_FW_RSS_IND_TBL_CMD_IQ2) + +struct fw_rss_vi_config_cmd { + __be32 op_to_viid; + __be32 retval_len16; + union fw_rss_vi_config { + struct fw_rss_vi_config_manual { + __be64 r3; + __be64 r4; + __be64 r5; + } manual; + struct fw_rss_vi_config_basicvirtual { + __be32 r6; + __be32 defaultq_to_udpen; + __be64 r9; + __be64 r10; + } basicvirtual; + } u; +}; + +#define S_FW_RSS_VI_CONFIG_CMD_VIID 0 +#define M_FW_RSS_VI_CONFIG_CMD_VIID 0xfff +#define V_FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << S_FW_RSS_VI_CONFIG_CMD_VIID) +#define G_FW_RSS_VI_CONFIG_CMD_VIID(x) \ + (((x) >> S_FW_RSS_VI_CONFIG_CMD_VIID) & M_FW_RSS_VI_CONFIG_CMD_VIID) + +#define S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ 16 +#define M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ 0x3ff +#define V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) \ + ((x) << S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ) +#define G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) \ + (((x) >> S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ) & \ + M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ) + +#define S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN 4 +#define M_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN 0x1 +#define V_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(x) \ + ((x) << S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) +#define G_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(x) \ + (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) & \ + M_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) +#define F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN \ + V_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(1U) + +#define S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN 3 +#define M_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN 0x1 +#define V_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(x) \ + ((x) << S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) +#define G_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(x) \ + (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) & \ + M_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) +#define F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN \ + V_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(1U) + +#define S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN 2 +#define M_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN 0x1 +#define V_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(x) \ + ((x) << S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) +#define G_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(x) \ + (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) & \ + 
M_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) +#define F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN \ + V_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(1U) + +#define S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN 1 +#define M_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN 0x1 +#define V_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(x) \ + ((x) << S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) +#define G_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(x) \ + (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) & \ + M_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) +#define F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN \ + V_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(1U) + +#define S_FW_RSS_VI_CONFIG_CMD_UDPEN 0 +#define M_FW_RSS_VI_CONFIG_CMD_UDPEN 0x1 +#define V_FW_RSS_VI_CONFIG_CMD_UDPEN(x) ((x) << S_FW_RSS_VI_CONFIG_CMD_UDPEN) +#define G_FW_RSS_VI_CONFIG_CMD_UDPEN(x) \ + (((x) >> S_FW_RSS_VI_CONFIG_CMD_UDPEN) & M_FW_RSS_VI_CONFIG_CMD_UDPEN) +#define F_FW_RSS_VI_CONFIG_CMD_UDPEN V_FW_RSS_VI_CONFIG_CMD_UDPEN(1U) + +/****************************************************************************** + * D E B U G C O M M A N D s + ******************************************************/ + +struct fw_debug_cmd { + __be32 op_type; + __be32 len16_pkd; + union fw_debug { + struct fw_debug_assert { + __be32 fcid; + __be32 line; + __be32 x; + __be32 y; + __u8 filename_0_7[8]; + __u8 filename_8_15[8]; + __be64 r3; + } assert; + struct fw_debug_prt { + __be16 dprtstridx; + __be16 r3[3]; + __be32 dprtstrparam0; + __be32 dprtstrparam1; + __be32 dprtstrparam2; + __be32 dprtstrparam3; + } prt; + } u; +}; + +#define S_FW_DEBUG_CMD_TYPE 0 +#define M_FW_DEBUG_CMD_TYPE 0xff +#define V_FW_DEBUG_CMD_TYPE(x) ((x) << S_FW_DEBUG_CMD_TYPE) +#define G_FW_DEBUG_CMD_TYPE(x) \ + (((x) >> S_FW_DEBUG_CMD_TYPE) & M_FW_DEBUG_CMD_TYPE) + +/****************************************************************************** + * P C I E F W R E G I S T E R + **************************************/ + +/* + * Register definitions for the PCIE_FW register which the firmware uses + * to retain status across RESETs. This register should be considered + * as a READ-ONLY register for Host Software and only to be used to + * track firmware initialization/error state, etc. 
+ */ +#define S_PCIE_FW_ERR 31 +#define M_PCIE_FW_ERR 0x1 +#define V_PCIE_FW_ERR(x) ((x) << S_PCIE_FW_ERR) +#define G_PCIE_FW_ERR(x) (((x) >> S_PCIE_FW_ERR) & M_PCIE_FW_ERR) +#define F_PCIE_FW_ERR V_PCIE_FW_ERR(1U) + +#define S_PCIE_FW_INIT 30 +#define M_PCIE_FW_INIT 0x1 +#define V_PCIE_FW_INIT(x) ((x) << S_PCIE_FW_INIT) +#define G_PCIE_FW_INIT(x) (((x) >> S_PCIE_FW_INIT) & M_PCIE_FW_INIT) +#define F_PCIE_FW_INIT V_PCIE_FW_INIT(1U) + +#define S_PCIE_FW_HALT 29 +#define M_PCIE_FW_HALT 0x1 +#define V_PCIE_FW_HALT(x) ((x) << S_PCIE_FW_HALT) +#define G_PCIE_FW_HALT(x) (((x) >> S_PCIE_FW_HALT) & M_PCIE_FW_HALT) +#define F_PCIE_FW_HALT V_PCIE_FW_HALT(1U) + +#define S_PCIE_FW_EVAL 24 +#define M_PCIE_FW_EVAL 0x7 +#define V_PCIE_FW_EVAL(x) ((x) << S_PCIE_FW_EVAL) +#define G_PCIE_FW_EVAL(x) (((x) >> S_PCIE_FW_EVAL) & M_PCIE_FW_EVAL) + +#define S_PCIE_FW_MASTER_VLD 15 +#define M_PCIE_FW_MASTER_VLD 0x1 +#define V_PCIE_FW_MASTER_VLD(x) ((x) << S_PCIE_FW_MASTER_VLD) +#define G_PCIE_FW_MASTER_VLD(x) \ + (((x) >> S_PCIE_FW_MASTER_VLD) & M_PCIE_FW_MASTER_VLD) +#define F_PCIE_FW_MASTER_VLD V_PCIE_FW_MASTER_VLD(1U) + +#define S_PCIE_FW_MASTER 12 +#define M_PCIE_FW_MASTER 0x7 +#define V_PCIE_FW_MASTER(x) ((x) << S_PCIE_FW_MASTER) +#define G_PCIE_FW_MASTER(x) (((x) >> S_PCIE_FW_MASTER) & M_PCIE_FW_MASTER) + +/****************************************************************************** + * B I N A R Y H E A D E R F O R M A T + **********************************************/ + +/* + * firmware binary header format + */ +struct fw_hdr { + __u8 ver; + __u8 chip; /* terminator chip family */ + __be16 len512; /* bin length in units of 512-bytes */ + __be32 fw_ver; /* firmware version */ + __be32 tp_microcode_ver; /* tcp processor microcode version */ + __u8 intfver_nic; + __u8 intfver_vnic; + __u8 intfver_ofld; + __u8 intfver_ri; + __u8 intfver_iscsipdu; + __u8 intfver_iscsi; + __u8 intfver_fcoepdu; + __u8 intfver_fcoe; + __u32 reserved2; + __u32 reserved3; + __u32 magic; /* runtime or bootstrap fw */ + __be32 flags; + __be32 reserved6[23]; +}; + +#define S_FW_HDR_FW_VER_MAJOR 24 +#define M_FW_HDR_FW_VER_MAJOR 0xff +#define V_FW_HDR_FW_VER_MAJOR(x) \ + ((x) << S_FW_HDR_FW_VER_MAJOR) +#define G_FW_HDR_FW_VER_MAJOR(x) \ + (((x) >> S_FW_HDR_FW_VER_MAJOR) & M_FW_HDR_FW_VER_MAJOR) + +#define S_FW_HDR_FW_VER_MINOR 16 +#define M_FW_HDR_FW_VER_MINOR 0xff +#define V_FW_HDR_FW_VER_MINOR(x) \ + ((x) << S_FW_HDR_FW_VER_MINOR) +#define G_FW_HDR_FW_VER_MINOR(x) \ + (((x) >> S_FW_HDR_FW_VER_MINOR) & M_FW_HDR_FW_VER_MINOR) + +#define S_FW_HDR_FW_VER_MICRO 8 +#define M_FW_HDR_FW_VER_MICRO 0xff +#define V_FW_HDR_FW_VER_MICRO(x) \ + ((x) << S_FW_HDR_FW_VER_MICRO) +#define G_FW_HDR_FW_VER_MICRO(x) \ + (((x) >> S_FW_HDR_FW_VER_MICRO) & M_FW_HDR_FW_VER_MICRO) + +#define S_FW_HDR_FW_VER_BUILD 0 +#define M_FW_HDR_FW_VER_BUILD 0xff +#define V_FW_HDR_FW_VER_BUILD(x) \ + ((x) << S_FW_HDR_FW_VER_BUILD) +#define G_FW_HDR_FW_VER_BUILD(x) \ + (((x) >> S_FW_HDR_FW_VER_BUILD) & M_FW_HDR_FW_VER_BUILD) + +#endif /* _T4FW_INTERFACE_H_ */ diff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h new file mode 100644 index 00000000..0201c990 --- /dev/null +++ b/drivers/net/cxgbe/cxgbe.h @@ -0,0 +1,63 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2015 Chelsio Communications. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Chelsio Communications nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _CXGBE_H_ +#define _CXGBE_H_ + +#include "common.h" +#include "t4_regs.h" + +#define CXGBE_MIN_RING_DESC_SIZE 128 /* Min TX/RX descriptor ring size */ +#define CXGBE_MAX_RING_DESC_SIZE 4096 /* Max TX/RX descriptor ring size */ + +#define CXGBE_DEFAULT_TX_DESC_SIZE 1024 /* Default TX ring size */ +#define CXGBE_DEFAULT_RX_DESC_SIZE 1024 /* Default RX ring size */ + +#define CXGBE_MIN_RX_BUFSIZE ETHER_MIN_MTU /* min buf size */ +#define CXGBE_MAX_RX_PKTLEN (9000 + ETHER_HDR_LEN + ETHER_CRC_LEN) /* max pkt */ + +int cxgbe_probe(struct adapter *adapter); +int cxgbe_up(struct adapter *adap); +int cxgbe_down(struct port_info *pi); +void cxgbe_close(struct adapter *adapter); +void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats); +void cxgbe_stats_reset(struct port_info *pi); +int link_start(struct port_info *pi); +void init_rspq(struct adapter *adap, struct sge_rspq *q, unsigned int us, + unsigned int cnt, unsigned int size, unsigned int iqe_size); +int setup_sge_fwevtq(struct adapter *adapter); +void cfg_queues(struct rte_eth_dev *eth_dev); +int cfg_queue_count(struct rte_eth_dev *eth_dev); +int setup_rss(struct port_info *pi); + +#endif /* _CXGBE_H_ */ diff --git a/drivers/net/cxgbe/cxgbe_compat.h b/drivers/net/cxgbe/cxgbe_compat.h new file mode 100644 index 00000000..e68f8f59 --- /dev/null +++ b/drivers/net/cxgbe/cxgbe_compat.h @@ -0,0 +1,266 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2015 Chelsio Communications. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Chelsio Communications nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _CXGBE_COMPAT_H_ +#define _CXGBE_COMPAT_H_ + +#include <string.h> +#include <stdint.h> +#include <stdio.h> +#include <stdarg.h> + +#include <rte_common.h> +#include <rte_memcpy.h> +#include <rte_byteorder.h> +#include <rte_cycles.h> +#include <rte_spinlock.h> +#include <rte_log.h> + +#define dev_printf(level, fmt, args...) \ + RTE_LOG(level, PMD, "rte_cxgbe_pmd: " fmt, ## args) + +#define dev_err(x, args...) dev_printf(ERR, args) +#define dev_info(x, args...) dev_printf(INFO, args) +#define dev_warn(x, args...) dev_printf(WARNING, args) + +#ifdef RTE_LIBRTE_CXGBE_DEBUG +#define dev_debug(x, args...) dev_printf(DEBUG, args) +#else +#define dev_debug(x, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_CXGBE_DEBUG_REG +#define CXGBE_DEBUG_REG(x, args...) dev_printf(DEBUG, "REG:" args) +#else +#define CXGBE_DEBUG_REG(x, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_CXGBE_DEBUG_MBOX +#define CXGBE_DEBUG_MBOX(x, args...) dev_printf(DEBUG, "MBOX:" args) +#else +#define CXGBE_DEBUG_MBOX(x, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_CXGBE_DEBUG_TX +#define CXGBE_DEBUG_TX(x, args...) dev_printf(DEBUG, "TX:" args) +#else +#define CXGBE_DEBUG_TX(x, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_CXGBE_DEBUG_RX +#define CXGBE_DEBUG_RX(x, args...) dev_printf(DEBUG, "RX:" args) +#else +#define CXGBE_DEBUG_RX(x, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_CXGBE_DEBUG +#define CXGBE_FUNC_TRACE() \ + RTE_LOG(DEBUG, PMD, "CXGBE trace: %s\n", __func__) +#else +#define CXGBE_FUNC_TRACE() do { } while (0) +#endif + +#define pr_err(y, args...) dev_err(0, y, ##args) +#define pr_warn(y, args...) dev_warn(0, y, ##args) +#define pr_info(y, args...) 
dev_info(0, y, ##args) +#define BUG() pr_err("BUG at %s:%d", __func__, __LINE__) + +#define ASSERT(x) do {\ + if (!(x)) \ + rte_panic("CXGBE: x"); \ +} while (0) +#define BUG_ON(x) ASSERT(!(x)) + +#ifndef WARN_ON +#define WARN_ON(x) do { \ + int ret = !!(x); \ + if (unlikely(ret)) \ + pr_warn("WARN_ON: \"" #x "\" at %s:%d\n", __func__, __LINE__); \ +} while (0) +#endif + +#define __iomem + +#ifndef BIT +#define BIT(n) (1 << (n)) +#endif + +#define L1_CACHE_SHIFT 6 +#define L1_CACHE_BYTES BIT(L1_CACHE_SHIFT) + +#define PAGE_SHIFT 12 +#define CXGBE_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1)) +#define PTR_ALIGN(p, a) ((typeof(p))CXGBE_ALIGN((unsigned long)(p), (a))) + +#define VLAN_HLEN 4 + +#define rmb() rte_rmb() /* dpdk rte provided rmb */ +#define wmb() rte_wmb() /* dpdk rte provided wmb */ + +typedef uint8_t u8; +typedef int8_t s8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef int32_t s32; +typedef uint64_t u64; +typedef int bool; +typedef uint64_t dma_addr_t; + +#ifndef __le16 +#define __le16 uint16_t +#endif +#ifndef __le32 +#define __le32 uint32_t +#endif +#ifndef __le64 +#define __le64 uint64_t +#endif +#ifndef __be16 +#define __be16 uint16_t +#endif +#ifndef __be32 +#define __be32 uint32_t +#endif +#ifndef __be64 +#define __be64 uint64_t +#endif +#ifndef __u8 +#define __u8 uint8_t +#endif +#ifndef __u16 +#define __u16 uint16_t +#endif +#ifndef __u32 +#define __u32 uint32_t +#endif +#ifndef __u64 +#define __u64 uint64_t +#endif + +#define FALSE 0 +#define TRUE 1 +#define false 0 +#define true 1 + +#define min(a, b) RTE_MIN(a, b) +#define max(a, b) RTE_MAX(a, b) + +/* + * round up val _p to a power of 2 size _s + */ +#define cxgbe_roundup(_p, _s) (((unsigned long)(_p) + (_s - 1)) & ~(_s - 1)) + +#undef container_of +#define container_of(ptr, type, member) ({ \ + typeof(((type *)0)->member)(*__mptr) = (ptr); \ + (type *)((char *)__mptr - offsetof(type, member)); }) + +#define ARRAY_SIZE(arr) RTE_DIM(arr) + +#define cpu_to_be16(o) rte_cpu_to_be_16(o) +#define cpu_to_be32(o) rte_cpu_to_be_32(o) +#define cpu_to_be64(o) rte_cpu_to_be_64(o) +#define cpu_to_le32(o) rte_cpu_to_le_32(o) +#define be16_to_cpu(o) rte_be_to_cpu_16(o) +#define be32_to_cpu(o) rte_be_to_cpu_32(o) +#define be64_to_cpu(o) rte_be_to_cpu_64(o) +#define le32_to_cpu(o) rte_le_to_cpu_32(o) + +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) +#define DELAY(x) rte_delay_us(x) +#define udelay(x) DELAY(x) +#define msleep(x) DELAY(1000 * (x)) +#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) + +static inline uint8_t hweight32(uint32_t word32) +{ + uint32_t res = word32 - ((word32 >> 1) & 0x55555555); + + res = (res & 0x33333333) + ((res >> 2) & 0x33333333); + res = (res + (res >> 4)) & 0x0F0F0F0F; + res = res + (res >> 8); + return (res + (res >> 16)) & 0x000000FF; + +} /* weight32 */ + +/** + * cxgbe_fls - find last (most-significant) bit set + * @x: the word to search + * + * This is defined the same way as ffs. + * Note cxgbe_fls(0) = 0, cxgbe_fls(1) = 1, cxgbe_fls(0x80000000) = 32. + */ +static inline int cxgbe_fls(int x) +{ + return x ? 
sizeof(x) * 8 - __builtin_clz(x) : 0; +} + +static inline unsigned long ilog2(unsigned long n) +{ + unsigned int e = 0; + + while (n) { + if (n & ~((1 << 8) - 1)) { + e += 8; + n >>= 8; + continue; + } + + if (n & ~((1 << 4) - 1)) { + e += 4; + n >>= 4; + } + + for (;;) { + n >>= 1; + if (n == 0) + break; + e++; + } + } + + return e; +} + +static inline void writel(unsigned int val, volatile void __iomem *addr) +{ + *(volatile unsigned int *)addr = val; +} + +static inline void writeq(u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, (void *)((uintptr_t)addr + 4)); +} + +#endif /* _CXGBE_COMPAT_H_ */ diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c new file mode 100644 index 00000000..bb134e50 --- /dev/null +++ b/drivers/net/cxgbe/cxgbe_ethdev.c @@ -0,0 +1,898 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2016 Chelsio Communications. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Chelsio Communications nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/queue.h> +#include <stdio.h> +#include <errno.h> +#include <stdint.h> +#include <string.h> +#include <unistd.h> +#include <stdarg.h> +#include <inttypes.h> +#include <netinet/in.h> + +#include <rte_byteorder.h> +#include <rte_common.h> +#include <rte_cycles.h> +#include <rte_interrupts.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_pci.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_memory.h> +#include <rte_memzone.h> +#include <rte_tailq.h> +#include <rte_eal.h> +#include <rte_alarm.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_atomic.h> +#include <rte_malloc.h> +#include <rte_random.h> +#include <rte_dev.h> + +#include "cxgbe.h" + +/* + * Macros needed to support the PCI Device ID Table ... 
+ */ +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ + static struct rte_pci_id cxgb4_pci_tbl[] = { +#define CH_PCI_DEVICE_ID_FUNCTION 0x4 + +#define PCI_VENDOR_ID_CHELSIO 0x1425 + +#define CH_PCI_ID_TABLE_ENTRY(devid) \ + { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) } + +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \ + { .vendor_id = 0, } \ + } + +/* + *... and the PCI ID Table itself ... + */ +#include "t4_pci_id_tbl.h" + +static uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue; + uint16_t pkts_sent, pkts_remain; + uint16_t total_sent = 0; + int ret = 0; + + CXGBE_DEBUG_TX(adapter, "%s: txq = %p; tx_pkts = %p; nb_pkts = %d\n", + __func__, txq, tx_pkts, nb_pkts); + + t4_os_lock(&txq->txq_lock); + /* free up desc from already completed tx */ + reclaim_completed_tx(&txq->q); + while (total_sent < nb_pkts) { + pkts_remain = nb_pkts - total_sent; + + for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) { + ret = t4_eth_xmit(txq, tx_pkts[total_sent + pkts_sent]); + if (ret < 0) + break; + } + if (!pkts_sent) + break; + total_sent += pkts_sent; + /* reclaim as much as possible */ + reclaim_completed_tx(&txq->q); + } + + t4_os_unlock(&txq->txq_lock); + return total_sent; +} + +static uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue; + unsigned int work_done; + + CXGBE_DEBUG_RX(adapter, "%s: rxq->rspq.cntxt_id = %u; nb_pkts = %d\n", + __func__, rxq->rspq.cntxt_id, nb_pkts); + + if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done)) + dev_err(adapter, "error in cxgbe poll\n"); + + CXGBE_DEBUG_RX(adapter, "%s: work_done = %u\n", __func__, work_done); + return work_done; +} + +static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev, + struct rte_eth_dev_info *device_info) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adapter = pi->adapter; + int max_queues = adapter->sge.max_ethqsets / adapter->params.nports; + + static const struct rte_eth_desc_lim cxgbe_desc_lim = { + .nb_max = CXGBE_MAX_RING_DESC_SIZE, + .nb_min = CXGBE_MIN_RING_DESC_SIZE, + .nb_align = 1, + }; + + device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE; + device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN; + device_info->max_rx_queues = max_queues; + device_info->max_tx_queues = max_queues; + device_info->max_mac_addrs = 1; + /* XXX: For now we support one MAC/port */ + device_info->max_vfs = adapter->params.arch.vfcount; + device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */ + + device_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + + device_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO; + + device_info->reta_size = pi->rss_size; + + device_info->rx_desc_lim = cxgbe_desc_lim; + device_info->tx_desc_lim = cxgbe_desc_lim; + device_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G; +} + +static void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adapter = pi->adapter; + + t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1, + 1, -1, 1, -1, false); +} + +static void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) +{ 
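None of these callbacks is invoked directly; a DPDK application reaches cxgbe_dev_promiscuous_enable()/_disable() and cxgbe_recv_pkts() through the generic ethdev API wired up in cxgbe_eth_dev_ops below. A minimal application-side sketch, assuming the DPDK 2.x-era ethdev calls this driver targets, with the port id, queue 0 and a 32-packet burst chosen arbitrarily:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Drain one burst from queue 0 of a port running in promiscuous mode. */
static uint16_t poll_port_promisc(uint8_t port_id)
{
        struct rte_mbuf *pkts[32];
        uint16_t i, nb_rx;

        rte_eth_promiscuous_enable(port_id);            /* -> cxgbe_dev_promiscuous_enable() */
        nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 32); /* -> cxgbe_recv_pkts() */
        for (i = 0; i < nb_rx; i++)
                rte_pktmbuf_free(pkts[i]);
        return nb_rx;
}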
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adapter = pi->adapter; + + t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1, + 0, -1, 1, -1, false); +} + +static void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adapter = pi->adapter; + + /* TODO: address filters ?? */ + + t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1, + -1, 1, 1, -1, false); +} + +static void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adapter = pi->adapter; + + /* TODO: address filters ?? */ + + t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1, + -1, 0, 1, -1, false); +} + +static int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev, + __rte_unused int wait_to_complete) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adapter = pi->adapter; + struct sge *s = &adapter->sge; + struct rte_eth_link *old_link = ð_dev->data->dev_link; + unsigned int work_done, budget = 4; + + cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done); + if (old_link->link_status == pi->link_cfg.link_ok) + return -1; /* link not changed */ + + eth_dev->data->dev_link.link_status = pi->link_cfg.link_ok; + eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; + eth_dev->data->dev_link.link_speed = pi->link_cfg.speed; + + /* link has changed */ + return 0; +} + +static int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adapter = pi->adapter; + struct rte_eth_dev_info dev_info; + int err; + uint16_t new_mtu = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + + cxgbe_dev_info_get(eth_dev, &dev_info); + + /* Must accommodate at least ETHER_MIN_MTU */ + if ((new_mtu < ETHER_MIN_MTU) || (new_mtu > dev_info.max_rx_pktlen)) + return -EINVAL; + + /* set to jumbo mode if needed */ + if (new_mtu > ETHER_MAX_LEN) + eth_dev->data->dev_conf.rxmode.jumbo_frame = 1; + else + eth_dev->data->dev_conf.rxmode.jumbo_frame = 0; + + err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1, + -1, -1, true); + if (!err) + eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu; + + return err; +} + +static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, + uint16_t tx_queue_id); +static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, + uint16_t tx_queue_id); +static void cxgbe_dev_tx_queue_release(void *q); +static void cxgbe_dev_rx_queue_release(void *q); + +/* + * Stop device. + */ +static void cxgbe_dev_close(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adapter = pi->adapter; + int i, dev_down = 0; + + CXGBE_FUNC_TRACE(); + + if (!(adapter->flags & FULL_INIT_DONE)) + return; + + cxgbe_down(pi); + + /* + * We clear queues only if both tx and rx path of the port + * have been disabled + */ + t4_sge_eth_clear_queues(pi); + + /* See if all ports are down */ + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + /* + * Skip first port of the adapter since it will be closed + * by DPDK + */ + if (i == 0) + continue; + dev_down += (pi->eth_dev->data->dev_started == 0) ? 1 : 0; + } + + /* If rest of the ports are stopped, then free up resources */ + if (dev_down == (adapter->params.nports - 1)) + cxgbe_close(adapter); +} + +/* Start the device. 
+ * It returns 0 on success. + */ +static int cxgbe_dev_start(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adapter = pi->adapter; + int err = 0, i; + + CXGBE_FUNC_TRACE(); + + /* + * If we don't have a connection to the firmware there's nothing we + * can do. + */ + if (!(adapter->flags & FW_OK)) { + err = -ENXIO; + goto out; + } + + if (!(adapter->flags & FULL_INIT_DONE)) { + err = cxgbe_up(adapter); + if (err < 0) + goto out; + } + + err = setup_rss(pi); + if (err) + goto out; + + for (i = 0; i < pi->n_tx_qsets; i++) { + err = cxgbe_dev_tx_queue_start(eth_dev, i); + if (err) + goto out; + } + + for (i = 0; i < pi->n_rx_qsets; i++) { + err = cxgbe_dev_rx_queue_start(eth_dev, i); + if (err) + goto out; + } + + err = link_start(pi); + if (err) + goto out; + +out: + return err; +} + +/* + * Stop device: disable rx and tx functions to allow for reconfiguring. + */ +static void cxgbe_dev_stop(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adapter = pi->adapter; + + CXGBE_FUNC_TRACE(); + + if (!(adapter->flags & FULL_INIT_DONE)) + return; + + cxgbe_down(pi); + + /* + * We clear queues only if both tx and rx path of the port + * have been disabled + */ + t4_sge_eth_clear_queues(pi); +} + +static int cxgbe_dev_configure(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adapter = pi->adapter; + int err; + + CXGBE_FUNC_TRACE(); + + if (!(adapter->flags & FW_QUEUE_BOUND)) { + err = setup_sge_fwevtq(adapter); + if (err) + return err; + adapter->flags |= FW_QUEUE_BOUND; + } + + err = cfg_queue_count(eth_dev); + if (err) + return err; + + return 0; +} + +static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, + uint16_t tx_queue_id) +{ + int ret; + struct sge_eth_txq *txq = (struct sge_eth_txq *) + (eth_dev->data->tx_queues[tx_queue_id]); + + dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id); + + ret = t4_sge_eth_txq_start(txq); + if (ret == 0) + eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return ret; +} + +static int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, + uint16_t tx_queue_id) +{ + int ret; + struct sge_eth_txq *txq = (struct sge_eth_txq *) + (eth_dev->data->tx_queues[tx_queue_id]); + + dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id); + + ret = t4_sge_eth_txq_stop(txq); + if (ret == 0) + eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return ret; +} + +static int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adapter = pi->adapter; + struct sge *s = &adapter->sge; + struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx]; + int err = 0; + unsigned int temp_nb_desc; + + RTE_SET_USED(tx_conf); + + dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n", + __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc, + socket_id, pi->first_qset); + + /* Free up the existing queue */ + if (eth_dev->data->tx_queues[queue_idx]) { + cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]); + eth_dev->data->tx_queues[queue_idx] = NULL; + } + + 
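The sanity check just below clamps an undersized descriptor request up to the default TX ring size and rejects an oversized one. A standalone restatement of that policy, with the constants copied from cxgbe.h and a hypothetical helper name, purely for illustration:

#include <stdio.h>
#include <errno.h>

#define CXGBE_MIN_RING_DESC_SIZE   128   /* copied from cxgbe.h */
#define CXGBE_MAX_RING_DESC_SIZE   4096
#define CXGBE_DEFAULT_TX_DESC_SIZE 1024

/* Ring size actually used, or -EINVAL when the request exceeds the maximum. */
static int tx_ring_size_for(unsigned int nb_desc)
{
        if (nb_desc > CXGBE_MAX_RING_DESC_SIZE)
                return -EINVAL;
        if (nb_desc < CXGBE_MIN_RING_DESC_SIZE)
                return CXGBE_DEFAULT_TX_DESC_SIZE;
        return nb_desc;
}

int main(void)
{
        /* prints "1024 2048 -22": too small -> default, in range -> as-is, too big -> error */
        printf("%d %d %d\n", tx_ring_size_for(64),
               tx_ring_size_for(2048), tx_ring_size_for(8192));
        return 0;
}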
eth_dev->data->tx_queues[queue_idx] = (void *)txq; + + /* Sanity Checking + * + * nb_desc should be > 1023 and <= CXGBE_MAX_RING_DESC_SIZE + */ + temp_nb_desc = nb_desc; + if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) { + dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n", + __func__, CXGBE_MIN_RING_DESC_SIZE, + CXGBE_DEFAULT_TX_DESC_SIZE); + temp_nb_desc = CXGBE_DEFAULT_TX_DESC_SIZE; + } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) { + dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n", + __func__, CXGBE_MIN_RING_DESC_SIZE, + CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE); + return -(EINVAL); + } + + txq->q.size = temp_nb_desc; + + err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx, + s->fw_evtq.cntxt_id, socket_id); + + dev_debug(adapter, "%s: txq->q.cntxt_id= %d err = %d\n", + __func__, txq->q.cntxt_id, err); + + return err; +} + +static void cxgbe_dev_tx_queue_release(void *q) +{ + struct sge_eth_txq *txq = (struct sge_eth_txq *)q; + + if (txq) { + struct port_info *pi = (struct port_info *) + (txq->eth_dev->data->dev_private); + struct adapter *adap = pi->adapter; + + dev_debug(adapter, "%s: pi->port_id = %d; tx_queue_id = %d\n", + __func__, pi->port_id, txq->q.cntxt_id); + + t4_sge_eth_txq_release(adap, txq); + } +} + +static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, + uint16_t rx_queue_id) +{ + int ret; + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adap = pi->adapter; + struct sge_rspq *q; + + dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n", + __func__, pi->port_id, rx_queue_id); + + q = eth_dev->data->rx_queues[rx_queue_id]; + + ret = t4_sge_eth_rxq_start(adap, q); + if (ret == 0) + eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return ret; +} + +static int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, + uint16_t rx_queue_id) +{ + int ret; + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adap = pi->adapter; + struct sge_rspq *q; + + dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n", + __func__, pi->port_id, rx_queue_id); + + q = eth_dev->data->rx_queues[rx_queue_id]; + ret = t4_sge_eth_rxq_stop(adap, q); + if (ret == 0) + eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return ret; +} + +static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adapter = pi->adapter; + struct sge *s = &adapter->sge; + struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx]; + int err = 0; + int msi_idx = 0; + unsigned int temp_nb_desc; + struct rte_eth_dev_info dev_info; + unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len; + + RTE_SET_USED(rx_conf); + + dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n", + __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc, + socket_id, mp); + + cxgbe_dev_info_get(eth_dev, &dev_info); + + /* Must accommodate at least ETHER_MIN_MTU */ + if ((pkt_len < dev_info.min_rx_bufsize) || + (pkt_len > dev_info.max_rx_pktlen)) { + dev_err(adap, "%s: max pkt len must be > %d and <= %d\n", + __func__, dev_info.min_rx_bufsize, + dev_info.max_rx_pktlen); + return -EINVAL; 
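The bound enforced by this check follows from the Ethernet framing arithmetic used throughout the driver: CXGBE_MAX_RX_PKTLEN in cxgbe.h is 9000 bytes of payload plus the 14-byte header and 4-byte CRC, and cxgbe_dev_mtu_set() above adds the same overhead to the requested MTU. A worked check of those numbers, assuming the standard header/CRC sizes and a hypothetical 9000-byte MTU:

#include <assert.h>
#include <stdio.h>

#define ETHER_HDR_LEN 14   /* two MAC addresses + ethertype */
#define ETHER_CRC_LEN 4
#define ETHER_MAX_LEN 1518
#define CXGBE_MAX_RX_PKTLEN (9000 + ETHER_HDR_LEN + ETHER_CRC_LEN)

int main(void)
{
        unsigned int mtu = 9000;   /* hypothetical jumbo MTU */
        unsigned int frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

        assert(frame == 9018 && frame <= CXGBE_MAX_RX_PKTLEN);
        printf("frame = %u bytes, jumbo mode: %d\n", frame, frame > ETHER_MAX_LEN);
        return 0;
}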
+ } + + /* Free up the existing queue */ + if (eth_dev->data->rx_queues[queue_idx]) { + cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]); + eth_dev->data->rx_queues[queue_idx] = NULL; + } + + eth_dev->data->rx_queues[queue_idx] = (void *)rxq; + + /* Sanity Checking + * + * nb_desc should be > 0 and <= CXGBE_MAX_RING_DESC_SIZE + */ + temp_nb_desc = nb_desc; + if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) { + dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n", + __func__, CXGBE_MIN_RING_DESC_SIZE, + CXGBE_DEFAULT_RX_DESC_SIZE); + temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE; + } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) { + dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n", + __func__, CXGBE_MIN_RING_DESC_SIZE, + CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE); + return -(EINVAL); + } + + rxq->rspq.size = temp_nb_desc; + if ((&rxq->fl) != NULL) + rxq->fl.size = temp_nb_desc; + + /* Set to jumbo mode if necessary */ + if (pkt_len > ETHER_MAX_LEN) + eth_dev->data->dev_conf.rxmode.jumbo_frame = 1; + else + eth_dev->data->dev_conf.rxmode.jumbo_frame = 0; + + err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx, + &rxq->fl, t4_ethrx_handler, + t4_get_mps_bg_map(adapter, pi->tx_chan), mp, + queue_idx, socket_id); + + dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u\n", + __func__, err, pi->port_id, rxq->rspq.cntxt_id); + return err; +} + +static void cxgbe_dev_rx_queue_release(void *q) +{ + struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q; + struct sge_rspq *rq = &rxq->rspq; + + if (rq) { + struct port_info *pi = (struct port_info *) + (rq->eth_dev->data->dev_private); + struct adapter *adap = pi->adapter; + + dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n", + __func__, pi->port_id, rxq->rspq.cntxt_id); + + t4_sge_eth_rxq_release(adap, rxq); + } +} + +/* + * Get port statistics. + */ +static void cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev, + struct rte_eth_stats *eth_stats) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adapter = pi->adapter; + struct sge *s = &adapter->sge; + struct port_stats ps; + unsigned int i; + + cxgbe_stats_get(pi, &ps); + + /* RX Stats */ + eth_stats->ipackets = ps.rx_frames; + eth_stats->ibytes = ps.rx_octets; + eth_stats->imcasts = ps.rx_mcast_frames; + eth_stats->imissed = ps.rx_ovflow0 + ps.rx_ovflow1 + + ps.rx_ovflow2 + ps.rx_ovflow3 + + ps.rx_trunc0 + ps.rx_trunc1 + + ps.rx_trunc2 + ps.rx_trunc3; + eth_stats->ierrors = ps.rx_symbol_err + ps.rx_fcs_err + + ps.rx_jabber + ps.rx_too_long + ps.rx_runt + + ps.rx_len_err; + + /* TX Stats */ + eth_stats->opackets = ps.tx_frames; + eth_stats->obytes = ps.tx_octets; + eth_stats->oerrors = ps.tx_error_frames; + + for (i = 0; i < pi->n_rx_qsets; i++) { + struct sge_eth_rxq *rxq = + &s->ethrxq[pi->first_qset + i]; + + eth_stats->q_ipackets[i] = rxq->stats.pkts; + eth_stats->q_ibytes[i] = rxq->stats.rx_bytes; + } + + for (i = 0; i < pi->n_tx_qsets; i++) { + struct sge_eth_txq *txq = + &s->ethtxq[pi->first_qset + i]; + + eth_stats->q_opackets[i] = txq->stats.pkts; + eth_stats->q_obytes[i] = txq->stats.tx_bytes; + eth_stats->q_errors[i] = txq->stats.mapping_err; + } +} + +/* + * Reset port statistics. 
+ */ +static void cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adapter = pi->adapter; + struct sge *s = &adapter->sge; + unsigned int i; + + cxgbe_stats_reset(pi); + for (i = 0; i < pi->n_rx_qsets; i++) { + struct sge_eth_rxq *rxq = + &s->ethrxq[pi->first_qset + i]; + + rxq->stats.pkts = 0; + rxq->stats.rx_bytes = 0; + } + for (i = 0; i < pi->n_tx_qsets; i++) { + struct sge_eth_txq *txq = + &s->ethtxq[pi->first_qset + i]; + + txq->stats.pkts = 0; + txq->stats.tx_bytes = 0; + txq->stats.mapping_err = 0; + } +} + +static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct link_config *lc = &pi->link_cfg; + int rx_pause, tx_pause; + + fc_conf->autoneg = lc->fc & PAUSE_AUTONEG; + rx_pause = lc->fc & PAUSE_RX; + tx_pause = lc->fc & PAUSE_TX; + + if (rx_pause && tx_pause) + fc_conf->mode = RTE_FC_FULL; + else if (rx_pause) + fc_conf->mode = RTE_FC_RX_PAUSE; + else if (tx_pause) + fc_conf->mode = RTE_FC_TX_PAUSE; + else + fc_conf->mode = RTE_FC_NONE; + return 0; +} + +static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adapter = pi->adapter; + struct link_config *lc = &pi->link_cfg; + + if (lc->supported & FW_PORT_CAP_ANEG) { + if (fc_conf->autoneg) + lc->requested_fc |= PAUSE_AUTONEG; + else + lc->requested_fc &= ~PAUSE_AUTONEG; + } + + if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) || + (fc_conf->mode & RTE_FC_RX_PAUSE)) + lc->requested_fc |= PAUSE_RX; + else + lc->requested_fc &= ~PAUSE_RX; + + if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) || + (fc_conf->mode & RTE_FC_TX_PAUSE)) + lc->requested_fc |= PAUSE_TX; + else + lc->requested_fc &= ~PAUSE_TX; + + return t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan, + &pi->link_cfg); +} + +static const uint32_t * +cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev) +{ + static const uint32_t ptypes[] = { + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_UNKNOWN + }; + + if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts) + return ptypes; + return NULL; +} + +static const struct eth_dev_ops cxgbe_eth_dev_ops = { + .dev_start = cxgbe_dev_start, + .dev_stop = cxgbe_dev_stop, + .dev_close = cxgbe_dev_close, + .promiscuous_enable = cxgbe_dev_promiscuous_enable, + .promiscuous_disable = cxgbe_dev_promiscuous_disable, + .allmulticast_enable = cxgbe_dev_allmulticast_enable, + .allmulticast_disable = cxgbe_dev_allmulticast_disable, + .dev_configure = cxgbe_dev_configure, + .dev_infos_get = cxgbe_dev_info_get, + .dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get, + .link_update = cxgbe_dev_link_update, + .mtu_set = cxgbe_dev_mtu_set, + .tx_queue_setup = cxgbe_dev_tx_queue_setup, + .tx_queue_start = cxgbe_dev_tx_queue_start, + .tx_queue_stop = cxgbe_dev_tx_queue_stop, + .tx_queue_release = cxgbe_dev_tx_queue_release, + .rx_queue_setup = cxgbe_dev_rx_queue_setup, + .rx_queue_start = cxgbe_dev_rx_queue_start, + .rx_queue_stop = cxgbe_dev_rx_queue_stop, + .rx_queue_release = cxgbe_dev_rx_queue_release, + .stats_get = cxgbe_dev_stats_get, + .stats_reset = cxgbe_dev_stats_reset, + .flow_ctrl_get = cxgbe_flow_ctrl_get, + .flow_ctrl_set = cxgbe_flow_ctrl_set, +}; + +/* + * Initialize driver + * It returns 0 on success. 
+ */ +static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev; + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adapter = NULL; + char name[RTE_ETH_NAME_MAX_LEN]; + int err = 0; + + CXGBE_FUNC_TRACE(); + + eth_dev->dev_ops = &cxgbe_eth_dev_ops; + eth_dev->rx_pkt_burst = &cxgbe_recv_pkts; + eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts; + + /* for secondary processes, we don't initialise any further as primary + * has already done this work. + */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + pci_dev = eth_dev->pci_dev; + + snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id); + adapter = rte_zmalloc(name, sizeof(*adapter), 0); + if (!adapter) + return -1; + + adapter->use_unpacked_mode = 1; + adapter->regs = (void *)pci_dev->mem_resource[0].addr; + if (!adapter->regs) { + dev_err(adapter, "%s: cannot map device registers\n", __func__); + err = -ENOMEM; + goto out_free_adapter; + } + adapter->pdev = pci_dev; + adapter->eth_dev = eth_dev; + pi->adapter = adapter; + + err = cxgbe_probe(adapter); + if (err) { + dev_err(adapter, "%s: cxgbe probe failed with err %d\n", + __func__, err); + goto out_free_adapter; + } + + return 0; + +out_free_adapter: + rte_free(adapter); + return err; +} + +static struct eth_driver rte_cxgbe_pmd = { + .pci_drv = { + .name = "rte_cxgbe_pmd", + .id_table = cxgb4_pci_tbl, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + }, + .eth_dev_init = eth_cxgbe_dev_init, + .dev_private_size = sizeof(struct port_info), +}; + +/* + * Driver initialization routine. + * Invoked once at EAL init time. + * Register itself as the [Poll Mode] Driver of PCI CXGBE devices. + */ +static int rte_cxgbe_pmd_init(const char *name __rte_unused, + const char *params __rte_unused) +{ + CXGBE_FUNC_TRACE(); + + rte_eth_driver_register(&rte_cxgbe_pmd); + return 0; +} + +static struct rte_driver rte_cxgbe_driver = { + .name = "cxgbe_driver", + .type = PMD_PDEV, + .init = rte_cxgbe_pmd_init, +}; + +PMD_REGISTER_DRIVER(rte_cxgbe_driver); diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c new file mode 100644 index 00000000..ceaf5ab2 --- /dev/null +++ b/drivers/net/cxgbe/cxgbe_main.c @@ -0,0 +1,1222 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2016 Chelsio Communications. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Chelsio Communications nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/queue.h> +#include <stdio.h> +#include <errno.h> +#include <stdint.h> +#include <string.h> +#include <unistd.h> +#include <stdarg.h> +#include <inttypes.h> +#include <netinet/in.h> + +#include <rte_byteorder.h> +#include <rte_common.h> +#include <rte_cycles.h> +#include <rte_interrupts.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_pci.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_memory.h> +#include <rte_memzone.h> +#include <rte_tailq.h> +#include <rte_eal.h> +#include <rte_alarm.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_atomic.h> +#include <rte_malloc.h> +#include <rte_random.h> +#include <rte_dev.h> + +#include "common.h" +#include "t4_regs.h" +#include "t4_msg.h" +#include "cxgbe.h" + +/* + * Response queue handler for the FW event queue. + */ +static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, + __rte_unused const struct pkt_gl *gl) +{ + u8 opcode = ((const struct rss_header *)rsp)->opcode; + + rsp++; /* skip RSS header */ + + /* + * FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG. + */ + if (unlikely(opcode == CPL_FW4_MSG && + ((const struct cpl_fw4_msg *)rsp)->type == + FW_TYPE_RSSCPL)) { + rsp++; + opcode = ((const struct rss_header *)rsp)->opcode; + rsp++; + if (opcode != CPL_SGE_EGR_UPDATE) { + dev_err(q->adapter, "unexpected FW4/CPL %#x on FW event queue\n", + opcode); + goto out; + } + } + + if (likely(opcode == CPL_SGE_EGR_UPDATE)) { + /* do nothing */ + } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) { + const struct cpl_fw6_msg *msg = (const void *)rsp; + + t4_handle_fw_rpl(q->adapter, msg->data); + } else { + dev_err(adapter, "unexpected CPL %#x on FW event queue\n", + opcode); + } +out: + return 0; +} + +int setup_sge_fwevtq(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + int err = 0; + int msi_idx = 0; + + err = t4_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->eth_dev, + msi_idx, NULL, fwevtq_handler, -1, NULL, 0, + rte_socket_id()); + return err; +} + +static int closest_timer(const struct sge *s, int time) +{ + unsigned int i, match = 0; + int delta, min_delta = INT_MAX; + + for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { + delta = time - s->timer_val[i]; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + match = i; + } + } + return match; +} + +static int closest_thres(const struct sge *s, int thres) +{ + unsigned int i, match = 0; + int delta, min_delta = INT_MAX; + + for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { + delta = thres - s->counter_val[i]; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + match = i; + } + } + return match; +} + +/** + * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters + * @q: the Rx queue + * @us: the hold-off time in us, or 0 to disable timer + * @cnt: the hold-off packet count, or 0 to disable counter + * + * Sets an Rx queue's interrupt hold-off time and packet 
count. At least + * one of the two needs to be enabled for the queue to generate interrupts. + */ +int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us, + unsigned int cnt) +{ + struct adapter *adap = q->adapter; + unsigned int timer_val; + + if (cnt) { + int err; + u32 v, new_idx; + + new_idx = closest_thres(&adap->sge, cnt); + if (q->desc && q->pktcnt_idx != new_idx) { + /* the queue has already been created, update it */ + v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | + V_FW_PARAMS_PARAM_X( + FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | + V_FW_PARAMS_PARAM_YZ(q->cntxt_id); + err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, + &v, &new_idx); + if (err) + return err; + } + q->pktcnt_idx = new_idx; + } + + timer_val = (us == 0) ? X_TIMERREG_RESTART_COUNTER : + closest_timer(&adap->sge, us); + + if ((us | cnt) == 0) + q->intr_params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX); + else + q->intr_params = V_QINTR_TIMER_IDX(timer_val) | + V_QINTR_CNT_EN(cnt > 0); + return 0; +} + +static inline bool is_x_1g_port(const struct link_config *lc) +{ + return (lc->supported & FW_PORT_CAP_SPEED_1G) != 0; +} + +static inline bool is_x_10g_port(const struct link_config *lc) +{ + return ((lc->supported & FW_PORT_CAP_SPEED_10G) != 0 || + (lc->supported & FW_PORT_CAP_SPEED_40G) != 0 || + (lc->supported & FW_PORT_CAP_SPEED_100G) != 0); +} + +inline void init_rspq(struct adapter *adap, struct sge_rspq *q, + unsigned int us, unsigned int cnt, + unsigned int size, unsigned int iqe_size) +{ + q->adapter = adap; + cxgb4_set_rspq_intr_params(q, us, cnt); + q->iqe_len = iqe_size; + q->size = size; +} + +int cfg_queue_count(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adap = pi->adapter; + struct sge *s = &adap->sge; + unsigned int max_queues = s->max_ethqsets / adap->params.nports; + + if ((eth_dev->data->nb_rx_queues < 1) || + (eth_dev->data->nb_tx_queues < 1)) + return -EINVAL; + + if ((eth_dev->data->nb_rx_queues > max_queues) || + (eth_dev->data->nb_tx_queues > max_queues)) + return -EINVAL; + + if (eth_dev->data->nb_rx_queues > pi->rss_size) + return -EINVAL; + + /* We must configure RSS, since config has changed*/ + pi->flags &= ~PORT_RSS_DONE; + + pi->n_rx_qsets = eth_dev->data->nb_rx_queues; + pi->n_tx_qsets = eth_dev->data->nb_tx_queues; + + return 0; +} + +void cfg_queues(struct rte_eth_dev *eth_dev) +{ + struct rte_config *config = rte_eal_get_configuration(); + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adap = pi->adapter; + struct sge *s = &adap->sge; + unsigned int i, nb_ports = 0, qidx = 0; + unsigned int q_per_port = 0; + + if (!(adap->flags & CFG_QUEUES)) { + for_each_port(adap, i) { + struct port_info *tpi = adap2pinfo(adap, i); + + nb_ports += (is_x_10g_port(&tpi->link_cfg)) || + is_x_1g_port(&tpi->link_cfg) ? 1 : 0; + } + + /* + * We default up to # of cores queues per 1G/10G port. + */ + if (nb_ports) + q_per_port = (MAX_ETH_QSETS - + (adap->params.nports - nb_ports)) / + nb_ports; + + if (q_per_port > config->lcore_count) + q_per_port = config->lcore_count; + + for_each_port(adap, i) { + struct port_info *pi = adap2pinfo(adap, i); + + pi->first_qset = qidx; + + /* Initially n_rx_qsets == n_tx_qsets */ + pi->n_rx_qsets = (is_x_10g_port(&pi->link_cfg) || + is_x_1g_port(&pi->link_cfg)) ? 
+ q_per_port : 1; + pi->n_tx_qsets = pi->n_rx_qsets; + + if (pi->n_rx_qsets > pi->rss_size) + pi->n_rx_qsets = pi->rss_size; + + qidx += pi->n_rx_qsets; + } + + s->max_ethqsets = qidx; + + for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { + struct sge_eth_rxq *r = &s->ethrxq[i]; + + init_rspq(adap, &r->rspq, 0, 0, 1024, 64); + r->usembufs = 1; + r->fl.size = (r->usembufs ? 1024 : 72); + } + + for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++) + s->ethtxq[i].q.size = 1024; + + init_rspq(adap, &adap->sge.fw_evtq, 0, 0, 1024, 64); + adap->flags |= CFG_QUEUES; + } +} + +void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats) +{ + t4_get_port_stats_offset(pi->adapter, pi->tx_chan, stats, + &pi->stats_base); +} + +void cxgbe_stats_reset(struct port_info *pi) +{ + t4_clr_port_stats(pi->adapter, pi->tx_chan); +} + +static void setup_memwin(struct adapter *adap) +{ + u32 mem_win0_base; + + /* For T5, only relative offset inside the PCIe BAR is passed */ + mem_win0_base = MEMWIN0_BASE; + + /* + * Set up memory window for accessing adapter memory ranges. (Read + * back MA register to ensure that changes propagate before we attempt + * to use the new values.) + */ + t4_write_reg(adap, + PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, + MEMWIN_NIC), + mem_win0_base | V_BIR(0) | + V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT)); + t4_read_reg(adap, + PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, + MEMWIN_NIC)); +} + +static int init_rss(struct adapter *adap) +{ + unsigned int i; + int err; + + err = t4_init_rss_mode(adap, adap->mbox); + if (err) + return err; + + for_each_port(adap, i) { + struct port_info *pi = adap2pinfo(adap, i); + + pi->rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0); + if (!pi->rss) + return -ENOMEM; + } + return 0; +} + +static void print_port_info(struct adapter *adap) +{ + int i; + char buf[80]; + struct rte_pci_addr *loc = &adap->pdev->addr; + + for_each_port(adap, i) { + const struct port_info *pi = &adap->port[i]; + char *bufp = buf; + + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) + bufp += sprintf(bufp, "100/"); + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G) + bufp += sprintf(bufp, "1000/"); + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) + bufp += sprintf(bufp, "10G/"); + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G) + bufp += sprintf(bufp, "40G/"); + if (bufp != buf) + --bufp; + sprintf(bufp, "BASE-%s", + t4_get_port_type_description( + (enum fw_port_type)pi->port_type)); + + dev_info(adap, + " " PCI_PRI_FMT " Chelsio rev %d %s %s\n", + loc->domain, loc->bus, loc->devid, loc->function, + CHELSIO_CHIP_RELEASE(adap->params.chip), buf, + (adap->flags & USING_MSIX) ? " MSI-X" : + (adap->flags & USING_MSI) ? " MSI" : ""); + } +} + +/* + * Tweak configuration based on system architecture, etc. Most of these have + * defaults assigned to them by Firmware Configuration Files (if we're using + * them) but need to be explicitly set if we're using hard-coded + * initialization. So these are essentially common tweaks/settings for + * Configuration Files and hard-coded initialization ... + */ +static int adap_init0_tweaks(struct adapter *adapter) +{ + u8 rx_dma_offset; + + /* + * Fix up various Host-Dependent Parameters like Page Size, Cache + * Line Size, etc. The firmware default is for a 4KB Page Size and + * 64B Cache Line Size ... 
+ */ + t4_fixup_host_params_compat(adapter, CXGBE_PAGE_SIZE, L1_CACHE_BYTES, + T5_LAST_REV); + + /* + * Keep the chip default offset to deliver Ingress packets into our + * DMA buffers to zero + */ + rx_dma_offset = 0; + t4_set_reg_field(adapter, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT), + V_PKTSHIFT(rx_dma_offset)); + + t4_set_reg_field(adapter, A_SGE_FLM_CFG, + V_CREDITCNT(M_CREDITCNT) | M_CREDITCNTPACKING, + V_CREDITCNT(3) | V_CREDITCNTPACKING(1)); + + t4_set_reg_field(adapter, A_SGE_CONTROL2, V_IDMAARBROUNDROBIN(1U), + V_IDMAARBROUNDROBIN(1U)); + + /* + * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux + * adds the pseudo header itself. + */ + t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG, + F_CSUM_HAS_PSEUDO_HDR, 0); + + return 0; +} + +/* + * Attempt to initialize the adapter via a Firmware Configuration File. + */ +static int adap_init0_config(struct adapter *adapter, int reset) +{ + struct fw_caps_config_cmd caps_cmd; + unsigned long mtype = 0, maddr = 0; + u32 finiver, finicsum, cfcsum; + int ret; + int config_issued = 0; + int cfg_addr; + char config_name[20]; + + /* + * Reset device if necessary. + */ + if (reset) { + ret = t4_fw_reset(adapter, adapter->mbox, + F_PIORSTMODE | F_PIORST); + if (ret < 0) { + dev_warn(adapter, "Firmware reset failed, error %d\n", + -ret); + goto bye; + } + } + + cfg_addr = t4_flash_cfg_addr(adapter); + if (cfg_addr < 0) { + ret = cfg_addr; + dev_warn(adapter, "Finding address for firmware config file in flash failed, error %d\n", + -ret); + goto bye; + } + + strcpy(config_name, "On Flash"); + mtype = FW_MEMTYPE_CF_FLASH; + maddr = cfg_addr; + + /* + * Issue a Capability Configuration command to the firmware to get it + * to parse the Configuration File. We don't use t4_fw_config_file() + * because we want the ability to modify various features after we've + * processed the configuration file ... + */ + memset(&caps_cmd, 0, sizeof(caps_cmd)); + caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_READ); + caps_cmd.cfvalid_to_len16 = + cpu_to_be32(F_FW_CAPS_CONFIG_CMD_CFVALID | + V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | + V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | + FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), + &caps_cmd); + /* + * If the CAPS_CONFIG failed with an ENOENT (for a Firmware + * Configuration File in FLASH), our last gasp effort is to use the + * Firmware Configuration File which is embedded in the firmware. A + * very few early versions of the firmware didn't have one embedded + * but we can ignore those. + */ + if (ret == -ENOENT) { + dev_info(adapter, "%s: Going for embedded config in firmware..\n", + __func__); + + memset(&caps_cmd, 0, sizeof(caps_cmd)); + caps_cmd.op_to_write = + cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_READ); + caps_cmd.cfvalid_to_len16 = cpu_to_be32(FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, + sizeof(caps_cmd), &caps_cmd); + strcpy(config_name, "Firmware Default"); + } + + config_issued = 1; + if (ret < 0) + goto bye; + + finiver = be32_to_cpu(caps_cmd.finiver); + finicsum = be32_to_cpu(caps_cmd.finicsum); + cfcsum = be32_to_cpu(caps_cmd.cfcsum); + if (finicsum != cfcsum) + dev_warn(adapter, "Configuration File checksum mismatch: [fini] csum=%#x, computed csum=%#x\n", + finicsum, cfcsum); + + /* + * If we're a pure NIC driver then disable all offloading facilities. 
+ * This will allow the firmware to optimize aspects of the hardware + * configuration which will result in improved performance. + */ + caps_cmd.niccaps &= cpu_to_be16(~(FW_CAPS_CONFIG_NIC_HASHFILTER | + FW_CAPS_CONFIG_NIC_ETHOFLD)); + caps_cmd.toecaps = 0; + caps_cmd.iscsicaps = 0; + caps_cmd.rdmacaps = 0; + caps_cmd.fcoecaps = 0; + + /* + * And now tell the firmware to use the configuration we just loaded. + */ + caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_WRITE); + caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), + NULL); + if (ret < 0) { + dev_warn(adapter, "Unable to finalize Firmware Capabilities %d\n", + -ret); + goto bye; + } + + /* + * Tweak configuration based on system architecture, etc. + */ + ret = adap_init0_tweaks(adapter); + if (ret < 0) { + dev_warn(adapter, "Unable to do init0-tweaks %d\n", -ret); + goto bye; + } + + /* + * And finally tell the firmware to initialize itself using the + * parameters from the Configuration File. + */ + ret = t4_fw_initialize(adapter, adapter->mbox); + if (ret < 0) { + dev_warn(adapter, "Initializing Firmware failed, error %d\n", + -ret); + goto bye; + } + + /* + * Return successfully and note that we're operating with parameters + * not supplied by the driver, rather than from hard-wired + * initialization constants burried in the driver. + */ + dev_info(adapter, + "Successfully configured using Firmware Configuration File \"%s\", version %#x, computed checksum %#x\n", + config_name, finiver, cfcsum); + + return 0; + + /* + * Something bad happened. Return the error ... (If the "error" + * is that there's no Configuration File on the adapter we don't + * want to issue a warning since this is fairly common.) + */ +bye: + if (config_issued && ret != -ENOENT) + dev_warn(adapter, "\"%s\" configuration file error %d\n", + config_name, -ret); + + dev_debug(adapter, "%s: returning ret = %d ..\n", __func__, ret); + return ret; +} + +static int adap_init0(struct adapter *adap) +{ + int ret = 0; + u32 v, port_vec; + enum dev_state state; + u32 params[7], val[7]; + int reset = 1; + int mbox = adap->mbox; + + /* + * Contact FW, advertising Master capability. + */ + ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state); + if (ret < 0) { + dev_err(adap, "%s: could not connect to FW, error %d\n", + __func__, -ret); + goto bye; + } + + CXGBE_DEBUG_MBOX(adap, "%s: adap->mbox = %d; ret = %d\n", __func__, + adap->mbox, ret); + + if (ret == mbox) + adap->flags |= MASTER_PF; + + if (state == DEV_STATE_INIT) { + /* + * Force halt and reset FW because a previous instance may have + * exited abnormally without properly shutting down + */ + ret = t4_fw_halt(adap, adap->mbox, reset); + if (ret < 0) { + dev_err(adap, "Failed to halt. Exit.\n"); + goto bye; + } + + ret = t4_fw_restart(adap, adap->mbox, reset); + if (ret < 0) { + dev_err(adap, "Failed to restart. 
Exit.\n"); + goto bye; + } + state = (enum dev_state)((unsigned)state & ~DEV_STATE_INIT); + } + + t4_get_fw_version(adap, &adap->params.fw_vers); + t4_get_tp_version(adap, &adap->params.tp_vers); + + dev_info(adap, "fw: %u.%u.%u.%u, TP: %u.%u.%u.%u\n", + G_FW_HDR_FW_VER_MAJOR(adap->params.fw_vers), + G_FW_HDR_FW_VER_MINOR(adap->params.fw_vers), + G_FW_HDR_FW_VER_MICRO(adap->params.fw_vers), + G_FW_HDR_FW_VER_BUILD(adap->params.fw_vers), + G_FW_HDR_FW_VER_MAJOR(adap->params.tp_vers), + G_FW_HDR_FW_VER_MINOR(adap->params.tp_vers), + G_FW_HDR_FW_VER_MICRO(adap->params.tp_vers), + G_FW_HDR_FW_VER_BUILD(adap->params.tp_vers)); + + ret = t4_get_core_clock(adap, &adap->params.vpd); + if (ret < 0) { + dev_err(adap, "%s: could not get core clock, error %d\n", + __func__, -ret); + goto bye; + } + + /* + * Find out what ports are available to us. Note that we need to do + * this before calling adap_init0_no_config() since it needs nports + * and portvec ... + */ + v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec); + if (ret < 0) { + dev_err(adap, "%s: failure in t4_queury_params; error = %d\n", + __func__, ret); + goto bye; + } + + adap->params.nports = hweight32(port_vec); + adap->params.portvec = port_vec; + + dev_debug(adap, "%s: adap->params.nports = %u\n", __func__, + adap->params.nports); + + /* + * If the firmware is initialized already (and we're not forcing a + * master initialization), note that we're living with existing + * adapter parameters. Otherwise, it's time to try initializing the + * adapter ... + */ + if (state == DEV_STATE_INIT) { + dev_info(adap, "Coming up as %s: Adapter already initialized\n", + adap->flags & MASTER_PF ? "MASTER" : "SLAVE"); + } else { + dev_info(adap, "Coming up as MASTER: Initializing adapter\n"); + + ret = adap_init0_config(adap, reset); + if (ret == -ENOENT) { + dev_err(adap, + "No Configuration File present on adapter. Using hard-wired configuration parameters.\n"); + goto bye; + } + } + if (ret < 0) { + dev_err(adap, "could not initialize adapter, error %d\n", -ret); + goto bye; + } + + /* + * Give the SGE code a chance to pull in anything that it needs ... + * Note that this must be called after we retrieve our VPD parameters + * in order to know how to convert core ticks to seconds, etc. + */ + ret = t4_sge_init(adap); + if (ret < 0) { + dev_err(adap, "t4_sge_init failed with error %d\n", + -ret); + goto bye; + } + + /* + * Grab some of our basic fundamental operating parameters. + */ +#define FW_PARAM_DEV(param) \ + (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) + +#define FW_PARAM_PFVF(param) \ + (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \ + V_FW_PARAMS_PARAM_Y(0) | \ + V_FW_PARAMS_PARAM_Z(0)) + + /* If we're running on newer firmware, let it know that we're + * prepared to deal with encapsulated CPL messages. Older + * firmware won't understand this and we'll just get + * unencapsulated messages ... + */ + params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); + val[0] = 1; + (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val); + + /* + * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL + * capability. Earlier versions of the firmware didn't have the + * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no + * permission to use ULPTX MEMWRITE DSGL. 
+ */ + if (is_t4(adap->params.chip)) { + adap->params.ulptx_memwrite_dsgl = false; + } else { + params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, + 1, params, val); + adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0); + } + + /* + * The MTU/MSS Table is initialized by now, so load their values. If + * we're initializing the adapter, then we'll make any modifications + * we want to the MTU/MSS Table and also initialize the congestion + * parameters. + */ + t4_read_mtu_tbl(adap, adap->params.mtus, NULL); + if (state != DEV_STATE_INIT) { + int i; + + /* + * The default MTU Table contains values 1492 and 1500. + * However, for TCP, it's better to have two values which are + * a multiple of 8 +/- 4 bytes apart near this popular MTU. + * This allows us to have a TCP Data Payload which is a + * multiple of 8 regardless of what combination of TCP Options + * are in use (always a multiple of 4 bytes) which is + * important for performance reasons. For instance, if no + * options are in use, then we have a 20-byte IP header and a + * 20-byte TCP header. In this case, a 1500-byte MSS would + * result in a TCP Data Payload of 1500 - 40 == 1460 bytes + * which is not a multiple of 8. So using an MSS of 1488 in + * this case results in a TCP Data Payload of 1448 bytes which + * is a multiple of 8. On the other hand, if 12-byte TCP Time + * Stamps have been negotiated, then an MTU of 1500 bytes + * results in a TCP Data Payload of 1448 bytes which, as + * above, is a multiple of 8 bytes ... + */ + for (i = 0; i < NMTUS; i++) + if (adap->params.mtus[i] == 1492) { + adap->params.mtus[i] = 1488; + break; + } + + t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, + adap->params.b_wnd); + } + t4_init_sge_params(adap); + t4_init_tp_params(adap); + + adap->params.drv_memwin = MEMWIN_NIC; + adap->flags |= FW_OK; + dev_debug(adap, "%s: returning zero..\n", __func__); + return 0; + + /* + * Something bad happened. If a command timed out or failed with EIO + * FW does not operate within its spec or something catastrophic + * happened to HW/FW, stop issuing commands. + */ +bye: + if (ret != -ETIMEDOUT && ret != -EIO) + t4_fw_bye(adap, adap->mbox); + return ret; +} + +/** + * t4_os_portmod_changed - handle port module changes + * @adap: the adapter associated with the module change + * @port_id: the port index whose module status has changed + * + * This is the OS-dependent handler for port module changes. It is + * invoked when a port module is removed or inserted for any OS-specific + * processing. 
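 *
 * For example, a module whose mod_type indexes "SR" in mod_str[] on the
 * first port is reported as "Port0: SR port module inserted"; unplugging
 * it later logs "Port0: port module unplugged".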
+ */ +void t4_os_portmod_changed(const struct adapter *adap, int port_id) +{ + static const char * const mod_str[] = { + NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM" + }; + + const struct port_info *pi = &adap->port[port_id]; + + if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) + dev_info(adap, "Port%d: port module unplugged\n", pi->port_id); + else if (pi->mod_type < ARRAY_SIZE(mod_str)) + dev_info(adap, "Port%d: %s port module inserted\n", pi->port_id, + mod_str[pi->mod_type]); + else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) + dev_info(adap, "Port%d: unsupported optical port module inserted\n", + pi->port_id); + else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) + dev_info(adap, "Port%d: unknown port module inserted, forcing TWINAX\n", + pi->port_id); + else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR) + dev_info(adap, "Port%d: transceiver module error\n", + pi->port_id); + else + dev_info(adap, "Port%d: unknown module type %d inserted\n", + pi->port_id, pi->mod_type); +} + +/** + * link_start - enable a port + * @dev: the port to enable + * + * Performs the MAC and PHY actions needed to enable a port. + */ +int link_start(struct port_info *pi) +{ + struct adapter *adapter = pi->adapter; + int ret; + unsigned int mtu; + + mtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len - + (ETHER_HDR_LEN + ETHER_CRC_LEN); + + /* + * We do not set address filters and promiscuity here, the stack does + * that step explicitly. + */ + ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1, + -1, 1, true); + if (ret == 0) { + ret = t4_change_mac(adapter, adapter->mbox, pi->viid, + pi->xact_addr_filt, + (u8 *)&pi->eth_dev->data->mac_addrs[0], + true, true); + if (ret >= 0) { + pi->xact_addr_filt = ret; + ret = 0; + } + } + if (ret == 0) + ret = t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan, + &pi->link_cfg); + if (ret == 0) { + /* + * Enabling a Virtual Interface can result in an interrupt + * during the processing of the VI Enable command and, in some + * paths, result in an attempt to issue another command in the + * interrupt context. Thus, we disable interrupts during the + * course of the VI Enable command ... + */ + ret = t4_enable_vi_params(adapter, adapter->mbox, pi->viid, + true, true, false); + } + return ret; +} + +/** + * cxgb4_write_rss - write the RSS table for a given port + * @pi: the port + * @queues: array of queue indices for RSS + * + * Sets up the portion of the HW RSS table for the port's VI to distribute + * packets to the Rx queues in @queues. + */ +int cxgb4_write_rss(const struct port_info *pi, const u16 *queues) +{ + u16 *rss; + int i, err; + struct adapter *adapter = pi->adapter; + const struct sge_eth_rxq *rxq; + + /* Should never be called before setting up sge eth rx queues */ + BUG_ON(!(adapter->flags & FULL_INIT_DONE)); + + rxq = &adapter->sge.ethrxq[pi->first_qset]; + rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0); + if (!rss) + return -ENOMEM; + + /* map the queue indices to queue ids */ + for (i = 0; i < pi->rss_size; i++, queues++) + rss[i] = rxq[*queues].rspq.abs_id; + + err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0, + pi->rss_size, rss, pi->rss_size); + /* + * If Tunnel All Lookup isn't specified in the global RSS + * Configuration, then we need to specify a default Ingress + * Queue for any ingress packets which aren't hashed. We'll + * use our first ingress queue ... 
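 *
 * For illustration (queue counts assumed): with pi->rss_size = 64 and
 * four Rx queue sets, setup_rss() below hands us a table filled with
 * 0,1,2,3,0,1,2,3,..., the loop above rewrites each entry to the
 * matching response queue's abs_id, and rss[0], the first ingress
 * queue, is what gets passed to t4_config_vi_rss() as that default.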
+ */ + if (!err) + err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid, + F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | + F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN | + F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | + F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | + F_FW_RSS_VI_CONFIG_CMD_UDPEN, + rss[0]); + rte_free(rss); + return err; +} + +/** + * setup_rss - configure RSS + * @adapter: the adapter + * + * Sets up RSS to distribute packets to multiple receive queues. We + * configure the RSS CPU lookup table to distribute to the number of HW + * receive queues, and the response queue lookup table to narrow that + * down to the response queues actually configured for each port. + * We always configure the RSS mapping for all ports since the mapping + * table has plenty of entries. + */ +int setup_rss(struct port_info *pi) +{ + int j, err; + struct adapter *adapter = pi->adapter; + + dev_debug(adapter, "%s: pi->rss_size = %u; pi->n_rx_qsets = %u\n", + __func__, pi->rss_size, pi->n_rx_qsets); + + if (!pi->flags & PORT_RSS_DONE) { + if (adapter->flags & FULL_INIT_DONE) { + /* Fill default values with equal distribution */ + for (j = 0; j < pi->rss_size; j++) + pi->rss[j] = j % pi->n_rx_qsets; + + err = cxgb4_write_rss(pi, pi->rss); + if (err) + return err; + pi->flags |= PORT_RSS_DONE; + } + } + return 0; +} + +/* + * Enable NAPI scheduling and interrupt generation for all Rx queues. + */ +static void enable_rx(struct adapter *adap) +{ + struct sge *s = &adap->sge; + struct sge_rspq *q = &s->fw_evtq; + int i, j; + + /* 0-increment GTS to start the timer and enable interrupts */ + t4_write_reg(adap, MYPF_REG(A_SGE_PF_GTS), + V_SEINTARM(q->intr_params) | + V_INGRESSQID(q->cntxt_id)); + + for_each_port(adap, i) { + const struct port_info *pi = &adap->port[i]; + struct rte_eth_dev *eth_dev = pi->eth_dev; + + for (j = 0; j < eth_dev->data->nb_rx_queues; j++) { + q = eth_dev->data->rx_queues[j]; + + /* + * 0-increment GTS to start the timer and enable + * interrupts + */ + t4_write_reg(adap, MYPF_REG(A_SGE_PF_GTS), + V_SEINTARM(q->intr_params) | + V_INGRESSQID(q->cntxt_id)); + } + } +} + +/** + * cxgb_up - enable the adapter + * @adap: adapter being enabled + * + * Called when the first port is enabled, this function performs the + * actions necessary to make an adapter operational, such as completing + * the initialization of HW modules, and enabling interrupts. + */ +int cxgbe_up(struct adapter *adap) +{ + enable_rx(adap); + t4_sge_tx_monitor_start(adap); + t4_intr_enable(adap); + adap->flags |= FULL_INIT_DONE; + + /* TODO: deadman watchdog ?? */ + return 0; +} + +/* + * Close the port + */ +int cxgbe_down(struct port_info *pi) +{ + struct adapter *adapter = pi->adapter; + int err = 0; + + err = t4_enable_vi(adapter, adapter->mbox, pi->viid, false, false); + if (err) { + dev_err(adapter, "%s: disable_vi failed: %d\n", __func__, err); + return err; + } + + t4_reset_link_config(adapter, pi->port_id); + return 0; +} + +/* + * Release resources when all the ports have been stopped. 
+ */ +void cxgbe_close(struct adapter *adapter) +{ + struct port_info *pi; + int i; + + if (adapter->flags & FULL_INIT_DONE) { + t4_intr_disable(adapter); + t4_sge_tx_monitor_stop(adapter); + t4_free_sge_resources(adapter); + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + if (pi->viid != 0) + t4_free_vi(adapter, adapter->mbox, + adapter->pf, 0, pi->viid); + rte_free(pi->eth_dev->data->mac_addrs); + } + adapter->flags &= ~FULL_INIT_DONE; + } + + if (adapter->flags & FW_OK) + t4_fw_bye(adapter, adapter->mbox); +} + +int cxgbe_probe(struct adapter *adapter) +{ + struct port_info *pi; + int func, i; + int err = 0; + + func = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI)); + adapter->mbox = func; + adapter->pf = func; + + t4_os_lock_init(&adapter->mbox_lock); + TAILQ_INIT(&adapter->mbox_list); + + err = t4_prep_adapter(adapter); + if (err) + return err; + + setup_memwin(adapter); + err = adap_init0(adapter); + if (err) { + dev_err(adapter, "%s: Adapter initialization failed, error %d\n", + __func__, err); + goto out_free; + } + + if (!is_t4(adapter->params.chip)) { + /* + * The userspace doorbell BAR is split evenly into doorbell + * regions, each associated with an egress queue. If this + * per-queue region is large enough (at least UDBS_SEG_SIZE) + * then it can be used to submit a tx work request with an + * implied doorbell. Enable write combining on the BAR if + * there is room for such work requests. + */ + int s_qpp, qpp, num_seg; + + s_qpp = (S_QUEUESPERPAGEPF0 + + (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * + adapter->pf); + qpp = 1 << ((t4_read_reg(adapter, + A_SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp) + & M_QUEUESPERPAGEPF0); + num_seg = CXGBE_PAGE_SIZE / UDBS_SEG_SIZE; + if (qpp > num_seg) + dev_warn(adapter, "Incorrect SGE EGRESS QUEUES_PER_PAGE configuration, continuing in debug mode\n"); + + adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr; + if (!adapter->bar2) { + dev_err(adapter, "cannot map device bar2 region\n"); + err = -ENOMEM; + goto out_free; + } + t4_write_reg(adapter, A_SGE_STAT_CFG, V_STATSOURCE_T5(7) | + V_STATMODE(0)); + } + + for_each_port(adapter, i) { + char name[RTE_ETH_NAME_MAX_LEN]; + struct rte_eth_dev_data *data = NULL; + const unsigned int numa_node = rte_socket_id(); + + pi = &adapter->port[i]; + pi->adapter = adapter; + pi->xact_addr_filt = -1; + pi->port_id = i; + + snprintf(name, sizeof(name), "cxgbe%d", + adapter->eth_dev->data->port_id + i); + + if (i == 0) { + /* First port is already allocated by DPDK */ + pi->eth_dev = adapter->eth_dev; + goto allocate_mac; + } + + /* + * now do all data allocation - for eth_dev structure, + * and internal (private) data for the remaining ports + */ + + /* reserve an ethdev entry */ + pi->eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_PCI); + if (!pi->eth_dev) + goto out_free; + + data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node); + if (!data) + goto out_free; + + data->port_id = adapter->eth_dev->data->port_id + i; + + pi->eth_dev->data = data; + +allocate_mac: + pi->eth_dev->pci_dev = adapter->pdev; + pi->eth_dev->data->dev_private = pi; + pi->eth_dev->driver = adapter->eth_dev->driver; + pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops; + pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst; + pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst; + + rte_eth_copy_pci_info(pi->eth_dev, pi->eth_dev->pci_dev); + + TAILQ_INIT(&pi->eth_dev->link_intr_cbs); + + pi->eth_dev->data->mac_addrs = rte_zmalloc(name, + ETHER_ADDR_LEN, 0); + if (!pi->eth_dev->data->mac_addrs) { + 
dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n", + __func__); + err = -1; + goto out_free; + } + } + + if (adapter->flags & FW_OK) { + err = t4_port_init(adapter, adapter->mbox, adapter->pf, 0); + if (err) { + dev_err(adapter, "%s: t4_port_init failed with err %d\n", + __func__, err); + goto out_free; + } + } + + cfg_queues(adapter->eth_dev); + + print_port_info(adapter); + + err = init_rss(adapter); + if (err) + goto out_free; + + return 0; + +out_free: + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + if (pi->viid != 0) + t4_free_vi(adapter, adapter->mbox, adapter->pf, + 0, pi->viid); + /* Skip first port since it'll be de-allocated by DPDK */ + if (i == 0) + continue; + if (pi->eth_dev->data) + rte_free(pi->eth_dev->data); + } + + if (adapter->flags & FW_OK) + t4_fw_bye(adapter, adapter->mbox); + return -err; +} diff --git a/drivers/net/cxgbe/rte_pmd_cxgbe_version.map b/drivers/net/cxgbe/rte_pmd_cxgbe_version.map new file mode 100644 index 00000000..bd8138a0 --- /dev/null +++ b/drivers/net/cxgbe/rte_pmd_cxgbe_version.map @@ -0,0 +1,4 @@ +DPDK_2.1 { + + local: *; +}; diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c new file mode 100644 index 00000000..ab5a842a --- /dev/null +++ b/drivers/net/cxgbe/sge.c @@ -0,0 +1,2255 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2015 Chelsio Communications. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Chelsio Communications nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <sys/queue.h> +#include <stdio.h> +#include <errno.h> +#include <stdint.h> +#include <string.h> +#include <unistd.h> +#include <stdarg.h> +#include <inttypes.h> +#include <netinet/in.h> + +#include <rte_byteorder.h> +#include <rte_common.h> +#include <rte_cycles.h> +#include <rte_interrupts.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_pci.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_memory.h> +#include <rte_memzone.h> +#include <rte_tailq.h> +#include <rte_eal.h> +#include <rte_alarm.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_atomic.h> +#include <rte_malloc.h> +#include <rte_random.h> +#include <rte_dev.h> + +#include "common.h" +#include "t4_regs.h" +#include "t4_msg.h" +#include "cxgbe.h" + +static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap, + struct sge_eth_txq *txq); + +/* + * Max number of Rx buffers we replenish at a time. + */ +#define MAX_RX_REFILL 64U + +#define NOMEM_TMR_IDX (SGE_NTIMERS - 1) + +/* + * Max Tx descriptor space we allow for an Ethernet packet to be inlined + * into a WR. + */ +#define MAX_IMM_TX_PKT_LEN 256 + +/* + * Rx buffer sizes for "usembufs" Free List buffers (one ingress packet + * per mbuf buffer). We currently only support two sizes for 1500- and + * 9000-byte MTUs. We could easily support more but there doesn't seem to be + * much need for that ... + */ +#define FL_MTU_SMALL 1500 +#define FL_MTU_LARGE 9000 + +static inline unsigned int fl_mtu_bufsize(struct adapter *adapter, + unsigned int mtu) +{ + struct sge *s = &adapter->sge; + + return CXGBE_ALIGN(s->pktshift + ETHER_HDR_LEN + VLAN_HLEN + mtu, + s->fl_align); +} + +#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL) +#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE) + +/* + * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses + * these to specify the buffer size as an index into the SGE Free List Buffer + * Size register array. We also use bit 4, when the buffer has been unmapped + * for DMA, but this is of course never sent to the hardware and is only used + * to prevent double unmappings. All of the above requires that the Free List + * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are + * 32-byte or or a power of 2 greater in alignment. Since the SGE's minimal + * Free List Buffer alignment is 32 bytes, this works out for us ... + */ +enum { + RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */ + RX_BUF_SIZE = 0x0f, /* bottom three bits are for buf sizes */ + RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */ + + /* + * XXX We shouldn't depend on being able to use these indices. + * XXX Especially when some other Master PF has initialized the + * XXX adapter or we use the Firmware Configuration File. We + * XXX should really search through the Host Buffer Size register + * XXX array for the appropriately sized buffer indices. + */ + RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */ + RX_LARGE_PG_BUF = 0x1, /* buffer large page buffer */ + + RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */ + RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */ +}; + +/** + * txq_avail - return the number of available slots in a Tx queue + * @q: the Tx queue + * + * Returns the number of descriptors in a Tx queue available to write new + * packets. 
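 *
 * For example, with q->size = 1024 and q->in_use = 1000 this returns 23.
 * The extra "- 1" keeps one descriptor permanently unused, in the same
 * spirit as the slack fl_cap() below leaves on the free lists.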
+ */ +static inline unsigned int txq_avail(const struct sge_txq *q) +{ + return q->size - 1 - q->in_use; +} + +static int map_mbuf(struct rte_mbuf *mbuf, dma_addr_t *addr) +{ + struct rte_mbuf *m = mbuf; + + for (; m; m = m->next, addr++) { + *addr = m->buf_physaddr + rte_pktmbuf_headroom(m); + if (*addr == 0) + goto out_err; + } + return 0; + +out_err: + return -ENOMEM; +} + +/** + * free_tx_desc - reclaims Tx descriptors and their buffers + * @q: the Tx queue to reclaim descriptors from + * @n: the number of descriptors to reclaim + * + * Reclaims Tx descriptors from an SGE Tx queue and frees the associated + * Tx buffers. Called with the Tx queue lock held. + */ +static void free_tx_desc(struct sge_txq *q, unsigned int n) +{ + struct tx_sw_desc *d; + unsigned int cidx = 0; + + d = &q->sdesc[cidx]; + while (n--) { + if (d->mbuf) { /* an SGL is present */ + rte_pktmbuf_free(d->mbuf); + d->mbuf = NULL; + } + if (d->coalesce.idx) { + int i; + + for (i = 0; i < d->coalesce.idx; i++) { + rte_pktmbuf_free(d->coalesce.mbuf[i]); + d->coalesce.mbuf[i] = NULL; + } + d->coalesce.idx = 0; + } + ++d; + if (++cidx == q->size) { + cidx = 0; + d = q->sdesc; + } + RTE_MBUF_PREFETCH_TO_FREE(&q->sdesc->mbuf->pool); + } +} + +static void reclaim_tx_desc(struct sge_txq *q, unsigned int n) +{ + struct tx_sw_desc *d; + unsigned int cidx = q->cidx; + + d = &q->sdesc[cidx]; + while (n--) { + if (d->mbuf) { /* an SGL is present */ + rte_pktmbuf_free(d->mbuf); + d->mbuf = NULL; + } + ++d; + if (++cidx == q->size) { + cidx = 0; + d = q->sdesc; + } + } + q->cidx = cidx; +} + +/** + * fl_cap - return the capacity of a free-buffer list + * @fl: the FL + * + * Returns the capacity of a free-buffer list. The capacity is less than + * the size because one descriptor needs to be left unpopulated, otherwise + * HW will think the FL is empty. + */ +static inline unsigned int fl_cap(const struct sge_fl *fl) +{ + return fl->size - 8; /* 1 descriptor = 8 buffers */ +} + +/** + * fl_starving - return whether a Free List is starving. + * @adapter: pointer to the adapter + * @fl: the Free List + * + * Tests specified Free List to see whether the number of buffers + * available to the hardware has falled below our "starvation" + * threshold. + */ +static inline bool fl_starving(const struct adapter *adapter, + const struct sge_fl *fl) +{ + const struct sge *s = &adapter->sge; + + return fl->avail - fl->pend_cred <= s->fl_starve_thres; +} + +static inline unsigned int get_buf_size(struct adapter *adapter, + const struct rx_sw_desc *d) +{ + unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE; + unsigned int buf_size = 0; + + switch (rx_buf_size_idx) { + case RX_SMALL_MTU_BUF: + buf_size = FL_MTU_SMALL_BUFSIZE(adapter); + break; + + case RX_LARGE_MTU_BUF: + buf_size = FL_MTU_LARGE_BUFSIZE(adapter); + break; + + default: + BUG_ON(1); + /* NOT REACHED */ + } + + return buf_size; +} + +/** + * free_rx_bufs - free the Rx buffers on an SGE free list + * @q: the SGE free list to free buffers from + * @n: how many buffers to free + * + * Release the next @n buffers on an SGE free-buffer Rx queue. The + * buffers must be made inaccessible to HW before calling this function. 
+ */ +static void free_rx_bufs(struct sge_fl *q, int n) +{ + unsigned int cidx = q->cidx; + struct rx_sw_desc *d; + + d = &q->sdesc[cidx]; + while (n--) { + if (d->buf) { + rte_pktmbuf_free(d->buf); + d->buf = NULL; + } + ++d; + if (++cidx == q->size) { + cidx = 0; + d = q->sdesc; + } + q->avail--; + } + q->cidx = cidx; +} + +/** + * unmap_rx_buf - unmap the current Rx buffer on an SGE free list + * @q: the SGE free list + * + * Unmap the current buffer on an SGE free-buffer Rx queue. The + * buffer must be made inaccessible to HW before calling this function. + * + * This is similar to @free_rx_bufs above but does not free the buffer. + * Do note that the FL still loses any further access to the buffer. + */ +static void unmap_rx_buf(struct sge_fl *q) +{ + if (++q->cidx == q->size) + q->cidx = 0; + q->avail--; +} + +static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) +{ + if (q->pend_cred >= 64) { + u32 val = adap->params.arch.sge_fl_db; + + if (is_t4(adap->params.chip)) + val |= V_PIDX(q->pend_cred / 8); + else + val |= V_PIDX_T5(q->pend_cred / 8); + + /* + * Make sure all memory writes to the Free List queue are + * committed before we tell the hardware about them. + */ + wmb(); + + /* + * If we don't have access to the new User Doorbell (T5+), use + * the old doorbell mechanism; otherwise use the new BAR2 + * mechanism. + */ + if (unlikely(!q->bar2_addr)) { + t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), + val | V_QID(q->cntxt_id)); + } else { + writel(val | V_QID(q->bar2_qid), + (void *)((uintptr_t)q->bar2_addr + + SGE_UDB_KDOORBELL)); + + /* + * This Write memory Barrier will force the write to + * the User Doorbell area to be flushed. + */ + wmb(); + } + q->pend_cred &= 7; + } +} + +static inline void set_rx_sw_desc(struct rx_sw_desc *sd, void *buf, + dma_addr_t mapping) +{ + sd->buf = buf; + sd->dma_addr = mapping; /* includes size low bits */ +} + +/** + * refill_fl_usembufs - refill an SGE Rx buffer ring with mbufs + * @adap: the adapter + * @q: the ring to refill + * @n: the number of new buffers to allocate + * + * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, + * allocated with the supplied gfp flags. The caller must assure that + * @n does not exceed the queue's capacity. If afterwards the queue is + * found critically low mark it as starving in the bitmap of starving FLs. + * + * Returns the number of buffers allocated. + */ +static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q, + int n) +{ + struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, fl); + unsigned int cred = q->avail; + __be64 *d = &q->desc[q->pidx]; + struct rx_sw_desc *sd = &q->sdesc[q->pidx]; + unsigned int buf_size_idx = RX_SMALL_MTU_BUF; + struct rte_mbuf *buf_bulk[n]; + int ret, i; + struct rte_pktmbuf_pool_private *mbp_priv; + u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.jumbo_frame; + + /* Use jumbo mtu buffers iff mbuf data room size can fit jumbo data. 
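 *
 * For illustration, with jumbo_frame enabled and a mempool created with
 * data_room_size = 9000 + RTE_PKTMBUF_HEADROOM (an assumed value), the
 * check below sees exactly 9000 usable bytes and selects the
 * RX_LARGE_MTU_BUF class; a pool sized for ordinary 1500-byte frames
 * stays on RX_SMALL_MTU_BUF.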
*/ + mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool); + if (jumbo_en && + ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000)) + buf_size_idx = RX_LARGE_MTU_BUF; + + ret = rte_mempool_get_bulk(rxq->rspq.mb_pool, (void *)buf_bulk, n); + if (unlikely(ret != 0)) { + dev_debug(adap, "%s: failed to allocated fl entries in bulk ..\n", + __func__); + q->alloc_failed++; + rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++; + goto out; + } + + for (i = 0; i < n; i++) { + struct rte_mbuf *mbuf = buf_bulk[i]; + dma_addr_t mapping; + + if (!mbuf) { + dev_debug(adap, "%s: mbuf alloc failed\n", __func__); + q->alloc_failed++; + rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++; + goto out; + } + + rte_mbuf_refcnt_set(mbuf, 1); + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + mbuf->next = NULL; + mbuf->nb_segs = 1; + mbuf->port = rxq->rspq.port_id; + + mapping = (dma_addr_t)(mbuf->buf_physaddr + mbuf->data_off); + mapping |= buf_size_idx; + *d++ = cpu_to_be64(mapping); + set_rx_sw_desc(sd, mbuf, mapping); + sd++; + + q->avail++; + if (++q->pidx == q->size) { + q->pidx = 0; + sd = q->sdesc; + d = q->desc; + } + } + +out: cred = q->avail - cred; + q->pend_cred += cred; + ring_fl_db(adap, q); + + if (unlikely(fl_starving(adap, q))) { + /* + * Make sure data has been written to free list + */ + wmb(); + q->low++; + } + + return cred; +} + +/** + * refill_fl - refill an SGE Rx buffer ring with mbufs + * @adap: the adapter + * @q: the ring to refill + * @n: the number of new buffers to allocate + * + * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, + * allocated with the supplied gfp flags. The caller must assure that + * @n does not exceed the queue's capacity. Returns the number of buffers + * allocated. + */ +static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n) +{ + return refill_fl_usembufs(adap, q, n); +} + +static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) +{ + refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail)); +} + +/* + * Return the number of reclaimable descriptors in a Tx queue. + */ +static inline int reclaimable(const struct sge_txq *q) +{ + int hw_cidx = ntohs(q->stat->cidx); + + hw_cidx -= q->cidx; + if (hw_cidx < 0) + return hw_cidx + q->size; + return hw_cidx; +} + +/** + * reclaim_completed_tx - reclaims completed Tx descriptors + * @q: the Tx queue to reclaim completed descriptors from + * + * Reclaims Tx descriptors that the SGE has indicated it has processed. + */ +void reclaim_completed_tx(struct sge_txq *q) +{ + unsigned int avail = reclaimable(q); + + do { + /* reclaim as much as possible */ + reclaim_tx_desc(q, avail); + q->in_use -= avail; + avail = reclaimable(q); + } while (avail); +} + +/** + * sgl_len - calculates the size of an SGL of the given capacity + * @n: the number of SGL entries + * + * Calculates the number of flits needed for a scatter/gather list that + * can hold the given number of entries. + */ +static inline unsigned int sgl_len(unsigned int n) +{ + /* + * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA + * addresses. The DSGL Work Request starts off with a 32-bit DSGL + * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N, + * repeated sequences of { Length[i], Length[i+1], Address[i], + * Address[i+1] } (this ensures that all addresses are on 64-bit + * boundaries). If N is even, then Length[N+1] should be set to 0 and + * Address[N+1] is omitted. + * + * The following calculation incorporates all of the above. 
It's + * somewhat hard to follow but, briefly: the "+2" accounts for the + * first two flits which include the DSGL header, Length0 and + * Address0; the "(3*(n-1))/2" covers the main body of list entries (3 + * flits for every pair of the remaining N) +1 if (n-1) is odd; and + * finally the "+((n-1)&1)" adds the one remaining flit needed if + * (n-1) is odd ... + */ + n--; + return (3 * n) / 2 + (n & 1) + 2; +} + +/** + * flits_to_desc - returns the num of Tx descriptors for the given flits + * @n: the number of flits + * + * Returns the number of Tx descriptors needed for the supplied number + * of flits. + */ +static inline unsigned int flits_to_desc(unsigned int n) +{ + return DIV_ROUND_UP(n, 8); +} + +/** + * is_eth_imm - can an Ethernet packet be sent as immediate data? + * @m: the packet + * + * Returns whether an Ethernet packet is small enough to fit as + * immediate data. Return value corresponds to the headroom required. + */ +static inline int is_eth_imm(const struct rte_mbuf *m) +{ + unsigned int hdrlen = (m->ol_flags & PKT_TX_TCP_SEG) ? + sizeof(struct cpl_tx_pkt_lso_core) : 0; + + hdrlen += sizeof(struct cpl_tx_pkt); + if (m->pkt_len <= MAX_IMM_TX_PKT_LEN - hdrlen) + return hdrlen; + + return 0; +} + +/** + * calc_tx_flits - calculate the number of flits for a packet Tx WR + * @m: the packet + * + * Returns the number of flits needed for a Tx WR for the given Ethernet + * packet, including the needed WR and CPL headers. + */ +static inline unsigned int calc_tx_flits(const struct rte_mbuf *m) +{ + unsigned int flits; + int hdrlen; + + /* + * If the mbuf is small enough, we can pump it out as a work request + * with only immediate data. In that case we just have to have the + * TX Packet header plus the mbuf data in the Work Request. + */ + + hdrlen = is_eth_imm(m); + if (hdrlen) + return DIV_ROUND_UP(m->pkt_len + hdrlen, sizeof(__be64)); + + /* + * Otherwise, we're going to have to construct a Scatter gather list + * of the mbuf body and fragments. We also include the flits necessary + * for the TX Packet Work Request and CPL. We always have a firmware + * Write Header (incorporated as part of the cpl_tx_pkt_lso and + * cpl_tx_pkt structures), followed by either a TX Packet Write CPL + * message or, if we're doing a Large Send Offload, an LSO CPL message + * with an embeded TX Packet Write CPL message. + */ + flits = sgl_len(m->nb_segs); + if (m->tso_segsz) + flits += (sizeof(struct fw_eth_tx_pkt_wr) + + sizeof(struct cpl_tx_pkt_lso_core) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + else + flits += (sizeof(struct fw_eth_tx_pkt_wr) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + return flits; +} + +/** + * write_sgl - populate a scatter/gather list for a packet + * @mbuf: the packet + * @q: the Tx queue we are writing into + * @sgl: starting location for writing the SGL + * @end: points right after the end of the SGL + * @start: start offset into mbuf main-body data to include in the SGL + * @addr: address of mapped region + * + * Generates a scatter/gather list for the buffers that make up a packet. + * The caller must provide adequate space for the SGL that will be written. + * The SGL includes all of the packet's page fragments and the data in its + * main body except for the first @start bytes. @sgl must be 16-byte + * aligned and within a Tx descriptor with available space. @end points + * write after the end of the SGL but does not account for any potential + * wrap around, i.e., @end > @sgl. 
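 *
 * Worked example (segment count assumed): a 3-segment mbuf needs
 * sgl_len(3) = 5 flits here: one flit for the DSGL command word plus
 * Length0, one for Address0, then one {Length1, Length2} flit followed
 * by the two remaining address flits.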
+ */ +static void write_sgl(struct rte_mbuf *mbuf, struct sge_txq *q, + struct ulptx_sgl *sgl, u64 *end, unsigned int start, + const dma_addr_t *addr) +{ + unsigned int i, len; + struct ulptx_sge_pair *to; + struct rte_mbuf *m = mbuf; + unsigned int nfrags = m->nb_segs; + struct ulptx_sge_pair buf[nfrags / 2]; + + len = m->data_len - start; + sgl->len0 = htonl(len); + sgl->addr0 = rte_cpu_to_be_64(addr[0]); + + sgl->cmd_nsge = htonl(V_ULPTX_CMD(ULP_TX_SC_DSGL) | + V_ULPTX_NSGE(nfrags)); + if (likely(--nfrags == 0)) + return; + /* + * Most of the complexity below deals with the possibility we hit the + * end of the queue in the middle of writing the SGL. For this case + * only we create the SGL in a temporary buffer and then copy it. + */ + to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; + + for (i = 0; nfrags >= 2; nfrags -= 2, to++) { + m = m->next; + to->len[0] = rte_cpu_to_be_32(m->data_len); + to->addr[0] = rte_cpu_to_be_64(addr[++i]); + m = m->next; + to->len[1] = rte_cpu_to_be_32(m->data_len); + to->addr[1] = rte_cpu_to_be_64(addr[++i]); + } + if (nfrags) { + m = m->next; + to->len[0] = rte_cpu_to_be_32(m->data_len); + to->len[1] = rte_cpu_to_be_32(0); + to->addr[0] = rte_cpu_to_be_64(addr[i + 1]); + } + if (unlikely((u8 *)end > (u8 *)q->stat)) { + unsigned int part0 = RTE_PTR_DIFF((u8 *)q->stat, + (u8 *)sgl->sge); + unsigned int part1; + + if (likely(part0)) + memcpy(sgl->sge, buf, part0); + part1 = RTE_PTR_DIFF((u8 *)end, (u8 *)q->stat); + rte_memcpy(q->desc, RTE_PTR_ADD((u8 *)buf, part0), part1); + end = RTE_PTR_ADD((void *)q->desc, part1); + } + if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ + *(u64 *)end = 0; +} + +#define IDXDIFF(head, tail, wrap) \ + ((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head)) + +#define Q_IDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->size) +#define R_IDXDIFF(q, idx) IDXDIFF((q)->cidx, (q)->idx, (q)->size) + +/** + * ring_tx_db - ring a Tx queue's doorbell + * @adap: the adapter + * @q: the Tx queue + * @n: number of new descriptors to give to HW + * + * Ring the doorbel for a Tx queue. + */ +static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q) +{ + int n = Q_IDXDIFF(q, dbidx); + + /* + * Make sure that all writes to the TX Descriptors are committed + * before we tell the hardware about them. + */ + rte_wmb(); + + /* + * If we don't have access to the new User Doorbell (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(!q->bar2_addr)) { + u32 val = V_PIDX(n); + + /* + * For T4 we need to participate in the Doorbell Recovery + * mechanism. + */ + if (!q->db_disabled) + t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), + V_QID(q->cntxt_id) | val); + else + q->db_pidx_inc += n; + q->db_pidx = q->pidx; + } else { + u32 val = V_PIDX_T5(n); + + /* + * T4 and later chips share the same PIDX field offset within + * the doorbell, but T5 and later shrank the field in order to + * gain a bit for Doorbell Priority. The field was absurdly + * large in the first place (14 bits) so we just use the T5 + * and later limits and warn if a Queue ID is too large. + */ + WARN_ON(val & F_DBPRIO); + + writel(val | V_QID(q->bar2_qid), + (void *)((uintptr_t)q->bar2_addr + SGE_UDB_KDOORBELL)); + + /* + * This Write Memory Barrier will force the write to the User + * Doorbell area to be flushed. This is needed to prevent + * writes on different CPUs for the same queue from hitting + * the adapter out of order. 
This is required when some Work + * Requests take the Write Combine Gather Buffer path (user + * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some + * take the traditional path where we simply increment the + * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the + * hardware DMA read the actual Work Request. + */ + rte_wmb(); + } + q->dbidx = q->pidx; +} + +/* + * Figure out what HW csum a packet wants and return the appropriate control + * bits. + */ +static u64 hwcsum(enum chip_type chip, const struct rte_mbuf *m) +{ + int csum_type; + + if (m->ol_flags & PKT_TX_IP_CKSUM) { + switch (m->ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_TCP_CKSUM: + csum_type = TX_CSUM_TCPIP; + break; + case PKT_TX_UDP_CKSUM: + csum_type = TX_CSUM_UDPIP; + break; + default: + goto nocsum; + } + } else { + goto nocsum; + } + + if (likely(csum_type >= TX_CSUM_TCPIP)) { + int hdr_len = V_TXPKT_IPHDR_LEN(m->l3_len); + int eth_hdr_len = m->l2_len; + + if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5) + hdr_len |= V_TXPKT_ETHHDR_LEN(eth_hdr_len); + else + hdr_len |= V_T6_TXPKT_ETHHDR_LEN(eth_hdr_len); + return V_TXPKT_CSUM_TYPE(csum_type) | hdr_len; + } +nocsum: + /* + * unknown protocol, disable HW csum + * and hope a bad packet is detected + */ + return F_TXPKT_L4CSUM_DIS; +} + +static inline void txq_advance(struct sge_txq *q, unsigned int n) +{ + q->in_use += n; + q->pidx += n; + if (q->pidx >= q->size) + q->pidx -= q->size; +} + +#define MAX_COALESCE_LEN 64000 + +static inline int wraps_around(struct sge_txq *q, int ndesc) +{ + return (q->pidx + ndesc) > q->size ? 1 : 0; +} + +static void tx_timer_cb(void *data) +{ + struct adapter *adap = (struct adapter *)data; + struct sge_eth_txq *txq = &adap->sge.ethtxq[0]; + int i; + unsigned int coal_idx; + + /* monitor any pending tx */ + for (i = 0; i < adap->sge.max_ethqsets; i++, txq++) { + if (t4_os_trylock(&txq->txq_lock)) { + coal_idx = txq->q.coalesce.idx; + if (coal_idx) { + if (coal_idx == txq->q.last_coal_idx && + txq->q.pidx == txq->q.last_pidx) { + ship_tx_pkt_coalesce_wr(adap, txq); + } else { + txq->q.last_coal_idx = coal_idx; + txq->q.last_pidx = txq->q.pidx; + } + } + t4_os_unlock(&txq->txq_lock); + } + } + rte_eal_alarm_set(50, tx_timer_cb, (void *)adap); +} + +/** + * ship_tx_pkt_coalesce_wr - finalizes and ships a coalesce WR + * @ adap: adapter structure + * @txq: tx queue + * + * writes the different fields of the pkts WR and sends it. 
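 *
 * Within this file it is invoked when a WR fills up (ETH_COALESCE_PKT_NUM
 * packets, MAX_COALESCE_LEN bytes or the per-WR flit budget), when the
 * coalesce type changes or the ring would wrap, before t4_eth_xmit()
 * takes the non-coalesced path, and from the 50us tx_timer_cb() when a
 * partially built WR has gone idle.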
+ */ +static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap, + struct sge_eth_txq *txq) +{ + u32 wr_mid; + struct sge_txq *q = &txq->q; + struct fw_eth_tx_pkts_wr *wr; + unsigned int ndesc; + + /* fill the pkts WR header */ + wr = (void *)&q->desc[q->pidx]; + wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); + + wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2)); + ndesc = flits_to_desc(q->coalesce.flits); + wr->equiq_to_len16 = htonl(wr_mid); + wr->plen = cpu_to_be16(q->coalesce.len); + wr->npkt = q->coalesce.idx; + wr->r3 = 0; + wr->type = q->coalesce.type; + + /* zero out coalesce structure members */ + q->coalesce.idx = 0; + q->coalesce.flits = 0; + q->coalesce.len = 0; + + txq_advance(q, ndesc); + txq->stats.coal_wr++; + txq->stats.coal_pkts += wr->npkt; + + if (Q_IDXDIFF(q, equeidx) >= q->size / 2) { + q->equeidx = q->pidx; + wr_mid |= F_FW_WR_EQUEQ; + wr->equiq_to_len16 = htonl(wr_mid); + } + ring_tx_db(adap, q); +} + +/** + * should_tx_packet_coalesce - decides wether to coalesce an mbuf or not + * @txq: tx queue where the mbuf is sent + * @mbuf: mbuf to be sent + * @nflits: return value for number of flits needed + * @adap: adapter structure + * + * This function decides if a packet should be coalesced or not. + */ +static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq, + struct rte_mbuf *mbuf, + unsigned int *nflits, + struct adapter *adap) +{ + struct sge_txq *q = &txq->q; + unsigned int flits, ndesc; + unsigned char type = 0; + int credits, hw_cidx = ntohs(q->stat->cidx); + int in_use = q->pidx - hw_cidx + flits_to_desc(q->coalesce.flits); + + /* use coal WR type 1 when no frags are present */ + type = (mbuf->nb_segs == 1) ? 1 : 0; + + if (in_use < 0) + in_use += q->size; + + if (unlikely(type != q->coalesce.type && q->coalesce.idx)) + ship_tx_pkt_coalesce_wr(adap, txq); + + /* calculate the number of flits required for coalescing this packet + * without the 2 flits of the WR header. These are added further down + * if we are just starting in new PKTS WR. sgl_len doesn't account for + * the possible 16 bytes alignment ULP TX commands so we do it here. + */ + flits = (sgl_len(mbuf->nb_segs) + 1) & ~1U; + if (type == 0) + flits += (sizeof(struct ulp_txpkt) + + sizeof(struct ulptx_idata)) / sizeof(__be64); + flits += sizeof(struct cpl_tx_pkt_core) / sizeof(__be64); + *nflits = flits; + + /* If coalescing is on, the mbuf is added to a pkts WR */ + if (q->coalesce.idx) { + ndesc = DIV_ROUND_UP(q->coalesce.flits + flits, 8); + credits = txq_avail(q) - ndesc; + + /* If we are wrapping or this is last mbuf then, send the + * already coalesced mbufs and let the non-coalesce pass + * handle the mbuf. + */ + if (unlikely(credits < 0 || wraps_around(q, ndesc))) { + ship_tx_pkt_coalesce_wr(adap, txq); + return 0; + } + + /* If the max coalesce len or the max WR len is reached + * ship the WR and keep coalescing on. 
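 *
 * Concretely, "max coalesce len" is MAX_COALESCE_LEN (64000 bytes of
 * payload) and "max WR len" is q->coalesce.max, which t4_eth_xmit() sets
 * to at most 64 flits (512 bytes) so the WR ends on a 512-byte boundary.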
+ */ + if (unlikely((q->coalesce.len + mbuf->pkt_len > + MAX_COALESCE_LEN) || + (q->coalesce.flits + flits > + q->coalesce.max))) { + ship_tx_pkt_coalesce_wr(adap, txq); + goto new; + } + return 1; + } + +new: + /* start a new pkts WR, the WR header is not filled below */ + flits += sizeof(struct fw_eth_tx_pkts_wr) / sizeof(__be64); + ndesc = flits_to_desc(q->coalesce.flits + flits); + credits = txq_avail(q) - ndesc; + + if (unlikely(credits < 0 || wraps_around(q, ndesc))) + return 0; + q->coalesce.flits += 2; + q->coalesce.type = type; + q->coalesce.ptr = (unsigned char *)&q->desc[q->pidx] + + 2 * sizeof(__be64); + return 1; +} + +/** + * tx_do_packet_coalesce - add an mbuf to a coalesce WR + * @txq: sge_eth_txq used send the mbuf + * @mbuf: mbuf to be sent + * @flits: flits needed for this mbuf + * @adap: adapter structure + * @pi: port_info structure + * @addr: mapped address of the mbuf + * + * Adds an mbuf to be sent as part of a coalesce WR by filling a + * ulp_tx_pkt command, ulp_tx_sc_imm command, cpl message and + * ulp_tx_sc_dsgl command. + */ +static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq, + struct rte_mbuf *mbuf, + int flits, struct adapter *adap, + const struct port_info *pi, + dma_addr_t *addr) +{ + u64 cntrl, *end; + struct sge_txq *q = &txq->q; + struct ulp_txpkt *mc; + struct ulptx_idata *sc_imm; + struct cpl_tx_pkt_core *cpl; + struct tx_sw_desc *sd; + unsigned int idx = q->coalesce.idx, len = mbuf->pkt_len; + + if (q->coalesce.type == 0) { + mc = (struct ulp_txpkt *)q->coalesce.ptr; + mc->cmd_dest = htonl(V_ULPTX_CMD(4) | V_ULP_TXPKT_DEST(0) | + V_ULP_TXPKT_FID(adap->sge.fw_evtq.cntxt_id) | + F_ULP_TXPKT_RO); + mc->len = htonl(DIV_ROUND_UP(flits, 2)); + sc_imm = (struct ulptx_idata *)(mc + 1); + sc_imm->cmd_more = htonl(V_ULPTX_CMD(ULP_TX_SC_IMM) | + F_ULP_TX_SC_MORE); + sc_imm->len = htonl(sizeof(*cpl)); + end = (u64 *)mc + flits; + cpl = (struct cpl_tx_pkt_core *)(sc_imm + 1); + } else { + end = (u64 *)q->coalesce.ptr + flits; + cpl = (struct cpl_tx_pkt_core *)q->coalesce.ptr; + } + + /* update coalesce structure for this txq */ + q->coalesce.flits += flits; + q->coalesce.ptr += flits * sizeof(__be64); + q->coalesce.len += mbuf->pkt_len; + + /* fill the cpl message, same as in t4_eth_xmit, this should be kept + * similar to t4_eth_xmit + */ + if (mbuf->ol_flags & PKT_TX_IP_CKSUM) { + cntrl = hwcsum(adap->params.chip, mbuf) | + F_TXPKT_IPCSUM_DIS; + txq->stats.tx_cso++; + } else { + cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS; + } + + if (mbuf->ol_flags & PKT_TX_VLAN_PKT) { + txq->stats.vlan_ins++; + cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(mbuf->vlan_tci); + } + + cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | + V_TXPKT_INTF(pi->tx_chan) | + V_TXPKT_PF(adap->pf)); + cpl->pack = htons(0); + cpl->len = htons(len); + cpl->ctrl1 = cpu_to_be64(cntrl); + write_sgl(mbuf, q, (struct ulptx_sgl *)(cpl + 1), end, 0, addr); + txq->stats.pkts++; + txq->stats.tx_bytes += len; + + sd = &q->sdesc[q->pidx + (idx >> 1)]; + if (!(idx & 1)) { + if (sd->coalesce.idx) { + int i; + + for (i = 0; i < sd->coalesce.idx; i++) { + rte_pktmbuf_free(sd->coalesce.mbuf[i]); + sd->coalesce.mbuf[i] = NULL; + } + } + } + + /* store pointers to the mbuf and the sgl used in free_tx_desc. 
+ * each tx desc can hold two pointers corresponding to the value + * of ETH_COALESCE_PKT_PER_DESC + */ + sd->coalesce.mbuf[idx & 1] = mbuf; + sd->coalesce.sgl[idx & 1] = (struct ulptx_sgl *)(cpl + 1); + sd->coalesce.idx = (idx & 1) + 1; + + /* send the coaelsced work request if max reached */ + if (++q->coalesce.idx == ETH_COALESCE_PKT_NUM) + ship_tx_pkt_coalesce_wr(adap, txq); + return 0; +} + +/** + * t4_eth_xmit - add a packet to an Ethernet Tx queue + * @txq: the egress queue + * @mbuf: the packet + * + * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled. + */ +int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf) +{ + const struct port_info *pi; + struct cpl_tx_pkt_lso_core *lso; + struct adapter *adap; + struct rte_mbuf *m = mbuf; + struct fw_eth_tx_pkt_wr *wr; + struct cpl_tx_pkt_core *cpl; + struct tx_sw_desc *d; + dma_addr_t addr[m->nb_segs]; + unsigned int flits, ndesc, cflits; + int l3hdr_len, l4hdr_len, eth_xtra_len; + int len, last_desc; + int credits; + u32 wr_mid; + u64 cntrl, *end; + bool v6; + u32 max_pkt_len = txq->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len; + + /* Reject xmit if queue is stopped */ + if (unlikely(txq->flags & EQ_STOPPED)) + return -(EBUSY); + + /* + * The chip min packet length is 10 octets but play safe and reject + * anything shorter than an Ethernet header. + */ + if (unlikely(m->pkt_len < ETHER_HDR_LEN)) { +out_free: + rte_pktmbuf_free(m); + return 0; + } + + if ((!(m->ol_flags & PKT_TX_TCP_SEG)) && + (unlikely(m->pkt_len > max_pkt_len))) + goto out_free; + + pi = (struct port_info *)txq->eth_dev->data->dev_private; + adap = pi->adapter; + + cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS; + /* align the end of coalesce WR to a 512 byte boundary */ + txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8; + + if (!((m->ol_flags & PKT_TX_TCP_SEG) || (m->pkt_len > ETHER_MAX_LEN))) { + if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) { + if (unlikely(map_mbuf(mbuf, addr) < 0)) { + dev_warn(adap, "%s: mapping err for coalesce\n", + __func__); + txq->stats.mapping_err++; + goto out_free; + } + rte_prefetch0((volatile void *)addr); + return tx_do_packet_coalesce(txq, mbuf, cflits, adap, + pi, addr); + } else { + return -EBUSY; + } + } + + if (txq->q.coalesce.idx) + ship_tx_pkt_coalesce_wr(adap, txq); + + flits = calc_tx_flits(m); + ndesc = flits_to_desc(flits); + credits = txq_avail(&txq->q) - ndesc; + + if (unlikely(credits < 0)) { + dev_debug(adap, "%s: Tx ring %u full; credits = %d\n", + __func__, txq->q.cntxt_id, credits); + return -EBUSY; + } + + if (unlikely(map_mbuf(m, addr) < 0)) { + txq->stats.mapping_err++; + goto out_free; + } + + wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(flits, 2)); + if (Q_IDXDIFF(&txq->q, equeidx) >= 64) { + txq->q.equeidx = txq->q.pidx; + wr_mid |= F_FW_WR_EQUEQ; + } + + wr = (void *)&txq->q.desc[txq->q.pidx]; + wr->equiq_to_len16 = htonl(wr_mid); + wr->r3 = rte_cpu_to_be_64(0); + end = (u64 *)wr + flits; + + len = 0; + len += sizeof(*cpl); + + /* Coalescing skipped and we send through normal path */ + if (!(m->ol_flags & PKT_TX_TCP_SEG)) { + wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | + V_FW_WR_IMMDLEN(len)); + cpl = (void *)(wr + 1); + if (m->ol_flags & PKT_TX_IP_CKSUM) { + cntrl = hwcsum(adap->params.chip, m) | + F_TXPKT_IPCSUM_DIS; + txq->stats.tx_cso++; + } + } else { + lso = (void *)(wr + 1); + v6 = (m->ol_flags & PKT_TX_IPV6) != 0; + l3hdr_len = m->l3_len; + l4hdr_len = m->l4_len; + eth_xtra_len = m->l2_len - ETHER_HDR_LEN; + len += sizeof(*lso); + wr->op_immdlen = 
htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | + V_FW_WR_IMMDLEN(len)); + lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) | + F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE | + V_LSO_IPV6(v6) | + V_LSO_ETHHDR_LEN(eth_xtra_len / 4) | + V_LSO_IPHDR_LEN(l3hdr_len / 4) | + V_LSO_TCPHDR_LEN(l4hdr_len / 4)); + lso->ipid_ofst = htons(0); + lso->mss = htons(m->tso_segsz); + lso->seqno_offset = htonl(0); + if (is_t4(adap->params.chip)) + lso->len = htonl(m->pkt_len); + else + lso->len = htonl(V_LSO_T5_XFER_SIZE(m->pkt_len)); + cpl = (void *)(lso + 1); + cntrl = V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | + V_TXPKT_IPHDR_LEN(l3hdr_len) | + V_TXPKT_ETHHDR_LEN(eth_xtra_len); + txq->stats.tso++; + txq->stats.tx_cso += m->tso_segsz; + } + + if (m->ol_flags & PKT_TX_VLAN_PKT) { + txq->stats.vlan_ins++; + cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->vlan_tci); + } + + cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | + V_TXPKT_INTF(pi->tx_chan) | + V_TXPKT_PF(adap->pf)); + cpl->pack = htons(0); + cpl->len = htons(m->pkt_len); + cpl->ctrl1 = cpu_to_be64(cntrl); + + txq->stats.pkts++; + txq->stats.tx_bytes += m->pkt_len; + last_desc = txq->q.pidx + ndesc - 1; + if (last_desc >= (int)txq->q.size) + last_desc -= txq->q.size; + + d = &txq->q.sdesc[last_desc]; + if (d->coalesce.idx) { + int i; + + for (i = 0; i < d->coalesce.idx; i++) { + rte_pktmbuf_free(d->coalesce.mbuf[i]); + d->coalesce.mbuf[i] = NULL; + } + d->coalesce.idx = 0; + } + write_sgl(m, &txq->q, (struct ulptx_sgl *)(cpl + 1), end, 0, + addr); + txq->q.sdesc[last_desc].mbuf = m; + txq->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1); + txq_advance(&txq->q, ndesc); + ring_tx_db(adap, &txq->q); + return 0; +} + +/** + * alloc_ring - allocate resources for an SGE descriptor ring + * @dev: the PCI device's core device + * @nelem: the number of descriptors + * @elem_size: the size of each descriptor + * @sw_size: the size of the SW state associated with each ring element + * @phys: the physical address of the allocated ring + * @metadata: address of the array holding the SW state for the ring + * @stat_size: extra space in HW ring for status information + * @node: preferred node for memory allocations + * + * Allocates resources for an SGE descriptor ring, such as Tx queues, + * free buffer lists, or response queues. Each SGE ring requires + * space for its HW descriptors plus, optionally, space for the SW state + * associated with each HW entry (the metadata). The function returns + * three values: the virtual address for the HW ring (the return value + * of the function), the bus address of the HW ring, and the address + * of the SW ring. + */ +static void *alloc_ring(size_t nelem, size_t elem_size, + size_t sw_size, dma_addr_t *phys, void *metadata, + size_t stat_size, __rte_unused uint16_t queue_id, + int socket_id, const char *z_name, + const char *z_name_sw) +{ + size_t len = CXGBE_MAX_RING_DESC_SIZE * elem_size + stat_size; + const struct rte_memzone *tz; + void *s = NULL; + + dev_debug(adapter, "%s: nelem = %zu; elem_size = %zu; sw_size = %zu; " + "stat_size = %zu; queue_id = %u; socket_id = %d; z_name = %s;" + " z_name_sw = %s\n", __func__, nelem, elem_size, sw_size, + stat_size, queue_id, socket_id, z_name, z_name_sw); + + tz = rte_memzone_lookup(z_name); + if (tz) { + dev_debug(adapter, "%s: tz exists...returning existing..\n", + __func__); + goto alloc_sw_ring; + } + + /* + * Allocate TX/RX ring hardware descriptors. 
A memzone large enough to + * handle the maximum ring size is allocated in order to allow for + * resizing in later calls to the queue setup function. + */ + tz = rte_memzone_reserve_aligned(z_name, len, socket_id, 0, 4096); + if (!tz) + return NULL; + +alloc_sw_ring: + memset(tz->addr, 0, len); + if (sw_size) { + s = rte_zmalloc_socket(z_name_sw, nelem * sw_size, + RTE_CACHE_LINE_SIZE, socket_id); + + if (!s) { + dev_err(adapter, "%s: failed to get sw_ring memory\n", + __func__); + return NULL; + } + } + if (metadata) + *(void **)metadata = s; + + *phys = (uint64_t)tz->phys_addr; + return tz->addr; +} + +/** + * t4_pktgl_to_mbuf_usembufs - build an mbuf from a packet gather list + * @gl: the gather list + * + * Builds an mbuf from the given packet gather list. Returns the mbuf or + * %NULL if mbuf allocation failed. + */ +static struct rte_mbuf *t4_pktgl_to_mbuf_usembufs(const struct pkt_gl *gl) +{ + /* + * If there's only one mbuf fragment, just return that. + */ + if (likely(gl->nfrags == 1)) + return gl->mbufs[0]; + + return NULL; +} + +/** + * t4_pktgl_to_mbuf - build an mbuf from a packet gather list + * @gl: the gather list + * + * Builds an mbuf from the given packet gather list. Returns the mbuf or + * %NULL if mbuf allocation failed. + */ +static struct rte_mbuf *t4_pktgl_to_mbuf(const struct pkt_gl *gl) +{ + return t4_pktgl_to_mbuf_usembufs(gl); +} + +/** + * t4_ethrx_handler - process an ingress ethernet packet + * @q: the response queue that received the packet + * @rsp: the response queue descriptor holding the RX_PKT message + * @si: the gather list of packet fragments + * + * Process an ingress ethernet packet and deliver it to the stack. + */ +int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *si) +{ + struct rte_mbuf *mbuf; + const struct cpl_rx_pkt *pkt; + const struct rss_header *rss_hdr; + bool csum_ok; + struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); + + rss_hdr = (const void *)rsp; + pkt = (const void *)&rsp[1]; + csum_ok = pkt->csum_calc && !pkt->err_vec; + + mbuf = t4_pktgl_to_mbuf(si); + if (unlikely(!mbuf)) { + rxq->stats.rx_drops++; + return 0; + } + + mbuf->port = pkt->iff; + if (pkt->l2info & htonl(F_RXF_IP)) { + mbuf->packet_type = RTE_PTYPE_L3_IPV4; + if (unlikely(!csum_ok)) + mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; + + if ((pkt->l2info & htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok) + mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; + } else if (pkt->l2info & htonl(F_RXF_IP6)) { + mbuf->packet_type = RTE_PTYPE_L3_IPV6; + } + + mbuf->port = pkt->iff; + + if (!rss_hdr->filter_tid && rss_hdr->hash_type) { + mbuf->ol_flags |= PKT_RX_RSS_HASH; + mbuf->hash.rss = ntohl(rss_hdr->hash_val); + } + + if (pkt->vlan_ex) { + mbuf->ol_flags |= PKT_RX_VLAN_PKT; + mbuf->vlan_tci = ntohs(pkt->vlan); + } + rxq->stats.pkts++; + rxq->stats.rx_bytes += mbuf->pkt_len; + + return 0; +} + +/** + * is_new_response - check if a response is newly written + * @r: the response descriptor + * @q: the response queue + * + * Returns true if a response descriptor contains a yet unprocessed + * response. + */ +static inline bool is_new_response(const struct rsp_ctrl *r, + const struct sge_rspq *q) +{ + return (r->u.type_gen >> S_RSPD_GEN) == q->gen; +} + +#define CXGB4_MSG_AN ((void *)1) + +/** + * rspq_next - advance to the next entry in a response queue + * @q: the queue + * + * Updates the state of a response queue to advance it to the next entry. 
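Editor's sketch: the response queue uses a generation bit. is_new_response() above treats a slot as unprocessed only while the generation bit stored in the descriptor matches the queue's current generation, and the queue flips its generation each time the consumer index wraps, so stale entries from the previous lap are ignored. The self-contained model below captures that handshake with a plain array standing in for the hardware ring; it is illustrative only.

#include <assert.h>
#include <stdbool.h>

#define RING_SIZE 4

struct toy_slot { int gen; int data; };

struct toy_rspq {
	struct toy_slot ring[RING_SIZE];
	unsigned int cidx; /* consumer index */
	int gen;           /* generation the consumer expects */
};

/* Producer writes a slot and tags it with its own generation, which also
 * flips every time the producer wraps. */
static void produce(struct toy_rspq *q, unsigned int pidx, int gen, int data)
{
	q->ring[pidx].data = data;
	q->ring[pidx].gen = gen;
}

static bool is_new(const struct toy_rspq *q)
{
	return q->ring[q->cidx].gen == q->gen;
}

static void next(struct toy_rspq *q)
{
	if (++q->cidx == RING_SIZE) {
		q->cidx = 0;
		q->gen ^= 1; /* wrapped: expect the opposite generation now */
	}
}

int main(void)
{
	struct toy_rspq q = { .gen = 1 };

	assert(!is_new(&q));        /* nothing produced yet */
	produce(&q, 0, 1, 42);
	assert(is_new(&q));         /* matching generation: consume it */
	next(&q);
	assert(!is_new(&q));        /* slot 1 still carries the old generation */
	return 0;
}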
+ */ +static inline void rspq_next(struct sge_rspq *q) +{ + q->cur_desc = (const __be64 *)((const char *)q->cur_desc + q->iqe_len); + if (unlikely(++q->cidx == q->size)) { + q->cidx = 0; + q->gen ^= 1; + q->cur_desc = q->desc; + } +} + +/** + * process_responses - process responses from an SGE response queue + * @q: the ingress queue to process + * @budget: how many responses can be processed in this round + * @rx_pkts: mbuf to put the pkts + * + * Process responses from an SGE response queue up to the supplied budget. + * Responses include received packets as well as control messages from FW + * or HW. + * + * Additionally choose the interrupt holdoff time for the next interrupt + * on this queue. If the system is under memory shortage use a fairly + * long delay to help recovery. + */ +static int process_responses(struct sge_rspq *q, int budget, + struct rte_mbuf **rx_pkts) +{ + int ret = 0, rsp_type; + int budget_left = budget; + const struct rsp_ctrl *rc; + struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); + + while (likely(budget_left)) { + rc = (const struct rsp_ctrl *) + ((const char *)q->cur_desc + (q->iqe_len - sizeof(*rc))); + + if (!is_new_response(rc, q)) + break; + + /* + * Ensure response has been read + */ + rmb(); + rsp_type = G_RSPD_TYPE(rc->u.type_gen); + + if (likely(rsp_type == X_RSPD_TYPE_FLBUF)) { + const struct rx_sw_desc *rsd = + &rxq->fl.sdesc[rxq->fl.cidx]; + const struct rss_header *rss_hdr = + (const void *)q->cur_desc; + const struct cpl_rx_pkt *cpl = + (const void *)&q->cur_desc[1]; + bool csum_ok = cpl->csum_calc && !cpl->err_vec; + struct rte_mbuf *pkt, *npkt; + u32 len, bufsz; + + len = ntohl(rc->pldbuflen_qid); + BUG_ON(!(len & F_RSPD_NEWBUF)); + pkt = rsd->buf; + npkt = pkt; + len = G_RSPD_LEN(len); + pkt->pkt_len = len; + + /* Chain mbufs into len if necessary */ + while (len) { + struct rte_mbuf *new_pkt = rsd->buf; + + bufsz = min(get_buf_size(q->adapter, rsd), len); + new_pkt->data_len = bufsz; + unmap_rx_buf(&rxq->fl); + len -= bufsz; + npkt->next = new_pkt; + npkt = new_pkt; + pkt->nb_segs++; + rsd = &rxq->fl.sdesc[rxq->fl.cidx]; + } + npkt->next = NULL; + pkt->nb_segs--; + + if (cpl->l2info & htonl(F_RXF_IP)) { + pkt->packet_type = RTE_PTYPE_L3_IPV4; + if (unlikely(!csum_ok)) + pkt->ol_flags |= PKT_RX_IP_CKSUM_BAD; + + if ((cpl->l2info & + htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok) + pkt->ol_flags |= PKT_RX_L4_CKSUM_BAD; + } else if (cpl->l2info & htonl(F_RXF_IP6)) { + pkt->packet_type = RTE_PTYPE_L3_IPV6; + } + + if (!rss_hdr->filter_tid && rss_hdr->hash_type) { + pkt->ol_flags |= PKT_RX_RSS_HASH; + pkt->hash.rss = ntohl(rss_hdr->hash_val); + } + + if (cpl->vlan_ex) { + pkt->ol_flags |= PKT_RX_VLAN_PKT; + pkt->vlan_tci = ntohs(cpl->vlan); + } + rxq->stats.pkts++; + rxq->stats.rx_bytes += pkt->pkt_len; + rx_pkts[budget - budget_left] = pkt; + } else if (likely(rsp_type == X_RSPD_TYPE_CPL)) { + ret = q->handler(q, q->cur_desc, NULL); + } else { + ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN); + } + + if (unlikely(ret)) { + /* couldn't process descriptor, back off for recovery */ + q->next_intr_params = V_QINTR_TIMER_IDX(NOMEM_TMR_IDX); + break; + } + + rspq_next(q); + budget_left--; + + if (R_IDXDIFF(q, gts_idx) >= 64) { + unsigned int cidx_inc = R_IDXDIFF(q, gts_idx); + unsigned int params; + u32 val; + + if (fl_cap(&rxq->fl) - rxq->fl.avail >= 64) + __refill_fl(q->adapter, &rxq->fl); + params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX); + q->next_intr_params = params; + val = V_CIDXINC(cidx_inc) | V_SEINTARM(params); + + 
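Editor's sketch: the receive loop above walks the free list and carves the reported packet length into one chunk per buffer (data_len = min(buffer size, remaining length)), so the final segment carries only the remainder. The standalone program below reproduces just that slicing, with a fixed buffer size standing in for get_buf_size(); it is not the driver's code.

#include <assert.h>
#include <stdio.h>

/* Slice a packet of 'len' bytes into segments of at most 'bufsz' bytes,
 * mirroring the while (len) chaining loop in process_responses(). Returns
 * the segment count and writes each segment length to seg_len[]. */
static unsigned int slice_packet(unsigned int len, unsigned int bufsz,
				 unsigned int *seg_len, unsigned int max_segs)
{
	unsigned int n = 0;

	while (len && n < max_segs) {
		unsigned int chunk = len < bufsz ? len : bufsz;

		seg_len[n++] = chunk;
		len -= chunk;
	}
	return n;
}

int main(void)
{
	unsigned int segs[8];
	/* A 5000-byte frame landing in 2048-byte buffers: 2048 + 2048 + 904. */
	unsigned int n = slice_packet(5000, 2048, segs, 8);

	assert(n == 3);
	assert(segs[0] == 2048 && segs[1] == 2048 && segs[2] == 904);
	printf("segments: %u\n", n);
	return 0;
}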
if (unlikely(!q->bar2_addr)) + t4_write_reg(q->adapter, MYPF_REG(A_SGE_PF_GTS), + val | + V_INGRESSQID((u32)q->cntxt_id)); + else { + writel(val | V_INGRESSQID(q->bar2_qid), + (void *)((uintptr_t)q->bar2_addr + + SGE_UDB_GTS)); + /* + * This Write memory Barrier will force the + * write to the User Doorbell area to be + * flushed. + */ + wmb(); + } + q->gts_idx = q->cidx; + } + } + + /* + * If this is a Response Queue with an associated Free List and + * there's room for another chunk of new Free List buffer pointers, + * refill the Free List. + */ + + if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64) + __refill_fl(q->adapter, &rxq->fl); + + return budget - budget_left; +} + +int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts, + unsigned int budget, unsigned int *work_done) +{ + int err = 0; + + *work_done = process_responses(q, budget, rx_pkts); + return err; +} + +/** + * bar2_address - return the BAR2 address for an SGE Queue's Registers + * @adapter: the adapter + * @qid: the SGE Queue ID + * @qtype: the SGE Queue Type (Egress or Ingress) + * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues + * + * Returns the BAR2 address for the SGE Queue Registers associated with + * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also + * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE + * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID" + * Registers are supported (e.g. the Write Combining Doorbell Buffer). + */ +static void __iomem *bar2_address(struct adapter *adapter, unsigned int qid, + enum t4_bar2_qtype qtype, + unsigned int *pbar2_qid) +{ + u64 bar2_qoffset; + int ret; + + ret = t4_bar2_sge_qregs(adapter, qid, qtype, &bar2_qoffset, pbar2_qid); + if (ret) + return NULL; + + return adapter->bar2 + bar2_qoffset; +} + +int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq) +{ + struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq); + unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff; + + return t4_iq_start_stop(adap, adap->mbox, true, adap->pf, 0, + rq->cntxt_id, fl_id, 0xffff); +} + +int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq) +{ + struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq); + unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff; + + return t4_iq_start_stop(adap, adap->mbox, false, adap->pf, 0, + rq->cntxt_id, fl_id, 0xffff); +} + +/* + * @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0 + * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map + */ +int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, + struct rte_eth_dev *eth_dev, int intr_idx, + struct sge_fl *fl, rspq_handler_t hnd, int cong, + struct rte_mempool *mp, int queue_id, int socket_id) +{ + int ret, flsz = 0; + struct fw_iq_cmd c; + struct sge *s = &adap->sge; + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + char z_name[RTE_MEMZONE_NAMESIZE]; + char z_name_sw[RTE_MEMZONE_NAMESIZE]; + unsigned int nb_refill; + + /* Size needs to be multiple of 16, including status entry. */ + iq->size = cxgbe_roundup(iq->size, 16); + + snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d", + eth_dev->driver->pci_drv.name, fwevtq ? 
"fwq_ring" : "rx_ring", + eth_dev->data->port_id, queue_id); + snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name); + + iq->desc = alloc_ring(iq->size, iq->iqe_len, 0, &iq->phys_addr, NULL, 0, + queue_id, socket_id, z_name, z_name_sw); + if (!iq->desc) + return -ENOMEM; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | + F_FW_CMD_WRITE | F_FW_CMD_EXEC | + V_FW_IQ_CMD_PFN(adap->pf) | V_FW_IQ_CMD_VFN(0)); + c.alloc_to_len16 = htonl(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART | + (sizeof(c) / 16)); + c.type_to_iqandstindex = + htonl(V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | + V_FW_IQ_CMD_IQASYNCH(fwevtq) | + V_FW_IQ_CMD_VIID(pi->viid) | + V_FW_IQ_CMD_IQANDST(intr_idx < 0) | + V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT) | + V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx : + -intr_idx - 1)); + c.iqdroprss_to_iqesize = + htons(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) | + F_FW_IQ_CMD_IQGTSMODE | + V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) | + V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4)); + c.iqsize = htons(iq->size); + c.iqaddr = cpu_to_be64(iq->phys_addr); + if (cong >= 0) + c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN); + + if (fl) { + struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq, + fl); + enum chip_type chip = (enum chip_type)CHELSIO_CHIP_VERSION( + adap->params.chip); + + /* + * Allocate the ring for the hardware free list (with space + * for its status page) along with the associated software + * descriptor ring. The free list size needs to be a multiple + * of the Egress Queue Unit and at least 2 Egress Units larger + * than the SGE's Egress Congrestion Threshold + * (fl_starve_thres - 1). + */ + if (fl->size < s->fl_starve_thres - 1 + 2 * 8) + fl->size = s->fl_starve_thres - 1 + 2 * 8; + fl->size = cxgbe_roundup(fl->size, 8); + + snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d", + eth_dev->driver->pci_drv.name, + fwevtq ? "fwq_ring" : "fl_ring", + eth_dev->data->port_id, queue_id); + snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name); + + fl->desc = alloc_ring(fl->size, sizeof(__be64), + sizeof(struct rx_sw_desc), + &fl->addr, &fl->sdesc, s->stat_len, + queue_id, socket_id, z_name, z_name_sw); + + if (!fl->desc) + goto fl_nomem; + + flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); + c.iqns_to_fl0congen |= + htonl(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) | + (unlikely(rxq->usembufs) ? + 0 : F_FW_IQ_CMD_FL0PACKEN) | + F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | + F_FW_IQ_CMD_FL0PADEN); + if (cong >= 0) + c.iqns_to_fl0congen |= + htonl(V_FW_IQ_CMD_FL0CNGCHMAP(cong) | + F_FW_IQ_CMD_FL0CONGCIF | + F_FW_IQ_CMD_FL0CONGEN); + + /* In T6, for egress queue type FL there is internal overhead + * of 16B for header going into FLM module. + * Hence maximum allowed burst size will be 448 bytes. + */ + c.fl0dcaen_to_fl0cidxfthresh = + htons(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_128B) | + V_FW_IQ_CMD_FL0FBMAX((chip <= CHELSIO_T5) ? 
+ X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B)); + c.fl0size = htons(flsz); + c.fl0addr = cpu_to_be64(fl->addr); + } + + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); + if (ret) + goto err; + + iq->cur_desc = iq->desc; + iq->cidx = 0; + iq->gts_idx = 0; + iq->gen = 1; + iq->next_intr_params = iq->intr_params; + iq->cntxt_id = ntohs(c.iqid); + iq->abs_id = ntohs(c.physiqid); + iq->bar2_addr = bar2_address(adap, iq->cntxt_id, T4_BAR2_QTYPE_INGRESS, + &iq->bar2_qid); + iq->size--; /* subtract status entry */ + iq->eth_dev = eth_dev; + iq->handler = hnd; + iq->port_id = pi->port_id; + iq->mb_pool = mp; + + /* set offset to -1 to distinguish ingress queues without FL */ + iq->offset = fl ? 0 : -1; + + if (fl) { + fl->cntxt_id = ntohs(c.fl0id); + fl->avail = 0; + fl->pend_cred = 0; + fl->pidx = 0; + fl->cidx = 0; + fl->alloc_failed = 0; + + /* + * Note, we must initialize the BAR2 Free List User Doorbell + * information before refilling the Free List! + */ + fl->bar2_addr = bar2_address(adap, fl->cntxt_id, + T4_BAR2_QTYPE_EGRESS, + &fl->bar2_qid); + + nb_refill = refill_fl(adap, fl, fl_cap(fl)); + if (nb_refill != fl_cap(fl)) { + ret = -ENOMEM; + dev_err(adap, "%s: mbuf alloc failed with error: %d\n", + __func__, ret); + goto refill_fl_err; + } + } + + /* + * For T5 and later we attempt to set up the Congestion Manager values + * of the new RX Ethernet Queue. This should really be handled by + * firmware because it's more complex than any host driver wants to + * get involved with and it's different per chip and this is almost + * certainly wrong. Formware would be wrong as well, but it would be + * a lot easier to fix in one place ... For now we do something very + * simple (and hopefully less wrong). + */ + if (!is_t4(adap->params.chip) && cong >= 0) { + u32 param, val; + int i; + + param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | + V_FW_PARAMS_PARAM_YZ(iq->cntxt_id)); + if (cong == 0) { + val = V_CONMCTXT_CNGTPMODE(X_CONMCTXT_CNGTPMODE_QUEUE); + } else { + val = V_CONMCTXT_CNGTPMODE( + X_CONMCTXT_CNGTPMODE_CHANNEL); + for (i = 0; i < 4; i++) { + if (cong & (1 << i)) + val |= V_CONMCTXT_CNGCHMAP(1 << + (i << 2)); + } + } + ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, + ¶m, &val); + if (ret) + dev_warn(adap->pdev_dev, "Failed to set Congestion Manager Context for Ingress Queue %d: %d\n", + iq->cntxt_id, -ret); + } + + return 0; + +refill_fl_err: + t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP, + iq->cntxt_id, fl->cntxt_id, 0xffff); +fl_nomem: + ret = -ENOMEM; +err: + iq->cntxt_id = 0; + iq->abs_id = 0; + if (iq->desc) + iq->desc = NULL; + + if (fl && fl->desc) { + rte_free(fl->sdesc); + fl->cntxt_id = 0; + fl->sdesc = NULL; + fl->desc = NULL; + } + return ret; +} + +static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) +{ + q->cntxt_id = id; + q->bar2_addr = bar2_address(adap, q->cntxt_id, T4_BAR2_QTYPE_EGRESS, + &q->bar2_qid); + q->cidx = 0; + q->pidx = 0; + q->dbidx = 0; + q->in_use = 0; + q->equeidx = 0; + q->coalesce.idx = 0; + q->coalesce.len = 0; + q->coalesce.flits = 0; + q->last_coal_idx = 0; + q->last_pidx = 0; + q->stat = (void *)&q->desc[q->size]; +} + +int t4_sge_eth_txq_start(struct sge_eth_txq *txq) +{ + /* + * TODO: For flow-control, queue may be stopped waiting to reclaim + * credits. + * Ensure queue is in EQ_STOPPED state before starting it. 
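Editor's sketch: the congestion-manager loop above turns a 4-bit channel mask into a channel map by shifting each selected channel into its own nibble (1 << (i << 2)) before the field-placement macro is applied. The tiny program below shows only that bit manipulation, with the V_CONMCTXT_CNGCHMAP placement deliberately left out since its position is defined in headers not shown here.

#include <assert.h>
#include <stdint.h>

/* Expand a 4-bit channel mask so that channel i occupies bit 4*i of the
 * result, matching the 'cong' loop above. */
static uint32_t expand_channel_map(unsigned int chan_mask)
{
	uint32_t map = 0;
	unsigned int i;

	for (i = 0; i < 4; i++)
		if (chan_mask & (1u << i))
			map |= 1u << (i << 2);
	return map;
}

int main(void)
{
	assert(expand_channel_map(0x1) == 0x0001); /* channel 0 -> bit 0  */
	assert(expand_channel_map(0x2) == 0x0010); /* channel 1 -> bit 4  */
	assert(expand_channel_map(0xF) == 0x1111); /* all four channels   */
	return 0;
}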
+ */ + if (!(txq->flags & EQ_STOPPED)) + return -(EBUSY); + + txq->flags &= ~EQ_STOPPED; + + return 0; +} + +int t4_sge_eth_txq_stop(struct sge_eth_txq *txq) +{ + txq->flags |= EQ_STOPPED; + + return 0; +} + +int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, + struct rte_eth_dev *eth_dev, uint16_t queue_id, + unsigned int iqid, int socket_id) +{ + int ret, nentries; + struct fw_eq_eth_cmd c; + struct sge *s = &adap->sge; + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + char z_name[RTE_MEMZONE_NAMESIZE]; + char z_name_sw[RTE_MEMZONE_NAMESIZE]; + + /* Add status entries */ + nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); + + snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d", + eth_dev->driver->pci_drv.name, "tx_ring", + eth_dev->data->port_id, queue_id); + snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name); + + txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc), + sizeof(struct tx_sw_desc), &txq->q.phys_addr, + &txq->q.sdesc, s->stat_len, queue_id, + socket_id, z_name, z_name_sw); + if (!txq->q.desc) + return -ENOMEM; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | + F_FW_CMD_WRITE | F_FW_CMD_EXEC | + V_FW_EQ_ETH_CMD_PFN(adap->pf) | + V_FW_EQ_ETH_CMD_VFN(0)); + c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_ALLOC | + F_FW_EQ_ETH_CMD_EQSTART | (sizeof(c) / 16)); + c.autoequiqe_to_viid = htonl(F_FW_EQ_ETH_CMD_AUTOEQUEQE | + V_FW_EQ_ETH_CMD_VIID(pi->viid)); + c.fetchszm_to_iqid = + htonl(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | + V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | + F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(iqid)); + c.dcaen_to_eqsize = + htonl(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) | + V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | + V_FW_EQ_ETH_CMD_EQSIZE(nentries)); + c.eqaddr = rte_cpu_to_be_64(txq->q.phys_addr); + + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); + if (ret) { + rte_free(txq->q.sdesc); + txq->q.sdesc = NULL; + txq->q.desc = NULL; + return ret; + } + + init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd))); + txq->stats.tso = 0; + txq->stats.pkts = 0; + txq->stats.tx_cso = 0; + txq->stats.coal_wr = 0; + txq->stats.vlan_ins = 0; + txq->stats.tx_bytes = 0; + txq->stats.coal_pkts = 0; + txq->stats.mapping_err = 0; + txq->flags |= EQ_STOPPED; + txq->eth_dev = eth_dev; + t4_os_lock_init(&txq->txq_lock); + return 0; +} + +static void free_txq(struct sge_txq *q) +{ + q->cntxt_id = 0; + q->sdesc = NULL; + q->desc = NULL; +} + +static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, + struct sge_fl *fl) +{ + unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; + + t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP, + rq->cntxt_id, fl_id, 0xffff); + rq->cntxt_id = 0; + rq->abs_id = 0; + rq->desc = NULL; + + if (fl) { + free_rx_bufs(fl, fl->avail); + rte_free(fl->sdesc); + fl->sdesc = NULL; + fl->cntxt_id = 0; + fl->desc = NULL; + } +} + +/* + * Clear all queues of the port + * + * Note: This function must only be called after rx and tx path + * of the port have been disabled. 
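Editor's sketch: t4_sge_alloc_eth_txq() above grows the requested ring by enough descriptors to hold the SGE status page (stat_len is 64 or 128 bytes, as set in t4_sge_init() later in this file). Assuming a 64-byte struct tx_desc, which matches the eight-flit descriptor arithmetic used throughout this file, the padding works out as in the small sketch below; the 64-byte assumption is the editor's, not stated in this hunk.

#include <assert.h>
#include <stddef.h>

#define TX_DESC_BYTES 64u /* assumption: one descriptor == 8 flits of 8 bytes */

/* Hardware entries to allocate for a ring of 'size' usable descriptors when
 * a 'stat_len'-byte status page is appended, as in the nentries math above. */
static size_t toy_ring_entries(size_t size, size_t stat_len)
{
	return size + stat_len / TX_DESC_BYTES;
}

int main(void)
{
	/* 1024-entry ring: one extra entry for a 64-byte status page, two
	 * extra entries when EGRSTATUSPAGESIZE selects 128 bytes. */
	assert(toy_ring_entries(1024, 64) == 1025);
	assert(toy_ring_entries(1024, 128) == 1026);
	return 0;
}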
+ */ +void t4_sge_eth_clear_queues(struct port_info *pi) +{ + int i; + struct adapter *adap = pi->adapter; + struct sge_eth_rxq *rxq = &adap->sge.ethrxq[pi->first_qset]; + struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset]; + + for (i = 0; i < pi->n_rx_qsets; i++, rxq++) { + if (rxq->rspq.desc) + t4_sge_eth_rxq_stop(adap, &rxq->rspq); + } + for (i = 0; i < pi->n_tx_qsets; i++, txq++) { + if (txq->q.desc) { + struct sge_txq *q = &txq->q; + + t4_sge_eth_txq_stop(txq); + reclaim_completed_tx(q); + free_tx_desc(q, q->size); + q->equeidx = q->pidx; + } + } +} + +void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq) +{ + if (rxq->rspq.desc) { + t4_sge_eth_rxq_stop(adap, &rxq->rspq); + free_rspq_fl(adap, &rxq->rspq, rxq->fl.size ? &rxq->fl : NULL); + } +} + +void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq) +{ + if (txq->q.desc) { + t4_sge_eth_txq_stop(txq); + reclaim_completed_tx(&txq->q); + t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, txq->q.cntxt_id); + free_tx_desc(&txq->q, txq->q.size); + rte_free(txq->q.sdesc); + free_txq(&txq->q); + } +} + +void t4_sge_tx_monitor_start(struct adapter *adap) +{ + rte_eal_alarm_set(50, tx_timer_cb, (void *)adap); +} + +void t4_sge_tx_monitor_stop(struct adapter *adap) +{ + rte_eal_alarm_cancel(tx_timer_cb, (void *)adap); +} + +/** + * t4_free_sge_resources - free SGE resources + * @adap: the adapter + * + * Frees resources used by the SGE queue sets. + */ +void t4_free_sge_resources(struct adapter *adap) +{ + int i; + struct sge_eth_rxq *rxq = &adap->sge.ethrxq[0]; + struct sge_eth_txq *txq = &adap->sge.ethtxq[0]; + + /* clean up Ethernet Tx/Rx queues */ + for (i = 0; i < adap->sge.max_ethqsets; i++, rxq++, txq++) { + /* Free only the queues allocated */ + if (rxq->rspq.desc) { + t4_sge_eth_rxq_release(adap, rxq); + rxq->rspq.eth_dev = NULL; + } + if (txq->q.desc) { + t4_sge_eth_txq_release(adap, txq); + txq->eth_dev = NULL; + } + } + + if (adap->sge.fw_evtq.desc) + free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); +} + +/** + * t4_sge_init - initialize SGE + * @adap: the adapter + * + * Performs SGE initialization needed every time after a chip reset. + * We do not initialize any of the queues here, instead the driver + * top-level must request those individually. + * + * Called in two different modes: + * + * 1. Perform actual hardware initialization and record hard-coded + * parameters which were used. This gets used when we're the + * Master PF and the Firmware Configuration File support didn't + * work for some reason. + * + * 2. We're not the Master PF or initialization was performed with + * a Firmware Configuration File. In this case we need to grab + * any of the SGE operating parameters that we need to have in + * order to do our job and make sure we can live with them ... + */ +static int t4_sge_init_soft(struct adapter *adap) +{ + struct sge *s = &adap->sge; + u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu; + u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5; + u32 ingress_rx_threshold; + + /* + * Verify that CPL messages are going to the Ingress Queue for + * process_responses() and that only packet data is going to the + * Free Lists. + */ + if ((t4_read_reg(adap, A_SGE_CONTROL) & F_RXPKTCPLMODE) != + V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) { + dev_err(adap, "bad SGE CPL MODE\n"); + return -EINVAL; + } + + /* + * Validate the Host Buffer Register Array indices that we want to + * use ... 
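Editor's sketch: t4_sge_tx_monitor_start()/_stop() above use the EAL alarm service. EAL alarms are one-shot, so the 50 microsecond monitor keeps itself alive by re-arming from inside tx_timer_cb(), and rte_eal_alarm_cancel() matched on the callback/argument pair tears it down. Below is a minimal, generic program showing the same pattern; it is not the driver code, the tick counter is just a placeholder payload, and a real application would synchronize access to it since alarm callbacks run in an EAL-managed thread.

#include <stdio.h>
#include <unistd.h>
#include <rte_eal.h>
#include <rte_alarm.h>

#define MONITOR_PERIOD_US 50 /* same period the driver passes above */

/* One-shot alarm re-armed from its own callback to get periodic behaviour. */
static void monitor_cb(void *arg)
{
	unsigned int *ticks = arg;

	(*ticks)++;
	rte_eal_alarm_set(MONITOR_PERIOD_US, monitor_cb, arg);
}

static void monitor_start(unsigned int *ticks)
{
	rte_eal_alarm_set(MONITOR_PERIOD_US, monitor_cb, ticks);
}

static void monitor_stop(unsigned int *ticks)
{
	/* Cancels every pending alarm registered with this cb/arg pair. */
	rte_eal_alarm_cancel(monitor_cb, ticks);
}

int main(int argc, char **argv)
{
	static unsigned int ticks;

	if (rte_eal_init(argc, argv) < 0)
		return 1;

	monitor_start(&ticks);
	usleep(10 * 1000); /* let a few ticks fire */
	monitor_stop(&ticks);
	printf("ticks observed: %u\n", ticks);
	return 0;
}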
+ * + * XXX Note that we should really read through the Host Buffer Size + * XXX register array and find the indices of the Buffer Sizes which + * XXX meet our needs! + */ +#define READ_FL_BUF(x) \ + t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE0 + (x) * sizeof(u32)) + + fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF); + fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF); + fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF); + fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF); + + /* + * We only bother using the Large Page logic if the Large Page Buffer + * is larger than our Page Size Buffer. + */ + if (fl_large_pg <= fl_small_pg) + fl_large_pg = 0; + +#undef READ_FL_BUF + + /* + * The Page Size Buffer must be exactly equal to our Page Size and the + * Large Page Size Buffer should be 0 (per above) or a power of 2. + */ + if (fl_small_pg != CXGBE_PAGE_SIZE || + (fl_large_pg & (fl_large_pg - 1)) != 0) { + dev_err(adap, "bad SGE FL page buffer sizes [%d, %d]\n", + fl_small_pg, fl_large_pg); + return -EINVAL; + } + if (fl_large_pg) + s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; + + if (adap->use_unpacked_mode) { + int err = 0; + + if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap)) { + dev_err(adap, "bad SGE FL small MTU %d\n", + fl_small_mtu); + err = -EINVAL; + } + if (fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) { + dev_err(adap, "bad SGE FL large MTU %d\n", + fl_large_mtu); + err = -EINVAL; + } + if (err) + return err; + } + + /* + * Retrieve our RX interrupt holdoff timer values and counter + * threshold values from the SGE parameters. + */ + timer_value_0_and_1 = t4_read_reg(adap, A_SGE_TIMER_VALUE_0_AND_1); + timer_value_2_and_3 = t4_read_reg(adap, A_SGE_TIMER_VALUE_2_AND_3); + timer_value_4_and_5 = t4_read_reg(adap, A_SGE_TIMER_VALUE_4_AND_5); + s->timer_val[0] = core_ticks_to_us(adap, + G_TIMERVALUE0(timer_value_0_and_1)); + s->timer_val[1] = core_ticks_to_us(adap, + G_TIMERVALUE1(timer_value_0_and_1)); + s->timer_val[2] = core_ticks_to_us(adap, + G_TIMERVALUE2(timer_value_2_and_3)); + s->timer_val[3] = core_ticks_to_us(adap, + G_TIMERVALUE3(timer_value_2_and_3)); + s->timer_val[4] = core_ticks_to_us(adap, + G_TIMERVALUE4(timer_value_4_and_5)); + s->timer_val[5] = core_ticks_to_us(adap, + G_TIMERVALUE5(timer_value_4_and_5)); + + ingress_rx_threshold = t4_read_reg(adap, A_SGE_INGRESS_RX_THRESHOLD); + s->counter_val[0] = G_THRESHOLD_0(ingress_rx_threshold); + s->counter_val[1] = G_THRESHOLD_1(ingress_rx_threshold); + s->counter_val[2] = G_THRESHOLD_2(ingress_rx_threshold); + s->counter_val[3] = G_THRESHOLD_3(ingress_rx_threshold); + + return 0; +} + +int t4_sge_init(struct adapter *adap) +{ + struct sge *s = &adap->sge; + u32 sge_control, sge_control2, sge_conm_ctrl; + unsigned int ingpadboundary, ingpackboundary; + int ret, egress_threshold; + + /* + * Ingress Padding Boundary and Egress Status Page Size are set up by + * t4_fixup_host_params(). + */ + sge_control = t4_read_reg(adap, A_SGE_CONTROL); + s->pktshift = G_PKTSHIFT(sge_control); + s->stat_len = (sge_control & F_EGRSTATUSPAGESIZE) ? 128 : 64; + + /* + * T4 uses a single control field to specify both the PCIe Padding and + * Packing Boundary. T5 introduced the ability to specify these + * separately. The actual Ingress Packet Data alignment boundary + * within Packed Buffer Mode is the maximum of these two + * specifications. 
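Editor's sketch: both the ingress padding and packing boundaries discussed above are stored as small encoded register fields that the driver expands as 1 << (field + SHIFT), with one special T5 encoding that means a literal 16 bytes, and the free-list alignment becomes the larger of the two decoded values. The sketch below keeps the shifts as parameters rather than guessing the X_INGPADBOUNDARY_SHIFT / X_INGPACKBOUNDARY_SHIFT constants, which are defined elsewhere in the driver; the example values are illustrative only.

#include <assert.h>
#include <stdint.h>

/* Decode a boundary field of the form 1 << (field + shift); 'is_16b_code'
 * marks the one encoding that T5 interprets as a flat 16 bytes. */
static uint32_t decode_boundary(uint32_t field, uint32_t shift, int is_16b_code)
{
	return is_16b_code ? 16u : 1u << (field + shift);
}

static uint32_t fl_alignment(uint32_t pad, uint32_t pack)
{
	return pad > pack ? pad : pack;
}

int main(void)
{
	/* Example only: with a shift of 5, field 1 decodes to 64 bytes,
	 * while the special packing code still yields 16 bytes. */
	uint32_t pad = decode_boundary(1, 5, 0);
	uint32_t pack = decode_boundary(0, 5, 1);

	assert(pad == 64 && pack == 16);
	assert(fl_alignment(pad, pack) == 64);
	return 0;
}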
+ */ + ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + + X_INGPADBOUNDARY_SHIFT); + s->fl_align = ingpadboundary; + + if (!is_t4(adap->params.chip) && !adap->use_unpacked_mode) { + /* + * T5 has a weird interpretation of one of the PCIe Packing + * Boundary values. No idea why ... + */ + sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2); + ingpackboundary = G_INGPACKBOUNDARY(sge_control2); + if (ingpackboundary == X_INGPACKBOUNDARY_16B) + ingpackboundary = 16; + else + ingpackboundary = 1 << (ingpackboundary + + X_INGPACKBOUNDARY_SHIFT); + + s->fl_align = max(ingpadboundary, ingpackboundary); + } + + ret = t4_sge_init_soft(adap); + if (ret < 0) { + dev_err(adap, "%s: t4_sge_init_soft failed, error %d\n", + __func__, -ret); + return ret; + } + + /* + * A FL with <= fl_starve_thres buffers is starving and a periodic + * timer will attempt to refill it. This needs to be larger than the + * SGE's Egress Congestion Threshold. If it isn't, then we can get + * stuck waiting for new packets while the SGE is waiting for us to + * give it more Free List entries. (Note that the SGE's Egress + * Congestion Threshold is in units of 2 Free List pointers.) For T4, + * there was only a single field to control this. For T5 there's the + * original field which now only applies to Unpacked Mode Free List + * buffers and a new field which only applies to Packed Mode Free List + * buffers. + */ + sge_conm_ctrl = t4_read_reg(adap, A_SGE_CONM_CTRL); + if (is_t4(adap->params.chip) || adap->use_unpacked_mode) + egress_threshold = G_EGRTHRESHOLD(sge_conm_ctrl); + else + egress_threshold = G_EGRTHRESHOLDPACKING(sge_conm_ctrl); + s->fl_starve_thres = 2 * egress_threshold + 1; + + return 0; +} diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile new file mode 100644 index 00000000..f4879e67 --- /dev/null +++ b/drivers/net/e1000/Makefile @@ -0,0 +1,102 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2015 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_e1000.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +EXPORT_MAP := rte_pmd_e1000_version.map + +LIBABIVER := 1 + +ifeq ($(CC), icc) +# +# CFLAGS for icc +# +CFLAGS_BASE_DRIVER = -wd177 -wd181 -wd188 -wd869 -wd2259 +else +# +# CFLAGS for gcc +# +CFLAGS_BASE_DRIVER = -Wno-uninitialized -Wno-unused-parameter +CFLAGS_BASE_DRIVER += -Wno-unused-variable +ifeq ($(shell test $(GCC_VERSION) -ge 60 && echo 1), 1) +CFLAGS_BASE_DRIVER += -Wno-misleading-indentation +endif +endif + +# +# Add extra flags for base driver files (also known as shared code) +# to disable warnings in them +# +BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))) +$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER))) + +VPATH += $(SRCDIR)/base + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_80003es2lan.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82540.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82541.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82542.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82543.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82571.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82575.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_i210.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_api.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_ich8lan.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_mac.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_manage.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_mbx.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_nvm.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_osdep.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_phy.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_vf.c +SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_pf.c +SRCS-$(CONFIG_RTE_LIBRTE_EM_PMD) += em_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_EM_PMD) += em_rxtx.c + +# this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += lib/librte_eal lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += lib/librte_mempool lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += lib/librte_net + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/e1000/base/README b/drivers/net/e1000/base/README new file mode 100644 index 00000000..8d48135a --- /dev/null +++ b/drivers/net/e1000/base/README @@ -0,0 +1,44 @@ +.. + BSD LICENSE + + Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +This directory contains source code of FreeBSD em & igb drivers of version +cid-shared-code.2015.10.09 released by ND. The sub-directory of base/ +contains the original source package. + +Updating the driver +=================== + +NOTE: The source code in this directory should not be modified apart from +the following file(s): + + e1000_osdep.c + e1000_osdep.h diff --git a/drivers/net/e1000/base/e1000_80003es2lan.c b/drivers/net/e1000/base/e1000_80003es2lan.c new file mode 100644 index 00000000..5ac925e4 --- /dev/null +++ b/drivers/net/e1000/base/e1000_80003es2lan.c @@ -0,0 +1,1525 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +/* 80003ES2LAN Gigabit Ethernet Controller (Copper) + * 80003ES2LAN Gigabit Ethernet Controller (Serdes) + */ + +#include "e1000_api.h" + +STATIC s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw); +STATIC void e1000_release_phy_80003es2lan(struct e1000_hw *hw); +STATIC s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw); +STATIC void e1000_release_nvm_80003es2lan(struct e1000_hw *hw); +STATIC s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, + u16 *data); +STATIC s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, + u16 data); +STATIC s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +STATIC s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw); +STATIC s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw); +STATIC s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw); +STATIC s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +STATIC s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw); +STATIC s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw); +STATIC s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw); +STATIC void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw); +STATIC s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); +STATIC s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex); +STATIC s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw); +STATIC s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw); +STATIC s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 *data); +STATIC s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 data); +STATIC void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw); +STATIC void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); +STATIC s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw); +STATIC void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw); + +/* A table for the GG82563 cable length where the range is defined + * with a lower bound at "index" and the upper bound at + * "index + 5". + */ +STATIC const u16 e1000_gg82563_cable_length_table[] = { + 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF }; +#define GG82563_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(e1000_gg82563_cable_length_table) / \ + sizeof(e1000_gg82563_cable_length_table[0])) + +/** + * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs. 
+ * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + DEBUGFUNC("e1000_init_phy_params_80003es2lan"); + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + return E1000_SUCCESS; + } else { + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + phy->type = e1000_phy_gg82563; + + phy->ops.acquire = e1000_acquire_phy_80003es2lan; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.check_reset_block = e1000_check_reset_block_generic; + phy->ops.commit = e1000_phy_sw_reset_generic; + phy->ops.get_cfg_done = e1000_get_cfg_done_80003es2lan; + phy->ops.get_info = e1000_get_phy_info_m88; + phy->ops.release = e1000_release_phy_80003es2lan; + phy->ops.reset = e1000_phy_hw_reset_generic; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic; + + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan; + phy->ops.get_cable_length = e1000_get_cable_length_80003es2lan; + phy->ops.read_reg = e1000_read_phy_reg_gg82563_80003es2lan; + phy->ops.write_reg = e1000_write_phy_reg_gg82563_80003es2lan; + + phy->ops.cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan; + + /* This can only be done after all function pointers are setup. */ + ret_val = e1000_get_phy_id(hw); + + /* Verify phy id */ + if (phy->id != GG82563_E_PHY_ID) + return -E1000_ERR_PHY; + + return ret_val; +} + +/** + * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs. + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u16 size; + + DEBUGFUNC("e1000_init_nvm_params_80003es2lan"); + + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + nvm->type = e1000_nvm_eeprom_spi; + + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* EEPROM access above 16k is unsupported */ + if (size > 14) + size = 14; + nvm->word_size = 1 << size; + + /* Function Pointers */ + nvm->ops.acquire = e1000_acquire_nvm_80003es2lan; + nvm->ops.read = e1000_read_nvm_eerd; + nvm->ops.release = e1000_release_nvm_80003es2lan; + nvm->ops.update = e1000_update_nvm_checksum_generic; + nvm->ops.valid_led_default = e1000_valid_led_default_generic; + nvm->ops.validate = e1000_validate_nvm_checksum_generic; + nvm->ops.write = e1000_write_nvm_80003es2lan; + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs. 
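Editor's sketch: the NVM init above derives the EEPROM word count by treating the EECD size field as an exponent. The raw field is added to a base shift, capped at 14 so that at most 16K words are ever addressed, and then used as 1 << size. The helper below redoes that arithmetic with the base shift passed in as a parameter, since the numeric value of NVM_WORD_SIZE_BASE_SHIFT lives in headers not shown here; the example values are illustrative.

#include <assert.h>
#include <stdint.h>

/* word_size = 1 << min(raw_field + base_shift, 14): anything larger is
 * clamped, matching the "EEPROM access above 16k is unsupported" comment. */
static uint32_t nvm_word_size(uint16_t raw_field, uint16_t base_shift)
{
	uint16_t size = raw_field + base_shift;

	if (size > 14)
		size = 14;
	return 1u << size;
}

int main(void)
{
	/* With a base shift of 6 (example): a raw field of 4 gives 1K words,
	 * while an oversized field still clamps to 16K words. */
	assert(nvm_word_size(4, 6) == 1024);
	assert(nvm_word_size(12, 6) == 16384);
	return 0;
}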
+ * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_init_mac_params_80003es2lan"); + + /* Set media type and media-dependent function pointers */ + switch (hw->device_id) { + case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: + hw->phy.media_type = e1000_media_type_internal_serdes; + mac->ops.check_for_link = e1000_check_for_serdes_link_generic; + mac->ops.setup_physical_interface = + e1000_setup_fiber_serdes_link_generic; + break; + default: + hw->phy.media_type = e1000_media_type_copper; + mac->ops.check_for_link = e1000_check_for_copper_link_generic; + mac->ops.setup_physical_interface = + e1000_setup_copper_link_80003es2lan; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; + /* FWSM register */ + mac->has_fwsm = true; + /* ARC supported; valid only if manageability features are enabled. */ + mac->arc_subsystem_valid = !!(E1000_READ_REG(hw, E1000_FWSM) & + E1000_FWSM_MODE_MASK); + /* Adaptive IFS not supported */ + mac->adaptive_ifs = false; + + /* Function pointers */ + + /* bus type/speed/width */ + mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic; + /* reset */ + mac->ops.reset_hw = e1000_reset_hw_80003es2lan; + /* hw initialization */ + mac->ops.init_hw = e1000_init_hw_80003es2lan; + /* link setup */ + mac->ops.setup_link = e1000_setup_link_generic; + /* check management mode */ + mac->ops.check_mng_mode = e1000_check_mng_mode_generic; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_generic; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_generic; + /* read mac address */ + mac->ops.read_mac_addr = e1000_read_mac_addr_80003es2lan; + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_generic; + /* blink LED */ + mac->ops.blink_led = e1000_blink_led_generic; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_generic; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_generic; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_generic; + mac->ops.led_off = e1000_led_off_generic; + /* clear hardware counters */ + mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan; + /* link info */ + mac->ops.get_link_up_info = e1000_get_link_up_info_80003es2lan; + + /* set lan id for port to determine which phy lock to use */ + hw->mac.ops.set_lan_id(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_init_function_pointers_80003es2lan - Init ESB2 func ptrs. + * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + **/ +void e1000_init_function_pointers_80003es2lan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_80003es2lan"); + + hw->mac.ops.init_params = e1000_init_mac_params_80003es2lan; + hw->nvm.ops.init_params = e1000_init_nvm_params_80003es2lan; + hw->phy.ops.init_params = e1000_init_phy_params_80003es2lan; +} + +/** + * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to acquire access rights to the correct PHY. + **/ +STATIC s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + DEBUGFUNC("e1000_acquire_phy_80003es2lan"); + + mask = hw->bus.func ? 
E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; + return e1000_acquire_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_release_phy_80003es2lan - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. + **/ +STATIC void e1000_release_phy_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + DEBUGFUNC("e1000_release_phy_80003es2lan"); + + mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; + e1000_release_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_acquire_mac_csr_80003es2lan - Acquire right to access Kumeran register + * @hw: pointer to the HW structure + * + * Acquire the semaphore to access the Kumeran interface. + * + **/ +STATIC s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + DEBUGFUNC("e1000_acquire_mac_csr_80003es2lan"); + + mask = E1000_SWFW_CSR_SM; + + return e1000_acquire_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_release_mac_csr_80003es2lan - Release right to access Kumeran Register + * @hw: pointer to the HW structure + * + * Release the semaphore used to access the Kumeran interface + **/ +STATIC void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + DEBUGFUNC("e1000_release_mac_csr_80003es2lan"); + + mask = E1000_SWFW_CSR_SM; + + e1000_release_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM + * @hw: pointer to the HW structure + * + * Acquire the semaphore to access the EEPROM. + **/ +STATIC s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_acquire_nvm_80003es2lan"); + + ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); + if (ret_val) + return ret_val; + + ret_val = e1000_acquire_nvm_generic(hw); + + if (ret_val) + e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); + + return ret_val; +} + +/** + * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM + * @hw: pointer to the HW structure + * + * Release the semaphore used to access the EEPROM. + **/ +STATIC void e1000_release_nvm_80003es2lan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_release_nvm_80003es2lan"); + + e1000_release_nvm_generic(hw); + e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); +} + +/** + * e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. 
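Editor's sketch: the SW/FW sync register described above pairs each resource with two bits, the software claim in the low 16 bits and the firmware claim in the same position shifted up by 16, so a resource is free only when neither bit is set. The program below models the claim/release logic against a plain variable standing in for E1000_SW_FW_SYNC; the real code additionally wraps every access in the hardware semaphore and retries with a delay, and the example mask bit is illustrative rather than taken from the e1000 headers.

#include <assert.h>
#include <stdint.h>

static uint32_t toy_sw_fw_sync; /* stand-in for the SW_FW_SYNC register */

/* Try to claim 'mask' for software: fails if firmware (mask << 16) or
 * another software owner (mask) already holds it. */
static int toy_swfw_acquire(uint16_t mask)
{
	uint32_t swmask = mask;
	uint32_t fwmask = (uint32_t)mask << 16;

	if (toy_sw_fw_sync & (swmask | fwmask))
		return -1;
	toy_sw_fw_sync |= swmask;
	return 0;
}

static void toy_swfw_release(uint16_t mask)
{
	toy_sw_fw_sync &= ~(uint32_t)mask;
}

int main(void)
{
	uint16_t phy0 = 0x0002; /* example bit, not the real header constant */

	assert(toy_swfw_acquire(phy0) == 0);
	assert(toy_swfw_acquire(phy0) == -1); /* already owned */
	toy_swfw_release(phy0);
	assert(toy_swfw_acquire(phy0) == 0);
	return 0;
}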
+ **/ +STATIC s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 i = 0; + s32 timeout = 50; + + DEBUGFUNC("e1000_acquire_swfw_sync_80003es2lan"); + + while (i < timeout) { + if (e1000_get_hw_semaphore_generic(hw)) + return -E1000_ERR_SWFW_SYNC; + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + e1000_put_hw_semaphore_generic(hw); + msec_delay_irq(5); + i++; + } + + if (i == timeout) { + DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); + return -E1000_ERR_SWFW_SYNC; + } + + swfw_sync |= swmask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +STATIC void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + DEBUGFUNC("e1000_release_swfw_sync_80003es2lan"); + + while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS) + ; /* Empty */ + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); +} + +/** + * e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @data: pointer to the data returned from the operation + * + * Read the GG82563 PHY register. + **/ +STATIC s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, u16 *data) +{ + s32 ret_val; + u32 page_select; + u16 temp; + + DEBUGFUNC("e1000_read_phy_reg_gg82563_80003es2lan"); + + ret_val = e1000_acquire_phy_80003es2lan(hw); + if (ret_val) + return ret_val; + + /* Select Configuration Page */ + if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { + page_select = GG82563_PHY_PAGE_SELECT; + } else { + /* Use Alternative Page Select register to access + * registers 30 and 31 + */ + page_select = GG82563_PHY_PAGE_SELECT_ALT; + } + + temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); + ret_val = e1000_write_phy_reg_mdic(hw, page_select, temp); + if (ret_val) { + e1000_release_phy_80003es2lan(hw); + return ret_val; + } + + if (hw->dev_spec._80003es2lan.mdic_wa_enable) { + /* The "ready" bit in the MDIC register may be incorrectly set + * before the device has completed the "Page Select" MDI + * transaction. So we wait 200us after each MDI command... + */ + usec_delay(200); + + /* ...and verify the command was successful. 
*/ + ret_val = e1000_read_phy_reg_mdic(hw, page_select, &temp); + + if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { + e1000_release_phy_80003es2lan(hw); + return -E1000_ERR_PHY; + } + + usec_delay(200); + + ret_val = e1000_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + usec_delay(200); + } else { + ret_val = e1000_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + } + + e1000_release_phy_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @data: value to write to the register + * + * Write to the GG82563 PHY register. + **/ +STATIC s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, u16 data) +{ + s32 ret_val; + u32 page_select; + u16 temp; + + DEBUGFUNC("e1000_write_phy_reg_gg82563_80003es2lan"); + + ret_val = e1000_acquire_phy_80003es2lan(hw); + if (ret_val) + return ret_val; + + /* Select Configuration Page */ + if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { + page_select = GG82563_PHY_PAGE_SELECT; + } else { + /* Use Alternative Page Select register to access + * registers 30 and 31 + */ + page_select = GG82563_PHY_PAGE_SELECT_ALT; + } + + temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); + ret_val = e1000_write_phy_reg_mdic(hw, page_select, temp); + if (ret_val) { + e1000_release_phy_80003es2lan(hw); + return ret_val; + } + + if (hw->dev_spec._80003es2lan.mdic_wa_enable) { + /* The "ready" bit in the MDIC register may be incorrectly set + * before the device has completed the "Page Select" MDI + * transaction. So we wait 200us after each MDI command... + */ + usec_delay(200); + + /* ...and verify the command was successful. */ + ret_val = e1000_read_phy_reg_mdic(hw, page_select, &temp); + + if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { + e1000_release_phy_80003es2lan(hw); + return -E1000_ERR_PHY; + } + + usec_delay(200); + + ret_val = e1000_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + usec_delay(200); + } else { + ret_val = e1000_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + } + + e1000_release_phy_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_write_nvm_80003es2lan - Write to ESB2 NVM + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @words: number of words to write + * @data: buffer of data to write to the NVM + * + * Write "words" of data to the ESB2 NVM. + **/ +STATIC s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + DEBUGFUNC("e1000_write_nvm_80003es2lan"); + + return e1000_write_nvm_spi(hw, offset, words, data); +} + +/** + * e1000_get_cfg_done_80003es2lan - Wait for configuration to complete + * @hw: pointer to the HW structure + * + * Wait a specific amount of time for manageability processes to complete. + * This is a function pointer entry point called by the phy module. 
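+ * + * The wait polls EEMNGCTL for the CFG_DONE bit of the current port + * (function 0 or 1), sleeping 1 ms per iteration for up to + * PHY_CFG_TIMEOUT iterations before reporting -E1000_ERR_RESET.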
+ **/ +STATIC s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + DEBUGFUNC("e1000_get_cfg_done_80003es2lan"); + + if (hw->bus.func == 1) + mask = E1000_NVM_CFG_DONE_PORT_1; + + while (timeout) { + if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask) + break; + msec_delay(1); + timeout--; + } + if (!timeout) { + DEBUGOUT("MNG configuration cycle has not completed.\n"); + return -E1000_ERR_RESET; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex + * @hw: pointer to the HW structure + * + * Force the speed and duplex settings onto the PHY. This is a + * function pointer entry point called by the phy module. + **/ +STATIC s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_80003es2lan"); + + if (!(hw->phy.ops.read_reg)) + return E1000_SUCCESS; + + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO; + ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + DEBUGOUT1("GG82563 PSCR: %X\n", phy_data); + + ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + /* Reset the phy to commit changes. */ + phy_data |= MII_CR_RESET; + + ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + usec_delay(1); + + if (hw->phy.autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on GG82563 phy.\n"); + + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = e1000_phy_reset_dsp_generic(hw); + if (ret_val) + return ret_val; + } + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + /* Resetting the phy means we need to verify the TX_CLK corresponds + * to the link speed. 10Mbps -> 2.5MHz, else 25MHz. + */ + phy_data &= ~GG82563_MSCR_TX_CLK_MASK; + if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED) + phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5; + else + phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; + ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, + phy_data); + + return ret_val; +} + +/** + * e1000_get_cable_length_80003es2lan - Set approximate cable length + * @hw: pointer to the HW structure + * + * Find the approximate cable length as measured by the GG82563 PHY. + * This is a function pointer entry point called by the phy module. 
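+ * + * The DSP distance bits index e1000_gg82563_cable_length_table; the + * minimum and maximum lengths are table entries five positions apart + * and the reported cable length is their midpoint.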
+ **/ +STATIC s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + + DEBUGFUNC("e1000_get_cable_length_80003es2lan"); + + if (!(hw->phy.ops.read_reg)) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_DSP_DISTANCE, &phy_data); + if (ret_val) + return ret_val; + + index = phy_data & GG82563_DSPD_CABLE_LENGTH; + + if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) + return -E1000_ERR_PHY; + + phy->min_cable_length = e1000_gg82563_cable_length_table[index]; + phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return E1000_SUCCESS; +} + +/** + * e1000_get_link_up_info_80003es2lan - Report speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to speed buffer + * @duplex: pointer to duplex buffer + * + * Retrieve the current speed and duplex configuration. + **/ +STATIC s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + DEBUGFUNC("e1000_get_link_up_info_80003es2lan"); + + if (hw->phy.media_type == e1000_media_type_copper) { + ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, + duplex); + hw->phy.ops.cfg_on_link_up(hw); + } else { + ret_val = e1000_get_speed_and_duplex_fiber_serdes_generic(hw, + speed, + duplex); + } + + return ret_val; +} + +/** + * e1000_reset_hw_80003es2lan - Reset the ESB2 controller + * @hw: pointer to the HW structure + * + * Perform a global reset to the ESB2 controller. + **/ +STATIC s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 kum_reg_data; + + DEBUGFUNC("e1000_reset_hw_80003es2lan"); + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000_disable_pcie_master_generic(hw); + if (ret_val) + DEBUGOUT("PCI-E Master disable polling has failed.\n"); + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + msec_delay(10); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + ret_val = e1000_acquire_phy_80003es2lan(hw); + if (ret_val) + return ret_val; + + DEBUGOUT("Issuing a global reset to MAC\n"); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + e1000_release_phy_80003es2lan(hw); + + /* Disable IBIST slave mode (far-end loopback) */ + ret_val = e1000_read_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_INBAND_PARAM, &kum_reg_data); + if (!ret_val) { + kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_INBAND_PARAM, + kum_reg_data); + if (ret_val) + DEBUGOUT("Error disabling far-end loopback\n"); + } else + DEBUGOUT("Error disabling far-end loopback\n"); + + ret_val = e1000_get_auto_rd_done_generic(hw); + if (ret_val) + /* We don't want to continue accessing MAC registers. */ + return ret_val; + + /* Clear any pending interrupt events. */ + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); + + return e1000_check_alt_mac_addr_generic(hw); +} + +/** + * e1000_init_hw_80003es2lan - Initialize the ESB2 controller + * @hw: pointer to the HW structure + * + * Initialize the hw bits, LED, VFTA, MTA, link and hw counters. 
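+ * + * Also decides whether the MDIC page-select workaround is needed: it is + * enabled by default and disabled only when the Kumeran interface + * reports in-band MDIO operation.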
+ **/ +STATIC s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 reg_data; + s32 ret_val; + u16 kum_reg_data; + u16 i; + + DEBUGFUNC("e1000_init_hw_80003es2lan"); + + e1000_initialize_hw_bits_80003es2lan(hw); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + /* An error is not fatal and we should not stop init due to this */ + if (ret_val) + DEBUGOUT("Error initializing identification LED\n"); + + /* Disabling VLAN filtering */ + DEBUGOUT("Initializing the IEEE VLAN\n"); + mac->ops.clear_vfta(hw); + + /* Setup the receive address. */ + e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + if (ret_val) + return ret_val; + + /* Disable IBIST slave mode (far-end loopback) */ + ret_val = + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + &kum_reg_data); + if (!ret_val) { + kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_INBAND_PARAM, + kum_reg_data); + if (ret_val) + DEBUGOUT("Error disabling far-end loopback\n"); + } else + DEBUGOUT("Error disabling far-end loopback\n"); + + /* Set the transmit descriptor write-back policy */ + reg_data = E1000_READ_REG(hw, E1000_TXDCTL(0)); + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); + E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg_data); + + /* ...for both queues. */ + reg_data = E1000_READ_REG(hw, E1000_TXDCTL(1)); + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); + E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg_data); + + /* Enable retransmit on late collisions */ + reg_data = E1000_READ_REG(hw, E1000_TCTL); + reg_data |= E1000_TCTL_RTLC; + E1000_WRITE_REG(hw, E1000_TCTL, reg_data); + + /* Configure Gigabit Carry Extend Padding */ + reg_data = E1000_READ_REG(hw, E1000_TCTL_EXT); + reg_data &= ~E1000_TCTL_EXT_GCEX_MASK; + reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN; + E1000_WRITE_REG(hw, E1000_TCTL_EXT, reg_data); + + /* Configure Transmit Inter-Packet Gap */ + reg_data = E1000_READ_REG(hw, E1000_TIPG); + reg_data &= ~E1000_TIPG_IPGT_MASK; + reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN; + E1000_WRITE_REG(hw, E1000_TIPG, reg_data); + + reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001); + reg_data &= ~0x00100000; + E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data); + + /* default to true to enable the MDIC W/A */ + hw->dev_spec._80003es2lan.mdic_wa_enable = true; + + ret_val = + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET >> + E1000_KMRNCTRLSTA_OFFSET_SHIFT, &i); + if (!ret_val) { + if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) == + E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO) + hw->dev_spec._80003es2lan.mdic_wa_enable = false; + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2 + * @hw: pointer to the HW structure + * + * Initializes required hardware-dependent bits needed for normal operation. 
+ **/ +STATIC void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw) +{ + u32 reg; + + DEBUGFUNC("e1000_initialize_hw_bits_80003es2lan"); + + /* Transmit Descriptor Control 0 */ + reg = E1000_READ_REG(hw, E1000_TXDCTL(0)); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = E1000_READ_REG(hw, E1000_TXDCTL(1)); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = E1000_READ_REG(hw, E1000_TARC(0)); + reg &= ~(0xF << 27); /* 30:27 */ + if (hw->phy.media_type != e1000_media_type_copper) + reg &= ~(1 << 20); + E1000_WRITE_REG(hw, E1000_TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = E1000_READ_REG(hw, E1000_TARC(1)); + if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + E1000_WRITE_REG(hw, E1000_TARC(1), reg); + + /* Disable IPv6 extension header parsing because some malformed + * IPv6 headers can hang the Rx. + */ + reg = E1000_READ_REG(hw, E1000_RFCTL); + reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); + E1000_WRITE_REG(hw, E1000_RFCTL, reg); + + return; +} + +/** + * e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link + * @hw: pointer to the HW structure + * + * Setup some GG82563 PHY registers for obtaining link + **/ +STATIC s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 reg; + u16 data; + + DEBUGFUNC("e1000_copper_link_setup_gg82563_80003es2lan"); + + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &data); + if (ret_val) + return ret_val; + + data |= GG82563_MSCR_ASSERT_CRS_ON_TX; + /* Use 25MHz for both link down and 1000Base-T for Tx clock. 
*/ + data |= GG82563_MSCR_TX_CLK_1000MBPS_25; + + ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, data); + if (ret_val) + return ret_val; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_SPEC_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK; + + switch (phy->mdix) { + case 1: + data |= GG82563_PSCR_CROSSOVER_MODE_MDI; + break; + case 2: + data |= GG82563_PSCR_CROSSOVER_MODE_MDIX; + break; + case 0: + default: + data |= GG82563_PSCR_CROSSOVER_MODE_AUTO; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE; + if (phy->disable_polarity_correction) + data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE; + + ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_SPEC_CTRL, data); + if (ret_val) + return ret_val; + + /* SW Reset the PHY so all changes take effect */ + ret_val = hw->phy.ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error Resetting the PHY\n"); + return ret_val; + } + + /* Bypass Rx and Tx FIFO's */ + reg = E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL; + data = (E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | + E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data); + if (ret_val) + return ret_val; + + reg = E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE; + ret_val = e1000_read_kmrn_reg_80003es2lan(hw, reg, &data); + if (ret_val) + return ret_val; + data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE; + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_SPEC_CTRL_2, &data); + if (ret_val) + return ret_val; + + data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG; + ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_SPEC_CTRL_2, data); + if (ret_val) + return ret_val; + + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg &= ~E1000_CTRL_EXT_LINK_MODE_MASK; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, &data); + if (ret_val) + return ret_val; + + /* Do not init these registers when the HW is in IAMT mode, since the + * firmware will have already initialized them. We only initialize + * them if the HW is not in IAMT mode. + */ + if (!hw->mac.ops.check_mng_mode(hw)) { + /* Enable Electrical Idle on the PHY */ + data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE; + ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, + data); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, + &data); + if (ret_val) + return ret_val; + + data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; + ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, + data); + if (ret_val) + return ret_val; + } + + /* Workaround: Disable padding in Kumeran interface in the MAC + * and in the PHY to avoid CRC errors. 
+ */ + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_INBAND_CTRL, &data); + if (ret_val) + return ret_val; + + data |= GG82563_ICR_DIS_PADDING; + ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_INBAND_CTRL, data); + if (ret_val) + return ret_val; + + return E1000_SUCCESS; +} + +/** + * e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2 + * @hw: pointer to the HW structure + * + * Essentially a wrapper for setting up all things "copper" related. + * This is a function pointer entry point called by the mac module. + **/ +STATIC s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 reg_data; + + DEBUGFUNC("e1000_setup_copper_link_80003es2lan"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Set the mac to wait the maximum time between each + * iteration and increase the max iterations when + * polling the phy; this fixes erroneous timeouts at 10Mbps. + */ + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4), + 0xFFFF); + if (ret_val) + return ret_val; + ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), + &reg_data); + if (ret_val) + return ret_val; + reg_data |= 0x3F; + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), + reg_data); + if (ret_val) + return ret_val; + ret_val = + e1000_read_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, + &reg_data); + if (ret_val) + return ret_val; + reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING; + ret_val = + e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, + reg_data); + if (ret_val) + return ret_val; + + ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw); + if (ret_val) + return ret_val; + + return e1000_setup_copper_link_generic(hw); +} + +/** + * e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up + * @hw: pointer to the HW structure + * + * Configure the KMRN interface by applying last minute quirks for + * 10/100 or gigabit operation once link is up. + **/ +STATIC s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 speed; + u16 duplex; + + DEBUGFUNC("e1000_configure_on_link_up"); + + if (hw->phy.media_type == e1000_media_type_copper) { + ret_val = e1000_get_speed_and_duplex_copper_generic(hw, &speed, + &duplex); + if (ret_val) + return ret_val; + + if (speed == SPEED_1000) + ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw); + else + ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, duplex); + } + + return ret_val; +} + +/** + * e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation + * @hw: pointer to the HW structure + * @duplex: current duplex setting + * + * Configure the KMRN interface by applying last minute quirks for + * 10/100 operation.
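+ * + * The half-duplex control register is set to its 10/100 default, the + * transmit inter-packet gap is set for 10/100, and false-carrier + * passing is enabled only for half-duplex links.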
+ **/ +STATIC s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex) +{ + s32 ret_val; + u32 tipg; + u32 i = 0; + u16 reg_data, reg_data2; + + DEBUGFUNC("e1000_configure_kmrn_for_10_100"); + + reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT; + ret_val = + e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, + reg_data); + if (ret_val) + return ret_val; + + /* Configure Transmit Inter-Packet Gap */ + tipg = E1000_READ_REG(hw, E1000_TIPG); + tipg &= ~E1000_TIPG_IPGT_MASK; + tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN; + E1000_WRITE_REG(hw, E1000_TIPG, tipg); + + do { + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, + &reg_data); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, + &reg_data2); + if (ret_val) + return ret_val; + i++; + } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY)); + + if (duplex == HALF_DUPLEX) + reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER; + else + reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; + + return hw->phy.ops.write_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); +} + +/** + * e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation + * @hw: pointer to the HW structure + * + * Configure the KMRN interface by applying last minute quirks for + * gigabit operation. + **/ +STATIC s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 reg_data, reg_data2; + u32 tipg; + u32 i = 0; + + DEBUGFUNC("e1000_configure_kmrn_for_1000"); + + reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT; + ret_val = + e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, + reg_data); + if (ret_val) + return ret_val; + + /* Configure Transmit Inter-Packet Gap */ + tipg = E1000_READ_REG(hw, E1000_TIPG); + tipg &= ~E1000_TIPG_IPGT_MASK; + tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN; + E1000_WRITE_REG(hw, E1000_TIPG, tipg); + + do { + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, + &reg_data); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, + &reg_data2); + if (ret_val) + return ret_val; + i++; + } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY)); + + reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; + + return hw->phy.ops.write_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); +} + +/** + * e1000_read_kmrn_reg_80003es2lan - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquire semaphore, then read the PHY register at offset + * using the kumeran interface. The information retrieved is stored in data. + * Release the semaphore before exiting.
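+ * + * The offset is shifted into the KMRNCTRLSTA offset field and the REN + * bit is set to request a read; after a short delay the same register + * returns the 16-bit result in its lower half.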
+ **/ +STATIC s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + u32 kmrnctrlsta; + s32 ret_val; + + DEBUGFUNC("e1000_read_kmrn_reg_80003es2lan"); + + ret_val = e1000_acquire_mac_csr_80003es2lan(hw); + if (ret_val) + return ret_val; + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; + E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); + E1000_WRITE_FLUSH(hw); + + usec_delay(2); + + kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA); + *data = (u16)kmrnctrlsta; + + e1000_release_mac_csr_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_write_kmrn_reg_80003es2lan - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquire semaphore, then write the data to PHY register + * at the offset using the kumeran interface. Release semaphore + * before exiting. + **/ +STATIC s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 data) +{ + u32 kmrnctrlsta; + s32 ret_val; + + DEBUGFUNC("e1000_write_kmrn_reg_80003es2lan"); + + ret_val = e1000_acquire_mac_csr_80003es2lan(hw); + if (ret_val) + return ret_val; + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | data; + E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); + E1000_WRITE_FLUSH(hw); + + usec_delay(2); + + e1000_release_mac_csr_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_read_mac_addr_80003es2lan - Read device MAC address + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_read_mac_addr_80003es2lan"); + + /* If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. + */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + return ret_val; + + return e1000_read_mac_addr_generic(hw); +} + +/** + * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +STATIC void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(hw->mac.ops.check_mng_mode(hw) || + hw->phy.ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); + + return; +} + +/** + * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. 
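+ * + * The statistics registers are clear-on-read, so reading each register + * is enough to zero it; the values returned here are discarded.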
+ **/ +STATIC void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_80003es2lan"); + + e1000_clear_hw_cntrs_base_generic(hw); + + E1000_READ_REG(hw, E1000_PRC64); + E1000_READ_REG(hw, E1000_PRC127); + E1000_READ_REG(hw, E1000_PRC255); + E1000_READ_REG(hw, E1000_PRC511); + E1000_READ_REG(hw, E1000_PRC1023); + E1000_READ_REG(hw, E1000_PRC1522); + E1000_READ_REG(hw, E1000_PTC64); + E1000_READ_REG(hw, E1000_PTC127); + E1000_READ_REG(hw, E1000_PTC255); + E1000_READ_REG(hw, E1000_PTC511); + E1000_READ_REG(hw, E1000_PTC1023); + E1000_READ_REG(hw, E1000_PTC1522); + + E1000_READ_REG(hw, E1000_ALGNERRC); + E1000_READ_REG(hw, E1000_RXERRC); + E1000_READ_REG(hw, E1000_TNCRS); + E1000_READ_REG(hw, E1000_CEXTERR); + E1000_READ_REG(hw, E1000_TSCTC); + E1000_READ_REG(hw, E1000_TSCTFC); + + E1000_READ_REG(hw, E1000_MGTPRC); + E1000_READ_REG(hw, E1000_MGTPDC); + E1000_READ_REG(hw, E1000_MGTPTC); + + E1000_READ_REG(hw, E1000_IAC); + E1000_READ_REG(hw, E1000_ICRXOC); + + E1000_READ_REG(hw, E1000_ICRXPTC); + E1000_READ_REG(hw, E1000_ICRXATC); + E1000_READ_REG(hw, E1000_ICTXPTC); + E1000_READ_REG(hw, E1000_ICTXATC); + E1000_READ_REG(hw, E1000_ICTXQEC); + E1000_READ_REG(hw, E1000_ICTXQMTC); + E1000_READ_REG(hw, E1000_ICRXDMTC); +} diff --git a/drivers/net/e1000/base/e1000_80003es2lan.h b/drivers/net/e1000/base/e1000_80003es2lan.h new file mode 100644 index 00000000..93ec19be --- /dev/null +++ b/drivers/net/e1000/base/e1000_80003es2lan.h @@ -0,0 +1,100 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#ifndef _E1000_80003ES2LAN_H_ +#define _E1000_80003ES2LAN_H_ + +#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00 +#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02 +#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10 +#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F + +#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008 +#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800 +#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010 + +#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004 +#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000 +#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000 + +#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C +#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004 + +#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gig Carry Extend Padding */ +#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000 + +#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8 +#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9 + +/* GG82563 PHY Specific Status Register (Page 0, Register 16 */ +#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Dis */ +#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060 +#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */ +#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */ +#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */ + +/* PHY Specific Control Register 2 (Page 0, Register 26) */ +#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Neg */ + +/* MAC Specific Control Register (Page 2, Register 21) */ +/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */ +#define GG82563_MSCR_TX_CLK_MASK 0x0007 +#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004 +#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005 +#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007 + +#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */ + +/* DSP Distance Register (Page 5, Register 26) + * 0 = <50M + * 1 = 50-80M + * 2 = 80-100M + * 3 = 110-140M + * 4 = >140M + */ +#define GG82563_DSPD_CABLE_LENGTH 0x0007 + +/* Kumeran Mode Control Register (Page 193, Register 16) */ +#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800 + +/* Max number of times Kumeran read/write should be validated */ +#define GG82563_MAX_KMRN_RETRY 0x5 + +/* Power Management Control Register (Page 193, Register 20) */ +/* 1=Enable SERDES Electrical Idle */ +#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001 + +/* In-Band Control Register (Page 194, Register 18) */ +#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */ + +#endif diff --git a/drivers/net/e1000/base/e1000_82540.c b/drivers/net/e1000/base/e1000_82540.c new file mode 100644 index 00000000..7de7b7ba --- /dev/null +++ b/drivers/net/e1000/base/e1000_82540.c @@ -0,0 +1,717 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. 
Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +/* + * 82540EM Gigabit Ethernet Controller + * 82540EP Gigabit Ethernet Controller + * 82545EM Gigabit Ethernet Controller (Copper) + * 82545EM Gigabit Ethernet Controller (Fiber) + * 82545GM Gigabit Ethernet Controller + * 82546EB Gigabit Ethernet Controller (Copper) + * 82546EB Gigabit Ethernet Controller (Fiber) + * 82546GB Gigabit Ethernet Controller + */ + +#include "e1000_api.h" + +STATIC s32 e1000_init_phy_params_82540(struct e1000_hw *hw); +STATIC s32 e1000_init_nvm_params_82540(struct e1000_hw *hw); +STATIC s32 e1000_init_mac_params_82540(struct e1000_hw *hw); +STATIC s32 e1000_adjust_serdes_amplitude_82540(struct e1000_hw *hw); +STATIC void e1000_clear_hw_cntrs_82540(struct e1000_hw *hw); +STATIC s32 e1000_init_hw_82540(struct e1000_hw *hw); +STATIC s32 e1000_reset_hw_82540(struct e1000_hw *hw); +STATIC s32 e1000_set_phy_mode_82540(struct e1000_hw *hw); +STATIC s32 e1000_set_vco_speed_82540(struct e1000_hw *hw); +STATIC s32 e1000_setup_copper_link_82540(struct e1000_hw *hw); +STATIC s32 e1000_setup_fiber_serdes_link_82540(struct e1000_hw *hw); +STATIC void e1000_power_down_phy_copper_82540(struct e1000_hw *hw); +STATIC s32 e1000_read_mac_addr_82540(struct e1000_hw *hw); + +/** + * e1000_init_phy_params_82540 - Init PHY func ptrs. 
+ * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_phy_params_82540(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 10000; + phy->type = e1000_phy_m88; + + /* Function Pointers */ + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.commit = e1000_phy_sw_reset_generic; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + phy->ops.get_cable_length = e1000_get_cable_length_m88; + phy->ops.get_cfg_done = e1000_get_cfg_done_generic; + phy->ops.read_reg = e1000_read_phy_reg_m88; + phy->ops.reset = e1000_phy_hw_reset_generic; + phy->ops.write_reg = e1000_write_phy_reg_m88; + phy->ops.get_info = e1000_get_phy_info_m88; + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_82540; + + ret_val = e1000_get_phy_id(hw); + if (ret_val) + goto out; + + /* Verify phy id */ + switch (hw->mac.type) { + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + if (phy->id == M88E1011_I_PHY_ID) + break; + /* Fall Through */ + default: + ret_val = -E1000_ERR_PHY; + goto out; + break; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params_82540 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_nvm_params_82540(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + + DEBUGFUNC("e1000_init_nvm_params_82540"); + + nvm->type = e1000_nvm_eeprom_microwire; + nvm->delay_usec = 50; + nvm->opcode_bits = 3; + switch (nvm->override) { + case e1000_nvm_override_microwire_large: + nvm->address_bits = 8; + nvm->word_size = 256; + break; + case e1000_nvm_override_microwire_small: + nvm->address_bits = 6; + nvm->word_size = 64; + break; + default: + nvm->address_bits = eecd & E1000_EECD_SIZE ? 8 : 6; + nvm->word_size = eecd & E1000_EECD_SIZE ? 256 : 64; + break; + } + + /* Function Pointers */ + nvm->ops.acquire = e1000_acquire_nvm_generic; + nvm->ops.read = e1000_read_nvm_microwire; + nvm->ops.release = e1000_release_nvm_generic; + nvm->ops.update = e1000_update_nvm_checksum_generic; + nvm->ops.valid_led_default = e1000_valid_led_default_generic; + nvm->ops.validate = e1000_validate_nvm_checksum_generic; + nvm->ops.write = e1000_write_nvm_microwire; + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_82540 - Init MAC func ptrs. 
+ * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_mac_params_82540(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_init_mac_params_82540"); + + /* Set media type */ + switch (hw->device_id) { + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82545GM_FIBER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + hw->phy.media_type = e1000_media_type_fiber; + break; + case E1000_DEV_ID_82545GM_SERDES: + case E1000_DEV_ID_82546GB_SERDES: + hw->phy.media_type = e1000_media_type_internal_serdes; + break; + default: + hw->phy.media_type = e1000_media_type_copper; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + + /* Function pointers */ + + /* bus type/speed/width */ + mac->ops.get_bus_info = e1000_get_bus_info_pci_generic; + /* function id */ + mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pci; + /* reset */ + mac->ops.reset_hw = e1000_reset_hw_82540; + /* hw initialization */ + mac->ops.init_hw = e1000_init_hw_82540; + /* link setup */ + mac->ops.setup_link = e1000_setup_link_generic; + /* physical interface setup */ + mac->ops.setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) + ? e1000_setup_copper_link_82540 + : e1000_setup_fiber_serdes_link_82540; + /* check for link */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + mac->ops.check_for_link = e1000_check_for_copper_link_generic; + break; + case e1000_media_type_fiber: + mac->ops.check_for_link = e1000_check_for_fiber_link_generic; + break; + case e1000_media_type_internal_serdes: + mac->ops.check_for_link = e1000_check_for_serdes_link_generic; + break; + default: + ret_val = -E1000_ERR_CONFIG; + goto out; + break; + } + /* link info */ + mac->ops.get_link_up_info = + (hw->phy.media_type == e1000_media_type_copper) + ? e1000_get_speed_and_duplex_copper_generic + : e1000_get_speed_and_duplex_fiber_serdes_generic; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_generic; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_generic; + /* read mac address */ + mac->ops.read_mac_addr = e1000_read_mac_addr_82540; + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_generic; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_generic; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_generic; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_generic; + mac->ops.led_off = e1000_led_off_generic; + /* clear hardware counters */ + mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82540; + +out: + return ret_val; +} + +/** + * e1000_init_function_pointers_82540 - Init func ptrs. + * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + **/ +void e1000_init_function_pointers_82540(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_82540"); + + hw->mac.ops.init_params = e1000_init_mac_params_82540; + hw->nvm.ops.init_params = e1000_init_nvm_params_82540; + hw->phy.ops.init_params = e1000_init_phy_params_82540; +} + +/** + * e1000_reset_hw_82540 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. 
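+ * + * 82545 rev 3 and 82546 rev 3 parts issue the reset with a normal write + * to the CTRL shadow register (CTRL_DUP); the other parts use an + * IO-mapped write of CTRL because they cannot acknowledge the 64-bit + * write used to issue the reset.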
+ **/ +STATIC s32 e1000_reset_hw_82540(struct e1000_hw *hw) +{ + u32 ctrl, manc; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_reset_hw_82540"); + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF); + + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + /* + * Delay to allow any outstanding PCI transactions to complete + * before resetting the device. + */ + msec_delay(10); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGOUT("Issuing a global reset to 82540/82545/82546 MAC\n"); + switch (hw->mac.type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + E1000_WRITE_REG(hw, E1000_CTRL_DUP, ctrl | E1000_CTRL_RST); + break; + default: + /* + * These controllers can't ack the 64-bit write when + * issuing the reset, so we use IO-mapping as a + * workaround to issue the reset. + */ + E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + break; + } + + /* Wait for EEPROM reload */ + msec_delay(5); + + /* Disable HW ARPs on ASF enabled adapters */ + manc = E1000_READ_REG(hw, E1000_MANC); + manc &= ~E1000_MANC_ARP_EN; + E1000_WRITE_REG(hw, E1000_MANC, manc); + + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); + + return ret_val; +} + +/** + * e1000_init_hw_82540 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +STATIC s32 e1000_init_hw_82540(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 txdctl, ctrl_ext; + s32 ret_val; + u16 i; + + DEBUGFUNC("e1000_init_hw_82540"); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + if (ret_val) { + DEBUGOUT("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + } + + /* Disabling VLAN filtering */ + DEBUGOUT("Initializing the IEEE VLAN\n"); + if (mac->type < e1000_82545_rev_3) + E1000_WRITE_REG(hw, E1000_VET, 0); + + mac->ops.clear_vfta(hw); + + /* Setup the receive address. */ + e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + /* + * Avoid back to back register writes by adding the register + * read (flush). This is to protect against some strange + * bridge configurations that may issue Memory Write Block + * (MWB) to our register space. The *_rev_3 hardware at + * least doesn't respond correctly to every other dword in an + * MWB to our register space. + */ + E1000_WRITE_FLUSH(hw); + } + + if (mac->type < e1000_82545_rev_3) + e1000_pcix_mmrbc_workaround_generic(hw); + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + + txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0)); + txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl); + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82540(hw); + + if ((hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER) || + (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3)) { + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + /* + * Relaxed ordering must be disabled to avoid a parity + * error crash in a PCI slot. 
+ */ + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + } + + return ret_val; +} + +/** + * e1000_setup_copper_link_82540 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +STATIC s32 e1000_setup_copper_link_82540(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_setup_copper_link_82540"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + ret_val = e1000_set_phy_mode_82540(hw); + if (ret_val) + goto out; + + if (hw->mac.type == e1000_82545_rev_3 || + hw->mac.type == e1000_82546_rev_3) { + ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, + &data); + if (ret_val) + goto out; + data |= 0x00000008; + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, + data); + if (ret_val) + goto out; + } + + ret_val = e1000_copper_link_setup_m88(hw); + if (ret_val) + goto out; + + ret_val = e1000_setup_copper_link_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_setup_fiber_serdes_link_82540 - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Set the output amplitude to the value in the EEPROM and adjust the VCO + * speed to improve Bit Error Rate (BER) performance. Configures collision + * distance and flow control for fiber and serdes links. Upon successful + * setup, poll for link. + **/ +STATIC s32 e1000_setup_fiber_serdes_link_82540(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_setup_fiber_serdes_link_82540"); + + switch (mac->type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + if (hw->phy.media_type == e1000_media_type_internal_serdes) { + /* + * If we're on serdes media, adjust the output + * amplitude to value set in the EEPROM. + */ + ret_val = e1000_adjust_serdes_amplitude_82540(hw); + if (ret_val) + goto out; + } + /* Adjust VCO speed to improve BER performance */ + ret_val = e1000_set_vco_speed_82540(hw); + if (ret_val) + goto out; + default: + break; + } + + ret_val = e1000_setup_fiber_serdes_link_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_adjust_serdes_amplitude_82540 - Adjust amplitude based on EEPROM + * @hw: pointer to the HW structure + * + * Adjust the SERDES output amplitude based on the EEPROM settings. + **/ +STATIC s32 e1000_adjust_serdes_amplitude_82540(struct e1000_hw *hw) +{ + s32 ret_val; + u16 nvm_data; + + DEBUGFUNC("e1000_adjust_serdes_amplitude_82540"); + + ret_val = hw->nvm.ops.read(hw, NVM_SERDES_AMPLITUDE, 1, &nvm_data); + if (ret_val) + goto out; + + if (nvm_data != NVM_RESERVED_WORD) { + /* Adjust serdes output amplitude only. */ + nvm_data &= NVM_SERDES_AMPLITUDE_MASK; + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_EXT_CTRL, + nvm_data); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_set_vco_speed_82540 - Set VCO speed for better performance + * @hw: pointer to the HW structure + * + * Set the VCO speed to improve Bit Error Rate (BER) performance. 
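+ * + * Clears bit 8 of PHY register 30 on page 5 and sets bit 11 of the same + * register on page 4, restoring the originally selected page before + * returning.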
+ **/ +STATIC s32 e1000_set_vco_speed_82540(struct e1000_hw *hw) +{ + s32 ret_val; + u16 default_page = 0; + u16 phy_data; + + DEBUGFUNC("e1000_set_vco_speed_82540"); + + /* Set PHY register 30, page 5, bit 8 to 0 */ + + ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_PAGE_SELECT, + &default_page); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~M88E1000_PHY_VCO_REG_BIT8; + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + goto out; + + /* Set PHY register 30, page 4, bit 11 to 1 */ + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_PHY_VCO_REG_BIT11; + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, + default_page); + +out: + return ret_val; +} + +/** + * e1000_set_phy_mode_82540 - Set PHY to class A mode + * @hw: pointer to the HW structure + * + * Sets the PHY to class A mode and assumes the following operations will + * follow to enable the new class mode: + * 1. Do a PHY soft reset. + * 2. Restart auto-negotiation or force link. + **/ +STATIC s32 e1000_set_phy_mode_82540(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 nvm_data; + + DEBUGFUNC("e1000_set_phy_mode_82540"); + + if (hw->mac.type != e1000_82545_rev_3) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_PHY_CLASS_WORD, 1, &nvm_data); + if (ret_val) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + if ((nvm_data != NVM_RESERVED_WORD) && (nvm_data & NVM_PHY_CLASS_A)) { + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, + 0x000B); + if (ret_val) { + ret_val = -E1000_ERR_PHY; + goto out; + } + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, + 0x8104); + if (ret_val) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + } + +out: + return ret_val; +} + +/** + * e1000_power_down_phy_copper_82540 - Remove link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +STATIC void e1000_power_down_phy_copper_82540(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_SMBUS_EN)) + e1000_power_down_phy_copper(hw); + + return; +} + +/** + * e1000_clear_hw_cntrs_82540 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. 
+ **/ +STATIC void e1000_clear_hw_cntrs_82540(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_82540"); + + e1000_clear_hw_cntrs_base_generic(hw); + + E1000_READ_REG(hw, E1000_PRC64); + E1000_READ_REG(hw, E1000_PRC127); + E1000_READ_REG(hw, E1000_PRC255); + E1000_READ_REG(hw, E1000_PRC511); + E1000_READ_REG(hw, E1000_PRC1023); + E1000_READ_REG(hw, E1000_PRC1522); + E1000_READ_REG(hw, E1000_PTC64); + E1000_READ_REG(hw, E1000_PTC127); + E1000_READ_REG(hw, E1000_PTC255); + E1000_READ_REG(hw, E1000_PTC511); + E1000_READ_REG(hw, E1000_PTC1023); + E1000_READ_REG(hw, E1000_PTC1522); + + E1000_READ_REG(hw, E1000_ALGNERRC); + E1000_READ_REG(hw, E1000_RXERRC); + E1000_READ_REG(hw, E1000_TNCRS); + E1000_READ_REG(hw, E1000_CEXTERR); + E1000_READ_REG(hw, E1000_TSCTC); + E1000_READ_REG(hw, E1000_TSCTFC); + + E1000_READ_REG(hw, E1000_MGTPRC); + E1000_READ_REG(hw, E1000_MGTPDC); + E1000_READ_REG(hw, E1000_MGTPTC); +} + +/** + * e1000_read_mac_addr_82540 - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. + * + * This version is being used over generic because of customer issues + * with VmWare and Virtual Box when using generic. It seems in + * the emulated 82545, RAR[0] does NOT have a valid address after a + * reset, this older method works and using this breaks nothing for + * these legacy adapters. + **/ +s32 e1000_read_mac_addr_82540(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 offset, nvm_data, i; + + DEBUGFUNC("e1000_read_mac_addr"); + + for (i = 0; i < ETH_ADDR_LEN; i += 2) { + offset = i >> 1; + ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); + hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8); + } + + /* Flip last bit of mac address if we're on second port */ + if (hw->bus.func == E1000_FUNC_1) + hw->mac.perm_addr[5] ^= 1; + + for (i = 0; i < ETH_ADDR_LEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + +out: + return ret_val; +} diff --git a/drivers/net/e1000/base/e1000_82541.c b/drivers/net/e1000/base/e1000_82541.c new file mode 100644 index 00000000..9cdb91c9 --- /dev/null +++ b/drivers/net/e1000/base/e1000_82541.c @@ -0,0 +1,1268 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +/* + * 82541EI Gigabit Ethernet Controller + * 82541ER Gigabit Ethernet Controller + * 82541GI Gigabit Ethernet Controller + * 82541PI Gigabit Ethernet Controller + * 82547EI Gigabit Ethernet Controller + * 82547GI Gigabit Ethernet Controller + */ + +#include "e1000_api.h" + +STATIC s32 e1000_init_phy_params_82541(struct e1000_hw *hw); +STATIC s32 e1000_init_nvm_params_82541(struct e1000_hw *hw); +STATIC s32 e1000_init_mac_params_82541(struct e1000_hw *hw); +STATIC s32 e1000_reset_hw_82541(struct e1000_hw *hw); +STATIC s32 e1000_init_hw_82541(struct e1000_hw *hw); +STATIC s32 e1000_get_link_up_info_82541(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +STATIC s32 e1000_phy_hw_reset_82541(struct e1000_hw *hw); +STATIC s32 e1000_setup_copper_link_82541(struct e1000_hw *hw); +STATIC s32 e1000_check_for_link_82541(struct e1000_hw *hw); +STATIC s32 e1000_get_cable_length_igp_82541(struct e1000_hw *hw); +STATIC s32 e1000_set_d3_lplu_state_82541(struct e1000_hw *hw, + bool active); +STATIC s32 e1000_setup_led_82541(struct e1000_hw *hw); +STATIC s32 e1000_cleanup_led_82541(struct e1000_hw *hw); +STATIC void e1000_clear_hw_cntrs_82541(struct e1000_hw *hw); +STATIC s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw, + bool link_up); +STATIC s32 e1000_phy_init_script_82541(struct e1000_hw *hw); +STATIC void e1000_power_down_phy_copper_82541(struct e1000_hw *hw); + +STATIC const u16 e1000_igp_cable_length_table[] = { + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 10, 10, 10, 10, 10, + 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, 25, 25, 25, 25, 30, 30, 30, 30, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 50, 50, 50, 50, 50, 50, 50, 60, 60, + 60, 60, 60, 60, 60, 60, 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, + 80, 90, 90, 90, 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, + 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 120, 120, + 120, 120, 120, 120, 120, 120, 120, 120}; +#define IGP01E1000_AGC_LENGTH_TABLE_SIZE \ + (sizeof(e1000_igp_cable_length_table) / \ + sizeof(e1000_igp_cable_length_table[0])) + +/** + * e1000_init_phy_params_82541 - Init PHY func ptrs. 
+ * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_phy_params_82541(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + DEBUGFUNC("e1000_init_phy_params_82541"); + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 10000; + phy->type = e1000_phy_igp; + + /* Function Pointers */ + phy->ops.check_polarity = e1000_check_polarity_igp; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; + phy->ops.get_cable_length = e1000_get_cable_length_igp_82541; + phy->ops.get_cfg_done = e1000_get_cfg_done_generic; + phy->ops.get_info = e1000_get_phy_info_igp; + phy->ops.read_reg = e1000_read_phy_reg_igp; + phy->ops.reset = e1000_phy_hw_reset_82541; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82541; + phy->ops.write_reg = e1000_write_phy_reg_igp; + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_82541; + + ret_val = e1000_get_phy_id(hw); + if (ret_val) + goto out; + + /* Verify phy id */ + if (phy->id != IGP01E1000_I_PHY_ID) { + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params_82541 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_nvm_params_82541(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val = E1000_SUCCESS; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u16 size; + + DEBUGFUNC("e1000_init_nvm_params_82541"); + + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->type = e1000_nvm_eeprom_spi; + eecd |= E1000_EECD_ADDR_BITS; + break; + case e1000_nvm_override_spi_small: + nvm->type = e1000_nvm_eeprom_spi; + eecd &= ~E1000_EECD_ADDR_BITS; + break; + case e1000_nvm_override_microwire_large: + nvm->type = e1000_nvm_eeprom_microwire; + eecd |= E1000_EECD_SIZE; + break; + case e1000_nvm_override_microwire_small: + nvm->type = e1000_nvm_eeprom_microwire; + eecd &= ~E1000_EECD_SIZE; + break; + default: + nvm->type = eecd & E1000_EECD_TYPE ? e1000_nvm_eeprom_spi + : e1000_nvm_eeprom_microwire; + break; + } + + if (nvm->type == e1000_nvm_eeprom_spi) { + nvm->address_bits = (eecd & E1000_EECD_ADDR_BITS) ? 16 : 8; + nvm->delay_usec = 1; + nvm->opcode_bits = 8; + nvm->page_size = (eecd & E1000_EECD_ADDR_BITS) ? 32 : 8; + + /* Function Pointers */ + nvm->ops.acquire = e1000_acquire_nvm_generic; + nvm->ops.read = e1000_read_nvm_spi; + nvm->ops.release = e1000_release_nvm_generic; + nvm->ops.update = e1000_update_nvm_checksum_generic; + nvm->ops.valid_led_default = e1000_valid_led_default_generic; + nvm->ops.validate = e1000_validate_nvm_checksum_generic; + nvm->ops.write = e1000_write_nvm_spi; + + /* + * nvm->word_size must be discovered after the pointers + * are set so we can verify the size from the nvm image + * itself. Temporarily set it to a dummy value so the + * read will work. + */ + nvm->word_size = 64; + ret_val = nvm->ops.read(hw, NVM_CFG, 1, &size); + if (ret_val) + goto out; + size = (size & NVM_SIZE_MASK) >> NVM_SIZE_SHIFT; + /* + * if size != 0, it can be added to a constant and become + * the left-shift value to set the word_size. Otherwise, + * word_size stays at 64. + */ + if (size) { + size += NVM_WORD_SIZE_BASE_SHIFT_82541; + nvm->word_size = 1 << size; + } + } else { + nvm->address_bits = (eecd & E1000_EECD_ADDR_BITS) ? 8 : 6; + nvm->delay_usec = 50; + nvm->opcode_bits = 3; + nvm->word_size = (eecd & E1000_EECD_ADDR_BITS) ? 
256 : 64; + + /* Function Pointers */ + nvm->ops.acquire = e1000_acquire_nvm_generic; + nvm->ops.read = e1000_read_nvm_microwire; + nvm->ops.release = e1000_release_nvm_generic; + nvm->ops.update = e1000_update_nvm_checksum_generic; + nvm->ops.valid_led_default = e1000_valid_led_default_generic; + nvm->ops.validate = e1000_validate_nvm_checksum_generic; + nvm->ops.write = e1000_write_nvm_microwire; + } + +out: + return ret_val; +} + +/** + * e1000_init_mac_params_82541 - Init MAC func ptrs. + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_mac_params_82541(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_init_mac_params_82541"); + + /* Set media type */ + hw->phy.media_type = e1000_media_type_copper; + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; + + /* Function Pointers */ + + /* bus type/speed/width */ + mac->ops.get_bus_info = e1000_get_bus_info_pci_generic; + /* function id */ + mac->ops.set_lan_id = e1000_set_lan_id_single_port; + /* reset */ + mac->ops.reset_hw = e1000_reset_hw_82541; + /* hw initialization */ + mac->ops.init_hw = e1000_init_hw_82541; + /* link setup */ + mac->ops.setup_link = e1000_setup_link_generic; + /* physical interface link setup */ + mac->ops.setup_physical_interface = e1000_setup_copper_link_82541; + /* check for link */ + mac->ops.check_for_link = e1000_check_for_link_82541; + /* link info */ + mac->ops.get_link_up_info = e1000_get_link_up_info_82541; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_generic; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_generic; + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_generic; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_82541; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_82541; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_generic; + mac->ops.led_off = e1000_led_off_generic; + /* clear hardware counters */ + mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82541; + + return E1000_SUCCESS; +} + +/** + * e1000_init_function_pointers_82541 - Init func ptrs. + * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + **/ +void e1000_init_function_pointers_82541(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_82541"); + + hw->mac.ops.init_params = e1000_init_mac_params_82541; + hw->nvm.ops.init_params = e1000_init_nvm_params_82541; + hw->phy.ops.init_params = e1000_init_phy_params_82541; +} + +/** + * e1000_reset_hw_82541 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. + **/ +STATIC s32 e1000_reset_hw_82541(struct e1000_hw *hw) +{ + u32 ledctl, ctrl, manc; + + DEBUGFUNC("e1000_reset_hw_82541"); + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF); + + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + /* + * Delay to allow any outstanding PCI transactions to complete + * before resetting the device. 
+ */ + msec_delay(10); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* Must reset the Phy before resetting the MAC */ + if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) { + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_PHY_RST)); + E1000_WRITE_FLUSH(hw); + msec_delay(5); + } + + DEBUGOUT("Issuing a global reset to 82541/82547 MAC\n"); + switch (hw->mac.type) { + case e1000_82541: + case e1000_82541_rev_2: + /* + * These controllers can't ack the 64-bit write when + * issuing the reset, so we use IO-mapping as a + * workaround to issue the reset. + */ + E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + break; + default: + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + break; + } + + /* Wait for NVM reload */ + msec_delay(20); + + /* Disable HW ARPs on ASF enabled adapters */ + manc = E1000_READ_REG(hw, E1000_MANC); + manc &= ~E1000_MANC_ARP_EN; + E1000_WRITE_REG(hw, E1000_MANC, manc); + + if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) { + e1000_phy_init_script_82541(hw); + + /* Configure activity LED after Phy reset */ + ledctl = E1000_READ_REG(hw, E1000_LEDCTL); + ledctl &= IGP_ACTIVITY_LED_MASK; + ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); + } + + /* Once again, mask the interrupts */ + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF); + + /* Clear any pending interrupt events. */ + E1000_READ_REG(hw, E1000_ICR); + + return E1000_SUCCESS; +} + +/** + * e1000_init_hw_82541 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +STATIC s32 e1000_init_hw_82541(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; + u32 i, txdctl; + s32 ret_val; + + DEBUGFUNC("e1000_init_hw_82541"); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + if (ret_val) { + DEBUGOUT("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + } + + /* Storing the Speed Power Down value for later use */ + ret_val = hw->phy.ops.read_reg(hw, IGP01E1000_GMII_FIFO, + &dev_spec->spd_default); + if (ret_val) + goto out; + + /* Disabling VLAN filtering */ + DEBUGOUT("Initializing the IEEE VLAN\n"); + mac->ops.clear_vfta(hw); + + /* Setup the receive address. */ + e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + /* + * Avoid back to back register writes by adding the register + * read (flush). This is to protect against some strange + * bridge configurations that may issue Memory Write Block + * (MWB) to our register space. + */ + E1000_WRITE_FLUSH(hw); + } + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + + txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0)); + txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl); + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. 
+ */ + e1000_clear_hw_cntrs_82541(hw); + +out: + return ret_val; +} + +/** + * e1000_get_link_up_info_82541 - Report speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to speed buffer + * @duplex: pointer to duplex buffer + * + * Retrieve the current speed and duplex configuration. + **/ +STATIC s32 e1000_get_link_up_info_82541(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_get_link_up_info_82541"); + + ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex); + if (ret_val) + goto out; + + if (!phy->speed_downgraded) + goto out; + + /* + * IGP01 PHY may advertise full duplex operation after speed + * downgrade even if it is operating at half duplex. + * Here we set the duplex settings to match the duplex in the + * link partner's capabilities. + */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_EXP, &data); + if (ret_val) + goto out; + + if (!(data & NWAY_ER_LP_NWAY_CAPS)) { + *duplex = HALF_DUPLEX; + } else { + ret_val = phy->ops.read_reg(hw, PHY_LP_ABILITY, &data); + if (ret_val) + goto out; + + if (*speed == SPEED_100) { + if (!(data & NWAY_LPAR_100TX_FD_CAPS)) + *duplex = HALF_DUPLEX; + } else if (*speed == SPEED_10) { + if (!(data & NWAY_LPAR_10T_FD_CAPS)) + *duplex = HALF_DUPLEX; + } + } + +out: + return ret_val; +} + +/** + * e1000_phy_hw_reset_82541 - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + **/ +STATIC s32 e1000_phy_hw_reset_82541(struct e1000_hw *hw) +{ + s32 ret_val; + u32 ledctl; + + DEBUGFUNC("e1000_phy_hw_reset_82541"); + + ret_val = e1000_phy_hw_reset_generic(hw); + if (ret_val) + goto out; + + e1000_phy_init_script_82541(hw); + + if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) { + /* Configure activity LED after PHY reset */ + ledctl = E1000_READ_REG(hw, E1000_LEDCTL); + ledctl &= IGP_ACTIVITY_LED_MASK; + ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); + } + +out: + return ret_val; +} + +/** + * e1000_setup_copper_link_82541 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +STATIC s32 e1000_setup_copper_link_82541(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; + s32 ret_val; + u32 ctrl, ledctl; + + DEBUGFUNC("e1000_setup_copper_link_82541"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + + /* Earlier revs of the IGP phy require us to force MDI. 
*/ + if (hw->mac.type == e1000_82541 || hw->mac.type == e1000_82547) { + dev_spec->dsp_config = e1000_dsp_config_disabled; + phy->mdix = 1; + } else { + dev_spec->dsp_config = e1000_dsp_config_enabled; + } + + ret_val = e1000_copper_link_setup_igp(hw); + if (ret_val) + goto out; + + if (hw->mac.autoneg) { + if (dev_spec->ffe_config == e1000_ffe_config_active) + dev_spec->ffe_config = e1000_ffe_config_enabled; + } + + /* Configure activity LED after Phy reset */ + ledctl = E1000_READ_REG(hw, E1000_LEDCTL); + ledctl &= IGP_ACTIVITY_LED_MASK; + ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); + + ret_val = e1000_setup_copper_link_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_check_for_link_82541 - Check/Store link connection + * @hw: pointer to the HW structure + * + * This checks the link condition of the adapter and stores the + * results in the hw->mac structure. + **/ +STATIC s32 e1000_check_for_link_82541(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + DEBUGFUNC("e1000_check_for_link_82541"); + + /* + * We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = E1000_SUCCESS; + goto out; + } + + /* + * First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + ret_val = e1000_config_dsp_after_link_change_82541(hw, false); + goto out; /* No link detected */ + } + + mac->get_link_status = false; + + /* + * Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000_check_downshift_generic(hw); + + /* + * If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = e1000_config_dsp_after_link_change_82541(hw, true); + + /* + * Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + mac->ops.config_collision_dist(hw); + + /* + * Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) + DEBUGOUT("Error configuring flow control\n"); + +out: + return ret_val; +} + +/** + * e1000_config_dsp_after_link_change_82541 - Config DSP after link + * @hw: pointer to the HW structure + * @link_up: boolean flag for link up status + * + * Return E1000_ERR_PHY when failing to read/write the PHY, else E1000_SUCCESS + * at any other case. + * + * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a + * gigabit link is achieved to improve link quality. 
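+ * On link-up at 1000 Mb/s the routine adjusts the per-channel AGC/DSP
+ * parameter registers once the cable length is at least 50 m; on shorter
+ * cables it monitors the idle-error count and switches the FFE setting
+ * if the errors become excessive. On link-down it restores the DSP and
+ * FFE registers to their defaults.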
+ **/ +STATIC s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw, + bool link_up) +{ + struct e1000_phy_info *phy = &hw->phy; + struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; + s32 ret_val; + u32 idle_errs = 0; + u16 phy_data, phy_saved_data, speed, duplex, i; + u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20; + u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_PARAM_A, + IGP01E1000_PHY_AGC_PARAM_B, + IGP01E1000_PHY_AGC_PARAM_C, + IGP01E1000_PHY_AGC_PARAM_D}; + + DEBUGFUNC("e1000_config_dsp_after_link_change_82541"); + + if (link_up) { + ret_val = hw->mac.ops.get_link_up_info(hw, &speed, &duplex); + if (ret_val) { + DEBUGOUT("Error getting link speed and duplex\n"); + goto out; + } + + if (speed != SPEED_1000) { + ret_val = E1000_SUCCESS; + goto out; + } + + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) + goto out; + + if ((dev_spec->dsp_config == e1000_dsp_config_enabled) && + phy->min_cable_length >= 50) { + + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = phy->ops.read_reg(hw, + dsp_reg_array[i], + &phy_data); + if (ret_val) + goto out; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + + ret_val = phy->ops.write_reg(hw, + dsp_reg_array[i], + phy_data); + if (ret_val) + goto out; + } + dev_spec->dsp_config = e1000_dsp_config_activated; + } + + if ((dev_spec->ffe_config != e1000_ffe_config_enabled) || + (phy->min_cable_length >= 50)) { + ret_val = E1000_SUCCESS; + goto out; + } + + /* clear previous idle error counts */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + goto out; + + for (i = 0; i < ffe_idle_err_timeout; i++) { + usec_delay(1000); + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, + &phy_data); + if (ret_val) + goto out; + + idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT); + if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) { + dev_spec->ffe_config = e1000_ffe_config_active; + + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_CM_CP); + if (ret_val) + goto out; + break; + } + + if (idle_errs) + ffe_idle_err_timeout = + FFE_IDLE_ERR_COUNT_TIMEOUT_100; + } + } else { + if (dev_spec->dsp_config == e1000_dsp_config_activated) { + /* + * Save off the current value of register 0x2F5B + * to be restored at the end of the routines. + */ + ret_val = phy->ops.read_reg(hw, 0x2F5B, + &phy_saved_data); + if (ret_val) + goto out; + + /* Disable the PHY transmitter */ + ret_val = phy->ops.write_reg(hw, 0x2F5B, 0x0003); + if (ret_val) + goto out; + + msec_delay_irq(20); + + ret_val = phy->ops.write_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIG); + if (ret_val) + goto out; + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = phy->ops.read_reg(hw, + dsp_reg_array[i], + &phy_data); + if (ret_val) + goto out; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS; + + ret_val = phy->ops.write_reg(hw, + dsp_reg_array[i], + phy_data); + if (ret_val) + goto out; + } + + ret_val = phy->ops.write_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + goto out; + + msec_delay_irq(20); + + /* Now enable the transmitter */ + ret_val = phy->ops.write_reg(hw, 0x2F5B, + phy_saved_data); + if (ret_val) + goto out; + + dev_spec->dsp_config = e1000_dsp_config_enabled; + } + + if (dev_spec->ffe_config != e1000_ffe_config_active) { + ret_val = E1000_SUCCESS; + goto out; + } + + /* + * Save off the current value of register 0x2F5B + * to be restored at the end of the routines. 
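+ * (Register 0x2F5B gates the PHY transmitter; the 0x0003 write below
+ * disables it while the DSP registers are rewritten.)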
+ */ + ret_val = phy->ops.read_reg(hw, 0x2F5B, &phy_saved_data); + if (ret_val) + goto out; + + /* Disable the PHY transmitter */ + ret_val = phy->ops.write_reg(hw, 0x2F5B, 0x0003); + if (ret_val) + goto out; + + msec_delay_irq(20); + + ret_val = phy->ops.write_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIG); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_DEFAULT); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + goto out; + + msec_delay_irq(20); + + /* Now enable the transmitter */ + ret_val = phy->ops.write_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + goto out; + + dev_spec->ffe_config = e1000_ffe_config_enabled; + } + +out: + return ret_val; +} + +/** + * e1000_get_cable_length_igp_82541 - Determine cable length for igp PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the + * received signal, adjusting for the attenuation produced by the + * cable. By reading the AGC registers, which represent the + * combination of coarse and fine gain value, the value can be put + * into a lookup table to obtain the approximate cable length + * for each channel. + **/ +STATIC s32 e1000_get_cable_length_igp_82541(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 i, data; + u16 cur_agc_value, agc_value = 0; + u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; + u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {IGP01E1000_PHY_AGC_A, + IGP01E1000_PHY_AGC_B, + IGP01E1000_PHY_AGC_C, + IGP01E1000_PHY_AGC_D}; + + DEBUGFUNC("e1000_get_cable_length_igp_82541"); + + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &data); + if (ret_val) + goto out; + + cur_agc_value = data >> IGP01E1000_AGC_LENGTH_SHIFT; + + /* Bounds checking */ + if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || + (cur_agc_value == 0)) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + agc_value += cur_agc_value; + + if (min_agc_value > cur_agc_value) + min_agc_value = cur_agc_value; + } + + /* Remove the minimal AGC result for length < 50m */ + if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * 50) { + agc_value -= min_agc_value; + /* Average the three remaining channels for the length. */ + agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1); + } else { + /* Average the channels for the length. */ + agc_value /= IGP01E1000_PHY_CHANNEL_NUM; + } + + phy->min_cable_length = (e1000_igp_cable_length_table[agc_value] > + IGP01E1000_AGC_RANGE) + ? (e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) + : 0; + phy->max_cable_length = e1000_igp_cable_length_table[agc_value] + + IGP01E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +/** + * e1000_set_d3_lplu_state_82541 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. 
+ * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +STATIC s32 e1000_set_d3_lplu_state_82541(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_set_d3_lplu_state_82541"); + + switch (hw->mac.type) { + case e1000_82541_rev_2: + case e1000_82547_rev_2: + break; + default: + ret_val = e1000_set_d3_lplu_state_generic(hw, active); + goto out; + break; + } + + ret_val = phy->ops.read_reg(hw, IGP01E1000_GMII_FIFO, &data); + if (ret_val) + goto out; + + if (!active) { + data &= ~IGP01E1000_GMII_FLEX_SPD; + ret_val = phy->ops.write_reg(hw, IGP01E1000_GMII_FIFO, data); + if (ret_val) + goto out; + + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP01E1000_GMII_FLEX_SPD; + ret_val = phy->ops.write_reg(hw, IGP01E1000_GMII_FIFO, data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + } + +out: + return ret_val; +} + +/** + * e1000_setup_led_82541 - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. + **/ +STATIC s32 e1000_setup_led_82541(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; + s32 ret_val; + + DEBUGFUNC("e1000_setup_led_82541"); + + ret_val = hw->phy.ops.read_reg(hw, IGP01E1000_GMII_FIFO, + &dev_spec->spd_default); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, IGP01E1000_GMII_FIFO, + (u16)(dev_spec->spd_default & + ~IGP01E1000_GMII_SPD)); + if (ret_val) + goto out; + + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); + +out: + return ret_val; +} + +/** + * e1000_cleanup_led_82541 - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. 
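+ * This reverses e1000_setup_led_82541(): the saved GMII FIFO value is
+ * written back to the PHY and LEDCTL is restored from ledctl_default.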
+ **/ +STATIC s32 e1000_cleanup_led_82541(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; + s32 ret_val; + + DEBUGFUNC("e1000_cleanup_led_82541"); + + ret_val = hw->phy.ops.write_reg(hw, IGP01E1000_GMII_FIFO, + dev_spec->spd_default); + if (ret_val) + goto out; + + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default); + +out: + return ret_val; +} + +/** + * e1000_phy_init_script_82541 - Initialize GbE PHY + * @hw: pointer to the HW structure + * + * Initializes the IGP PHY. + **/ +STATIC s32 e1000_phy_init_script_82541(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; + u32 ret_val; + u16 phy_saved_data; + + DEBUGFUNC("e1000_phy_init_script_82541"); + + if (!dev_spec->phy_init_script) { + ret_val = E1000_SUCCESS; + goto out; + } + + /* Delay after phy reset to enable NVM configuration to load */ + msec_delay(20); + + /* + * Save off the current value of register 0x2F5B to be restored at + * the end of this routine. + */ + ret_val = hw->phy.ops.read_reg(hw, 0x2F5B, &phy_saved_data); + + /* Disabled the PHY transmitter */ + hw->phy.ops.write_reg(hw, 0x2F5B, 0x0003); + + msec_delay(20); + + hw->phy.ops.write_reg(hw, 0x0000, 0x0140); + + msec_delay(5); + + switch (hw->mac.type) { + case e1000_82541: + case e1000_82547: + hw->phy.ops.write_reg(hw, 0x1F95, 0x0001); + + hw->phy.ops.write_reg(hw, 0x1F71, 0xBD21); + + hw->phy.ops.write_reg(hw, 0x1F79, 0x0018); + + hw->phy.ops.write_reg(hw, 0x1F30, 0x1600); + + hw->phy.ops.write_reg(hw, 0x1F31, 0x0014); + + hw->phy.ops.write_reg(hw, 0x1F32, 0x161C); + + hw->phy.ops.write_reg(hw, 0x1F94, 0x0003); + + hw->phy.ops.write_reg(hw, 0x1F96, 0x003F); + + hw->phy.ops.write_reg(hw, 0x2010, 0x0008); + break; + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->phy.ops.write_reg(hw, 0x1F73, 0x0099); + break; + default: + break; + } + + hw->phy.ops.write_reg(hw, 0x0000, 0x3300); + + msec_delay(20); + + /* Now enable the transmitter */ + hw->phy.ops.write_reg(hw, 0x2F5B, phy_saved_data); + + if (hw->mac.type == e1000_82547) { + u16 fused, fine, coarse; + + /* Move to analog registers page */ + hw->phy.ops.read_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, + &fused); + + if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) { + hw->phy.ops.read_reg(hw, IGP01E1000_ANALOG_FUSE_STATUS, + &fused); + + fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK; + coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK; + + if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) { + coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10; + fine -= IGP01E1000_ANALOG_FUSE_FINE_1; + } else if (coarse == + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) + fine -= IGP01E1000_ANALOG_FUSE_FINE_10; + + fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) | + (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) | + (coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK); + + hw->phy.ops.write_reg(hw, + IGP01E1000_ANALOG_FUSE_CONTROL, + fused); + hw->phy.ops.write_reg(hw, + IGP01E1000_ANALOG_FUSE_BYPASS, + IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL); + } + } + +out: + return ret_val; +} + +/** + * e1000_init_script_state_82541 - Enable/Disable PHY init script + * @hw: pointer to the HW structure + * @state: boolean value used to enable/disable PHY init script + * + * Allows the driver to enable/disable the PHY init script, if the PHY is an + * IGP PHY. 
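+ * The flag is consulted by e1000_phy_init_script_82541(), which returns
+ * immediately when the script is disabled.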
+ **/ +void e1000_init_script_state_82541(struct e1000_hw *hw, bool state) +{ + struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; + + DEBUGFUNC("e1000_init_script_state_82541"); + + if (hw->phy.type != e1000_phy_igp) { + DEBUGOUT("Initialization script not necessary.\n"); + goto out; + } + + dev_spec->phy_init_script = state; + +out: + return; +} + +/** + * e1000_power_down_phy_copper_82541 - Remove link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +STATIC void e1000_power_down_phy_copper_82541(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_SMBUS_EN)) + e1000_power_down_phy_copper(hw); + + return; +} + +/** + * e1000_clear_hw_cntrs_82541 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +STATIC void e1000_clear_hw_cntrs_82541(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_82541"); + + e1000_clear_hw_cntrs_base_generic(hw); + + E1000_READ_REG(hw, E1000_PRC64); + E1000_READ_REG(hw, E1000_PRC127); + E1000_READ_REG(hw, E1000_PRC255); + E1000_READ_REG(hw, E1000_PRC511); + E1000_READ_REG(hw, E1000_PRC1023); + E1000_READ_REG(hw, E1000_PRC1522); + E1000_READ_REG(hw, E1000_PTC64); + E1000_READ_REG(hw, E1000_PTC127); + E1000_READ_REG(hw, E1000_PTC255); + E1000_READ_REG(hw, E1000_PTC511); + E1000_READ_REG(hw, E1000_PTC1023); + E1000_READ_REG(hw, E1000_PTC1522); + + E1000_READ_REG(hw, E1000_ALGNERRC); + E1000_READ_REG(hw, E1000_RXERRC); + E1000_READ_REG(hw, E1000_TNCRS); + E1000_READ_REG(hw, E1000_CEXTERR); + E1000_READ_REG(hw, E1000_TSCTC); + E1000_READ_REG(hw, E1000_TSCTFC); + + E1000_READ_REG(hw, E1000_MGTPRC); + E1000_READ_REG(hw, E1000_MGTPDC); + E1000_READ_REG(hw, E1000_MGTPTC); +} diff --git a/drivers/net/e1000/base/e1000_82541.h b/drivers/net/e1000/base/e1000_82541.h new file mode 100644 index 00000000..e0bee7ce --- /dev/null +++ b/drivers/net/e1000/base/e1000_82541.h @@ -0,0 +1,91 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _E1000_82541_H_ +#define _E1000_82541_H_ + +#define NVM_WORD_SIZE_BASE_SHIFT_82541 (NVM_WORD_SIZE_BASE_SHIFT + 1) + +#define IGP01E1000_PHY_CHANNEL_NUM 4 + +#define IGP01E1000_PHY_AGC_A 0x1172 +#define IGP01E1000_PHY_AGC_B 0x1272 +#define IGP01E1000_PHY_AGC_C 0x1472 +#define IGP01E1000_PHY_AGC_D 0x1872 + +#define IGP01E1000_PHY_AGC_PARAM_A 0x1171 +#define IGP01E1000_PHY_AGC_PARAM_B 0x1271 +#define IGP01E1000_PHY_AGC_PARAM_C 0x1471 +#define IGP01E1000_PHY_AGC_PARAM_D 0x1871 + +#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000 +#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000 + +#define IGP01E1000_PHY_DSP_RESET 0x1F33 + +#define IGP01E1000_PHY_DSP_FFE 0x1F35 +#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069 +#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A + +#define IGP01E1000_IEEE_FORCE_GIG 0x0140 +#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300 + +#define IGP01E1000_AGC_LENGTH_SHIFT 7 +#define IGP01E1000_AGC_RANGE 10 + +#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100 + +#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0 +#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1 +#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC +#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE + +#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100 +#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80 +#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070 +#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040 +#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010 +#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 +#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 +#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000 +#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002 + +#define IGP01E1000_MSE_CHANNEL_D 0x000F +#define IGP01E1000_MSE_CHANNEL_C 0x00F0 +#define IGP01E1000_MSE_CHANNEL_B 0x0F00 +#define IGP01E1000_MSE_CHANNEL_A 0xF000 + + +void e1000_init_script_state_82541(struct e1000_hw *hw, bool state); +#endif diff --git a/drivers/net/e1000/base/e1000_82542.c b/drivers/net/e1000/base/e1000_82542.c new file mode 100644 index 00000000..4f1183af --- /dev/null +++ b/drivers/net/e1000/base/e1000_82542.c @@ -0,0 +1,590 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. 
Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +/* + * 82542 Gigabit Ethernet Controller + */ + +#include "e1000_api.h" + +STATIC s32 e1000_init_phy_params_82542(struct e1000_hw *hw); +STATIC s32 e1000_init_nvm_params_82542(struct e1000_hw *hw); +STATIC s32 e1000_init_mac_params_82542(struct e1000_hw *hw); +STATIC s32 e1000_get_bus_info_82542(struct e1000_hw *hw); +STATIC s32 e1000_reset_hw_82542(struct e1000_hw *hw); +STATIC s32 e1000_init_hw_82542(struct e1000_hw *hw); +STATIC s32 e1000_setup_link_82542(struct e1000_hw *hw); +STATIC s32 e1000_led_on_82542(struct e1000_hw *hw); +STATIC s32 e1000_led_off_82542(struct e1000_hw *hw); +STATIC int e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index); +STATIC void e1000_clear_hw_cntrs_82542(struct e1000_hw *hw); +STATIC s32 e1000_read_mac_addr_82542(struct e1000_hw *hw); + +/** + * e1000_init_phy_params_82542 - Init PHY func ptrs. + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_phy_params_82542(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_init_phy_params_82542"); + + phy->type = e1000_phy_none; + + return ret_val; +} + +/** + * e1000_init_nvm_params_82542 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_nvm_params_82542(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + + DEBUGFUNC("e1000_init_nvm_params_82542"); + + nvm->address_bits = 6; + nvm->delay_usec = 50; + nvm->opcode_bits = 3; + nvm->type = e1000_nvm_eeprom_microwire; + nvm->word_size = 64; + + /* Function Pointers */ + nvm->ops.read = e1000_read_nvm_microwire; + nvm->ops.release = e1000_stop_nvm; + nvm->ops.write = e1000_write_nvm_microwire; + nvm->ops.update = e1000_update_nvm_checksum_generic; + nvm->ops.validate = e1000_validate_nvm_checksum_generic; + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_82542 - Init MAC func ptrs. 
+ * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_mac_params_82542(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_init_mac_params_82542"); + + /* Set media type */ + hw->phy.media_type = e1000_media_type_fiber; + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + + /* Function pointers */ + + /* bus type/speed/width */ + mac->ops.get_bus_info = e1000_get_bus_info_82542; + /* function id */ + mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pci; + /* reset */ + mac->ops.reset_hw = e1000_reset_hw_82542; + /* hw initialization */ + mac->ops.init_hw = e1000_init_hw_82542; + /* link setup */ + mac->ops.setup_link = e1000_setup_link_82542; + /* phy/fiber/serdes setup */ + mac->ops.setup_physical_interface = + e1000_setup_fiber_serdes_link_generic; + /* check for link */ + mac->ops.check_for_link = e1000_check_for_fiber_link_generic; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_generic; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_generic; + /* read mac address */ + mac->ops.read_mac_addr = e1000_read_mac_addr_82542; + /* set RAR */ + mac->ops.rar_set = e1000_rar_set_82542; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_82542; + mac->ops.led_off = e1000_led_off_82542; + /* clear hardware counters */ + mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82542; + /* link info */ + mac->ops.get_link_up_info = + e1000_get_speed_and_duplex_fiber_serdes_generic; + + return E1000_SUCCESS; +} + +/** + * e1000_init_function_pointers_82542 - Init func ptrs. + * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + **/ +void e1000_init_function_pointers_82542(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_82542"); + + hw->mac.ops.init_params = e1000_init_mac_params_82542; + hw->nvm.ops.init_params = e1000_init_nvm_params_82542; + hw->phy.ops.init_params = e1000_init_phy_params_82542; +} + +/** + * e1000_get_bus_info_82542 - Obtain bus information for adapter + * @hw: pointer to the HW structure + * + * This will obtain information about the HW bus for which the + * adapter is attached and stores it in the hw structure. + **/ +STATIC s32 e1000_get_bus_info_82542(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_get_bus_info_82542"); + + hw->bus.type = e1000_bus_type_pci; + hw->bus.speed = e1000_bus_speed_unknown; + hw->bus.width = e1000_bus_width_unknown; + + return E1000_SUCCESS; +} + +/** + * e1000_reset_hw_82542 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. 
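+ * On 82542 rev 2.0 parts, MWI is disabled before the reset and re-enabled
+ * afterwards only if the PCI command word has Memory Write and Invalidate
+ * set.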
+ **/ +STATIC s32 e1000_reset_hw_82542(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + s32 ret_val = E1000_SUCCESS; + u32 ctrl; + + DEBUGFUNC("e1000_reset_hw_82542"); + + if (hw->revision_id == E1000_REVISION_2) { + DEBUGOUT("Disabling MWI on 82542 rev 2\n"); + e1000_pci_clear_mwi(hw); + } + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + /* + * Delay to allow any outstanding PCI transactions to complete before + * resetting the device + */ + msec_delay(10); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGOUT("Issuing a global reset to 82542/82543 MAC\n"); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + + hw->nvm.ops.reload(hw); + msec_delay(2); + + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); + + if (hw->revision_id == E1000_REVISION_2) { + if (bus->pci_cmd_word & CMD_MEM_WRT_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + return ret_val; +} + +/** + * e1000_init_hw_82542 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +STATIC s32 e1000_init_hw_82542(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82542 *dev_spec = &hw->dev_spec._82542; + s32 ret_val = E1000_SUCCESS; + u32 ctrl; + u16 i; + + DEBUGFUNC("e1000_init_hw_82542"); + + /* Disabling VLAN filtering */ + E1000_WRITE_REG(hw, E1000_VET, 0); + mac->ops.clear_vfta(hw); + + /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ + if (hw->revision_id == E1000_REVISION_2) { + DEBUGOUT("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST); + E1000_WRITE_FLUSH(hw); + msec_delay(5); + } + + /* Setup the receive address. */ + e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); + + /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ + if (hw->revision_id == E1000_REVISION_2) { + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_FLUSH(hw); + msec_delay(1); + if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* + * Set the PCI priority bit correctly in the CTRL register. This + * determines if the adapter gives priority to receives, or if it + * gives equal priority to transmits and receives. + */ + if (dev_spec->dma_fairness) { + ctrl = E1000_READ_REG(hw, E1000_CTRL); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PRIOR); + } + + /* Setup link and flow control */ + ret_val = e1000_setup_link_82542(hw); + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82542(hw); + + return ret_val; +} + +/** + * e1000_setup_link_82542 - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. 
Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +STATIC s32 e1000_setup_link_82542(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + + DEBUGFUNC("e1000_setup_link_82542"); + + ret_val = e1000_set_default_fc_generic(hw); + if (ret_val) + goto out; + + hw->fc.requested_mode &= ~e1000_fc_tx_pause; + + if (mac->report_tx_early) + hw->fc.requested_mode &= ~e1000_fc_rx_pause; + + /* + * Save off the requested flow control mode for use later. Depending + * on the link partner's capabilities, we may or may not use this mode. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + DEBUGOUT1("After fix-ups FlowControl is now = %x\n", + hw->fc.current_mode); + + /* Call the necessary subroutine to configure the link. */ + ret_val = mac->ops.setup_physical_interface(hw); + if (ret_val) + goto out; + + /* + * Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + DEBUGOUT("Initializing Flow Control address, type and timer regs\n"); + + E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); + E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE); + + E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); + + ret_val = e1000_set_fc_watermarks_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_led_on_82542 - Turn on SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED on. + **/ +STATIC s32 e1000_led_on_82542(struct e1000_hw *hw) +{ + u32 ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGFUNC("e1000_led_on_82542"); + + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off_82542 - Turn off SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED off. + **/ +STATIC s32 e1000_led_off_82542(struct e1000_hw *hw) +{ + u32 ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGFUNC("e1000_led_off_82542"); + + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_rar_set_82542 - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. 
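+ * For example, the address 00:11:22:33:44:55 is packed little endian as
+ * rar_low = 0x33221100 and rar_high = 0x00005544, and E1000_RAH_AV is
+ * then set because the address is non-zero.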
+ **/ +STATIC int e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + DEBUGFUNC("e1000_rar_set_82542"); + + /* + * HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low); + E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high); + + return E1000_SUCCESS; +} + +/** + * e1000_translate_register_82542 - Translate the proper register offset + * @reg: e1000 register to be read + * + * Registers in 82542 are located in different offsets than other adapters + * even though they function in the same manner. This function takes in + * the name of the register to read and returns the correct offset for + * 82542 silicon. + **/ +u32 e1000_translate_register_82542(u32 reg) +{ + /* + * Some of the 82542 registers are located at different + * offsets than they are in newer adapters. + * Despite the difference in location, the registers + * function in the same manner. + */ + switch (reg) { + case E1000_RA: + reg = 0x00040; + break; + case E1000_RDTR: + reg = 0x00108; + break; + case E1000_RDBAL(0): + reg = 0x00110; + break; + case E1000_RDBAH(0): + reg = 0x00114; + break; + case E1000_RDLEN(0): + reg = 0x00118; + break; + case E1000_RDH(0): + reg = 0x00120; + break; + case E1000_RDT(0): + reg = 0x00128; + break; + case E1000_RDBAL(1): + reg = 0x00138; + break; + case E1000_RDBAH(1): + reg = 0x0013C; + break; + case E1000_RDLEN(1): + reg = 0x00140; + break; + case E1000_RDH(1): + reg = 0x00148; + break; + case E1000_RDT(1): + reg = 0x00150; + break; + case E1000_FCRTH: + reg = 0x00160; + break; + case E1000_FCRTL: + reg = 0x00168; + break; + case E1000_MTA: + reg = 0x00200; + break; + case E1000_TDBAL(0): + reg = 0x00420; + break; + case E1000_TDBAH(0): + reg = 0x00424; + break; + case E1000_TDLEN(0): + reg = 0x00428; + break; + case E1000_TDH(0): + reg = 0x00430; + break; + case E1000_TDT(0): + reg = 0x00438; + break; + case E1000_TIDV: + reg = 0x00440; + break; + case E1000_VFTA: + reg = 0x00600; + break; + case E1000_TDFH: + reg = 0x08010; + break; + case E1000_TDFT: + reg = 0x08018; + break; + default: + break; + } + + return reg; +} + +/** + * e1000_clear_hw_cntrs_82542 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +STATIC void e1000_clear_hw_cntrs_82542(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_82542"); + + e1000_clear_hw_cntrs_base_generic(hw); + + E1000_READ_REG(hw, E1000_PRC64); + E1000_READ_REG(hw, E1000_PRC127); + E1000_READ_REG(hw, E1000_PRC255); + E1000_READ_REG(hw, E1000_PRC511); + E1000_READ_REG(hw, E1000_PRC1023); + E1000_READ_REG(hw, E1000_PRC1522); + E1000_READ_REG(hw, E1000_PTC64); + E1000_READ_REG(hw, E1000_PTC127); + E1000_READ_REG(hw, E1000_PTC255); + E1000_READ_REG(hw, E1000_PTC511); + E1000_READ_REG(hw, E1000_PTC1023); + E1000_READ_REG(hw, E1000_PTC1522); +} + +/** + * e1000_read_mac_addr_82542 - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. 
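+ * Each 16-bit EEPROM word holds two address bytes, low byte first, so
+ * ETH_ADDR_LEN/2 words are read starting at word offset 0.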
+ **/ +s32 e1000_read_mac_addr_82542(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 offset, nvm_data, i; + + DEBUGFUNC("e1000_read_mac_addr"); + + for (i = 0; i < ETH_ADDR_LEN; i += 2) { + offset = i >> 1; + ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); + hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8); + } + + for (i = 0; i < ETH_ADDR_LEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + +out: + return ret_val; +} diff --git a/drivers/net/e1000/base/e1000_82543.c b/drivers/net/e1000/base/e1000_82543.c new file mode 100644 index 00000000..fc96199d --- /dev/null +++ b/drivers/net/e1000/base/e1000_82543.c @@ -0,0 +1,1553 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +/* + * 82543GC Gigabit Ethernet Controller (Fiber) + * 82543GC Gigabit Ethernet Controller (Copper) + * 82544EI Gigabit Ethernet Controller (Copper) + * 82544EI Gigabit Ethernet Controller (Fiber) + * 82544GC Gigabit Ethernet Controller (Copper) + * 82544GC Gigabit Ethernet Controller (LOM) + */ + +#include "e1000_api.h" + +STATIC s32 e1000_init_phy_params_82543(struct e1000_hw *hw); +STATIC s32 e1000_init_nvm_params_82543(struct e1000_hw *hw); +STATIC s32 e1000_init_mac_params_82543(struct e1000_hw *hw); +STATIC s32 e1000_read_phy_reg_82543(struct e1000_hw *hw, u32 offset, + u16 *data); +STATIC s32 e1000_write_phy_reg_82543(struct e1000_hw *hw, u32 offset, + u16 data); +STATIC s32 e1000_phy_force_speed_duplex_82543(struct e1000_hw *hw); +STATIC s32 e1000_phy_hw_reset_82543(struct e1000_hw *hw); +STATIC s32 e1000_reset_hw_82543(struct e1000_hw *hw); +STATIC s32 e1000_init_hw_82543(struct e1000_hw *hw); +STATIC s32 e1000_setup_link_82543(struct e1000_hw *hw); +STATIC s32 e1000_setup_copper_link_82543(struct e1000_hw *hw); +STATIC s32 e1000_setup_fiber_link_82543(struct e1000_hw *hw); +STATIC s32 e1000_check_for_copper_link_82543(struct e1000_hw *hw); +STATIC s32 e1000_check_for_fiber_link_82543(struct e1000_hw *hw); +STATIC s32 e1000_led_on_82543(struct e1000_hw *hw); +STATIC s32 e1000_led_off_82543(struct e1000_hw *hw); +STATIC void e1000_write_vfta_82543(struct e1000_hw *hw, u32 offset, + u32 value); +STATIC void e1000_clear_hw_cntrs_82543(struct e1000_hw *hw); +STATIC s32 e1000_config_mac_to_phy_82543(struct e1000_hw *hw); +STATIC bool e1000_init_phy_disabled_82543(struct e1000_hw *hw); +STATIC void e1000_lower_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl); +STATIC s32 e1000_polarity_reversal_workaround_82543(struct e1000_hw *hw); +STATIC void e1000_raise_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl); +STATIC u16 e1000_shift_in_mdi_bits_82543(struct e1000_hw *hw); +STATIC void e1000_shift_out_mdi_bits_82543(struct e1000_hw *hw, u32 data, + u16 count); +STATIC bool e1000_tbi_compatibility_enabled_82543(struct e1000_hw *hw); +STATIC void e1000_set_tbi_sbp_82543(struct e1000_hw *hw, bool state); + +/** + * e1000_init_phy_params_82543 - Init PHY func ptrs. + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_phy_params_82543(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_init_phy_params_82543"); + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + goto out; + } else { + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 10000; + phy->type = e1000_phy_m88; + + /* Function Pointers */ + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.commit = e1000_phy_sw_reset_generic; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82543; + phy->ops.get_cable_length = e1000_get_cable_length_m88; + phy->ops.get_cfg_done = e1000_get_cfg_done_generic; + phy->ops.read_reg = (hw->mac.type == e1000_82543) + ? e1000_read_phy_reg_82543 + : e1000_read_phy_reg_m88; + phy->ops.reset = (hw->mac.type == e1000_82543) + ? e1000_phy_hw_reset_82543 + : e1000_phy_hw_reset_generic; + phy->ops.write_reg = (hw->mac.type == e1000_82543) + ? 
e1000_write_phy_reg_82543 + : e1000_write_phy_reg_m88; + phy->ops.get_info = e1000_get_phy_info_m88; + + /* + * The external PHY of the 82543 can be in a funky state. + * Resetting helps us read the PHY registers for acquiring + * the PHY ID. + */ + if (!e1000_init_phy_disabled_82543(hw)) { + ret_val = phy->ops.reset(hw); + if (ret_val) { + DEBUGOUT("Resetting PHY during init failed.\n"); + goto out; + } + msec_delay(20); + } + + ret_val = e1000_get_phy_id(hw); + if (ret_val) + goto out; + + /* Verify phy id */ + switch (hw->mac.type) { + case e1000_82543: + if (phy->id != M88E1000_E_PHY_ID) { + ret_val = -E1000_ERR_PHY; + goto out; + } + break; + case e1000_82544: + if (phy->id != M88E1000_I_PHY_ID) { + ret_val = -E1000_ERR_PHY; + goto out; + } + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + break; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params_82543 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_nvm_params_82543(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + + DEBUGFUNC("e1000_init_nvm_params_82543"); + + nvm->type = e1000_nvm_eeprom_microwire; + nvm->word_size = 64; + nvm->delay_usec = 50; + nvm->address_bits = 6; + nvm->opcode_bits = 3; + + /* Function Pointers */ + nvm->ops.read = e1000_read_nvm_microwire; + nvm->ops.update = e1000_update_nvm_checksum_generic; + nvm->ops.valid_led_default = e1000_valid_led_default_generic; + nvm->ops.validate = e1000_validate_nvm_checksum_generic; + nvm->ops.write = e1000_write_nvm_microwire; + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_82543 - Init MAC func ptrs. + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_mac_params_82543(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_init_mac_params_82543"); + + /* Set media type */ + switch (hw->device_id) { + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82544EI_FIBER: + hw->phy.media_type = e1000_media_type_fiber; + break; + default: + hw->phy.media_type = e1000_media_type_copper; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + + /* Function pointers */ + + /* bus type/speed/width */ + mac->ops.get_bus_info = e1000_get_bus_info_pci_generic; + /* function id */ + mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pci; + /* reset */ + mac->ops.reset_hw = e1000_reset_hw_82543; + /* hw initialization */ + mac->ops.init_hw = e1000_init_hw_82543; + /* link setup */ + mac->ops.setup_link = e1000_setup_link_82543; + /* physical interface setup */ + mac->ops.setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) + ? e1000_setup_copper_link_82543 : e1000_setup_fiber_link_82543; + /* check for link */ + mac->ops.check_for_link = + (hw->phy.media_type == e1000_media_type_copper) + ? e1000_check_for_copper_link_82543 + : e1000_check_for_fiber_link_82543; + /* link info */ + mac->ops.get_link_up_info = + (hw->phy.media_type == e1000_media_type_copper) + ? 
e1000_get_speed_and_duplex_copper_generic + : e1000_get_speed_and_duplex_fiber_serdes_generic; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_82543; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_generic; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_82543; + mac->ops.led_off = e1000_led_off_82543; + /* clear hardware counters */ + mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82543; + + /* Set tbi compatibility */ + if ((hw->mac.type != e1000_82543) || + (hw->phy.media_type == e1000_media_type_fiber)) + e1000_set_tbi_compatibility_82543(hw, false); + + return E1000_SUCCESS; +} + +/** + * e1000_init_function_pointers_82543 - Init func ptrs. + * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + **/ +void e1000_init_function_pointers_82543(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_82543"); + + hw->mac.ops.init_params = e1000_init_mac_params_82543; + hw->nvm.ops.init_params = e1000_init_nvm_params_82543; + hw->phy.ops.init_params = e1000_init_phy_params_82543; +} + +/** + * e1000_tbi_compatibility_enabled_82543 - Returns TBI compat status + * @hw: pointer to the HW structure + * + * Returns the current status of 10-bit Interface (TBI) compatibility + * (enabled/disabled). + **/ +STATIC bool e1000_tbi_compatibility_enabled_82543(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543; + bool state = false; + + DEBUGFUNC("e1000_tbi_compatibility_enabled_82543"); + + if (hw->mac.type != e1000_82543) { + DEBUGOUT("TBI compatibility workaround for 82543 only.\n"); + goto out; + } + + state = !!(dev_spec->tbi_compatibility & TBI_COMPAT_ENABLED); + +out: + return state; +} + +/** + * e1000_set_tbi_compatibility_82543 - Set TBI compatibility + * @hw: pointer to the HW structure + * @state: enable/disable TBI compatibility + * + * Enables or disabled 10-bit Interface (TBI) compatibility. + **/ +void e1000_set_tbi_compatibility_82543(struct e1000_hw *hw, bool state) +{ + struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543; + + DEBUGFUNC("e1000_set_tbi_compatibility_82543"); + + if (hw->mac.type != e1000_82543) { + DEBUGOUT("TBI compatibility workaround for 82543 only.\n"); + goto out; + } + + if (state) + dev_spec->tbi_compatibility |= TBI_COMPAT_ENABLED; + else + dev_spec->tbi_compatibility &= ~TBI_COMPAT_ENABLED; + +out: + return; +} + +/** + * e1000_tbi_sbp_enabled_82543 - Returns TBI SBP status + * @hw: pointer to the HW structure + * + * Returns the current status of 10-bit Interface (TBI) store bad packet (SBP) + * (enabled/disabled). + **/ +bool e1000_tbi_sbp_enabled_82543(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543; + bool state = false; + + DEBUGFUNC("e1000_tbi_sbp_enabled_82543"); + + if (hw->mac.type != e1000_82543) { + DEBUGOUT("TBI compatibility workaround for 82543 only.\n"); + goto out; + } + + state = !!(dev_spec->tbi_compatibility & TBI_SBP_ENABLED); + +out: + return state; +} + +/** + * e1000_set_tbi_sbp_82543 - Set TBI SBP + * @hw: pointer to the HW structure + * @state: enable/disable TBI store bad packet + * + * Enables or disabled 10-bit Interface (TBI) store bad packet (SBP). 
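+ * SBP is only honoured while TBI compatibility itself is enabled; when the
+ * workaround is off, a request to store bad packets simply leaves the
+ * flag cleared.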
+ **/ +STATIC void e1000_set_tbi_sbp_82543(struct e1000_hw *hw, bool state) +{ + struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543; + + DEBUGFUNC("e1000_set_tbi_sbp_82543"); + + if (state && e1000_tbi_compatibility_enabled_82543(hw)) + dev_spec->tbi_compatibility |= TBI_SBP_ENABLED; + else + dev_spec->tbi_compatibility &= ~TBI_SBP_ENABLED; + + return; +} + +/** + * e1000_init_phy_disabled_82543 - Returns init PHY status + * @hw: pointer to the HW structure + * + * Returns the current status of whether PHY initialization is disabled. + * True if PHY initialization is disabled else false. + **/ +STATIC bool e1000_init_phy_disabled_82543(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543; + bool ret_val; + + DEBUGFUNC("e1000_init_phy_disabled_82543"); + + if (hw->mac.type != e1000_82543) { + ret_val = false; + goto out; + } + + ret_val = dev_spec->init_phy_disabled; + +out: + return ret_val; +} + +/** + * e1000_tbi_adjust_stats_82543 - Adjust stats when TBI enabled + * @hw: pointer to the HW structure + * @stats: Struct containing statistic register values + * @frame_len: The length of the frame in question + * @mac_addr: The Ethernet destination address of the frame in question + * @max_frame_size: The maximum frame size + * + * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT + **/ +void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw, + struct e1000_hw_stats *stats, u32 frame_len, + u8 *mac_addr, u32 max_frame_size) +{ + if (!(e1000_tbi_sbp_enabled_82543(hw))) + goto out; + + /* First adjust the frame length. */ + frame_len--; + /* + * We need to adjust the statistics counters, since the hardware + * counters overcount this packet as a CRC error and undercount + * the packet as a good packet + */ + /* This packet should not be counted as a CRC error. */ + stats->crcerrs--; + /* This packet does count as a Good Packet Received. */ + stats->gprc++; + + /* Adjust the Good Octets received counters */ + stats->gorc += frame_len; + + /* + * Is this a broadcast or multicast? Check broadcast first, + * since the test for a multicast frame will test positive on + * a broadcast frame. + */ + if ((mac_addr[0] == 0xff) && (mac_addr[1] == 0xff)) + /* Broadcast packet */ + stats->bprc++; + else if (*mac_addr & 0x01) + /* Multicast packet */ + stats->mprc++; + + /* + * In this case, the hardware has over counted the number of + * oversize frames. + */ + if ((frame_len == max_frame_size) && (stats->roc > 0)) + stats->roc--; + + /* + * Adjust the bin counters when the extra byte put the frame in the + * wrong bin. Remember that the frame_len was adjusted above. + */ + if (frame_len == 64) { + stats->prc64++; + stats->prc127--; + } else if (frame_len == 127) { + stats->prc127++; + stats->prc255--; + } else if (frame_len == 255) { + stats->prc255++; + stats->prc511--; + } else if (frame_len == 511) { + stats->prc511++; + stats->prc1023--; + } else if (frame_len == 1023) { + stats->prc1023++; + stats->prc1522--; + } else if (frame_len == 1522) { + stats->prc1522++; + } + +out: + return; +} + +/** + * e1000_read_phy_reg_82543 - Read PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY at offset and stores the information read to data. 
+ **/ +STATIC s32 e1000_read_phy_reg_82543(struct e1000_hw *hw, u32 offset, u16 *data) +{ + u32 mdic; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_read_phy_reg_82543"); + + if (offset > MAX_PHY_REG_ADDRESS) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + ret_val = -E1000_ERR_PARAM; + goto out; + } + + /* + * We must first send a preamble through the MDIO pin to signal the + * beginning of an MII instruction. This is done by sending 32 + * consecutive "1" bits. + */ + e1000_shift_out_mdi_bits_82543(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* + * Now combine the next few fields that are required for a read + * operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine five different times. The format + * of an MII read instruction consists of a shift out of 14 bits and + * is defined as follows: + * <Preamble><SOF><Op Code><Phy Addr><Offset> + * followed by a shift in of 18 bits. This first two bits shifted in + * are TurnAround bits used to avoid contention on the MDIO pin when a + * READ operation is performed. These two bits are thrown away + * followed by a shift in of 16 bits which contains the desired data. + */ + mdic = (offset | (hw->phy.addr << 5) | + (PHY_OP_READ << 10) | (PHY_SOF << 12)); + + e1000_shift_out_mdi_bits_82543(hw, mdic, 14); + + /* + * Now that we've shifted out the read command to the MII, we need to + * "shift in" the 16-bit value (18 total bits) of the requested PHY + * register address. + */ + *data = e1000_shift_in_mdi_bits_82543(hw); + +out: + return ret_val; +} + +/** + * e1000_write_phy_reg_82543 - Write PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be written + * @data: pointer to the data to be written at offset + * + * Writes data to the PHY at offset. + **/ +STATIC s32 e1000_write_phy_reg_82543(struct e1000_hw *hw, u32 offset, u16 data) +{ + u32 mdic; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_phy_reg_82543"); + + if (offset > MAX_PHY_REG_ADDRESS) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + ret_val = -E1000_ERR_PARAM; + goto out; + } + + /* + * We'll need to use the SW defined pins to shift the write command + * out to the PHY. We first send a preamble to the PHY to signal the + * beginning of the MII instruction. This is done by sending 32 + * consecutive "1" bits. + */ + e1000_shift_out_mdi_bits_82543(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* + * Now combine the remaining required fields that will indicate a + * write operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine for each field in the command. The + * format of a MII write instruction is as follows: + * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>. + */ + mdic = ((PHY_TURNAROUND) | (offset << 2) | (hw->phy.addr << 7) | + (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); + mdic <<= 16; + mdic |= (u32)data; + + e1000_shift_out_mdi_bits_82543(hw, mdic, 32); + +out: + return ret_val; +} + +/** + * e1000_raise_mdi_clk_82543 - Raise Management Data Input clock + * @hw: pointer to the HW structure + * @ctrl: pointer to the control register + * + * Raise the management data input clock by setting the MDC bit in the control + * register. + **/ +STATIC void e1000_raise_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl) +{ + /* + * Raise the clock input to the Management Data Clock (by setting the + * MDC bit), and then delay a sufficient amount of time. 
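+ * The 10 usec hold per clock phase keeps this bit-banged MDC well below
+ * the 2.5 MHz ceiling IEEE 802.3 defines for the management interface.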
+ */ + E1000_WRITE_REG(hw, E1000_CTRL, (*ctrl | E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(hw); + usec_delay(10); +} + +/** + * e1000_lower_mdi_clk_82543 - Lower Management Data Input clock + * @hw: pointer to the HW structure + * @ctrl: pointer to the control register + * + * Lower the management data input clock by clearing the MDC bit in the + * control register. + **/ +STATIC void e1000_lower_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl) +{ + /* + * Lower the clock input to the Management Data Clock (by clearing the + * MDC bit), and then delay a sufficient amount of time. + */ + E1000_WRITE_REG(hw, E1000_CTRL, (*ctrl & ~E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(hw); + usec_delay(10); +} + +/** + * e1000_shift_out_mdi_bits_82543 - Shift data bits our to the PHY + * @hw: pointer to the HW structure + * @data: data to send to the PHY + * @count: number of bits to shift out + * + * We need to shift 'count' bits out to the PHY. So, the value in the + * "data" parameter will be shifted out to the PHY one bit at a time. + * In order to do this, "data" must be broken down into bits. + **/ +STATIC void e1000_shift_out_mdi_bits_82543(struct e1000_hw *hw, u32 data, + u16 count) +{ + u32 ctrl, mask; + + /* + * We need to shift "count" number of bits out to the PHY. So, the + * value in the "data" parameter will be shifted out to the PHY one + * bit at a time. In order to do this, "data" must be broken down + * into bits. + */ + mask = 0x01; + mask <<= (count - 1); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */ + ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); + + while (mask) { + /* + * A "1" is shifted out to the PHY by setting the MDIO bit to + * "1" and then raising and lowering the Management Data Clock. + * A "0" is shifted out to the PHY by setting the MDIO bit to + * "0" and then raising and lowering the clock. + */ + if (data & mask) + ctrl |= E1000_CTRL_MDIO; + else + ctrl &= ~E1000_CTRL_MDIO; + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + E1000_WRITE_FLUSH(hw); + + usec_delay(10); + + e1000_raise_mdi_clk_82543(hw, &ctrl); + e1000_lower_mdi_clk_82543(hw, &ctrl); + + mask >>= 1; + } +} + +/** + * e1000_shift_in_mdi_bits_82543 - Shift data bits in from the PHY + * @hw: pointer to the HW structure + * + * In order to read a register from the PHY, we need to shift 18 bits + * in from the PHY. Bits are "shifted in" by raising the clock input to + * the PHY (setting the MDC bit), and then reading the value of the data out + * MDIO bit. + **/ +STATIC u16 e1000_shift_in_mdi_bits_82543(struct e1000_hw *hw) +{ + u32 ctrl; + u16 data = 0; + u8 i; + + /* + * In order to read a register from the PHY, we need to shift in a + * total of 18 bits from the PHY. The first two bit (turnaround) + * times are used to avoid contention on the MDIO pin when a read + * operation is performed. These two bits are ignored by us and + * thrown away. Bits are "shifted in" by raising the input to the + * Management Data Clock (setting the MDC bit) and then reading the + * value of the MDIO bit. + */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* + * Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as + * input. + */ + ctrl &= ~E1000_CTRL_MDIO_DIR; + ctrl &= ~E1000_CTRL_MDIO; + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + E1000_WRITE_FLUSH(hw); + + /* + * Raise and lower the clock before reading in the data. This accounts + * for the turnaround bits. The first clock occurred when we clocked + * out the last bit of the Register Address. 
+ */ + e1000_raise_mdi_clk_82543(hw, &ctrl); + e1000_lower_mdi_clk_82543(hw, &ctrl); + + for (data = 0, i = 0; i < 16; i++) { + data <<= 1; + e1000_raise_mdi_clk_82543(hw, &ctrl); + ctrl = E1000_READ_REG(hw, E1000_CTRL); + /* Check to see if we shifted in a "1". */ + if (ctrl & E1000_CTRL_MDIO) + data |= 1; + e1000_lower_mdi_clk_82543(hw, &ctrl); + } + + e1000_raise_mdi_clk_82543(hw, &ctrl); + e1000_lower_mdi_clk_82543(hw, &ctrl); + + return data; +} + +/** + * e1000_phy_force_speed_duplex_82543 - Force speed/duplex for PHY + * @hw: pointer to the HW structure + * + * Calls the function to force speed and duplex for the m88 PHY, and + * if the PHY is not auto-negotiating and the speed is forced to 10Mbit, + * then call the function for polarity reversal workaround. + **/ +STATIC s32 e1000_phy_force_speed_duplex_82543(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_phy_force_speed_duplex_82543"); + + ret_val = e1000_phy_force_speed_duplex_m88(hw); + if (ret_val) + goto out; + + if (!hw->mac.autoneg && (hw->mac.forced_speed_duplex & + E1000_ALL_10_SPEED)) + ret_val = e1000_polarity_reversal_workaround_82543(hw); + +out: + return ret_val; +} + +/** + * e1000_polarity_reversal_workaround_82543 - Workaround polarity reversal + * @hw: pointer to the HW structure + * + * When forcing link to 10 Full or 10 Half, the PHY can reverse the polarity + * inadvertently. To workaround the issue, we disable the transmitter on + * the PHY until we have established the link partner's link parameters. + **/ +STATIC s32 e1000_polarity_reversal_workaround_82543(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 mii_status_reg; + u16 i; + bool link; + + if (!(hw->phy.ops.write_reg)) + goto out; + + /* Polarity reversal workaround for forced 10F/10H links. */ + + /* Disable the transmitter on the PHY */ + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + goto out; + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + goto out; + + /* + * This loop will early-out if the NO link condition has been met. + * In other words, DO NOT use e1000_phy_has_link_generic() here. + */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* + * Read the MII Status Register and wait for Link Status bit + * to be clear. + */ + + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + goto out; + + if (!(mii_status_reg & ~MII_SR_LINK_STATUS)) + break; + msec_delay_irq(100); + } + + /* Recommended delay time after link has been lost */ + msec_delay_irq(1000); + + /* Now we will re-enable the transmitter on the PHY */ + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + goto out; + msec_delay_irq(50); + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0); + if (ret_val) + goto out; + msec_delay_irq(50); + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00); + if (ret_val) + goto out; + msec_delay_irq(50); + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + goto out; + + /* + * Read the MII Status Register and wait for Link Status bit + * to be set. 
+ */ + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_TIME, 100000, &link); + if (ret_val) + goto out; + +out: + return ret_val; +} + +/** + * e1000_phy_hw_reset_82543 - PHY hardware reset + * @hw: pointer to the HW structure + * + * Sets the PHY_RESET_DIR bit in the extended device control register + * to put the PHY into a reset and waits for completion. Once the reset + * has been accomplished, clear the PHY_RESET_DIR bit to take the PHY out + * of reset. + **/ +STATIC s32 e1000_phy_hw_reset_82543(struct e1000_hw *hw) +{ + u32 ctrl_ext; + s32 ret_val; + + DEBUGFUNC("e1000_phy_hw_reset_82543"); + + /* + * Read the Extended Device Control Register, assert the PHY_RESET_DIR + * bit to put the PHY into reset... + */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; + ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + + msec_delay(10); + + /* ...then take it out of reset. */ + ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + + usec_delay(150); + + if (!(hw->phy.ops.get_cfg_done)) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.get_cfg_done(hw); + + return ret_val; +} + +/** + * e1000_reset_hw_82543 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. + **/ +STATIC s32 e1000_reset_hw_82543(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_reset_hw_82543"); + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + e1000_set_tbi_sbp_82543(hw, false); + + /* + * Delay to allow any outstanding PCI transactions to complete before + * resetting the device + */ + msec_delay(10); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGOUT("Issuing a global reset to 82543/82544 MAC\n"); + if (hw->mac.type == e1000_82543) { + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + } else { + /* + * The 82544 can't ACK the 64-bit write when issuing the + * reset, so use IO-mapping as a workaround. + */ + E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + } + + /* + * After MAC reset, force reload of NVM to restore power-on + * settings to device. + */ + hw->nvm.ops.reload(hw); + msec_delay(2); + + /* Masking off and clearing any pending interrupts */ + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); + + return ret_val; +} + +/** + * e1000_init_hw_82543 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +STATIC s32 e1000_init_hw_82543(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543; + u32 ctrl; + s32 ret_val; + u16 i; + + DEBUGFUNC("e1000_init_hw_82543"); + + /* Disabling VLAN filtering */ + E1000_WRITE_REG(hw, E1000_VET, 0); + mac->ops.clear_vfta(hw); + + /* Setup the receive address. */ + e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + E1000_WRITE_FLUSH(hw); + } + + /* + * Set the PCI priority bit correctly in the CTRL register. 
This + * determines if the adapter gives priority to receives, or if it + * gives equal priority to transmits and receives. + */ + if (hw->mac.type == e1000_82543 && dev_spec->dma_fairness) { + ctrl = E1000_READ_REG(hw, E1000_CTRL); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PRIOR); + } + + e1000_pcix_mmrbc_workaround_generic(hw); + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82543(hw); + + return ret_val; +} + +/** + * e1000_setup_link_82543 - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Read the EEPROM to determine the initial polarity value and write the + * extended device control register with the information before calling + * the generic setup link function, which does the following: + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +STATIC s32 e1000_setup_link_82543(struct e1000_hw *hw) +{ + u32 ctrl_ext; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_setup_link_82543"); + + /* + * Take the 4 bits from NVM word 0xF that determine the initial + * polarity value for the SW controlled pins, and setup the + * Extended Device Control reg with that info. + * This is needed because one of the SW controlled pins is used for + * signal detection. So this should be done before phy setup. + */ + if (hw->mac.type == e1000_82543) { + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + ctrl_ext = ((data & NVM_WORD0F_SWPDIO_EXT_MASK) << + NVM_SWDPIO_EXT_SHIFT); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + } + + ret_val = e1000_setup_link_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_setup_copper_link_82543 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + **/ +STATIC s32 e1000_setup_copper_link_82543(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + bool link; + + DEBUGFUNC("e1000_setup_copper_link_82543"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL) | E1000_CTRL_SLU; + /* + * With 82543, we need to force speed and duplex on the MAC + * equal to what the PHY speed and duplex configuration is. + * In addition, we need to perform a hardware reset on the + * PHY to take it out of reset. + */ + if (hw->mac.type == e1000_82543) { + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + ret_val = hw->phy.ops.reset(hw); + if (ret_val) + goto out; + } else { + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + } + + /* Set MDI/MDI-X, Polarity Reversal, and downshift settings */ + ret_val = e1000_copper_link_setup_m88(hw); + if (ret_val) + goto out; + + if (hw->mac.autoneg) { + /* + * Setup autoneg and flow control advertisement and perform + * autonegotiation. 
+ */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + goto out; + } else { + /* + * PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + DEBUGOUT("Forcing Speed and Duplex\n"); + ret_val = e1000_phy_force_speed_duplex_82543(hw); + if (ret_val) { + DEBUGOUT("Error Forcing Speed and Duplex\n"); + goto out; + } + } + + /* + * Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10, + &link); + if (ret_val) + goto out; + + + if (link) { + DEBUGOUT("Valid link established!!!\n"); + /* Config the MAC and PHY after link is up */ + if (hw->mac.type == e1000_82544) { + hw->mac.ops.config_collision_dist(hw); + } else { + ret_val = e1000_config_mac_to_phy_82543(hw); + if (ret_val) + goto out; + } + ret_val = e1000_config_fc_after_link_up_generic(hw); + } else { + DEBUGOUT("Unable to establish link!!!\n"); + } + +out: + return ret_val; +} + +/** + * e1000_setup_fiber_link_82543 - Setup link for fiber + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber links. Upon + * successful setup, poll for link. + **/ +STATIC s32 e1000_setup_fiber_link_82543(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + DEBUGFUNC("e1000_setup_fiber_link_82543"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* Take the link out of reset */ + ctrl &= ~E1000_CTRL_LRST; + + hw->mac.ops.config_collision_dist(hw); + + ret_val = e1000_commit_fc_settings_generic(hw); + if (ret_val) + goto out; + + DEBUGOUT("Auto-negotiation enabled\n"); + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + E1000_WRITE_FLUSH(hw); + msec_delay(1); + + /* + * For these adapters, the SW definable pin 1 is cleared when the + * optics detect a signal. If we have a signal, then poll for a + * "Link-Up" indication. + */ + if (!(E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) + ret_val = e1000_poll_fiber_serdes_link_generic(hw); + else + DEBUGOUT("No signal detected\n"); + +out: + return ret_val; +} + +/** + * e1000_check_for_copper_link_82543 - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks the phy for link, if link exists, do the following: + * - check for downshift + * - do polarity workaround (if necessary) + * - configure collision distance + * - configure flow control after link up + * - configure tbi compatibility + **/ +STATIC s32 e1000_check_for_copper_link_82543(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 icr, rctl; + s32 ret_val; + u16 speed, duplex; + bool link; + + DEBUGFUNC("e1000_check_for_copper_link_82543"); + + if (!mac->get_link_status) { + ret_val = E1000_SUCCESS; + goto out; + } + + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = false; + + e1000_check_downshift_generic(hw); + + /* + * If we are forcing speed/duplex, then we can return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + /* + * If speed and duplex are forced to 10H or 10F, then we will + * implement the polarity reversal workaround. We disable + * interrupts first, and upon returning, place the devices + * interrupt state to its previous value except for the link + * status change interrupt which will happened due to the + * execution of this workaround. 
+ */ + if (mac->forced_speed_duplex & E1000_ALL_10_SPEED) { + E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF); + ret_val = e1000_polarity_reversal_workaround_82543(hw); + icr = E1000_READ_REG(hw, E1000_ICR); + E1000_WRITE_REG(hw, E1000_ICS, (icr & ~E1000_ICS_LSC)); + E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK); + } + + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* + * We have a M88E1000 PHY and Auto-Neg is enabled. If we + * have Si on board that is 82544 or newer, Auto + * Speed Detection takes care of MAC speed/duplex + * configuration. So we only need to configure Collision + * Distance in the MAC. Otherwise, we need to force + * speed/duplex on the MAC to the current PHY speed/duplex + * settings. + */ + if (mac->type == e1000_82544) + hw->mac.ops.config_collision_dist(hw); + else { + ret_val = e1000_config_mac_to_phy_82543(hw); + if (ret_val) { + DEBUGOUT("Error configuring MAC to PHY settings\n"); + goto out; + } + } + + /* + * Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) + DEBUGOUT("Error configuring flow control\n"); + + /* + * At this point we know that we are on copper and we have + * auto-negotiated link. These are conditions for checking the link + * partner capability register. We use the link speed to determine if + * TBI compatibility needs to be turned on or off. If the link is not + * at gigabit speed, then TBI compatibility is not needed. If we are + * at gigabit speed, we turn on TBI compatibility. + */ + if (e1000_tbi_compatibility_enabled_82543(hw)) { + ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); + if (ret_val) { + DEBUGOUT("Error getting link speed and duplex\n"); + return ret_val; + } + if (speed != SPEED_1000) { + /* + * If link speed is not set to gigabit speed, + * we do not need to enable TBI compatibility. + */ + if (e1000_tbi_sbp_enabled_82543(hw)) { + /* + * If we previously were in the mode, + * turn it off. + */ + e1000_set_tbi_sbp_82543(hw, false); + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl &= ~E1000_RCTL_SBP; + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + } + } else { + /* + * If TBI compatibility is was previously off, + * turn it on. For compatibility with a TBI link + * partner, we will store bad packets. Some + * frames have an additional byte on the end and + * will look like CRC errors to to the hardware. + */ + if (!e1000_tbi_sbp_enabled_82543(hw)) { + e1000_set_tbi_sbp_82543(hw, true); + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl |= E1000_RCTL_SBP; + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + } + } + } +out: + return ret_val; +} + +/** + * e1000_check_for_fiber_link_82543 - Check for link (Fiber) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. 
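+ * Signal presence is sampled on SW definable pin 1, which reads zero when
+ * the optics detect light, so a clear E1000_CTRL_SWDPIN1 bit means the
+ * cable is plugged in.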
+ **/ +STATIC s32 e1000_check_for_fiber_link_82543(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw, ctrl, status; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_check_for_fiber_link_82543"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + status = E1000_READ_REG(hw, E1000_STATUS); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + + /* + * If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), the cable is plugged in (we have signal), + * and our link partner is not trying to auto-negotiate with us (we + * are receiving idles or data), we need to force link up. We also + * need to give auto-negotiation time to complete, in case the cable + * was just plugged in. The autoneg_failed flag does this. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 0 == have signal */ + if ((!(ctrl & E1000_CTRL_SWDPIN1)) && + (!(status & E1000_STATUS_LU)) && + (!(rxcw & E1000_RXCW_C))) { + if (!mac->autoneg_failed) { + mac->autoneg_failed = true; + ret_val = 0; + goto out; + } + DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error configuring flow control\n"); + goto out; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* + * If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n"); + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = true; + } + +out: + return ret_val; +} + +/** + * e1000_config_mac_to_phy_82543 - Configure MAC to PHY settings + * @hw: pointer to the HW structure + * + * For the 82543 silicon, we need to set the MAC to match the settings + * of the PHY, even if the PHY is auto-negotiating. + **/ +STATIC s32 e1000_config_mac_to_phy_82543(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val = E1000_SUCCESS; + u16 phy_data; + + DEBUGFUNC("e1000_config_mac_to_phy_82543"); + + if (!(hw->phy.ops.read_reg)) + goto out; + + /* Set the bits to force speed and duplex */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); + + /* + * Set up duplex in the Device Control and Transmit Control + * registers depending on negotiated values. + */ + ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + goto out; + + ctrl &= ~E1000_CTRL_FD; + if (phy_data & M88E1000_PSSR_DPLX) + ctrl |= E1000_CTRL_FD; + + hw->mac.ops.config_collision_dist(hw); + + /* + * Set up speed in the Device Control register depending on + * negotiated values. 
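+ * The speed bits come from the M88 PHY Specific Status register read
+ * above, the same register that supplied the duplex setting.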
+ */ + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) + ctrl |= E1000_CTRL_SPD_1000; + else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS) + ctrl |= E1000_CTRL_SPD_100; + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + +out: + return ret_val; +} + +/** + * e1000_write_vfta_82543 - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: the 32-bit offset in which to write the value to. + * @value: the 32-bit value to write at location offset. + * + * This writes a 32-bit value to a 32-bit offset in the VLAN filter + * table. + **/ +STATIC void e1000_write_vfta_82543(struct e1000_hw *hw, u32 offset, u32 value) +{ + u32 temp; + + DEBUGFUNC("e1000_write_vfta_82543"); + + if ((hw->mac.type == e1000_82544) && (offset & 1)) { + temp = E1000_READ_REG_ARRAY(hw, E1000_VFTA, offset - 1); + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset - 1, temp); + E1000_WRITE_FLUSH(hw); + } else { + e1000_write_vfta_generic(hw, offset, value); + } +} + +/** + * e1000_led_on_82543 - Turn on SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED on. + **/ +STATIC s32 e1000_led_on_82543(struct e1000_hw *hw) +{ + u32 ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGFUNC("e1000_led_on_82543"); + + if (hw->mac.type == e1000_82544 && + hw->phy.media_type == e1000_media_type_copper) { + /* Clear SW-definable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Fiber 82544 and all 82543 use this method */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off_82543 - Turn off SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED off. + **/ +STATIC s32 e1000_led_off_82543(struct e1000_hw *hw) +{ + u32 ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGFUNC("e1000_led_off_82543"); + + if (hw->mac.type == e1000_82544 && + hw->phy.media_type == e1000_media_type_copper) { + /* Set SW-definable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_clear_hw_cntrs_82543 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. 
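+ * The statistics registers are clear-on-read, so simply reading each one
+ * (and discarding the result) is enough to zero it.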
+ **/ +STATIC void e1000_clear_hw_cntrs_82543(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_82543"); + + e1000_clear_hw_cntrs_base_generic(hw); + + E1000_READ_REG(hw, E1000_PRC64); + E1000_READ_REG(hw, E1000_PRC127); + E1000_READ_REG(hw, E1000_PRC255); + E1000_READ_REG(hw, E1000_PRC511); + E1000_READ_REG(hw, E1000_PRC1023); + E1000_READ_REG(hw, E1000_PRC1522); + E1000_READ_REG(hw, E1000_PTC64); + E1000_READ_REG(hw, E1000_PTC127); + E1000_READ_REG(hw, E1000_PTC255); + E1000_READ_REG(hw, E1000_PTC511); + E1000_READ_REG(hw, E1000_PTC1023); + E1000_READ_REG(hw, E1000_PTC1522); + + E1000_READ_REG(hw, E1000_ALGNERRC); + E1000_READ_REG(hw, E1000_RXERRC); + E1000_READ_REG(hw, E1000_TNCRS); + E1000_READ_REG(hw, E1000_CEXTERR); + E1000_READ_REG(hw, E1000_TSCTC); + E1000_READ_REG(hw, E1000_TSCTFC); +} diff --git a/drivers/net/e1000/base/e1000_82543.h b/drivers/net/e1000/base/e1000_82543.h new file mode 100644 index 00000000..4eb3f624 --- /dev/null +++ b/drivers/net/e1000/base/e1000_82543.h @@ -0,0 +1,56 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#ifndef _E1000_82543_H_ +#define _E1000_82543_H_ + +#define PHY_PREAMBLE 0xFFFFFFFF +#define PHY_PREAMBLE_SIZE 32 +#define PHY_SOF 0x1 +#define PHY_OP_READ 0x2 +#define PHY_OP_WRITE 0x1 +#define PHY_TURNAROUND 0x2 + +#define TBI_COMPAT_ENABLED 0x1 /* Global "knob" for the workaround */ +/* If TBI_COMPAT_ENABLED, then this is the current state (on/off) */ +#define TBI_SBP_ENABLED 0x2 + +void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw, + struct e1000_hw_stats *stats, + u32 frame_len, u8 *mac_addr, + u32 max_frame_size); +void e1000_set_tbi_compatibility_82543(struct e1000_hw *hw, + bool state); +bool e1000_tbi_sbp_enabled_82543(struct e1000_hw *hw); + +#endif diff --git a/drivers/net/e1000/base/e1000_82571.c b/drivers/net/e1000/base/e1000_82571.c new file mode 100644 index 00000000..7c279dbb --- /dev/null +++ b/drivers/net/e1000/base/e1000_82571.c @@ -0,0 +1,2030 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +/* 82571EB Gigabit Ethernet Controller + * 82571EB Gigabit Ethernet Controller (Copper) + * 82571EB Gigabit Ethernet Controller (Fiber) + * 82571EB Dual Port Gigabit Mezzanine Adapter + * 82571EB Quad Port Gigabit Mezzanine Adapter + * 82571PT Gigabit PT Quad Port Server ExpressModule + * 82572EI Gigabit Ethernet Controller (Copper) + * 82572EI Gigabit Ethernet Controller (Fiber) + * 82572EI Gigabit Ethernet Controller + * 82573V Gigabit Ethernet Controller (Copper) + * 82573E Gigabit Ethernet Controller (Copper) + * 82573L Gigabit Ethernet Controller + * 82574L Gigabit Network Connection + * 82583V Gigabit Network Connection + */ + +#include "e1000_api.h" + +STATIC s32 e1000_acquire_nvm_82571(struct e1000_hw *hw); +STATIC void e1000_release_nvm_82571(struct e1000_hw *hw); +STATIC s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +STATIC s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw); +STATIC s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw); +STATIC s32 e1000_get_cfg_done_82571(struct e1000_hw *hw); +STATIC s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, + bool active); +STATIC s32 e1000_reset_hw_82571(struct e1000_hw *hw); +STATIC s32 e1000_init_hw_82571(struct e1000_hw *hw); +STATIC void e1000_clear_vfta_82571(struct e1000_hw *hw); +STATIC bool e1000_check_mng_mode_82574(struct e1000_hw *hw); +STATIC s32 e1000_led_on_82574(struct e1000_hw *hw); +STATIC s32 e1000_setup_link_82571(struct e1000_hw *hw); +STATIC s32 e1000_setup_copper_link_82571(struct e1000_hw *hw); +STATIC s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw); +STATIC s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw); +STATIC s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data); +STATIC void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw); +STATIC s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw); +STATIC s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw); +STATIC s32 e1000_get_phy_id_82571(struct e1000_hw *hw); +STATIC void e1000_put_hw_semaphore_82571(struct e1000_hw *hw); +STATIC void e1000_put_hw_semaphore_82573(struct e1000_hw *hw); +STATIC s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw); +STATIC void e1000_put_hw_semaphore_82574(struct e1000_hw *hw); +STATIC s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, + bool active); +STATIC s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, + bool active); +STATIC void e1000_initialize_hw_bits_82571(struct e1000_hw *hw); +STATIC s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +STATIC s32 e1000_read_mac_addr_82571(struct e1000_hw *hw); +STATIC void e1000_power_down_phy_copper_82571(struct e1000_hw *hw); + +/** + * e1000_init_phy_params_82571 - Init PHY func ptrs. 
+ * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_phy_params_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + DEBUGFUNC("e1000_init_phy_params_82571"); + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + return E1000_SUCCESS; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + phy->ops.check_reset_block = e1000_check_reset_block_generic; + phy->ops.reset = e1000_phy_hw_reset_generic; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82571; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic; + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_82571; + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + phy->type = e1000_phy_igp_2; + phy->ops.get_cfg_done = e1000_get_cfg_done_82571; + phy->ops.get_info = e1000_get_phy_info_igp; + phy->ops.check_polarity = e1000_check_polarity_igp; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; + phy->ops.get_cable_length = e1000_get_cable_length_igp_2; + phy->ops.read_reg = e1000_read_phy_reg_igp; + phy->ops.write_reg = e1000_write_phy_reg_igp; + phy->ops.acquire = e1000_get_hw_semaphore_82571; + phy->ops.release = e1000_put_hw_semaphore_82571; + break; + case e1000_82573: + phy->type = e1000_phy_m88; + phy->ops.get_cfg_done = e1000_get_cfg_done_generic; + phy->ops.get_info = e1000_get_phy_info_m88; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.commit = e1000_phy_sw_reset_generic; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + phy->ops.get_cable_length = e1000_get_cable_length_m88; + phy->ops.read_reg = e1000_read_phy_reg_m88; + phy->ops.write_reg = e1000_write_phy_reg_m88; + phy->ops.acquire = e1000_get_hw_semaphore_82571; + phy->ops.release = e1000_put_hw_semaphore_82571; + break; + case e1000_82574: + case e1000_82583: + E1000_MUTEX_INIT(&hw->dev_spec._82571.swflag_mutex); + + phy->type = e1000_phy_bm; + phy->ops.get_cfg_done = e1000_get_cfg_done_generic; + phy->ops.get_info = e1000_get_phy_info_m88; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.commit = e1000_phy_sw_reset_generic; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + phy->ops.get_cable_length = e1000_get_cable_length_m88; + phy->ops.read_reg = e1000_read_phy_reg_bm2; + phy->ops.write_reg = e1000_write_phy_reg_bm2; + phy->ops.acquire = e1000_get_hw_semaphore_82574; + phy->ops.release = e1000_put_hw_semaphore_82574; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574; + break; + default: + return -E1000_ERR_PHY; + break; + } + + /* This can only be done after all function pointers are setup. */ + ret_val = e1000_get_phy_id_82571(hw); + if (ret_val) { + DEBUGOUT("Error getting PHY ID\n"); + return ret_val; + } + + /* Verify phy id */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + if (phy->id != IGP01E1000_I_PHY_ID) + ret_val = -E1000_ERR_PHY; + break; + case e1000_82573: + if (phy->id != M88E1111_I_PHY_ID) + ret_val = -E1000_ERR_PHY; + break; + case e1000_82574: + case e1000_82583: + if (phy->id != BME1000_E_PHY_ID_R2) + ret_val = -E1000_ERR_PHY; + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + if (ret_val) + DEBUGOUT1("PHY ID unknown: type = 0x%08x\n", phy->id); + + return ret_val; +} + +/** + * e1000_init_nvm_params_82571 - Init NVM func ptrs. 
+ * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u16 size; + + DEBUGFUNC("e1000_init_nvm_params_82571"); + + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (((eecd >> 15) & 0x3) == 0x3) { + nvm->type = e1000_nvm_flash_hw; + nvm->word_size = 2048; + /* Autonomous Flash update bit must be cleared due + * to Flash update issue. + */ + eecd &= ~E1000_EECD_AUPDEN; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + break; + } + /* Fall Through */ + default: + nvm->type = e1000_nvm_eeprom_spi; + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* EEPROM access above 16k is unsupported */ + if (size > 14) + size = 14; + nvm->word_size = 1 << size; + break; + } + + /* Function Pointers */ + switch (hw->mac.type) { + case e1000_82574: + case e1000_82583: + nvm->ops.acquire = e1000_get_hw_semaphore_82574; + nvm->ops.release = e1000_put_hw_semaphore_82574; + break; + default: + nvm->ops.acquire = e1000_acquire_nvm_82571; + nvm->ops.release = e1000_release_nvm_82571; + break; + } + nvm->ops.read = e1000_read_nvm_eerd; + nvm->ops.update = e1000_update_nvm_checksum_82571; + nvm->ops.validate = e1000_validate_nvm_checksum_82571; + nvm->ops.valid_led_default = e1000_valid_led_default_82571; + nvm->ops.write = e1000_write_nvm_82571; + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_82571 - Init MAC func ptrs. 
+ * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_mac_params_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 swsm = 0; + u32 swsm2 = 0; + bool force_clear_smbi = false; + + DEBUGFUNC("e1000_init_mac_params_82571"); + + /* Set media type and media-dependent function pointers */ + switch (hw->device_id) { + case E1000_DEV_ID_82571EB_FIBER: + case E1000_DEV_ID_82572EI_FIBER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + hw->phy.media_type = e1000_media_type_fiber; + mac->ops.setup_physical_interface = + e1000_setup_fiber_serdes_link_82571; + mac->ops.check_for_link = e1000_check_for_fiber_link_generic; + mac->ops.get_link_up_info = + e1000_get_speed_and_duplex_fiber_serdes_generic; + break; + case E1000_DEV_ID_82571EB_SERDES: + case E1000_DEV_ID_82571EB_SERDES_DUAL: + case E1000_DEV_ID_82571EB_SERDES_QUAD: + case E1000_DEV_ID_82572EI_SERDES: + hw->phy.media_type = e1000_media_type_internal_serdes; + mac->ops.setup_physical_interface = + e1000_setup_fiber_serdes_link_82571; + mac->ops.check_for_link = e1000_check_for_serdes_link_82571; + mac->ops.get_link_up_info = + e1000_get_speed_and_duplex_fiber_serdes_generic; + break; + default: + hw->phy.media_type = e1000_media_type_copper; + mac->ops.setup_physical_interface = + e1000_setup_copper_link_82571; + mac->ops.check_for_link = e1000_check_for_copper_link_generic; + mac->ops.get_link_up_info = + e1000_get_speed_and_duplex_copper_generic; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; + /* Adaptive IFS supported */ + mac->adaptive_ifs = true; + + /* Function pointers */ + + /* bus type/speed/width */ + mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic; + /* reset */ + mac->ops.reset_hw = e1000_reset_hw_82571; + /* hw initialization */ + mac->ops.init_hw = e1000_init_hw_82571; + /* link setup */ + mac->ops.setup_link = e1000_setup_link_82571; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_generic; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_82571; + /* read mac address */ + mac->ops.read_mac_addr = e1000_read_mac_addr_82571; + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_generic; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_generic; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_generic; + /* turn off LED */ + mac->ops.led_off = e1000_led_off_generic; + /* clear hardware counters */ + mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82571; + + /* MAC-specific function pointers */ + switch (hw->mac.type) { + case e1000_82573: + mac->ops.set_lan_id = e1000_set_lan_id_single_port; + mac->ops.check_mng_mode = e1000_check_mng_mode_generic; + mac->ops.led_on = e1000_led_on_generic; + mac->ops.blink_led = e1000_blink_led_generic; + + /* FWSM register */ + mac->has_fwsm = true; + /* ARC supported; valid only if manageability features are + * enabled. 
+ */ + mac->arc_subsystem_valid = !!(E1000_READ_REG(hw, E1000_FWSM) & + E1000_FWSM_MODE_MASK); + break; + case e1000_82574: + case e1000_82583: + mac->ops.set_lan_id = e1000_set_lan_id_single_port; + mac->ops.check_mng_mode = e1000_check_mng_mode_82574; + mac->ops.led_on = e1000_led_on_82574; + break; + default: + mac->ops.check_mng_mode = e1000_check_mng_mode_generic; + mac->ops.led_on = e1000_led_on_generic; + mac->ops.blink_led = e1000_blink_led_generic; + + /* FWSM register */ + mac->has_fwsm = true; + break; + } + + /* Ensure that the inter-port SWSM.SMBI lock bit is clear before + * first NVM or PHY access. This should be done for single-port + * devices, and for one port only on dual-port devices so that + * for those devices we can still use the SMBI lock to synchronize + * inter-port accesses to the PHY & NVM. + */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + swsm2 = E1000_READ_REG(hw, E1000_SWSM2); + + if (!(swsm2 & E1000_SWSM2_LOCK)) { + /* Only do this for the first interface on this card */ + E1000_WRITE_REG(hw, E1000_SWSM2, swsm2 | + E1000_SWSM2_LOCK); + force_clear_smbi = true; + } else { + force_clear_smbi = false; + } + break; + default: + force_clear_smbi = true; + break; + } + + if (force_clear_smbi) { + /* Make sure SWSM.SMBI is clear */ + swsm = E1000_READ_REG(hw, E1000_SWSM); + if (swsm & E1000_SWSM_SMBI) { + /* This bit should not be set on a first interface, and + * indicates that the bootagent or EFI code has + * improperly left this bit enabled + */ + DEBUGOUT("Please update your 82571 Bootagent\n"); + } + E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_SMBI); + } + + /* Initialze device specific counter of SMBI acquisition timeouts. */ + hw->dev_spec._82571.smb_counter = 0; + + return E1000_SUCCESS; +} + +/** + * e1000_init_function_pointers_82571 - Init func ptrs. + * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + **/ +void e1000_init_function_pointers_82571(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_82571"); + + hw->mac.ops.init_params = e1000_init_mac_params_82571; + hw->nvm.ops.init_params = e1000_init_nvm_params_82571; + hw->phy.ops.init_params = e1000_init_phy_params_82571; +} + +/** + * e1000_get_phy_id_82571 - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +STATIC s32 e1000_get_phy_id_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_id = 0; + + DEBUGFUNC("e1000_get_phy_id_82571"); + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* The 82571 firmware may still be configuring the PHY. + * In this case, we cannot access the PHY until the + * configuration is done. So we explicitly set the + * PHY ID. 
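/*
 * Sketch of how the 32-bit PHY id is assembled from the two 16-bit ID
 * registers read just below for the 82574/82583 path: ID1 supplies the
 * upper half, ID2 the lower half, and the revision is assumed to live in
 * the low four bits of ID2 (mirroring ~PHY_REVISION_MASK in the generic
 * e1000 code).  Illustrative only.
 */
#include <stdint.h>

static void sketch_compose_phy_id(uint16_t id1, uint16_t id2,
				  uint32_t *phy_id, uint32_t *revision)
{
	*phy_id = ((uint32_t)id1 << 16) | id2;	/* OUI bits + model + revision */
	*revision = id2 & 0xFu;			/* low nibble = silicon revision */
}
/* Example: id1 = 0x0141, id2 = 0x0CB1 gives phy_id 0x01410CB1, revision 1. */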
+ */ + phy->id = IGP01E1000_I_PHY_ID; + break; + case e1000_82573: + return e1000_get_phy_id(hw); + break; + case e1000_82574: + case e1000_82583: + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + return ret_val; + + phy->id = (u32)(phy_id << 16); + usec_delay(20); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + return ret_val; + + phy->id |= (u32)(phy_id); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + break; + default: + return -E1000_ERR_PHY; + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_get_hw_semaphore_82571 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +STATIC s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw) +{ + u32 swsm; + s32 sw_timeout = hw->nvm.word_size + 1; + s32 fw_timeout = hw->nvm.word_size + 1; + s32 i = 0; + + DEBUGFUNC("e1000_get_hw_semaphore_82571"); + + /* If we have timedout 3 times on trying to acquire + * the inter-port SMBI semaphore, there is old code + * operating on the other port, and it is not + * releasing SMBI. Modify the number of times that + * we try for the semaphore to interwork with this + * older code. + */ + if (hw->dev_spec._82571.smb_counter > 2) + sw_timeout = 1; + + /* Get the SW semaphore */ + while (i < sw_timeout) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usec_delay(50); + i++; + } + + if (i == sw_timeout) { + DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); + hw->dev_spec._82571.smb_counter++; + } + /* Get the FW semaphore. */ + for (i = 0; i < fw_timeout; i++) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + usec_delay(50); + } + + if (i == fw_timeout) { + /* Release semaphores */ + e1000_put_hw_semaphore_82571(hw); + DEBUGOUT("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_put_hw_semaphore_82571 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +STATIC void e1000_put_hw_semaphore_82571(struct e1000_hw *hw) +{ + u32 swsm; + + DEBUGFUNC("e1000_put_hw_semaphore_generic"); + + swsm = E1000_READ_REG(hw, E1000_SWSM); + + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + + E1000_WRITE_REG(hw, E1000_SWSM, swsm); +} + +/** + * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore during reset. 
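/*
 * Minimal sketch (plain variables instead of MMIO, bit values assumed to
 * mirror E1000_SWSM_SMBI/SWESMBI) of the two-stage SWSM handshake used by
 * e1000_get_hw_semaphore_82571 above: first poll for the inter-port SMBI
 * bit to clear, then request SWESMBI and read it back to see whether
 * firmware granted it.
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_SWSM_SMBI	0x00000002u
#define SKETCH_SWSM_SWESMBI	0x00000004u

static volatile uint32_t sketch_swsm;	/* stand-in for the SWSM register */

static bool sketch_get_hw_semaphore(int timeout)
{
	int i;

	/* Stage 1: wait for the software (inter-port) semaphore. */
	for (i = 0; i < timeout; i++)
		if (!(sketch_swsm & SKETCH_SWSM_SMBI))
			break;
	if (i == timeout)
		return false;

	/* Stage 2: request the firmware semaphore.  On real hardware the
	 * read-back can still show the bit clear if firmware owns it,
	 * which is why the driver loops here as well.
	 */
	for (i = 0; i < timeout; i++) {
		sketch_swsm |= SKETCH_SWSM_SWESMBI;
		if (sketch_swsm & SKETCH_SWSM_SWESMBI)
			return true;
	}
	return false;
}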
+ * + **/ +STATIC s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + s32 i = 0; + + DEBUGFUNC("e1000_get_hw_semaphore_82573"); + + extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); + do { + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); + extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); + + if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) + break; + + msec_delay(2); + i++; + } while (i < MDIO_OWNERSHIP_TIMEOUT); + + if (i == MDIO_OWNERSHIP_TIMEOUT) { + /* Release semaphores */ + e1000_put_hw_semaphore_82573(hw); + DEBUGOUT("Driver can't access the PHY\n"); + return -E1000_ERR_PHY; + } + + return E1000_SUCCESS; +} + +/** + * e1000_put_hw_semaphore_82573 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used during reset. + * + **/ +STATIC void e1000_put_hw_semaphore_82573(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + + DEBUGFUNC("e1000_put_hw_semaphore_82573"); + + extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); +} + +/** + * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM. + * + **/ +STATIC s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_get_hw_semaphore_82574"); + + E1000_MUTEX_LOCK(&hw->dev_spec._82571.swflag_mutex); + ret_val = e1000_get_hw_semaphore_82573(hw); + if (ret_val) + E1000_MUTEX_UNLOCK(&hw->dev_spec._82571.swflag_mutex); + return ret_val; +} + +/** + * e1000_put_hw_semaphore_82574 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + * + **/ +STATIC void e1000_put_hw_semaphore_82574(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_put_hw_semaphore_82574"); + + e1000_put_hw_semaphore_82573(hw); + E1000_MUTEX_UNLOCK(&hw->dev_spec._82571.swflag_mutex); +} + +/** + * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. + * LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +STATIC s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active) +{ + u32 data = E1000_READ_REG(hw, E1000_POEMB); + + DEBUGFUNC("e1000_set_d0_lplu_state_82574"); + + if (active) + data |= E1000_PHY_CTRL_D0A_LPLU; + else + data &= ~E1000_PHY_CTRL_D0A_LPLU; + + E1000_WRITE_REG(hw, E1000_POEMB, data); + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * The low power link up (lplu) state is set to the power management level D3 + * when active is true, else clear lplu for D3. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. 
+ **/ +STATIC s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active) +{ + u32 data = E1000_READ_REG(hw, E1000_POEMB); + + DEBUGFUNC("e1000_set_d3_lplu_state_82574"); + + if (!active) { + data &= ~E1000_PHY_CTRL_NOND0A_LPLU; + } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) || + (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= E1000_PHY_CTRL_NOND0A_LPLU; + } + + E1000_WRITE_REG(hw, E1000_POEMB, data); + return E1000_SUCCESS; +} + +/** + * e1000_acquire_nvm_82571 - Request for access to the EEPROM + * @hw: pointer to the HW structure + * + * To gain access to the EEPROM, first we must obtain a hardware semaphore. + * Then for non-82573 hardware, set the EEPROM access request bit and wait + * for EEPROM access grant bit. If the access grant bit is not set, release + * hardware semaphore. + **/ +STATIC s32 e1000_acquire_nvm_82571(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_acquire_nvm_82571"); + + ret_val = e1000_get_hw_semaphore_82571(hw); + if (ret_val) + return ret_val; + + switch (hw->mac.type) { + case e1000_82573: + break; + default: + ret_val = e1000_acquire_nvm_generic(hw); + break; + } + + if (ret_val) + e1000_put_hw_semaphore_82571(hw); + + return ret_val; +} + +/** + * e1000_release_nvm_82571 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +STATIC void e1000_release_nvm_82571(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_release_nvm_82571"); + + e1000_release_nvm_generic(hw); + e1000_put_hw_semaphore_82571(hw); +} + +/** + * e1000_write_nvm_82571 - Write to EEPROM using appropriate interface + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * For non-82573 silicon, write data to EEPROM at offset using SPI interface. + * + * If e1000_update_nvm_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +STATIC s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_write_nvm_82571"); + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data); + break; + case e1000_82571: + case e1000_82572: + ret_val = e1000_write_nvm_spi(hw, offset, words, data); + break; + default: + ret_val = -E1000_ERR_NVM; + break; + } + + return ret_val; +} + +/** + * e1000_update_nvm_checksum_82571 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +STATIC s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw) +{ + u32 eecd; + s32 ret_val; + u16 i; + + DEBUGFUNC("e1000_update_nvm_checksum_82571"); + + ret_val = e1000_update_nvm_checksum_generic(hw); + if (ret_val) + return ret_val; + + /* If our nvm is an EEPROM, then we're done + * otherwise, commit the checksum to the flash NVM. + */ + if (hw->nvm.type != e1000_nvm_flash_hw) + return E1000_SUCCESS; + + /* Check for pending operations. 
*/ + for (i = 0; i < E1000_FLASH_UPDATES; i++) { + msec_delay(1); + if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_FLUPD)) + break; + } + + if (i == E1000_FLASH_UPDATES) + return -E1000_ERR_NVM; + + /* Reset the firmware if using STM opcode. */ + if ((E1000_READ_REG(hw, E1000_FLOP) & 0xFF00) == E1000_STM_OPCODE) { + /* The enabling of and the actual reset must be done + * in two write cycles. + */ + E1000_WRITE_REG(hw, E1000_HICR, E1000_HICR_FW_RESET_ENABLE); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG(hw, E1000_HICR, E1000_HICR_FW_RESET); + } + + /* Commit the write to flash */ + eecd = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + + for (i = 0; i < E1000_FLASH_UPDATES; i++) { + msec_delay(1); + if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_FLUPD)) + break; + } + + if (i == E1000_FLASH_UPDATES) + return -E1000_ERR_NVM; + + return E1000_SUCCESS; +} + +/** + * e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +STATIC s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_validate_nvm_checksum_82571"); + + if (hw->nvm.type == e1000_nvm_flash_hw) + e1000_fix_nvm_checksum_82571(hw); + + return e1000_validate_nvm_checksum_generic(hw); +} + +/** + * e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * After checking for invalid values, poll the EEPROM to ensure the previous + * command has completed before trying to write the next word. After write + * poll for completion. + * + * If e1000_update_nvm_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +STATIC s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eewr = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_nvm_eewr_82571"); + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eewr = ((data[i] << E1000_NVM_RW_REG_DATA) | + ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) | + E1000_NVM_RW_REG_START); + + ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); + if (ret_val) + break; + + E1000_WRITE_REG(hw, E1000_EEWR, eewr); + + ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); + if (ret_val) + break; + } + + return ret_val; +} + +/** + * e1000_get_cfg_done_82571 - Poll for configuration done + * @hw: pointer to the HW structure + * + * Reads the management control register for the config done bit to be set. 
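/*
 * Sketch of the EEWR command word packing performed by
 * e1000_write_nvm_eewr_82571 above.  The shift and start-bit values are
 * assumptions mirroring the driver's E1000_NVM_RW_* defines and are shown
 * only to make the register layout explicit.
 */
#include <stdint.h>

#define SKETCH_NVM_RW_REG_DATA		16	/* data word in the upper 16 bits */
#define SKETCH_NVM_RW_ADDR_SHIFT	2	/* word address starts at bit 2   */
#define SKETCH_NVM_RW_REG_START		1	/* bit 0 kicks off the write      */

static uint32_t sketch_pack_eewr(uint16_t offset, uint16_t word)
{
	return ((uint32_t)word << SKETCH_NVM_RW_REG_DATA) |
	       ((uint32_t)offset << SKETCH_NVM_RW_ADDR_SHIFT) |
	       SKETCH_NVM_RW_REG_START;
}
/* Example: sketch_pack_eewr(0x23, 0x8000) yields 0x8000008D. */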
+ **/ +STATIC s32 e1000_get_cfg_done_82571(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + + DEBUGFUNC("e1000_get_cfg_done_82571"); + + while (timeout) { + if (E1000_READ_REG(hw, E1000_EEMNGCTL) & + E1000_NVM_CFG_DONE_PORT_0) + break; + msec_delay(1); + timeout--; + } + if (!timeout) { + DEBUGOUT("MNG configuration cycle has not completed.\n"); + return -E1000_ERR_RESET; + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When activating LPLU + * this function also disables smart speed and vice versa. LPLU will not be + * activated unless the device autonegotiation advertisement meets standards + * of either 10 or 10/100 or 10/100/1000 at all duplexes. This is a function + * pointer entry point only called by PHY setup routines. + **/ +STATIC s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_set_d0_lplu_state_82571"); + + if (!(phy->ops.read_reg)) + return E1000_SUCCESS; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + return ret_val; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + return ret_val; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_reset_hw_82571 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. + **/ +STATIC s32 e1000_reset_hw_82571(struct e1000_hw *hw) +{ + u32 ctrl, ctrl_ext, eecd, tctl; + s32 ret_val; + + DEBUGFUNC("e1000_reset_hw_82571"); + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. 
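/*
 * Simplified sketch of the LPLU vs. SmartSpeed policy applied by
 * e1000_set_d0_lplu_state_82571 above: the two features are mutually
 * exclusive, so enabling LPLU always clears SmartSpeed, and SmartSpeed is
 * only re-enabled once LPLU is off and it was explicitly requested.  Bit
 * values are assumptions mirroring IGP02E1000_PM_D0_LPLU and
 * IGP01E1000_PSCFR_SMART_SPEED; the driver also has a "default" SmartSpeed
 * setting that leaves the bit untouched, which this sketch collapses into
 * a boolean.
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_PM_D0_LPLU		0x0002u
#define SKETCH_PSCFR_SMART_SPEED	0x0080u

static void sketch_apply_d0_lplu(bool lplu_on, bool smart_speed_wanted,
				 uint16_t *power_mgmt, uint16_t *port_config)
{
	if (lplu_on) {
		*power_mgmt |= SKETCH_PM_D0_LPLU;
		*port_config &= ~SKETCH_PSCFR_SMART_SPEED;	/* never both */
	} else {
		*power_mgmt &= ~SKETCH_PM_D0_LPLU;
		if (smart_speed_wanted)
			*port_config |= SKETCH_PSCFR_SMART_SPEED;
		else
			*port_config &= ~SKETCH_PSCFR_SMART_SPEED;
	}
}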
+ */ + ret_val = e1000_disable_pcie_master_generic(hw); + if (ret_val) + DEBUGOUT("PCI-E Master disable polling has failed.\n"); + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + + E1000_WRITE_REG(hw, E1000_RCTL, 0); + tctl = E1000_READ_REG(hw, E1000_TCTL); + tctl &= ~E1000_TCTL_EN; + E1000_WRITE_REG(hw, E1000_TCTL, tctl); + E1000_WRITE_FLUSH(hw); + + msec_delay(10); + + /* Must acquire the MDIO ownership before MAC reset. + * Ownership defaults to firmware after a reset. + */ + switch (hw->mac.type) { + case e1000_82573: + ret_val = e1000_get_hw_semaphore_82573(hw); + break; + case e1000_82574: + case e1000_82583: + ret_val = e1000_get_hw_semaphore_82574(hw); + break; + default: + break; + } + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGOUT("Issuing a global reset to MAC\n"); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + + /* Must release MDIO ownership and mutex after MAC reset. */ + switch (hw->mac.type) { + case e1000_82573: + /* Release mutex only if the hw semaphore is acquired */ + if (!ret_val) + e1000_put_hw_semaphore_82573(hw); + break; + case e1000_82574: + case e1000_82583: + /* Release mutex only if the hw semaphore is acquired */ + if (!ret_val) + e1000_put_hw_semaphore_82574(hw); + break; + default: + break; + } + + if (hw->nvm.type == e1000_nvm_flash_hw) { + usec_delay(10); + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + } + + ret_val = e1000_get_auto_rd_done_generic(hw); + if (ret_val) + /* We don't want to continue accessing MAC registers. */ + return ret_val; + + /* Phy configuration from NVM just starts after EECD_AUTO_RD is set. + * Need to wait for Phy configuration completion before accessing + * NVM and Phy. + */ + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* REQ and GNT bits need to be cleared when using AUTO_RD + * to access the EEPROM. + */ + eecd = E1000_READ_REG(hw, E1000_EECD); + eecd &= ~(E1000_EECD_REQ | E1000_EECD_GNT); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + break; + case e1000_82573: + case e1000_82574: + case e1000_82583: + msec_delay(25); + break; + default: + break; + } + + /* Clear any pending interrupt events. */ + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); + + if (hw->mac.type == e1000_82571) { + /* Install any alternate MAC address into RAR0 */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + return ret_val; + + e1000_set_laa_state_82571(hw, true); + } + + /* Reinitialize the 82571 serdes link state machine */ + if (hw->phy.media_type == e1000_media_type_internal_serdes) + hw->mac.serdes_link_state = e1000_serdes_link_down; + + return E1000_SUCCESS; +} + +/** + * e1000_init_hw_82571 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. 
+ **/ +STATIC s32 e1000_init_hw_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 reg_data; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + + DEBUGFUNC("e1000_init_hw_82571"); + + e1000_initialize_hw_bits_82571(hw); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + /* An error is not fatal and we should not stop init due to this */ + if (ret_val) + DEBUGOUT("Error initializing identification LED\n"); + + /* Disabling VLAN filtering */ + DEBUGOUT("Initializing the IEEE VLAN\n"); + mac->ops.clear_vfta(hw); + + /* Setup the receive address. + * If, however, a locally administered address was assigned to the + * 82571, we must reserve a RAR for it to work around an issue where + * resetting one port will reload the MAC on the other port. + */ + if (e1000_get_laa_state_82571(hw)) + rar_count--; + e1000_init_rx_addrs_generic(hw, rar_count); + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + + /* Set the transmit descriptor write-back policy */ + reg_data = E1000_READ_REG(hw, E1000_TXDCTL(0)); + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); + E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg_data); + + /* ...for both queues. */ + switch (mac->type) { + case e1000_82573: + e1000_enable_tx_pkt_filtering_generic(hw); + /* fall through */ + case e1000_82574: + case e1000_82583: + reg_data = E1000_READ_REG(hw, E1000_GCR); + reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; + E1000_WRITE_REG(hw, E1000_GCR, reg_data); + break; + default: + reg_data = E1000_READ_REG(hw, E1000_TXDCTL(1)); + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | + E1000_TXDCTL_COUNT_DESC); + E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg_data); + break; + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82571(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits + * @hw: pointer to the HW structure + * + * Initializes required hardware-dependent bits needed for normal operation. 
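/*
 * The TXDCTL update above is a plain read-modify-write: clear the WTHRESH
 * field, then set full descriptor write-back and descriptor counting.  A
 * generic helper for that pattern is sketched here; the symbolic names in
 * the usage comment are the driver's own, and no register values are
 * repeated.
 */
#include <stdint.h>

static uint32_t sketch_rmw32(uint32_t reg, uint32_t clear_mask,
			     uint32_t set_bits)
{
	return (reg & ~clear_mask) | set_bits;
}

/*
 * Usage mirroring the code above:
 *   txdctl = sketch_rmw32(txdctl, E1000_TXDCTL_WTHRESH,
 *			   E1000_TXDCTL_FULL_TX_DESC_WB |
 *			   E1000_TXDCTL_COUNT_DESC);
 */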
+ **/ +STATIC void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) +{ + u32 reg; + + DEBUGFUNC("e1000_initialize_hw_bits_82571"); + + /* Transmit Descriptor Control 0 */ + reg = E1000_READ_REG(hw, E1000_TXDCTL(0)); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = E1000_READ_REG(hw, E1000_TXDCTL(1)); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = E1000_READ_REG(hw, E1000_TARC(0)); + reg &= ~(0xF << 27); /* 30:27 */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26); + break; + case e1000_82574: + case e1000_82583: + reg |= (1 << 26); + break; + default: + break; + } + E1000_WRITE_REG(hw, E1000_TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = E1000_READ_REG(hw, E1000_TARC(1)); + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + reg &= ~((1 << 29) | (1 << 30)); + reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26); + if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + E1000_WRITE_REG(hw, E1000_TARC(1), reg); + break; + default: + break; + } + + /* Device Control */ + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + reg = E1000_READ_REG(hw, E1000_CTRL); + reg &= ~(1 << 29); + E1000_WRITE_REG(hw, E1000_CTRL, reg); + break; + default: + break; + } + + /* Extended Device Control */ + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg &= ~(1 << 23); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + break; + default: + break; + } + + if (hw->mac.type == e1000_82571) { + reg = E1000_READ_REG(hw, E1000_PBA_ECC); + reg |= E1000_PBA_ECC_CORR_EN; + E1000_WRITE_REG(hw, E1000_PBA_ECC, reg); + } + + /* Workaround for hardware errata. + * Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572 + */ + if ((hw->mac.type == e1000_82571) || + (hw->mac.type == e1000_82572)) { + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + } + + /* Disable IPv6 extension header parsing because some malformed + * IPv6 headers can hang the Rx. + */ + if (hw->mac.type <= e1000_82573) { + reg = E1000_READ_REG(hw, E1000_RFCTL); + reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); + E1000_WRITE_REG(hw, E1000_RFCTL, reg); + } + + /* PCI-Ex Control Registers */ + switch (hw->mac.type) { + case e1000_82574: + case e1000_82583: + reg = E1000_READ_REG(hw, E1000_GCR); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_GCR, reg); + + /* Workaround for hardware errata. + * apply workaround for hardware errata documented in errata + * docs Fixes issue where some error prone or unreliable PCIe + * completions are occurring, particularly with ASPM enabled. + * Without fix, issue can cause Tx timeouts. + */ + reg = E1000_READ_REG(hw, E1000_GCR2); + reg |= 1; + E1000_WRITE_REG(hw, E1000_GCR2, reg); + break; + default: + break; + } + + return; +} + +/** + * e1000_clear_vfta_82571 - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. 
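/*
 * Sketch of the VFTA indexing math used by e1000_clear_vfta_82571 below:
 * the 4096-bit VLAN filter table is exposed as 128 32-bit registers, so a
 * VLAN ID selects register (id >> 5) & 0x7F and bit (id & 0x1F) inside it.
 * The literal masks stand in for the E1000_VFTA_ENTRY_* defines.
 */
#include <stdint.h>

static void sketch_vfta_locate(uint16_t vlan_id, uint32_t *reg_index,
			       uint32_t *bit_mask)
{
	*reg_index = (vlan_id >> 5) & 0x7Fu;	/* which VFTA register */
	*bit_mask = 1u << (vlan_id & 0x1Fu);	/* which bit within it */
}
/* Example: VLAN 100 lands in register 3 with bit mask 0x00000010. */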
+ **/ +STATIC void e1000_clear_vfta_82571(struct e1000_hw *hw) +{ + u32 offset; + u32 vfta_value = 0; + u32 vfta_offset = 0; + u32 vfta_bit_in_reg = 0; + + DEBUGFUNC("e1000_clear_vfta_82571"); + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (hw->mng_cookie.vlan_id != 0) { + /* The VFTA is a 4096b bit-field, each identifying + * a single VLAN ID. The following operations + * determine which 32b entry (i.e. offset) into the + * array we want to set the VLAN ID (i.e. bit) of + * the manageability unit. + */ + vfta_offset = (hw->mng_cookie.vlan_id >> + E1000_VFTA_ENTRY_SHIFT) & + E1000_VFTA_ENTRY_MASK; + vfta_bit_in_reg = + 1 << (hw->mng_cookie.vlan_id & + E1000_VFTA_ENTRY_BIT_SHIFT_MASK); + } + break; + default: + break; + } + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + /* If the offset we want to clear is the same offset of the + * manageability VLAN ID, then clear all bits except that of + * the manageability unit. + */ + vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value); + E1000_WRITE_FLUSH(hw); + } +} + +/** + * e1000_check_mng_mode_82574 - Check manageability is enabled + * @hw: pointer to the HW structure + * + * Reads the NVM Initialization Control Word 2 and returns true + * (>0) if any manageability is enabled, else false (0). + **/ +STATIC bool e1000_check_mng_mode_82574(struct e1000_hw *hw) +{ + u16 data; + s32 ret_val; + + DEBUGFUNC("e1000_check_mng_mode_82574"); + + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &data); + if (ret_val) + return false; + + return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0; +} + +/** + * e1000_led_on_82574 - Turn LED on + * @hw: pointer to the HW structure + * + * Turn LED on. + **/ +STATIC s32 e1000_led_on_82574(struct e1000_hw *hw) +{ + u32 ctrl; + u32 i; + + DEBUGFUNC("e1000_led_on_82574"); + + ctrl = hw->mac.ledctl_mode2; + if (!(E1000_STATUS_LU & E1000_READ_REG(hw, E1000_STATUS))) { + /* If no link, then turn LED on by setting the invert bit + * for each LED that's "on" (0x0E) in ledctl_mode2. + */ + for (i = 0; i < 4; i++) + if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == + E1000_LEDCTL_MODE_LED_ON) + ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8)); + } + E1000_WRITE_REG(hw, E1000_LEDCTL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_check_phy_82574 - check 82574 phy hung state + * @hw: pointer to the HW structure + * + * Returns whether phy is hung or not + **/ +bool e1000_check_phy_82574(struct e1000_hw *hw) +{ + u16 status_1kbt = 0; + u16 receive_errors = 0; + s32 ret_val; + + DEBUGFUNC("e1000_check_phy_82574"); + + /* Read PHY Receive Error counter first, if its is max - all F's then + * read the Base1000T status register If both are max then PHY is hung. + */ + ret_val = hw->phy.ops.read_reg(hw, E1000_RECEIVE_ERROR_COUNTER, + &receive_errors); + if (ret_val) + return false; + if (receive_errors == E1000_RECEIVE_ERROR_MAX) { + ret_val = hw->phy.ops.read_reg(hw, E1000_BASE1000T_STATUS, + &status_1kbt); + if (ret_val) + return false; + if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) == + E1000_IDLE_ERROR_COUNT_MASK) + return true; + } + + return false; +} + + +/** + * e1000_setup_link_82571 - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. 
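/*
 * Sketch of the "force on by inverting" trick used in e1000_led_on_82574
 * above: with no link, every LED byte whose mode is LED_ON (0x0E, as noted
 * in the comment there) gets its invert bit set so the LED still lights.
 * The invert-bit position is an assumption mirroring E1000_LEDCTL_LED0_IVRT.
 */
#include <stdint.h>

#define SKETCH_LEDCTL_MODE_LED_ON	0x0Eu
#define SKETCH_LEDCTL_LED0_IVRT		0x00000040u

static uint32_t sketch_ledctl_force_on(uint32_t ledctl_mode2)
{
	uint32_t ctrl = ledctl_mode2;
	int i;

	for (i = 0; i < 4; i++)
		if (((ledctl_mode2 >> (i * 8)) & 0xFFu) ==
		    SKETCH_LEDCTL_MODE_LED_ON)
			ctrl |= SKETCH_LEDCTL_LED0_IVRT << (i * 8);
	return ctrl;
}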
Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +STATIC s32 e1000_setup_link_82571(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_setup_link_82571"); + + /* 82573 does not have a word in the NVM to determine + * the default flow control setting, so we explicitly + * set it to full. + */ + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (hw->fc.requested_mode == e1000_fc_default) + hw->fc.requested_mode = e1000_fc_full; + break; + default: + break; + } + + return e1000_setup_link_generic(hw); +} + +/** + * e1000_setup_copper_link_82571 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + **/ +STATIC s32 e1000_setup_copper_link_82571(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + DEBUGFUNC("e1000_setup_copper_link_82571"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + switch (hw->phy.type) { + case e1000_phy_m88: + case e1000_phy_bm: + ret_val = e1000_copper_link_setup_m88(hw); + break; + case e1000_phy_igp_2: + ret_val = e1000_copper_link_setup_igp(hw); + break; + default: + return -E1000_ERR_PHY; + break; + } + + if (ret_val) + return ret_val; + + return e1000_setup_copper_link_generic(hw); +} + +/** + * e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes links. + * Upon successful setup, poll for link. + **/ +STATIC s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_setup_fiber_serdes_link_82571"); + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* If SerDes loopback mode is entered, there is no form + * of reset to take the adapter out of that mode. So we + * have to explicitly take the adapter out of loopback + * mode. This prevents drivers from twiddling their thumbs + * if another tool failed to take it out of loopback mode. + */ + E1000_WRITE_REG(hw, E1000_SCTL, + E1000_SCTL_DISABLE_SERDES_LOOPBACK); + break; + default: + break; + } + + return e1000_setup_fiber_serdes_link_generic(hw); +} + +/** + * e1000_check_for_serdes_link_82571 - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Reports the link state as up or down. + * + * If autonegotiation is supported by the link partner, the link state is + * determined by the result of autonegotiation. This is the most likely case. + * If autonegotiation is not supported by the link partner, and the link + * has a valid signal, force the link up. 
+ * + * The link state is represented internally here by 4 states: + * + * 1) down + * 2) autoneg_progress + * 3) autoneg_complete (the link successfully autonegotiated) + * 4) forced_up (the link has been forced up, it did not autonegotiate) + * + **/ +STATIC s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + u32 txcw; + u32 i; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_check_for_serdes_link_82571"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + status = E1000_READ_REG(hw, E1000_STATUS); + E1000_READ_REG(hw, E1000_RXCW); + /* SYNCH bit and IV bit are sticky */ + usec_delay(10); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + + if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) { + /* Receiver is synchronized with no invalid bits. */ + switch (mac->serdes_link_state) { + case e1000_serdes_link_autoneg_complete: + if (!(status & E1000_STATUS_LU)) { + /* We have lost link, retry autoneg before + * reporting link failure + */ + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + DEBUGOUT("AN_UP -> AN_PROG\n"); + } else { + mac->serdes_has_link = true; + } + break; + + case e1000_serdes_link_forced_up: + /* If we are receiving /C/ ordered sets, re-enable + * auto-negotiation in the TXCW register and disable + * forced link in the Device Control register in an + * attempt to auto-negotiate with our link partner. + */ + if (rxcw & E1000_RXCW_C) { + /* Enable autoneg, and unforce link up */ + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); + E1000_WRITE_REG(hw, E1000_CTRL, + (ctrl & ~E1000_CTRL_SLU)); + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + DEBUGOUT("FORCED_UP -> AN_PROG\n"); + } else { + mac->serdes_has_link = true; + } + break; + + case e1000_serdes_link_autoneg_progress: + if (rxcw & E1000_RXCW_C) { + /* We received /C/ ordered sets, meaning the + * link partner has autonegotiated, and we can + * trust the Link Up (LU) status bit. + */ + if (status & E1000_STATUS_LU) { + mac->serdes_link_state = + e1000_serdes_link_autoneg_complete; + DEBUGOUT("AN_PROG -> AN_UP\n"); + mac->serdes_has_link = true; + } else { + /* Autoneg completed, but failed. */ + mac->serdes_link_state = + e1000_serdes_link_down; + DEBUGOUT("AN_PROG -> DOWN\n"); + } + } else { + /* The link partner did not autoneg. + * Force link up and full duplex, and change + * state to forced. + */ + E1000_WRITE_REG(hw, E1000_TXCW, + (mac->txcw & ~E1000_TXCW_ANE)); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Configure Flow Control after link up. */ + ret_val = + e1000_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error config flow control\n"); + break; + } + mac->serdes_link_state = + e1000_serdes_link_forced_up; + mac->serdes_has_link = true; + DEBUGOUT("AN_PROG -> FORCED_UP\n"); + } + break; + + case e1000_serdes_link_down: + default: + /* The link was down but the receiver has now gained + * valid sync, so lets see if we can bring the link + * up. 
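/*
 * Compact sketch of the serdes link state machine implemented by
 * e1000_check_for_serdes_link_82571 above.  The inputs stand for the
 * RXCW.SYNCH bit, the /C/ ordered-set indication (RXCW.C) and STATUS.LU;
 * the invalid-symbol retry logic in the driver is deliberately omitted,
 * so this illustrates the transitions rather than replacing them.
 */
#include <stdbool.h>

enum sketch_serdes_state {
	SKETCH_DOWN,
	SKETCH_AN_PROGRESS,
	SKETCH_AN_COMPLETE,
	SKETCH_FORCED_UP,
};

static enum sketch_serdes_state
sketch_serdes_next(enum sketch_serdes_state cur, bool synch, bool c_sets,
		   bool link_up)
{
	if (!synch)
		return SKETCH_DOWN;		/* lost sync: ANYSTATE -> DOWN */

	switch (cur) {
	case SKETCH_AN_COMPLETE:
		/* Lost link while synced: retry autoneg before giving up. */
		return link_up ? SKETCH_AN_COMPLETE : SKETCH_AN_PROGRESS;
	case SKETCH_FORCED_UP:
		/* Partner started sending /C/: go back to negotiating. */
		return c_sets ? SKETCH_AN_PROGRESS : SKETCH_FORCED_UP;
	case SKETCH_AN_PROGRESS:
		if (c_sets)
			return link_up ? SKETCH_AN_COMPLETE : SKETCH_DOWN;
		return SKETCH_FORCED_UP;	/* partner will not autoneg */
	case SKETCH_DOWN:
	default:
		return SKETCH_AN_PROGRESS;	/* regained sync: try autoneg */
	}
}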
+ */ + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & + ~E1000_CTRL_SLU)); + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + DEBUGOUT("DOWN -> AN_PROG\n"); + break; + } + } else { + if (!(rxcw & E1000_RXCW_SYNCH)) { + mac->serdes_has_link = false; + mac->serdes_link_state = e1000_serdes_link_down; + DEBUGOUT("ANYSTATE -> DOWN\n"); + } else { + /* Check several times, if SYNCH bit and CONFIG + * bit both are consistently 1 then simply ignore + * the IV bit and restart Autoneg + */ + for (i = 0; i < AN_RETRY_COUNT; i++) { + usec_delay(10); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + if ((rxcw & E1000_RXCW_SYNCH) && + (rxcw & E1000_RXCW_C)) + continue; + + if (rxcw & E1000_RXCW_IV) { + mac->serdes_has_link = false; + mac->serdes_link_state = + e1000_serdes_link_down; + DEBUGOUT("ANYSTATE -> DOWN\n"); + break; + } + } + + if (i == AN_RETRY_COUNT) { + txcw = E1000_READ_REG(hw, E1000_TXCW); + txcw |= E1000_TXCW_ANE; + E1000_WRITE_REG(hw, E1000_TXCW, txcw); + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + DEBUGOUT("ANYSTATE -> AN_PROG\n"); + } + } + } + + return ret_val; +} + +/** + * e1000_valid_led_default_82571 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +STATIC s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_valid_led_default_82571"); + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (*data == ID_LED_RESERVED_F746) + *data = ID_LED_DEFAULT_82573; + break; + default: + if (*data == ID_LED_RESERVED_0000 || + *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_get_laa_state_82571 - Get locally administered address state + * @hw: pointer to the HW structure + * + * Retrieve and return the current locally administered address state. + **/ +bool e1000_get_laa_state_82571(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_get_laa_state_82571"); + + if (hw->mac.type != e1000_82571) + return false; + + return hw->dev_spec._82571.laa_is_present; +} + +/** + * e1000_set_laa_state_82571 - Set locally administered address state + * @hw: pointer to the HW structure + * @state: enable/disable locally administered address + * + * Enable/Disable the current locally administered address state. + **/ +void e1000_set_laa_state_82571(struct e1000_hw *hw, bool state) +{ + DEBUGFUNC("e1000_set_laa_state_82571"); + + if (hw->mac.type != e1000_82571) + return; + + hw->dev_spec._82571.laa_is_present = state; + + /* If workaround is activated... */ + if (state) + /* Hold a copy of the LAA in RAR[14] This is done so that + * between the time RAR[0] gets clobbered and the time it + * gets fixed, the actual LAA is in one of the RARs and no + * incoming packets directed to this port are dropped. + * Eventually the LAA will be in RAR[0] and RAR[14]. 
+ */ + hw->mac.ops.rar_set(hw, hw->mac.addr, + hw->mac.rar_entry_count - 1); + return; +} + +/** + * e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum + * @hw: pointer to the HW structure + * + * Verifies that the EEPROM has completed the update. After updating the + * EEPROM, we need to check bit 15 in work 0x23 for the checksum fix. If + * the checksum fix is not implemented, we need to set the bit and update + * the checksum. Otherwise, if bit 15 is set and the checksum is incorrect, + * we need to return bad checksum. + **/ +STATIC s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_fix_nvm_checksum_82571"); + + if (nvm->type != e1000_nvm_flash_hw) + return E1000_SUCCESS; + + /* Check bit 4 of word 10h. If it is 0, firmware is done updating + * 10h-12h. Checksum may need to be fixed. + */ + ret_val = nvm->ops.read(hw, 0x10, 1, &data); + if (ret_val) + return ret_val; + + if (!(data & 0x10)) { + /* Read 0x23 and check bit 15. This bit is a 1 + * when the checksum has already been fixed. If + * the checksum is still wrong and this bit is a + * 1, we need to return bad checksum. Otherwise, + * we need to set this bit to a 1 and update the + * checksum. + */ + ret_val = nvm->ops.read(hw, 0x23, 1, &data); + if (ret_val) + return ret_val; + + if (!(data & 0x8000)) { + data |= 0x8000; + ret_val = nvm->ops.write(hw, 0x23, 1, &data); + if (ret_val) + return ret_val; + ret_val = nvm->ops.update(hw); + if (ret_val) + return ret_val; + } + } + + return E1000_SUCCESS; +} + + +/** + * e1000_read_mac_addr_82571 - Read device MAC address + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_read_mac_addr_82571(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_read_mac_addr_82571"); + + if (hw->mac.type == e1000_82571) { + s32 ret_val; + + /* If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. + */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + return ret_val; + } + + return e1000_read_mac_addr_generic(hw); +} + +/** + * e1000_power_down_phy_copper_82571 - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +STATIC void e1000_power_down_phy_copper_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + struct e1000_mac_info *mac = &hw->mac; + + if (!phy->ops.check_reset_block) + return; + + /* If the management interface is not enabled, then power down */ + if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); + + return; +} + +/** + * e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. 
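/*
 * Sketch of the two NVM flag tests performed by e1000_fix_nvm_checksum_82571
 * above, using the word offsets and bit values that appear in that code:
 * bit 4 of word 0x10 still set means firmware is updating words 10h-12h,
 * and bit 15 of word 0x23 records that the checksum fix was already applied.
 */
#include <stdbool.h>
#include <stdint.h>

static bool sketch_fw_still_updating(uint16_t word_0x10)
{
	return (word_0x10 & 0x0010) != 0;	/* bit 4 */
}

static bool sketch_checksum_fix_applied(uint16_t word_0x23)
{
	return (word_0x23 & 0x8000) != 0;	/* bit 15 */
}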
+ **/ +STATIC void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_82571"); + + e1000_clear_hw_cntrs_base_generic(hw); + + E1000_READ_REG(hw, E1000_PRC64); + E1000_READ_REG(hw, E1000_PRC127); + E1000_READ_REG(hw, E1000_PRC255); + E1000_READ_REG(hw, E1000_PRC511); + E1000_READ_REG(hw, E1000_PRC1023); + E1000_READ_REG(hw, E1000_PRC1522); + E1000_READ_REG(hw, E1000_PTC64); + E1000_READ_REG(hw, E1000_PTC127); + E1000_READ_REG(hw, E1000_PTC255); + E1000_READ_REG(hw, E1000_PTC511); + E1000_READ_REG(hw, E1000_PTC1023); + E1000_READ_REG(hw, E1000_PTC1522); + + E1000_READ_REG(hw, E1000_ALGNERRC); + E1000_READ_REG(hw, E1000_RXERRC); + E1000_READ_REG(hw, E1000_TNCRS); + E1000_READ_REG(hw, E1000_CEXTERR); + E1000_READ_REG(hw, E1000_TSCTC); + E1000_READ_REG(hw, E1000_TSCTFC); + + E1000_READ_REG(hw, E1000_MGTPRC); + E1000_READ_REG(hw, E1000_MGTPDC); + E1000_READ_REG(hw, E1000_MGTPTC); + + E1000_READ_REG(hw, E1000_IAC); + E1000_READ_REG(hw, E1000_ICRXOC); + + E1000_READ_REG(hw, E1000_ICRXPTC); + E1000_READ_REG(hw, E1000_ICRXATC); + E1000_READ_REG(hw, E1000_ICTXPTC); + E1000_READ_REG(hw, E1000_ICTXATC); + E1000_READ_REG(hw, E1000_ICTXQEC); + E1000_READ_REG(hw, E1000_ICTXQMTC); + E1000_READ_REG(hw, E1000_ICRXDMTC); +} diff --git a/drivers/net/e1000/base/e1000_82571.h b/drivers/net/e1000/base/e1000_82571.h new file mode 100644 index 00000000..c8037b61 --- /dev/null +++ b/drivers/net/e1000/base/e1000_82571.h @@ -0,0 +1,65 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#ifndef _E1000_82571_H_ +#define _E1000_82571_H_ + +#define ID_LED_RESERVED_F746 0xF746 +#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */ + +/* Intr Throttling - RW */ +#define E1000_EITR_82574(_n) (0x000E8 + (0x4 * (_n))) + +#define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAC_MASK_82574 0x01F00000 + +#define E1000_IVAR_INT_ALLOC_VALID 0x8 + +/* Manageability Operation Mode mask */ +#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 + +#define E1000_BASE1000T_STATUS 10 +#define E1000_IDLE_ERROR_COUNT_MASK 0xFF +#define E1000_RECEIVE_ERROR_COUNTER 21 +#define E1000_RECEIVE_ERROR_MAX 0xFFFF +bool e1000_check_phy_82574(struct e1000_hw *hw); +bool e1000_get_laa_state_82571(struct e1000_hw *hw); +void e1000_set_laa_state_82571(struct e1000_hw *hw, bool state); + +#endif diff --git a/drivers/net/e1000/base/e1000_82575.c b/drivers/net/e1000/base/e1000_82575.c new file mode 100644 index 00000000..723885d7 --- /dev/null +++ b/drivers/net/e1000/base/e1000_82575.c @@ -0,0 +1,3778 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +/* + * 82575EB Gigabit Network Connection + * 82575EB Gigabit Backplane Connection + * 82575GB Gigabit Network Connection + * 82576 Gigabit Network Connection + * 82576 Quad Port Gigabit Mezzanine Adapter + * 82580 Gigabit Network Connection + * I350 Gigabit Network Connection + */ + +#include "e1000_api.h" +#include "e1000_i210.h" + +STATIC s32 e1000_init_phy_params_82575(struct e1000_hw *hw); +STATIC s32 e1000_init_mac_params_82575(struct e1000_hw *hw); +STATIC s32 e1000_acquire_phy_82575(struct e1000_hw *hw); +STATIC void e1000_release_phy_82575(struct e1000_hw *hw); +STATIC s32 e1000_acquire_nvm_82575(struct e1000_hw *hw); +STATIC void e1000_release_nvm_82575(struct e1000_hw *hw); +STATIC s32 e1000_check_for_link_82575(struct e1000_hw *hw); +STATIC s32 e1000_check_for_link_media_swap(struct e1000_hw *hw); +STATIC s32 e1000_get_cfg_done_82575(struct e1000_hw *hw); +STATIC s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +STATIC s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw); +STATIC s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 *data); +STATIC s32 e1000_reset_hw_82575(struct e1000_hw *hw); +STATIC s32 e1000_reset_hw_82580(struct e1000_hw *hw); +STATIC s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, + u32 offset, u16 *data); +STATIC s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, + u32 offset, u16 data); +STATIC s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, + bool active); +STATIC s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, + bool active); +STATIC s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, + bool active); +STATIC s32 e1000_setup_copper_link_82575(struct e1000_hw *hw); +STATIC s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw); +STATIC s32 e1000_get_media_type_82575(struct e1000_hw *hw); +STATIC s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw); +STATIC s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data); +STATIC s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, + u32 offset, u16 data); +STATIC void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw); +STATIC s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask); +STATIC s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, + u16 *speed, u16 *duplex); +STATIC s32 e1000_get_phy_id_82575(struct e1000_hw *hw); +STATIC void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask); +STATIC bool e1000_sgmii_active_82575(struct e1000_hw *hw); +STATIC s32 e1000_reset_init_script_82575(struct e1000_hw *hw); +STATIC s32 e1000_read_mac_addr_82575(struct e1000_hw *hw); +STATIC void e1000_config_collision_dist_82575(struct e1000_hw *hw); +STATIC void e1000_power_down_phy_copper_82575(struct e1000_hw *hw); +STATIC void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw); +STATIC void e1000_power_up_serdes_link_82575(struct e1000_hw *hw); +STATIC s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw); +STATIC s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw); +STATIC s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw); +STATIC s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw); +STATIC s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, + u16 offset); +STATIC s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, + u16 offset); +STATIC s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw); +STATIC s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw); +STATIC void 
e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value); +STATIC void e1000_clear_vfta_i350(struct e1000_hw *hw); + +STATIC void e1000_i2c_start(struct e1000_hw *hw); +STATIC void e1000_i2c_stop(struct e1000_hw *hw); +STATIC s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data); +STATIC s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data); +STATIC s32 e1000_get_i2c_ack(struct e1000_hw *hw); +STATIC s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data); +STATIC s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data); +STATIC void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl); +STATIC void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl); +STATIC s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data); +STATIC bool e1000_get_i2c_data(u32 *i2cctl); + +STATIC const u16 e1000_82580_rxpbs_table[] = { + 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; +#define E1000_82580_RXPBS_TABLE_SIZE \ + (sizeof(e1000_82580_rxpbs_table) / \ + sizeof(e1000_82580_rxpbs_table[0])) + + +/** + * e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO + * @hw: pointer to the HW structure + * + * Called to determine if the I2C pins are being used for I2C or as an + * external MDIO interface since the two options are mutually exclusive. + **/ +STATIC bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw) +{ + u32 reg = 0; + bool ext_mdio = false; + + DEBUGFUNC("e1000_sgmii_uses_mdio_82575"); + + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + reg = E1000_READ_REG(hw, E1000_MDIC); + ext_mdio = !!(reg & E1000_MDIC_DEST); + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + reg = E1000_READ_REG(hw, E1000_MDICNFG); + ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); + break; + default: + break; + } + return ext_mdio; +} + +/** + * e1000_init_phy_params_82575 - Init PHY func ptrs. 
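/*
 * Sketch of the sizeof-based table sizing used for e1000_82580_rxpbs_table
 * above, wrapped in a bounds-checked lookup.  The table contents are copied
 * from the patch; the lookup helper and its out-of-range behaviour are
 * illustrative only.
 */
#include <stdint.h>

static const uint16_t sketch_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
#define SKETCH_82580_RXPBS_TABLE_SIZE \
	(sizeof(sketch_82580_rxpbs_table) / sizeof(sketch_82580_rxpbs_table[0]))

static uint16_t sketch_rxpbs_lookup(uint32_t encoded)
{
	if (encoded < SKETCH_82580_RXPBS_TABLE_SIZE)
		return sketch_82580_rxpbs_table[encoded];
	return 0;	/* unknown encodings map to 0 in this sketch */
}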
+ * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_phy_params_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u32 ctrl_ext; + + DEBUGFUNC("e1000_init_phy_params_82575"); + + phy->ops.read_i2c_byte = e1000_read_i2c_byte_generic; + phy->ops.write_i2c_byte = e1000_write_i2c_byte_generic; + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + goto out; + } + + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_82575; + + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + phy->ops.acquire = e1000_acquire_phy_82575; + phy->ops.check_reset_block = e1000_check_reset_block_generic; + phy->ops.commit = e1000_phy_sw_reset_generic; + phy->ops.get_cfg_done = e1000_get_cfg_done_82575; + phy->ops.release = e1000_release_phy_82575; + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + + if (e1000_sgmii_active_82575(hw)) { + phy->ops.reset = e1000_phy_hw_reset_sgmii_82575; + ctrl_ext |= E1000_CTRL_I2C_ENA; + } else { + phy->ops.reset = e1000_phy_hw_reset_generic; + ctrl_ext &= ~E1000_CTRL_I2C_ENA; + } + + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + e1000_reset_mdicnfg_82580(hw); + + if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) { + phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575; + phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575; + } else { + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i354: + phy->ops.read_reg = e1000_read_phy_reg_82580; + phy->ops.write_reg = e1000_write_phy_reg_82580; + break; + case e1000_i210: + case e1000_i211: + phy->ops.read_reg = e1000_read_phy_reg_gs40g; + phy->ops.write_reg = e1000_write_phy_reg_gs40g; + break; + default: + phy->ops.read_reg = e1000_read_phy_reg_igp; + phy->ops.write_reg = e1000_write_phy_reg_igp; + } + } + + /* Set phy->phy_addr and phy->id. */ + ret_val = e1000_get_phy_id_82575(hw); + + /* Verify phy id and set remaining function pointers */ + switch (phy->id) { + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1340M_E_PHY_ID: + case M88E1111_I_PHY_ID: + phy->type = e1000_phy_m88; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.get_info = e1000_get_phy_info_m88; + if (phy->id == I347AT4_E_PHY_ID || + phy->id == M88E1112_E_PHY_ID || + phy->id == M88E1340M_E_PHY_ID) + phy->ops.get_cable_length = + e1000_get_cable_length_m88_gen2; + else if (phy->id == M88E1543_E_PHY_ID || + phy->id == M88E1512_E_PHY_ID) + phy->ops.get_cable_length = + e1000_get_cable_length_m88_gen2; + else + phy->ops.get_cable_length = e1000_get_cable_length_m88; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + /* Check if this PHY is confgured for media swap. 
*/ + if (phy->id == M88E1112_E_PHY_ID) { + u16 data; + + ret_val = phy->ops.write_reg(hw, + E1000_M88E1112_PAGE_ADDR, + 2); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, + E1000_M88E1112_MAC_CTRL_1, + &data); + if (ret_val) + goto out; + + data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >> + E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT; + if (data == E1000_M88E1112_AUTO_COPPER_SGMII || + data == E1000_M88E1112_AUTO_COPPER_BASEX) + hw->mac.ops.check_for_link = + e1000_check_for_link_media_swap; + } + if (phy->id == M88E1512_E_PHY_ID) { + ret_val = e1000_initialize_M88E1512_phy(hw); + if (ret_val) + goto out; + } + if (phy->id == M88E1543_E_PHY_ID) { + ret_val = e1000_initialize_M88E1543_phy(hw); + if (ret_val) + goto out; + } + break; + case IGP03E1000_E_PHY_ID: + case IGP04E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->ops.check_polarity = e1000_check_polarity_igp; + phy->ops.get_info = e1000_get_phy_info_igp; + phy->ops.get_cable_length = e1000_get_cable_length_igp_2; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic; + break; + case I82580_I_PHY_ID: + case I350_I_PHY_ID: + phy->type = e1000_phy_82580; + phy->ops.check_polarity = e1000_check_polarity_82577; + phy->ops.force_speed_duplex = + e1000_phy_force_speed_duplex_82577; + phy->ops.get_cable_length = e1000_get_cable_length_82577; + phy->ops.get_info = e1000_get_phy_info_82577; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; + break; + case I210_I_PHY_ID: + phy->type = e1000_phy_i210; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.get_info = e1000_get_phy_info_m88; + phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params_82575 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +s32 e1000_init_nvm_params_82575(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u16 size; + + DEBUGFUNC("e1000_init_nvm_params_82575"); + + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + /* + * Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* Just in case size is out of range, cap it to the largest + * EEPROM size supported + */ + if (size > 15) + size = 15; + + nvm->word_size = 1 << size; + if (hw->mac.type < e1000_i210) { + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 
+ 16 : 8; + break; + } + if (nvm->word_size == (1 << 15)) + nvm->page_size = 128; + + nvm->type = e1000_nvm_eeprom_spi; + } else { + nvm->type = e1000_nvm_flash_hw; + } + + /* Function Pointers */ + nvm->ops.acquire = e1000_acquire_nvm_82575; + nvm->ops.release = e1000_release_nvm_82575; + if (nvm->word_size < (1 << 15)) + nvm->ops.read = e1000_read_nvm_eerd; + else + nvm->ops.read = e1000_read_nvm_spi; + + nvm->ops.write = e1000_write_nvm_spi; + nvm->ops.validate = e1000_validate_nvm_checksum_generic; + nvm->ops.update = e1000_update_nvm_checksum_generic; + nvm->ops.valid_led_default = e1000_valid_led_default_82575; + + /* override generic family function pointers for specific descendants */ + switch (hw->mac.type) { + case e1000_82580: + nvm->ops.validate = e1000_validate_nvm_checksum_82580; + nvm->ops.update = e1000_update_nvm_checksum_82580; + break; + case e1000_i350: + case e1000_i354: + nvm->ops.validate = e1000_validate_nvm_checksum_i350; + nvm->ops.update = e1000_update_nvm_checksum_i350; + break; + default: + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_82575 - Init MAC func ptrs. + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_mac_params_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + + DEBUGFUNC("e1000_init_mac_params_82575"); + + /* Derives media type */ + e1000_get_media_type_82575(hw); + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set uta register count */ + mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES_82575; + if (mac->type == e1000_82576) + mac->rar_entry_count = E1000_RAR_ENTRIES_82576; + if (mac->type == e1000_82580) + mac->rar_entry_count = E1000_RAR_ENTRIES_82580; + if (mac->type == e1000_i350 || mac->type == e1000_i354) + mac->rar_entry_count = E1000_RAR_ENTRIES_I350; + + /* Enable EEE default settings for EEE supported devices */ + if (mac->type >= e1000_i350) + dev_spec->eee_disable = false; + + /* Allow a single clear of the SW semaphore on I210 and newer */ + if (mac->type >= e1000_i210) + dev_spec->clear_semaphore_once = true; + + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; + /* FWSM register */ + mac->has_fwsm = true; + /* ARC supported; valid only if manageability features are enabled. */ + mac->arc_subsystem_valid = + !!(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK); + + /* Function pointers */ + + /* bus type/speed/width */ + mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic; + /* reset */ + if (mac->type >= e1000_82580) + mac->ops.reset_hw = e1000_reset_hw_82580; + else + mac->ops.reset_hw = e1000_reset_hw_82575; + /* hw initialization */ + if ((mac->type == e1000_i210) || (mac->type == e1000_i211)) + mac->ops.init_hw = e1000_init_hw_i210; + else + mac->ops.init_hw = e1000_init_hw_82575; + /* link setup */ + mac->ops.setup_link = e1000_setup_link_generic; + /* physical interface link setup */ + mac->ops.setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) + ? 
e1000_setup_copper_link_82575 : e1000_setup_serdes_link_82575; + /* physical interface shutdown */ + mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575; + /* physical interface power up */ + mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575; + /* check for link */ + mac->ops.check_for_link = e1000_check_for_link_82575; + /* read mac address */ + mac->ops.read_mac_addr = e1000_read_mac_addr_82575; + /* configure collision distance */ + mac->ops.config_collision_dist = e1000_config_collision_dist_82575; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; + if (hw->mac.type == e1000_i350 || mac->type == e1000_i354) { + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_i350; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_i350; + } else { + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_generic; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_generic; + } + if (hw->mac.type >= e1000_82580) + mac->ops.validate_mdi_setting = + e1000_validate_mdi_setting_crossover_generic; + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_generic; + /* blink LED */ + mac->ops.blink_led = e1000_blink_led_generic; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_generic; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_generic; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_generic; + mac->ops.led_off = e1000_led_off_generic; + /* clear hardware counters */ + mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575; + /* link info */ + mac->ops.get_link_up_info = e1000_get_link_up_info_82575; + /* acquire SW_FW sync */ + mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_82575; + mac->ops.release_swfw_sync = e1000_release_swfw_sync_82575; + if (mac->type >= e1000_i210) { + mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_i210; + mac->ops.release_swfw_sync = e1000_release_swfw_sync_i210; + } + + /* set lan id for port to determine which phy lock to use */ + hw->mac.ops.set_lan_id(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_init_function_pointers_82575 - Init func ptrs. + * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + **/ +void e1000_init_function_pointers_82575(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_82575"); + + hw->mac.ops.init_params = e1000_init_mac_params_82575; + hw->nvm.ops.init_params = e1000_init_nvm_params_82575; + hw->phy.ops.init_params = e1000_init_phy_params_82575; + hw->mbx.ops.init_params = e1000_init_mbx_params_pf; +} + +/** + * e1000_acquire_phy_82575 - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * Acquire access rights to the correct PHY. + **/ +STATIC s32 e1000_acquire_phy_82575(struct e1000_hw *hw) +{ + u16 mask = E1000_SWFW_PHY0_SM; + + DEBUGFUNC("e1000_acquire_phy_82575"); + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; + + return hw->mac.ops.acquire_swfw_sync(hw, mask); +} + +/** + * e1000_release_phy_82575 - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. 
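 *
 * Illustrative pairing (sketch, not from the original code): callers
 * bracket PHY access with the acquire/release pair installed above, e.g.
 *
 *   ret_val = hw->phy.ops.acquire(hw);
 *   if (ret_val == E1000_SUCCESS) {
 *           ... touch the PHY ...
 *           hw->phy.ops.release(hw);
 *   }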
+ **/ +STATIC void e1000_release_phy_82575(struct e1000_hw *hw) +{ + u16 mask = E1000_SWFW_PHY0_SM; + + DEBUGFUNC("e1000_release_phy_82575"); + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; + + hw->mac.ops.release_swfw_sync(hw, mask); +} + +/** + * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the serial gigabit media independent + * interface and stores the retrieved information in data. + **/ +STATIC s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + s32 ret_val = -E1000_ERR_PARAM; + + DEBUGFUNC("e1000_read_phy_reg_sgmii_82575"); + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { + DEBUGOUT1("PHY Address %u is out of range\n", offset); + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = e1000_read_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the serial gigabit + * media independent interface. + **/ +STATIC s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 data) +{ + s32 ret_val = -E1000_ERR_PARAM; + + DEBUGFUNC("e1000_write_phy_reg_sgmii_82575"); + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = e1000_write_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_get_phy_id_82575 - Retrieve PHY addr and id + * @hw: pointer to the HW structure + * + * Retrieves the PHY address and ID for both PHY's which do and do not use + * sgmi interface. + **/ +STATIC s32 e1000_get_phy_id_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 phy_id; + u32 ctrl_ext; + u32 mdic; + + DEBUGFUNC("e1000_get_phy_id_82575"); + + /* some i354 devices need an extra read for phy id */ + if (hw->mac.type == e1000_i354) + e1000_get_phy_id(hw); + + /* + * For SGMII PHYs, we try the list of possible addresses until + * we find one that works. For non-SGMII PHYs + * (e.g. integrated copper PHYs), an address of 1 should + * work. The result of this function should mean phy->phy_addr + * and phy->id are set correctly. 
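 * For illustration: once this succeeds, e1000_init_phy_params_82575()
 * switches on hw->phy.id (M88E1543, M88E1512, I210, IGP03E1000,
 * I82580/I350, ...) to select the remaining PHY function pointers.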
+ */ + if (!e1000_sgmii_active_82575(hw)) { + phy->addr = 1; + ret_val = e1000_get_phy_id(hw); + goto out; + } + + if (e1000_sgmii_uses_mdio_82575(hw)) { + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + mdic = E1000_READ_REG(hw, E1000_MDIC); + mdic &= E1000_MDIC_PHY_MASK; + phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + mdic = E1000_READ_REG(hw, E1000_MDICNFG); + mdic &= E1000_MDICNFG_PHY_MASK; + phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + break; + } + ret_val = e1000_get_phy_id(hw); + goto out; + } + + /* Power on sgmii phy if it is disabled */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); + E1000_WRITE_FLUSH(hw); + msec_delay(300); + + /* + * The address field in the I2CCMD register is 3 bits and 0 is invalid. + * Therefore, we need to test 1-7 + */ + for (phy->addr = 1; phy->addr < 8; phy->addr++) { + ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); + if (ret_val == E1000_SUCCESS) { + DEBUGOUT2("Vendor ID 0x%08X read at address %u\n", + phy_id, phy->addr); + /* + * At the time of this writing, The M88 part is + * the only supported SGMII PHY product. + */ + if (phy_id == M88_VENDOR) + break; + } else { + DEBUGOUT1("PHY address %u was unreadable\n", + phy->addr); + } + } + + /* A valid PHY type couldn't be found. */ + if (phy->addr == 8) { + phy->addr = 0; + ret_val = -E1000_ERR_PHY; + } else { + ret_val = e1000_get_phy_id(hw); + } + + /* restore previous sfp cage power state */ + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + +out: + return ret_val; +} + +/** + * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset + * @hw: pointer to the HW structure + * + * Resets the PHY using the serial gigabit media independent interface. + **/ +STATIC s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + struct e1000_phy_info *phy = &hw->phy; + + DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575"); + + /* + * This isn't a true "hard" reset, but is the only reset + * available to us at this time. + */ + + DEBUGOUT("Soft resetting SGMII attached PHY...\n"); + + if (!(hw->phy.ops.write_reg)) + goto out; + + /* + * SFP documentation requires the following to configure the SPF module + * to work on SGMII. No further documentation is given. + */ + ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.commit(hw); + if (ret_val) + goto out; + + if (phy->id == M88E1512_E_PHY_ID) + ret_val = e1000_initialize_M88E1512_phy(hw); +out: + return ret_val; +} + +/** + * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. 
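 *
 * Illustrative call site (hedged; the actual callers are outside this
 * file): setup code typically reaches it through the ops table filled in
 * by e1000_init_phy_params_82575(), e.g.
 *
 *   if (hw->phy.ops.set_d0_lplu_state)
 *           ret_val = hw->phy.ops.set_d0_lplu_state(hw, true);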
+ **/ +STATIC s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 data; + + DEBUGFUNC("e1000_set_d0_lplu_state_82575"); + + if (!(hw->phy.ops.read_reg)) + goto out; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } + } + +out: + return ret_val; +} + +/** + * e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +STATIC s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 data; + + DEBUGFUNC("e1000_set_d0_lplu_state_82580"); + + data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + + if (active) { + data |= E1000_82580_PM_D0_LPLU; + + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } else { + data &= ~E1000_82580_PM_D0_LPLU; + + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
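 * In register terms this means E1000_82580_PM_D0_LPLU stays cleared while
 * E1000_82580_PM_SPD is set or cleared according to phy->smart_speed,
 * before the single write to E1000_82580_PHY_POWER_MGMT below.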
+ */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; + } + + E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data); + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 data; + + DEBUGFUNC("e1000_set_d3_lplu_state_82580"); + + data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + + if (!active) { + data &= ~E1000_82580_PM_D3_LPLU; + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= E1000_82580_PM_D3_LPLU; + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } + + E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data); + return E1000_SUCCESS; +} + +/** + * e1000_acquire_nvm_82575 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). 
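 *
 * Illustrative pairing (sketch): e1000_init_nvm_params_82575() installs
 * this as nvm->ops.acquire, so EEPROM access is bracketed as
 *
 *   ret_val = hw->nvm.ops.acquire(hw);
 *   if (ret_val == E1000_SUCCESS) {
 *           ... access the EEPROM ...
 *           hw->nvm.ops.release(hw);
 *   }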
+ **/ +STATIC s32 e1000_acquire_nvm_82575(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_acquire_nvm_82575"); + + ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + if (ret_val) + goto out; + + /* + * Check if there is some access + * error this access may hook on + */ + if (hw->mac.type == e1000_i350) { + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT | + E1000_EECD_TIMEOUT)) { + /* Clear all access error flags */ + E1000_WRITE_REG(hw, E1000_EECD, eecd | + E1000_EECD_ERROR_CLR); + DEBUGOUT("Nvm bit banging access error detected and cleared.\n"); + } + } + + if (hw->mac.type == e1000_82580) { + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + if (eecd & E1000_EECD_BLOCKED) { + /* Clear access error flag */ + E1000_WRITE_REG(hw, E1000_EECD, eecd | + E1000_EECD_BLOCKED); + DEBUGOUT("Nvm bit banging access error detected and cleared.\n"); + } + } + + ret_val = e1000_acquire_nvm_generic(hw); + if (ret_val) + e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + +out: + return ret_val; +} + +/** + * e1000_release_nvm_82575 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +STATIC void e1000_release_nvm_82575(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_release_nvm_82575"); + + e1000_release_nvm_generic(hw); + + e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); +} + +/** + * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + **/ +STATIC s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = E1000_SUCCESS; + s32 i = 0, timeout = 200; + + DEBUGFUNC("e1000_acquire_swfw_sync_82575"); + + while (i < timeout) { + if (e1000_get_hw_semaphore_generic(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* + * Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + e1000_put_hw_semaphore_generic(hw); + msec_delay_irq(5); + i++; + } + + if (i == timeout) { + DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_release_swfw_sync_82575 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. 
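 *
 * Illustrative pairing (sketch): the mask passed here must match the one
 * given to e1000_acquire_swfw_sync_82575(), as the NVM path above does
 * with E1000_SWFW_EEP_SM:
 *
 *   ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
 *   if (ret_val == E1000_SUCCESS) {
 *           ... access the shared resource ...
 *           e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
 *   }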
+ **/ +STATIC void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + DEBUGFUNC("e1000_release_swfw_sync_82575"); + + while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS) + ; /* Empty */ + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); +} + +/** + * e1000_get_cfg_done_82575 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns + * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. + **/ +STATIC s32 e1000_get_cfg_done_82575(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + DEBUGFUNC("e1000_get_cfg_done_82575"); + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_NVM_CFG_DONE_PORT_1; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_NVM_CFG_DONE_PORT_2; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_NVM_CFG_DONE_PORT_3; + while (timeout) { + if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask) + break; + msec_delay(1); + timeout--; + } + if (!timeout) + DEBUGOUT("MNG configuration cycle has not completed.\n"); + + /* If EEPROM is not marked present, init the PHY manually */ + if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) && + (hw->phy.type == e1000_phy_igp_3)) + e1000_phy_init_script_igp3(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_get_link_up_info_82575 - Get link speed/duplex info + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * This is a wrapper function, if using the serial gigabit media independent + * interface, use PCS to retrieve the link speed and duplex information. + * Otherwise, use the generic function to get the link speed and duplex info. + **/ +STATIC s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + DEBUGFUNC("e1000_get_link_up_info_82575"); + + if (hw->phy.media_type != e1000_media_type_copper) + ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed, + duplex); + else + ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, + duplex); + + return ret_val; +} + +/** + * e1000_check_for_link_82575 - Check for link + * @hw: pointer to the HW structure + * + * If sgmii is enabled, then use the pcs register to determine link, otherwise + * use the generic interface for determining link. + **/ +STATIC s32 e1000_check_for_link_82575(struct e1000_hw *hw) +{ + s32 ret_val; + u16 speed, duplex; + + DEBUGFUNC("e1000_check_for_link_82575"); + + if (hw->phy.media_type != e1000_media_type_copper) { + ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed, + &duplex); + /* + * Use this flag to determine if link needs to be checked or + * not. If we have link clear the flag so that we do not + * continue to check for link. + */ + hw->mac.get_link_status = !hw->mac.serdes_has_link; + + /* + * Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. 
+ */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) + DEBUGOUT("Error configuring flow control\n"); + } else { + ret_val = e1000_check_for_copper_link_generic(hw); + } + + return ret_val; +} + +/** + * e1000_check_for_link_media_swap - Check which M88E1112 interface linked + * @hw: pointer to the HW structure + * + * Poll the M88E1112 interfaces to see which interface achieved link. + */ +STATIC s32 e1000_check_for_link_media_swap(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + u8 port = 0; + + DEBUGFUNC("e1000_check_for_link_media_swap"); + + /* Check for copper. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_COPPER; + + /* Check for other. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_OTHER; + + /* Determine if a swap needs to happen. */ + if (port && (hw->dev_spec._82575.media_port != port)) { + hw->dev_spec._82575.media_port = port; + hw->dev_spec._82575.media_changed = true; + } + + if (port == E1000_MEDIA_PORT_COPPER) { + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + e1000_check_for_link_82575(hw); + } else { + e1000_check_for_link_82575(hw); + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown + * @hw: pointer to the HW structure + **/ +STATIC void e1000_power_up_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + DEBUGFUNC("e1000_power_up_serdes_link_82575"); + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !e1000_sgmii_active_82575(hw)) + return; + + /* Enable PCS to turn on link */ + reg = E1000_READ_REG(hw, E1000_PCS_CFG0); + reg |= E1000_PCS_CFG_PCS_EN; + E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); + + /* Power up the laser */ + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg &= ~E1000_CTRL_EXT_SDP3_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + E1000_WRITE_FLUSH(hw); + msec_delay(1); +} + +/** + * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Using the physical coding sub-layer (PCS), retrieve the current speed and + * duplex, then store the values in the pointers provided. + **/ +STATIC s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, + u16 *speed, u16 *duplex) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 pcs; + u32 status; + + DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575"); + + /* + * Read the PCS Status register for link state. For non-copper mode, + * the status register is not accurate. The PCS status register is + * used instead. + */ + pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT); + + /* + * The link up bit determines when link is up on autoneg. 
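 * For example, a PCS_LSTAT value with E1000_PCS_LSTS_LINK_OK,
 * E1000_PCS_LSTS_SPEED_1000 and E1000_PCS_LSTS_DUPLEX_FULL set yields
 * SPEED_1000 / FULL_DUPLEX below; without LINK_OK, serdes_has_link is
 * cleared and both outputs are zeroed.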
+ */ + if (pcs & E1000_PCS_LSTS_LINK_OK) { + mac->serdes_has_link = true; + + /* Detect and store PCS speed */ + if (pcs & E1000_PCS_LSTS_SPEED_1000) + *speed = SPEED_1000; + else if (pcs & E1000_PCS_LSTS_SPEED_100) + *speed = SPEED_100; + else + *speed = SPEED_10; + + /* Detect and store PCS duplex */ + if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + /* Check if it is an I354 2.5Gb backplane connection. */ + if (mac->type == e1000_i354) { + status = E1000_READ_REG(hw, E1000_STATUS); + if ((status & E1000_STATUS_2P5_SKU) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + *speed = SPEED_2500; + *duplex = FULL_DUPLEX; + DEBUGOUT("2500 Mbs, "); + DEBUGOUT("Full Duplex\n"); + } + } + + } else { + mac->serdes_has_link = false; + *speed = 0; + *duplex = 0; + } + + return E1000_SUCCESS; +} + +/** + * e1000_shutdown_serdes_link_82575 - Remove link during power down + * @hw: pointer to the HW structure + * + * In the case of serdes shut down sfp and PCS on driver unload + * when management pass thru is not enabled. + **/ +void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + DEBUGFUNC("e1000_shutdown_serdes_link_82575"); + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !e1000_sgmii_active_82575(hw)) + return; + + if (!e1000_enable_mng_pass_thru(hw)) { + /* Disable PCS to turn off link */ + reg = E1000_READ_REG(hw, E1000_PCS_CFG0); + reg &= ~E1000_PCS_CFG_PCS_EN; + E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); + + /* shutdown the laser */ + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg |= E1000_CTRL_EXT_SDP3_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + E1000_WRITE_FLUSH(hw); + msec_delay(1); + } + + return; +} + +/** + * e1000_reset_hw_82575 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. + **/ +STATIC s32 e1000_reset_hw_82575(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + DEBUGFUNC("e1000_reset_hw_82575"); + + /* + * Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000_disable_pcie_master_generic(hw); + if (ret_val) + DEBUGOUT("PCI-E Master disable polling has failed.\n"); + + /* set the completion timeout for interface */ + ret_val = e1000_set_pcie_completion_timeout(hw); + if (ret_val) + DEBUGOUT("PCI-E Set completion timeout has failed.\n"); + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + msec_delay(10); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGOUT("Issuing a global reset to MAC\n"); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + + ret_val = e1000_get_auto_rd_done_generic(hw); + if (ret_val) { + /* + * When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + DEBUGOUT("Auto Read Done did not complete\n"); + } + + /* If EEPROM is not present, run manual init scripts */ + if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES)) + e1000_reset_init_script_82575(hw); + + /* Clear any pending interrupt events. 
*/ + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); + + /* Install any alternate MAC address into RAR0 */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + + return ret_val; +} + +/** + * e1000_init_hw_82575 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +s32 e1000_init_hw_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + + DEBUGFUNC("e1000_init_hw_82575"); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + if (ret_val) { + DEBUGOUT("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + } + + /* Disabling VLAN filtering */ + DEBUGOUT("Initializing the IEEE VLAN\n"); + mac->ops.clear_vfta(hw); + + /* Setup the receive address */ + e1000_init_rx_addrs_generic(hw, rar_count); + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Zero out the Unicast HASH table */ + DEBUGOUT("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0); + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + + /* Set the default MTU size */ + hw->dev_spec._82575.mtu = 1500; + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82575(hw); + + return ret_val; +} + +/** + * e1000_setup_copper_link_82575 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. 
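 *
 * Illustrative call path (hedged; the generic caller lives outside this
 * file): this is the setup_physical_interface handler installed for
 * copper media by e1000_init_mac_params_82575(), so link setup reaches
 * it as
 *
 *   ret_val = hw->mac.ops.setup_physical_interface(hw);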
+ **/ +STATIC s32 e1000_setup_copper_link_82575(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u32 phpm_reg; + + DEBUGFUNC("e1000_setup_copper_link_82575"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Clear Go Link Disconnect bit on supported devices */ + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i210: + case e1000_i211: + phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + phpm_reg &= ~E1000_82580_PM_GO_LINKD; + E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg); + break; + default: + break; + } + + ret_val = e1000_setup_serdes_link_82575(hw); + if (ret_val) + goto out; + + if (e1000_sgmii_active_82575(hw)) { + /* allow time for SFP cage time to power up phy */ + msec_delay(300); + + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + DEBUGOUT("Error resetting the PHY.\n"); + goto out; + } + } + switch (hw->phy.type) { + case e1000_phy_i210: + case e1000_phy_m88: + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1340M_E_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I210_I_PHY_ID: + ret_val = e1000_copper_link_setup_m88_gen2(hw); + break; + default: + ret_val = e1000_copper_link_setup_m88(hw); + break; + } + break; + case e1000_phy_igp_3: + ret_val = e1000_copper_link_setup_igp(hw); + break; + case e1000_phy_82580: + ret_val = e1000_copper_link_setup_82577(hw); + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + if (ret_val) + goto out; + + ret_val = e1000_setup_copper_link_generic(hw); +out: + return ret_val; +} + +/** + * e1000_setup_serdes_link_82575 - Setup link for serdes + * @hw: pointer to the HW structure + * + * Configure the physical coding sub-layer (PCS) link. The PCS link is + * used on copper connections where the serialized gigabit media independent + * interface (sgmii), or serdes fiber is being used. Configures the link + * for auto-negotiation or forces speed/duplex. + **/ +STATIC s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw) +{ + u32 ctrl_ext, ctrl_reg, reg, anadv_reg; + bool pcs_autoneg; + s32 ret_val = E1000_SUCCESS; + u16 data; + + DEBUGFUNC("e1000_setup_serdes_link_82575"); + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !e1000_sgmii_active_82575(hw)) + return ret_val; + + /* + * On the 82575, SerDes loopback mode persists until it is + * explicitly turned off or a power cycle is performed. A read to + * the register does not indicate its status. Therefore, we ensure + * loopback mode is disabled during initialization. 
+ */ + E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + + /* power on the sfp cage if present */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + + ctrl_reg = E1000_READ_REG(hw, E1000_CTRL); + ctrl_reg |= E1000_CTRL_SLU; + + /* set both sw defined pins on 82575/82576*/ + if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) + ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; + + reg = E1000_READ_REG(hw, E1000_PCS_LCTL); + + /* default pcs_autoneg to the same setting as mac autoneg */ + pcs_autoneg = hw->mac.autoneg; + + switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* sgmii mode lets the phy handle forcing speed/duplex */ + pcs_autoneg = true; + /* autoneg time out should be disabled for SGMII mode */ + reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT); + break; + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + /* disable PCS autoneg and support parallel detect only */ + pcs_autoneg = false; + /* fall through to default case */ + default: + if (hw->mac.type == e1000_82575 || + hw->mac.type == e1000_82576) { + ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT) + pcs_autoneg = false; + } + + /* + * non-SGMII modes only supports a speed of 1000/Full for the + * link so it is best to just force the MAC and let the pcs + * link either autoneg or be forced to 1000/Full + */ + ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | + E1000_CTRL_FD | E1000_CTRL_FRCDPX; + + /* set speed of 1000/Full if speed/duplex is forced */ + reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; + break; + } + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg); + + /* + * New SerDes mode allows for forcing speed or autonegotiating speed + * at 1gb. Autoneg should be default set by most drivers. This is the + * mode that will be compatible with older link partners and switches. + * However, both are supported by the hardware and some drivers/tools. + */ + reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | + E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); + + if (pcs_autoneg) { + /* Set PCS register for autoneg */ + reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ + E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ + + /* Disable force flow control for autoneg */ + reg &= ~E1000_PCS_LCTL_FORCE_FCTRL; + + /* Configure flow control advertisement for autoneg */ + anadv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV); + anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE); + + switch (hw->fc.requested_mode) { + case e1000_fc_full: + case e1000_fc_rx_pause: + anadv_reg |= E1000_TXCW_ASM_DIR; + anadv_reg |= E1000_TXCW_PAUSE; + break; + case e1000_fc_tx_pause: + anadv_reg |= E1000_TXCW_ASM_DIR; + break; + default: + break; + } + + E1000_WRITE_REG(hw, E1000_PCS_ANADV, anadv_reg); + + DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); + } else { + /* Set PCS register for forced link */ + reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ + + /* Force flow control for forced link */ + reg |= E1000_PCS_LCTL_FORCE_FCTRL; + + DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); + } + + E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg); + + if (!pcs_autoneg && !e1000_sgmii_active_82575(hw)) + e1000_force_mac_fc_generic(hw); + + return ret_val; +} + +/** + * e1000_get_media_type_82575 - derives current media type. 
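 *
 * Illustrative use (sketch based on e1000_init_mac_params_82575()): the
 * result steers the rest of link setup, e.g.
 *
 *   e1000_get_media_type_82575(hw);
 *   if (hw->phy.media_type == e1000_media_type_copper)
 *           ... copper/PHY path ...
 *   else
 *           ... serdes/PCS path ...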
+ * @hw: pointer to the HW structure + * + * The media type is chosen reflecting few settings. + * The following are taken into account: + * - link mode set in the current port Init Control Word #3 + * - current link mode settings in CSR register + * - MDIO vs. I2C PHY control interface chosen + * - SFP module media type + **/ +STATIC s32 e1000_get_media_type_82575(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + s32 ret_val = E1000_SUCCESS; + u32 ctrl_ext = 0; + u32 link_mode = 0; + + /* Set internal phy as default */ + dev_spec->sgmii_active = false; + dev_spec->module_plugged = false; + + /* Get CSR setting */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + + /* extract link mode setting */ + link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK; + + switch (link_mode) { + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + hw->phy.media_type = e1000_media_type_internal_serdes; + break; + case E1000_CTRL_EXT_LINK_MODE_GMII: + hw->phy.media_type = e1000_media_type_copper; + break; + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* Get phy control interface type set (MDIO vs. I2C)*/ + if (e1000_sgmii_uses_mdio_82575(hw)) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + break; + } + /* fall through for I2C based SGMII */ + case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: + /* read media type from SFP EEPROM */ + ret_val = e1000_set_sfp_media_type_82575(hw); + if ((ret_val != E1000_SUCCESS) || + (hw->phy.media_type == e1000_media_type_unknown)) { + /* + * If media type was not identified then return media + * type defined by the CTRL_EXT settings. + */ + hw->phy.media_type = e1000_media_type_internal_serdes; + + if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + } + + break; + } + + /* do not change link mode for 100BaseFX */ + if (dev_spec->eth_flags.e100_base_fx) + break; + + /* change current link mode setting */ + ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; + + if (hw->phy.media_type == e1000_media_type_copper) + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; + else + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + + break; + } + + return ret_val; +} + +/** + * e1000_set_sfp_media_type_82575 - derives SFP module media type. + * @hw: pointer to the HW structure + * + * The media type is chosen based on SFP module. + * compatibility flags retrieved from SFP ID EEPROM. 
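 *
 * Summary of the mapping applied below: 1000BASE-LX/SX modules select
 * internal serdes; 100BASE-FX selects serdes with sgmii_active set;
 * 1000BASE-T selects copper with sgmii_active set; anything else leaves
 * the media type unknown.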
+ **/ +STATIC s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw) +{ + s32 ret_val = E1000_ERR_CONFIG; + u32 ctrl_ext = 0; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + struct sfp_e1000_flags *eth_flags = &dev_spec->eth_flags; + u8 tranceiver_type = 0; + s32 timeout = 3; + + /* Turn I2C interface ON and power on sfp cage */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA); + + E1000_WRITE_FLUSH(hw); + + /* Read SFP module data */ + while (timeout) { + ret_val = e1000_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET), + &tranceiver_type); + if (ret_val == E1000_SUCCESS) + break; + msec_delay(100); + timeout--; + } + if (ret_val != E1000_SUCCESS) + goto out; + + ret_val = e1000_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET), + (u8 *)eth_flags); + if (ret_val != E1000_SUCCESS) + goto out; + + /* Check if there is some SFP module plugged and powered */ + if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) || + (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) { + dev_spec->module_plugged = true; + if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e100_base_fx) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e1000_base_t) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_copper; + } else { + hw->phy.media_type = e1000_media_type_unknown; + DEBUGOUT("PHY module has not been recognized\n"); + goto out; + } + } else { + hw->phy.media_type = e1000_media_type_unknown; + } + ret_val = E1000_SUCCESS; +out: + /* Restore I2C interface setting */ + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + return ret_val; +} + +/** + * e1000_valid_led_default_82575 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +STATIC s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_valid_led_default_82575"); + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_82575_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT; + break; + } + } +out: + return ret_val; +} + +/** + * e1000_sgmii_active_82575 - Return sgmii state + * @hw: pointer to the HW structure + * + * 82575 silicon has a serialized gigabit media independent interface (sgmii) + * which can be enabled for use in the embedded applications. Simply + * return the current state of the sgmii interface. + **/ +STATIC bool e1000_sgmii_active_82575(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + return dev_spec->sgmii_active; +} + +/** + * e1000_reset_init_script_82575 - Inits HW defaults after reset + * @hw: pointer to the HW structure + * + * Inits recommended HW defaults after a reset when there is no EEPROM + * detected. This is only for the 82575. 
+ **/ +STATIC s32 e1000_reset_init_script_82575(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_reset_init_script_82575"); + + if (hw->mac.type == e1000_82575) { + DEBUGOUT("Running reset init script for 82575\n"); + /* SerDes configuration via SERDESCTRL */ + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15); + + /* CCM configuration via CCMCTL register */ + e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00); + + /* PCIe lanes configuration */ + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81); + + /* PCIe PLL Configuration */ + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00); + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr_82575 - Read device MAC address + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_read_mac_addr_82575(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_read_mac_addr_82575"); + + /* + * If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. + */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + goto out; + + ret_val = e1000_read_mac_addr_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_config_collision_dist_82575 - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. + **/ +STATIC void e1000_config_collision_dist_82575(struct e1000_hw *hw) +{ + u32 tctl_ext; + + DEBUGFUNC("e1000_config_collision_dist_82575"); + + tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT); + + tctl_ext &= ~E1000_TCTL_EXT_COLD; + tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT; + + E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_power_down_phy_copper_82575 - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +STATIC void e1000_power_down_phy_copper_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + + if (!(phy->ops.check_reset_block)) + return; + + /* If the management interface is not enabled, then power down */ + if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); + + return; +} + +/** + * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. 
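 *
 * The statistics registers are clear-on-read, so a discarded
 * E1000_READ_REG() of each register is enough to zero it; that is all
 * the body below does, on top of e1000_clear_hw_cntrs_base_generic().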
+ **/ +STATIC void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_82575"); + + e1000_clear_hw_cntrs_base_generic(hw); + + E1000_READ_REG(hw, E1000_PRC64); + E1000_READ_REG(hw, E1000_PRC127); + E1000_READ_REG(hw, E1000_PRC255); + E1000_READ_REG(hw, E1000_PRC511); + E1000_READ_REG(hw, E1000_PRC1023); + E1000_READ_REG(hw, E1000_PRC1522); + E1000_READ_REG(hw, E1000_PTC64); + E1000_READ_REG(hw, E1000_PTC127); + E1000_READ_REG(hw, E1000_PTC255); + E1000_READ_REG(hw, E1000_PTC511); + E1000_READ_REG(hw, E1000_PTC1023); + E1000_READ_REG(hw, E1000_PTC1522); + + E1000_READ_REG(hw, E1000_ALGNERRC); + E1000_READ_REG(hw, E1000_RXERRC); + E1000_READ_REG(hw, E1000_TNCRS); + E1000_READ_REG(hw, E1000_CEXTERR); + E1000_READ_REG(hw, E1000_TSCTC); + E1000_READ_REG(hw, E1000_TSCTFC); + + E1000_READ_REG(hw, E1000_MGTPRC); + E1000_READ_REG(hw, E1000_MGTPDC); + E1000_READ_REG(hw, E1000_MGTPTC); + + E1000_READ_REG(hw, E1000_IAC); + E1000_READ_REG(hw, E1000_ICRXOC); + + E1000_READ_REG(hw, E1000_ICRXPTC); + E1000_READ_REG(hw, E1000_ICRXATC); + E1000_READ_REG(hw, E1000_ICTXPTC); + E1000_READ_REG(hw, E1000_ICTXATC); + E1000_READ_REG(hw, E1000_ICTXQEC); + E1000_READ_REG(hw, E1000_ICTXQMTC); + E1000_READ_REG(hw, E1000_ICRXDMTC); + + E1000_READ_REG(hw, E1000_CBTMPC); + E1000_READ_REG(hw, E1000_HTDPMC); + E1000_READ_REG(hw, E1000_CBRMPC); + E1000_READ_REG(hw, E1000_RPTHC); + E1000_READ_REG(hw, E1000_HGPTC); + E1000_READ_REG(hw, E1000_HTCBDPC); + E1000_READ_REG(hw, E1000_HGORCL); + E1000_READ_REG(hw, E1000_HGORCH); + E1000_READ_REG(hw, E1000_HGOTCL); + E1000_READ_REG(hw, E1000_HGOTCH); + E1000_READ_REG(hw, E1000_LENERRS); + + /* This register should not be read in copper configurations */ + if ((hw->phy.media_type == e1000_media_type_internal_serdes) || + e1000_sgmii_active_82575(hw)) + E1000_READ_REG(hw, E1000_SCVPC); +} + +/** + * e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable + * @hw: pointer to the HW structure + * + * After Rx enable, if manageability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This + * function clears the fifos and flushes any packets that came in as rx was + * being enabled. + **/ +void e1000_rx_fifo_flush_82575(struct e1000_hw *hw) +{ + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + + DEBUGFUNC("e1000_rx_fifo_flush_82575"); + + /* disable IPv6 options as per hardware errata */ + rfctl = E1000_READ_REG(hw, E1000_RFCTL); + rfctl |= E1000_RFCTL_IPV6_EX_DIS; + E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); + + if (hw->mac.type != e1000_82575 || + !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN)) + return; + + /* Disable all Rx queues */ + for (i = 0; i < 4; i++) { + rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i)); + E1000_WRITE_REG(hw, E1000_RXDCTL(i), + rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { + msec_delay(1); + rx_enabled = 0; + for (i = 0; i < 4; i++) + rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i)); + if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) + DEBUGOUT("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. 
Set enable and wait 2ms so that + * any packet that was coming in as RCTL.EN was set is flushed + */ + E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); + + rlpml = E1000_READ_REG(hw, E1000_RLPML); + E1000_WRITE_REG(hw, E1000_RLPML, 0); + + rctl = E1000_READ_REG(hw, E1000_RCTL); + temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP); + temp_rctl |= E1000_RCTL_LPE; + + E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl); + E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN); + E1000_WRITE_FLUSH(hw); + msec_delay(2); + + /* Enable Rx queues that were previously enabled and restore our + * previous state + */ + for (i = 0; i < 4; i++) + E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + E1000_WRITE_FLUSH(hw); + + E1000_WRITE_REG(hw, E1000_RLPML, rlpml); + E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); + + /* Flush receive errors generated by workaround */ + E1000_READ_REG(hw, E1000_ROC); + E1000_READ_REG(hw, E1000_RNBC); + E1000_READ_REG(hw, E1000_MPC); +} + +/** + * e1000_set_pcie_completion_timeout - set pci-e completion timeout + * @hw: pointer to the HW structure + * + * The defaults for 82575 and 82576 should be in the range of 50us to 50ms, + * however the hardware default for these parts is 500us to 1ms which is less + * than the 10ms recommended by the pci-e spec. To address this we need to + * increase the value to either 10ms to 200ms for capability version 1 config, + * or 16ms to 55ms for version 2. + **/ +STATIC s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw) +{ + u32 gcr = E1000_READ_REG(hw, E1000_GCR); + s32 ret_val = E1000_SUCCESS; + u16 pcie_devctl2; + + /* only take action if timeout value is defaulted to 0 */ + if (gcr & E1000_GCR_CMPL_TMOUT_MASK) + goto out; + + /* + * if capababilities version is type 1 we can write the + * timeout of 10ms to 200ms through the GCR register + */ + if (!(gcr & E1000_GCR_CAP_VER2)) { + gcr |= E1000_GCR_CMPL_TMOUT_10ms; + goto out; + } + + /* + * for version 2 capabilities we need to write the config space + * directly in order to set the completion timeout value for + * 16ms to 55ms + */ + ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); + if (ret_val) + goto out; + + pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; + + ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); +out: + /* disable completion timeout resend */ + gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; + + E1000_WRITE_REG(hw, E1000_GCR, gcr); + return ret_val; +} + +/** + * e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * @pf: Physical Function pool - do not set anti-spoofing for the PF + * + * enables/disables L2 switch anti-spoofing functionality. 
+ **/ +void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) +{ + u32 reg_val, reg_offset; + + switch (hw->mac.type) { + case e1000_82576: + reg_offset = E1000_DTXSWC; + break; + case e1000_i350: + case e1000_i354: + reg_offset = E1000_TXSWC; + break; + default: + return; + } + + reg_val = E1000_READ_REG(hw, reg_offset); + if (enable) { + reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + /* The PF can spoof - it has to in order to + * support emulation mode NICs + */ + reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); + } else { + reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + } + E1000_WRITE_REG(hw, reg_offset, reg_val); +} + +/** + * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables L2 switch loopback functionality. + **/ +void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) +{ + u32 dtxswc; + + switch (hw->mac.type) { + case e1000_82576: + dtxswc = E1000_READ_REG(hw, E1000_DTXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; + E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc); + break; + case e1000_i350: + case e1000_i354: + dtxswc = E1000_READ_REG(hw, E1000_TXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; + E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc); + break; + default: + /* Currently no other hardware supports loopback */ + break; + } + + +} + +/** + * e1000_vmdq_set_replication_pf - enable or disable vmdq replication + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables replication of packets across multiple pools. + **/ +void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) +{ + u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL); + + if (enable) + vt_ctl |= E1000_VT_CTL_VM_REPL_EN; + else + vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; + + E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl); +} + +/** + * e1000_read_phy_reg_82580 - Read 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +STATIC s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_read_phy_reg_82580"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = e1000_read_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_write_phy_reg_82580 - Write 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. 
+ **/ +STATIC s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_write_phy_reg_82580"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = e1000_write_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits + * @hw: pointer to the HW structure + * + * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on + * the values found in the EEPROM. This addresses an issue in which these + * bits are not restored from EEPROM after reset. + **/ +STATIC s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u32 mdicnfg; + u16 nvm_data = 0; + + DEBUGFUNC("e1000_reset_mdicnfg_82580"); + + if (hw->mac.type != e1000_82580) + goto out; + if (!e1000_sgmii_active_82575(hw)) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG); + if (nvm_data & NVM_WORD24_EXT_MDIO) + mdicnfg |= E1000_MDICNFG_EXT_MDIO; + if (nvm_data & NVM_WORD24_COM_MDIO) + mdicnfg |= E1000_MDICNFG_COM_MDIO; + E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); +out: + return ret_val; +} + +/** + * e1000_reset_hw_82580 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets function or entire device (all ports, etc.) + * to a known state. + **/ +STATIC s32 e1000_reset_hw_82580(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + /* BH SW mailbox bit in SW_FW_SYNC */ + u16 swmbsw_mask = E1000_SW_SYNCH_MB; + u32 ctrl; + bool global_device_reset = hw->dev_spec._82575.global_device_reset; + + DEBUGFUNC("e1000_reset_hw_82580"); + + hw->dev_spec._82575.global_device_reset = false; + + /* 82580 does not reliably do global_device_reset due to hw errata */ + if (hw->mac.type == e1000_82580) + global_device_reset = false; + + /* Get current control state. */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* + * Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000_disable_pcie_master_generic(hw); + if (ret_val) + DEBUGOUT("PCI-E Master disable polling has failed.\n"); + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + msec_delay(10); + + /* Determine whether or not a global dev reset is requested */ + if (global_device_reset && hw->mac.ops.acquire_swfw_sync(hw, + swmbsw_mask)) + global_device_reset = false; + + if (global_device_reset && !(E1000_READ_REG(hw, E1000_STATUS) & + E1000_STAT_DEV_RST_SET)) + ctrl |= E1000_CTRL_DEV_RST; + else + ctrl |= E1000_CTRL_RST; + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + switch (hw->device_id) { + case E1000_DEV_ID_DH89XXCC_SGMII: + break; + default: + E1000_WRITE_FLUSH(hw); + break; + } + + /* Add delay to insure DEV_RST or RST has time to complete */ + msec_delay(5); + + ret_val = e1000_get_auto_rd_done_generic(hw); + if (ret_val) { + /* + * When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. 
+ */ + DEBUGOUT("Auto Read Done did not complete\n"); + } + + /* clear global device reset status bit */ + E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET); + + /* Clear any pending interrupt events. */ + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); + + ret_val = e1000_reset_mdicnfg_82580(hw); + if (ret_val) + DEBUGOUT("Could not reset MDICNFG based on EEPROM\n"); + + /* Install any alternate MAC address into RAR0 */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + + /* Release semaphore */ + if (global_device_reset) + hw->mac.ops.release_swfw_sync(hw, swmbsw_mask); + + return ret_val; +} + +/** + * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size + * @data: data received by reading RXPBS register + * + * The 82580 uses a table based approach for packet buffer allocation sizes. + * This function converts the retrieved value into the correct table value + * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 + * 0x0 36 72 144 1 2 4 8 16 + * 0x8 35 70 140 rsv rsv rsv rsv rsv + */ +u16 e1000_rxpbs_adjust_82580(u32 data) +{ + u16 ret_val = 0; + + if (data < E1000_82580_RXPBS_TABLE_SIZE) + ret_val = e1000_82580_rxpbs_table[data]; + + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_with_offset - Validate EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) +{ + s32 ret_val = E1000_SUCCESS; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_validate_nvm_checksum_with_offset"); + + for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + DEBUGOUT("NVM Checksum Invalid\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_update_nvm_checksum_with_offset - Update EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_update_nvm_checksum_with_offset"); + + for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, + &checksum); + if (ret_val) + DEBUGOUT("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. 
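+ * Concretely, the words at offsets [offset .. offset + NVM_CHECKSUM_REG]
+ * of each region (the last word being the stored checksum itself) must sum
+ * to 0xBABA modulo 2^16 for the region to be considered valid.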
+ **/ +STATIC s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val; + u16 eeprom_regions_count = 1; + u16 j, nvm_data; + u16 nvm_offset; + + DEBUGFUNC("e1000_validate_nvm_checksum_82580"); + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { + /* if chekcsums compatibility bit is set validate checksums + * for all 4 ports. */ + eeprom_regions_count = 4; + } + + for (j = 0; j < eeprom_regions_count; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = e1000_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != E1000_SUCCESS) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_update_nvm_checksum_82580 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. + **/ +STATIC s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val; + u16 j, nvm_data; + u16 nvm_offset; + + DEBUGFUNC("e1000_update_nvm_checksum_82580"); + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error while updating checksum compatibility bit.\n"); + goto out; + } + + if (!(nvm_data & NVM_COMPATIBILITY_BIT_MASK)) { + /* set compatibility bit to validate checksums appropriately */ + nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; + ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, + &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Write Error while updating checksum compatibility bit.\n"); + goto out; + } + } + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. + **/ +STATIC s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 j; + u16 nvm_offset; + + DEBUGFUNC("e1000_validate_nvm_checksum_i350"); + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = e1000_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != E1000_SUCCESS) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_update_nvm_checksum_i350 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. 
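+ * For each region the stored checksum word is recomputed as
+ * (u16)(NVM_SUM - sum of the preceding words in that region), so that the
+ * whole region again sums to 0xBABA.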
+ **/ +STATIC s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 j; + u16 nvm_offset; + + DEBUGFUNC("e1000_update_nvm_checksum_i350"); + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val != E1000_SUCCESS) + goto out; + } + +out: + return ret_val; +} + +/** + * __e1000_access_emi_reg - Read/write EMI register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: pointer to value to read/write from/to the EMI address + * @read: boolean flag to indicate read or write + **/ +STATIC s32 __e1000_access_emi_reg(struct e1000_hw *hw, u16 address, + u16 *data, bool read) +{ + s32 ret_val; + + DEBUGFUNC("__e1000_access_emi_reg"); + + ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); + else + ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); + + return ret_val; +} + +/** + * e1000_read_emi_reg - Read Extended Management Interface register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: value to be read from the EMI address + **/ +s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) +{ + DEBUGFUNC("e1000_read_emi_reg"); + + return __e1000_access_emi_reg(hw, addr, data, true); +} + +/** + * e1000_initialize_M88E1512_phy - Initialize M88E1512 PHY + * @hw: pointer to the HW structure + * + * Initialize Marvell 1512 to work correctly with Avoton. + **/ +s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_initialize_M88E1512_phy"); + + /* Check if this is correct PHY. */ + if (phy->id != M88E1512_E_PHY_ID) + goto out; + + /* Switch to PHY page 0xFF. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); + if (ret_val) + goto out; + + /* Switch to PHY page 0xFB. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D); + if (ret_val) + goto out; + + /* Switch to PHY page 0x12. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); + if (ret_val) + goto out; + + /* Change mode to SGMII-to-Copper */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); + if (ret_val) + goto out; + + /* Return the PHY to page 0. 
*/ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + + msec_delay(1000); +out: + return ret_val; +} + +/** + * e1000_initialize_M88E1543_phy - Initialize M88E1543 PHY + * @hw: pointer to the HW structure + * + * Initialize Marvell 1543 to work correctly with Avoton. + **/ +s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_initialize_M88E1543_phy"); + + /* Check if this is correct PHY. */ + if (phy->id != M88E1543_E_PHY_ID) + goto out; + + /* Switch to PHY page 0xFF. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xDC0C); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); + if (ret_val) + goto out; + + /* Switch to PHY page 0xFB. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0xC00D); + if (ret_val) + goto out; + + /* Switch to PHY page 0x12. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); + if (ret_val) + goto out; + + /* Change mode to SGMII-to-Copper */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); + if (ret_val) + goto out; + + /* Switch to PHY page 1. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x1); + if (ret_val) + goto out; + + /* Change mode to 1000BASE-X/SGMII and autoneg enable; reset */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_FIBER_CTRL, 0x9140); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + + msec_delay(1000); +out: + return ret_val; +} + +/** + * e1000_set_eee_i350 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv1g: boolean flag enabling 1G EEE advertisement + * @adv100m: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE based on setting in dev_spec structure. 
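+ * When dev_spec._82575.eee_disable is false, IPCNFG advertises 1G and/or
+ * 100M EEE according to adv1G/adv100M and EEER enables Tx/Rx LPI plus LPI
+ * flow control; otherwise all of those bits are cleared. A typical caller
+ * would use e1000_set_eee_i350(hw, true, true) to advertise both speeds.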
+ * + **/ +s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M) +{ + u32 ipcnfg, eeer; + + DEBUGFUNC("e1000_set_eee_i350"); + + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) + goto out; + ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG); + eeer = E1000_READ_REG(hw, E1000_EEER); + + /* enable or disable per user setting */ + if (!(hw->dev_spec._82575.eee_disable)) { + u32 eee_su = E1000_READ_REG(hw, E1000_EEE_SU); + + if (adv100M) + ipcnfg |= E1000_IPCNFG_EEE_100M_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN; + + if (adv1G) + ipcnfg |= E1000_IPCNFG_EEE_1G_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN; + + eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + + /* This bit should not be set in normal operation. */ + if (eee_su & E1000_EEE_SU_LPI_CLK_STP) + DEBUGOUT("LPI Clock Stop Bit should not be set!\n"); + } else { + ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); + eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + } + E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg); + E1000_WRITE_REG(hw, E1000_EEER, eeer); + E1000_READ_REG(hw, E1000_IPCNFG); + E1000_READ_REG(hw, E1000_EEER); +out: + + return E1000_SUCCESS; +} + +/** + * e1000_set_eee_i354 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv1g: boolean flag enabling 1G EEE advertisement + * @adv100m: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE legacy mode based on setting in dev_spec structure. + * + **/ +s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 phy_data; + + DEBUGFUNC("e1000_set_eee_i354"); + + if ((hw->phy.media_type != e1000_media_type_copper) || + ((phy->id != M88E1543_E_PHY_ID) && + (phy->id != M88E1512_E_PHY_ID))) + goto out; + + if (!hw->dev_spec._82575.eee_disable) { + /* Switch to PHY page 18. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1, + &phy_data); + if (ret_val) + goto out; + + phy_data |= E1000_M88E1543_EEE_CTRL_1_MS; + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1, + phy_data); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + /* Turn on EEE advertisement. */ + ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + if (adv100M) + phy_data |= E1000_EEE_ADV_100_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_100_SUPPORTED; + + if (adv1G) + phy_data |= E1000_EEE_ADV_1000_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED; + + ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + phy_data); + } else { + /* Turn off EEE advertisement. */ + ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED | + E1000_EEE_ADV_1000_SUPPORTED); + ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + phy_data); + } + +out: + return ret_val; +} + +/** + * e1000_get_eee_status_i354 - Get EEE status + * @hw: pointer to the HW structure + * @status: EEE status + * + * Get EEE status by guessing based on whether Tx or Rx LPI indications have + * been received. 
+ **/ +s32 e1000_get_eee_status_i354(struct e1000_hw *hw, bool *status) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 phy_data; + + DEBUGFUNC("e1000_get_eee_status_i354"); + + /* Check if EEE is supported on this device. */ + if ((hw->phy.media_type != e1000_media_type_copper) || + ((phy->id != M88E1543_E_PHY_ID) && + (phy->id != M88E1512_E_PHY_ID))) + goto out; + + ret_val = e1000_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, + E1000_PCS_STATUS_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD | + E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false; + +out: + return ret_val; +} + +/* Due to a hw errata, if the host tries to configure the VFTA register + * while performing queries from the BMC or DMA, then the VFTA in some + * cases won't be written. + */ + +/** + * e1000_clear_vfta_i350 - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void e1000_clear_vfta_i350(struct e1000_hw *hw) +{ + u32 offset; + int i; + + DEBUGFUNC("e1000_clear_vfta_350"); + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + for (i = 0; i < 10; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); + + E1000_WRITE_FLUSH(hw); + } +} + +/** + * e1000_write_vfta_i350 - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) +{ + int i; + + DEBUGFUNC("e1000_write_vfta_350"); + + for (i = 0; i < 10; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); + + E1000_WRITE_FLUSH(hw); +} + + +/** + * e1000_set_i2c_bb - Enable I2C bit-bang + * @hw: pointer to the HW structure + * + * Enable I2C bit-bang interface + * + **/ +s32 e1000_set_i2c_bb(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u32 ctrl_ext, i2cparams; + + DEBUGFUNC("e1000_set_i2c_bb"); + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_I2C_ENA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + + i2cparams = E1000_READ_REG(hw, E1000_I2CPARAMS); + i2cparams |= E1000_I2CBB_EN; + i2cparams |= E1000_I2C_DATA_OE_N; + i2cparams |= E1000_I2C_CLK_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cparams); + E1000_WRITE_FLUSH(hw); + + return ret_val; +} + +/** + * e1000_read_i2c_byte_generic - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: device address + * @data: value read + * + * Performs byte read operation over I2C interface at + * a specified device address. 
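+ * The bit-banged transaction below is: START, dev_addr + write, ACK,
+ * byte_offset, ACK, repeated START, dev_addr + read, ACK, clock in the
+ * data byte, NACK, STOP. On any failure the bus is cleared and the
+ * transaction is retried under the PHY software/firmware semaphore.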
+ **/ +s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + s32 status = E1000_SUCCESS; + u32 max_retry = 10; + u32 retry = 1; + u16 swfw_mask = 0; + + bool nack = true; + + DEBUGFUNC("e1000_read_i2c_byte_generic"); + + swfw_mask = E1000_SWFW_PHY0_SM; + + do { + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) + != E1000_SUCCESS) { + status = E1000_ERR_SWFW_SYNC; + goto read_byte_out; + } + + e1000_i2c_start(hw); + + /* Device Address and write indication */ + status = e1000_clock_out_i2c_byte(hw, dev_addr); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_out_i2c_byte(hw, byte_offset); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + e1000_i2c_start(hw); + + /* Device Address and read indication */ + status = e1000_clock_out_i2c_byte(hw, (dev_addr | 0x1)); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_in_i2c_byte(hw, data); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_out_i2c_bit(hw, nack); + if (status != E1000_SUCCESS) + goto fail; + + e1000_i2c_stop(hw); + break; + +fail: + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msec_delay(100); + e1000_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte read error - Retrying.\n"); + else + DEBUGOUT("I2C byte read error.\n"); + + } while (retry < max_retry); + + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + +read_byte_out: + + return status; +} + +/** + * e1000_write_i2c_byte_generic - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: value to write + * + * Performs byte write operation over I2C interface at + * a specified device address. 
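+ * The write transaction is: START, dev_addr + write, ACK, byte_offset,
+ * ACK, data, ACK, STOP, all performed while holding the PHY
+ * software/firmware semaphore.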
+ **/ +s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + s32 status = E1000_SUCCESS; + u32 max_retry = 1; + u32 retry = 0; + u16 swfw_mask = 0; + + DEBUGFUNC("e1000_write_i2c_byte_generic"); + + swfw_mask = E1000_SWFW_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) { + status = E1000_ERR_SWFW_SYNC; + goto write_byte_out; + } + + do { + e1000_i2c_start(hw); + + status = e1000_clock_out_i2c_byte(hw, dev_addr); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_out_i2c_byte(hw, byte_offset); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_out_i2c_byte(hw, data); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + e1000_i2c_stop(hw); + break; + +fail: + e1000_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte write error - Retrying.\n"); + else + DEBUGOUT("I2C byte write error.\n"); + } while (retry < max_retry); + + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + +write_byte_out: + + return status; +} + +/** + * e1000_i2c_start - Sets I2C start condition + * @hw: pointer to hardware structure + * + * Sets I2C start condition (High -> Low on SDA while SCL is High) + **/ +STATIC void e1000_i2c_start(struct e1000_hw *hw) +{ + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + DEBUGFUNC("e1000_i2c_start"); + + /* Start condition must begin with data and clock high */ + e1000_set_i2c_data(hw, &i2cctl, 1); + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for start condition (4.7us) */ + usec_delay(E1000_I2C_T_SU_STA); + + e1000_set_i2c_data(hw, &i2cctl, 0); + + /* Hold time for start condition (4us) */ + usec_delay(E1000_I2C_T_HD_STA); + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(E1000_I2C_T_LOW); + +} + +/** + * e1000_i2c_stop - Sets I2C stop condition + * @hw: pointer to hardware structure + * + * Sets I2C stop condition (Low -> High on SDA while SCL is High) + **/ +STATIC void e1000_i2c_stop(struct e1000_hw *hw) +{ + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + DEBUGFUNC("e1000_i2c_stop"); + + /* Stop condition must begin with data low and clock high */ + e1000_set_i2c_data(hw, &i2cctl, 0); + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for stop condition (4us) */ + usec_delay(E1000_I2C_T_SU_STO); + + e1000_set_i2c_data(hw, &i2cctl, 1); + + /* bus free time between stop and start (4.7us)*/ + usec_delay(E1000_I2C_T_BUF); +} + +/** + * e1000_clock_in_i2c_byte - Clocks in one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte to clock in + * + * Clocks in one byte data via I2C data/clock + **/ +STATIC s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data) +{ + s32 i; + bool bit = 0; + + DEBUGFUNC("e1000_clock_in_i2c_byte"); + + *data = 0; + for (i = 7; i >= 0; i--) { + e1000_clock_in_i2c_bit(hw, &bit); + *data |= bit << i; + } + + return E1000_SUCCESS; +} + +/** + * e1000_clock_out_i2c_byte - Clocks out one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte clocked out + * + * Clocks out one byte data via I2C data/clock + **/ +STATIC s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data) +{ + s32 status = E1000_SUCCESS; + s32 i; + u32 i2cctl; + bool bit = 0; + + 
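+	/* Bits are clocked out MSB first; after the last bit the SDA output
+	 * driver is released (DATA_OE_N set) so the slave can drive its ACK.
+	 */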
DEBUGFUNC("e1000_clock_out_i2c_byte"); + + for (i = 7; i >= 0; i--) { + bit = (data >> i) & 0x1; + status = e1000_clock_out_i2c_bit(hw, bit); + + if (status != E1000_SUCCESS) + break; + } + + /* Release SDA line (set high) */ + i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + i2cctl |= E1000_I2C_DATA_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl); + E1000_WRITE_FLUSH(hw); + + return status; +} + +/** + * e1000_get_i2c_ack - Polls for I2C ACK + * @hw: pointer to hardware structure + * + * Clocks in/out one bit via I2C data/clock + **/ +STATIC s32 e1000_get_i2c_ack(struct e1000_hw *hw) +{ + s32 status = E1000_SUCCESS; + u32 i = 0; + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + u32 timeout = 10; + bool ack = true; + + DEBUGFUNC("e1000_get_i2c_ack"); + + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(E1000_I2C_T_HIGH); + + /* Wait until SCL returns high */ + for (i = 0; i < timeout; i++) { + usec_delay(1); + i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + if (i2cctl & E1000_I2C_CLK_IN) + break; + } + if (!(i2cctl & E1000_I2C_CLK_IN)) + return E1000_ERR_I2C; + + ack = e1000_get_i2c_data(&i2cctl); + if (ack) { + DEBUGOUT("I2C ack was not received.\n"); + status = E1000_ERR_I2C; + } + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(E1000_I2C_T_LOW); + + return status; +} + +/** + * e1000_clock_in_i2c_bit - Clocks in one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: read data value + * + * Clocks in one bit via I2C data/clock + **/ +STATIC s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data) +{ + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + DEBUGFUNC("e1000_clock_in_i2c_bit"); + + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(E1000_I2C_T_HIGH); + + i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + *data = e1000_get_i2c_data(&i2cctl); + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(E1000_I2C_T_LOW); + + return E1000_SUCCESS; +} + +/** + * e1000_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: data value to write + * + * Clocks out one bit via I2C data/clock + **/ +STATIC s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data) +{ + s32 status; + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + DEBUGFUNC("e1000_clock_out_i2c_bit"); + + status = e1000_set_i2c_data(hw, &i2cctl, data); + if (status == E1000_SUCCESS) { + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(E1000_I2C_T_HIGH); + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us. + * This also takes care of the data hold time. 
+ */ + usec_delay(E1000_I2C_T_LOW); + } else { + status = E1000_ERR_I2C; + DEBUGOUT1("I2C data was not set to %X\n", data); + } + + return status; +} +/** + * e1000_raise_i2c_clk - Raises the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Raises the I2C clock line '0'->'1' + **/ +STATIC void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl) +{ + DEBUGFUNC("e1000_raise_i2c_clk"); + + *i2cctl |= E1000_I2C_CLK_OUT; + *i2cctl &= ~E1000_I2C_CLK_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); + E1000_WRITE_FLUSH(hw); + + /* SCL rise time (1000ns) */ + usec_delay(E1000_I2C_T_RISE); +} + +/** + * e1000_lower_i2c_clk - Lowers the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Lowers the I2C clock line '1'->'0' + **/ +STATIC void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl) +{ + + DEBUGFUNC("e1000_lower_i2c_clk"); + + *i2cctl &= ~E1000_I2C_CLK_OUT; + *i2cctl &= ~E1000_I2C_CLK_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); + E1000_WRITE_FLUSH(hw); + + /* SCL fall time (300ns) */ + usec_delay(E1000_I2C_T_FALL); +} + +/** + * e1000_set_i2c_data - Sets the I2C data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * @data: I2C data value (0 or 1) to set + * + * Sets the I2C data bit + **/ +STATIC s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data) +{ + s32 status = E1000_SUCCESS; + + DEBUGFUNC("e1000_set_i2c_data"); + + if (data) + *i2cctl |= E1000_I2C_DATA_OUT; + else + *i2cctl &= ~E1000_I2C_DATA_OUT; + + *i2cctl &= ~E1000_I2C_DATA_OE_N; + *i2cctl |= E1000_I2C_CLK_OE_N; + E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); + E1000_WRITE_FLUSH(hw); + + /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ + usec_delay(E1000_I2C_T_RISE + E1000_I2C_T_FALL + E1000_I2C_T_SU_DATA); + + *i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + if (data != e1000_get_i2c_data(i2cctl)) { + status = E1000_ERR_I2C; + DEBUGOUT1("Error - I2C data was not set to %X.\n", data); + } + + return status; +} + +/** + * e1000_get_i2c_data - Reads the I2C SDA data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Returns the I2C data bit value + **/ +STATIC bool e1000_get_i2c_data(u32 *i2cctl) +{ + bool data; + + DEBUGFUNC("e1000_get_i2c_data"); + + if (*i2cctl & E1000_I2C_DATA_IN) + data = 1; + else + data = 0; + + return data; +} + +/** + * e1000_i2c_bus_clear - Clears the I2C bus + * @hw: pointer to hardware structure + * + * Clears the I2C bus by sending nine clock pulses. + * Used when data line is stuck low. 
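+ * Nine pulses are enough for a slave that is stuck mid-byte (eight data
+ * bits plus the ACK slot) to finish clocking out and release SDA, after
+ * which a clean STOP can be generated.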
+ **/ +void e1000_i2c_bus_clear(struct e1000_hw *hw) +{ + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + u32 i; + + DEBUGFUNC("e1000_i2c_bus_clear"); + + e1000_i2c_start(hw); + + e1000_set_i2c_data(hw, &i2cctl, 1); + + for (i = 0; i < 9; i++) { + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Min high period of clock is 4us */ + usec_delay(E1000_I2C_T_HIGH); + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Min low period of clock is 4.7us*/ + usec_delay(E1000_I2C_T_LOW); + } + + e1000_i2c_start(hw); + + /* Put the i2c bus back to default state */ + e1000_i2c_stop(hw); +} + diff --git a/drivers/net/e1000/base/e1000_82575.h b/drivers/net/e1000/base/e1000_82575.h new file mode 100644 index 00000000..c4986841 --- /dev/null +++ b/drivers/net/e1000/base/e1000_82575.h @@ -0,0 +1,521 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _E1000_82575_H_ +#define _E1000_82575_H_ + +#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) +/* + * Receive Address Register Count + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * These entries are also used for MAC-based filtering. + */ +/* + * For 82576, there are an additional set of RARs that begin at an offset + * separate from the first set of RARs. 
+ */ +#define E1000_RAR_ENTRIES_82575 16 +#define E1000_RAR_ENTRIES_82576 24 +#define E1000_RAR_ENTRIES_82580 24 +#define E1000_RAR_ENTRIES_I350 32 +#define E1000_SW_SYNCH_MB 0x00000100 +#define E1000_STAT_DEV_RST_SET 0x00100000 +#define E1000_CTRL_DEV_RST 0x20000000 + +#ifdef E1000_BIT_FIELDS +struct e1000_adv_data_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + u32 data; + struct { + u32 datalen:16; /* Data buffer length */ + u32 rsvd:4; + u32 dtyp:4; /* Descriptor type */ + u32 dcmd:8; /* Descriptor command */ + } config; + } lower; + union { + u32 data; + struct { + u32 status:4; /* Descriptor status */ + u32 idx:4; + u32 popts:6; /* Packet Options */ + u32 paylen:18; /* Payload length */ + } options; + } upper; +}; + +#define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */ +#define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */ +#define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */ +#define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */ +#define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */ +#define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */ +#define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */ +#define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */ +#define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADV_DCMD_RS 0x8 /* Report Status */ +#define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */ +#define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */ +/* Extended Device Control */ +#define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */ + +struct e1000_adv_context_desc { + union { + u32 ip_config; + struct { + u32 iplen:9; + u32 maclen:7; + u32 vlan_tag:16; + } fields; + } ip_setup; + u32 seq_num; + union { + u64 l4_config; + struct { + u32 mkrloc:9; + u32 tucmd:11; + u32 dtyp:4; + u32 adv:8; + u32 rsvd:4; + u32 idx:4; + u32 l4len:8; + u32 mss:16; + } fields; + } l4_setup; +}; +#endif + +/* SRRCTL bit definitions */ +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define E1000_SRRCTL_TIMESTAMP 0x40000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 + +#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 + +#define E1000_TX_HEAD_WB_ENABLE 0x1 +#define E1000_TX_SEQNUM_WB_ENABLE 0x2 + +#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 +#define E1000_MRQC_ENABLE_VMDQ 0x00000003 +#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 +#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 +#define E1000_MRQC_ENABLE_RSS_8Q 0x00000002 + +#define E1000_VMRCTL_MIRROR_PORT_SHIFT 8 +#define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << \ + E1000_VMRCTL_MIRROR_PORT_SHIFT) +#define E1000_VMRCTL_POOL_MIRROR_ENABLE (1 << 0) +#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1) +#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2) + +#define E1000_EICR_TX_QUEUE ( \ 
+ E1000_EICR_TX_QUEUE0 | \ + E1000_EICR_TX_QUEUE1 | \ + E1000_EICR_TX_QUEUE2 | \ + E1000_EICR_TX_QUEUE3) + +#define E1000_EICR_RX_QUEUE ( \ + E1000_EICR_RX_QUEUE0 | \ + E1000_EICR_RX_QUEUE1 | \ + E1000_EICR_RX_QUEUE2 | \ + E1000_EICR_RX_QUEUE3) + +#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE +#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE + +#define EIMS_ENABLE_MASK ( \ + E1000_EIMS_RX_QUEUE | \ + E1000_EIMS_TX_QUEUE | \ + E1000_EIMS_TCP_TIMER | \ + E1000_EIMS_OTHER) + +/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ +#define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ +#define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ +#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ +#define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ +#define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ +#define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ +#define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ +#define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ +#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ + +/* Receive Descriptor - Advanced */ +union e1000_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /*RSS type, Pkt type*/ + /* Split Header, header buffer len */ + __le16 hdr_info; + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define E1000_RXDADV_RSSTYPE_MASK 0x0000000F +#define E1000_RXDADV_RSSTYPE_SHIFT 12 +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 +#define E1000_RXDADV_SPLITHEADER_EN 0x00001000 +#define E1000_RXDADV_SPH 0x8000 +#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ +#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ +#define E1000_RXDADV_ERR_HBO 0x00800000 + +/* RSS Hash results */ +#define E1000_RXDADV_RSSTYPE_NONE 0x00000000 +#define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define E1000_RXDADV_RSSTYPE_IPV4 0x00000002 +#define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define E1000_RXDADV_RSSTYPE_IPV6 0x00000005 +#define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + +/* RSS Packet Types as indicated in the receive descriptor */ +#define E1000_RXDADV_PKTTYPE_ILMASK 0x000000F0 +#define E1000_RXDADV_PKTTYPE_TLMASK 0x00000F00 +#define E1000_RXDADV_PKTTYPE_NONE 0x00000000 +#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */ +#define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */ +#define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */ +#define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */ +#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ +#define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ +#define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 
/* SCTP hdr present */ +#define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ + +#define E1000_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ +#define E1000_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ +#define E1000_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ +#define E1000_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ +#define E1000_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ +#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ + +/* LinkSec results */ +/* Security Processing bit Indication */ +#define E1000_RXDADV_LNKSEC_STATUS_SECP 0x00020000 +#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 +#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 +#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 +#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 + +#define E1000_RXDADV_IPSEC_STATUS_SECP 0x00020000 +#define E1000_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 +#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 +#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 +#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000 + +/* Transmit Descriptor - Advanced */ +union e1000_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on pkt */ +#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp pkt */ +#define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED prsnt in WB */ +#define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ +#define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ +#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ +/* 1st & Last TSO-full iSCSI PDU*/ +#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800 +#define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +/* Context descriptors */ +struct e1000_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define 
E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ +/* IPSec Encrypt Enable for ESP */ +#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 +/* Req requires Markers and CRC */ +#define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000 +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +/* Adv ctxt IPSec SA IDX mask */ +#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF +/* Adv ctxt IPSec ESP len mask */ +#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF + +/* Additional Transmit Descriptor Control definitions */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ +#define E1000_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wbk flushing */ +/* Tx Queue Arbitration Priority 0=low, 1=high */ +#define E1000_TXDCTL_PRIORITY 0x08000000 + +/* Additional Receive Descriptor Control definitions */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ +#define E1000_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. wbk flushing */ + +/* Direct Cache Access (DCA) definitions */ +#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ +#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ + +#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ +#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ +#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header ena */ +#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload ena */ +#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx Desc Relax Order */ + +#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ + +#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ +#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ +#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */ +#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */ + +/* Additional interrupt register bit definitions */ +#define E1000_ICR_LSECPNS 0x00000020 /* PN threshold - server */ +#define E1000_IMS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */ +#define E1000_ICS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */ + +/* ETQF register bit definitions */ +#define E1000_ETQF_FILTER_ENABLE (1 << 26) +#define E1000_ETQF_IMM_INT (1 << 29) +#define E1000_ETQF_1588 (1 << 30) +#define E1000_ETQF_QUEUE_ENABLE (1 << 31) +/* + * ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters + * here!! 
+ * + * Current filters: + * EAPOL 802.1x (0x888e): Filter 0 + */ +#define E1000_ETQF_FILTER_EAPOL 0 + +#define E1000_FTQF_VF_BP 0x00008000 +#define E1000_FTQF_1588_TIME_STAMP 0x08000000 +#define E1000_FTQF_MASK 0xF0000000 +#define E1000_FTQF_MASK_PROTO_BP 0x10000000 +#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000 +#define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000 +#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 + +#define E1000_NVM_APME_82575 0x0400 +#define MAX_NUM_VFS 7 + +#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof cntrl */ +#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof cntrl */ +#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ +#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 +#define E1000_DTXSWC_LLE_SHIFT 16 +#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ + +/* Easy defines for setting default pool, would normally be left a zero */ +#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 +#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) + +/* Other useful VMD_CTL register defines */ +#define E1000_VT_CTL_IGNORE_MAC (1 << 28) +#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29) +#define E1000_VT_CTL_VM_REPL_EN (1 << 30) + +/* Per VM Offload register setup */ +#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ +#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ +#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ +#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ +#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ +#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ +#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ +#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ +#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_VMOLR_VPE 0x00800000 /* VLAN promiscuous enable */ +#define E1000_VMOLR_UPE 0x20000000 /* Unicast promisuous enable */ +#define E1000_DVMOLR_HIDVLAN 0x20000000 /* Vlan hiding enable */ +#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_PBRWAC_WALPB 0x00000007 /* Wrap around event on LAN Rx PB */ +#define E1000_PBRWAC_PBE 0x00000008 /* Rx packet buffer empty */ + +#define E1000_VLVF_ARRAY_SIZE 32 +#define E1000_VLVF_VLANID_MASK 0x00000FFF +#define E1000_VLVF_POOLSEL_SHIFT 12 +#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) +#define E1000_VLVF_LVLAN 0x00100000 +#define E1000_VLVF_VLANID_ENABLE 0x80000000 + +#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ + +#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ + +#define E1000_IOVCTL 0x05BBC +#define E1000_IOVCTL_REUSE_VFQ 0x00000001 + +#define E1000_RPLOLR_STRVLAN 0x40000000 +#define E1000_RPLOLR_STRCRC 0x80000000 + +#define E1000_TCTL_EXT_COLD 0x000FFC00 +#define E1000_TCTL_EXT_COLD_SHIFT 10 + +#define E1000_DTXCTL_8023LL 0x0004 +#define E1000_DTXCTL_VLAN_ADDED 0x0008 +#define E1000_DTXCTL_OOS_ENABLE 0x0010 +#define E1000_DTXCTL_MDP_EN 0x0020 +#define E1000_DTXCTL_SPOOF_INT 0x0040 + +#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14) + +#define ALL_QUEUES 0xFFFF + +/* Rx packet buffer size defines */ +#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F +void 
e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable); +void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf); +void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable); +s32 e1000_init_nvm_params_82575(struct e1000_hw *hw); +s32 e1000_init_hw_82575(struct e1000_hw *hw); + +enum e1000_promisc_type { + e1000_promisc_disabled = 0, /* all promisc modes disabled */ + e1000_promisc_unicast = 1, /* unicast promiscuous enabled */ + e1000_promisc_multicast = 2, /* multicast promiscuous enabled */ + e1000_promisc_enabled = 3, /* both uni and multicast promisc */ + e1000_num_promisc_types +}; + +void e1000_vfta_set_vf(struct e1000_hw *, u16, bool); +void e1000_rlpml_set_vf(struct e1000_hw *, u16); +s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type type); +u16 e1000_rxpbs_adjust_82580(u32 data); +s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data); +s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M); +s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M); +s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *); +s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw); +s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw); + +/* I2C SDA and SCL timing parameters for standard mode */ +#define E1000_I2C_T_HD_STA 4 +#define E1000_I2C_T_LOW 5 +#define E1000_I2C_T_HIGH 4 +#define E1000_I2C_T_SU_STA 5 +#define E1000_I2C_T_HD_DATA 5 +#define E1000_I2C_T_SU_DATA 1 +#define E1000_I2C_T_RISE 1 +#define E1000_I2C_T_FALL 1 +#define E1000_I2C_T_SU_STO 4 +#define E1000_I2C_T_BUF 5 + +s32 e1000_set_i2c_bb(struct e1000_hw *hw); +s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +void e1000_i2c_bus_clear(struct e1000_hw *hw); +#endif /* _E1000_82575_H_ */ diff --git a/drivers/net/e1000/base/e1000_api.c b/drivers/net/e1000/base/e1000_api.c new file mode 100644 index 00000000..bbfcae88 --- /dev/null +++ b/drivers/net/e1000/base/e1000_api.c @@ -0,0 +1,1363 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#include "e1000_api.h" + +/** + * e1000_init_mac_params - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the MAC + * set of functions. Called by drivers or by e1000_setup_init_funcs. + **/ +s32 e1000_init_mac_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->mac.ops.init_params) { + ret_val = hw->mac.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("MAC Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("mac.init_mac_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the NVM + * set of functions. Called by drivers or by e1000_setup_init_funcs. + **/ +s32 e1000_init_nvm_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->nvm.ops.init_params) { + ret_val = hw->nvm.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("NVM Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("nvm.init_nvm_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_init_phy_params - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the PHY + * set of functions. Called by drivers or by e1000_setup_init_funcs. + **/ +s32 e1000_init_phy_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->phy.ops.init_params) { + ret_val = hw->phy.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("PHY Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("phy.init_phy_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_init_mbx_params - Initialize mailbox function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the PHY + * set of functions. Called by drivers or by e1000_setup_init_funcs. + **/ +s32 e1000_init_mbx_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->mbx.ops.init_params) { + ret_val = hw->mbx.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("Mailbox Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("mbx.init_mbx_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the mac type of the adapter based on the + * device ID stored in the hw structure. + * MUST BE FIRST FUNCTION CALLED (explicitly or through + * e1000_setup_init_funcs()). 
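+ *
+ * Rough usage sketch (illustrative only; "pci_dev_id" stands in for
+ * whatever device ID the driver read from PCI config space):
+ *
+ *	hw->device_id = pci_dev_id;
+ *	if (e1000_set_mac_type(hw) != E1000_SUCCESS)
+ *		return -E1000_ERR_MAC_INIT;	/* unsupported device */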
+ **/ +s32 e1000_set_mac_type(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_set_mac_type"); + + switch (hw->device_id) { + case E1000_DEV_ID_82542: + mac->type = e1000_82542; + break; + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + mac->type = e1000_82543; + break; + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + mac->type = e1000_82544; + break; + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + mac->type = e1000_82540; + break; + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + mac->type = e1000_82545; + break; + case E1000_DEV_ID_82545GM_COPPER: + case E1000_DEV_ID_82545GM_FIBER: + case E1000_DEV_ID_82545GM_SERDES: + mac->type = e1000_82545_rev_3; + break; + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + mac->type = e1000_82546; + break; + case E1000_DEV_ID_82546GB_COPPER: + case E1000_DEV_ID_82546GB_FIBER: + case E1000_DEV_ID_82546GB_SERDES: + case E1000_DEV_ID_82546GB_PCIE: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + mac->type = e1000_82546_rev_3; + break; + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER_LOM: + mac->type = e1000_82541; + break; + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + mac->type = e1000_82541_rev_2; + break; + case E1000_DEV_ID_82547EI: + case E1000_DEV_ID_82547EI_MOBILE: + mac->type = e1000_82547; + break; + case E1000_DEV_ID_82547GI: + mac->type = e1000_82547_rev_2; + break; + case E1000_DEV_ID_82571EB_COPPER: + case E1000_DEV_ID_82571EB_FIBER: + case E1000_DEV_ID_82571EB_SERDES: + case E1000_DEV_ID_82571EB_SERDES_DUAL: + case E1000_DEV_ID_82571EB_SERDES_QUAD: + case E1000_DEV_ID_82571EB_QUAD_COPPER: + case E1000_DEV_ID_82571PT_QUAD_COPPER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: + mac->type = e1000_82571; + break; + case E1000_DEV_ID_82572EI: + case E1000_DEV_ID_82572EI_COPPER: + case E1000_DEV_ID_82572EI_FIBER: + case E1000_DEV_ID_82572EI_SERDES: + mac->type = e1000_82572; + break; + case E1000_DEV_ID_82573E: + case E1000_DEV_ID_82573E_IAMT: + case E1000_DEV_ID_82573L: + mac->type = e1000_82573; + break; + case E1000_DEV_ID_82574L: + case E1000_DEV_ID_82574LA: + mac->type = e1000_82574; + break; + case E1000_DEV_ID_82583V: + mac->type = e1000_82583; + break; + case E1000_DEV_ID_80003ES2LAN_COPPER_DPT: + case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: + case E1000_DEV_ID_80003ES2LAN_COPPER_SPT: + case E1000_DEV_ID_80003ES2LAN_SERDES_SPT: + mac->type = e1000_80003es2lan; + break; + case E1000_DEV_ID_ICH8_IFE: + case E1000_DEV_ID_ICH8_IFE_GT: + case E1000_DEV_ID_ICH8_IFE_G: + case E1000_DEV_ID_ICH8_IGP_M: + case E1000_DEV_ID_ICH8_IGP_M_AMT: + case E1000_DEV_ID_ICH8_IGP_AMT: + case E1000_DEV_ID_ICH8_IGP_C: + case E1000_DEV_ID_ICH8_82567V_3: + mac->type = e1000_ich8lan; + break; + case E1000_DEV_ID_ICH9_IFE: + case E1000_DEV_ID_ICH9_IFE_GT: + case E1000_DEV_ID_ICH9_IFE_G: + case E1000_DEV_ID_ICH9_IGP_M: + case E1000_DEV_ID_ICH9_IGP_M_AMT: + case E1000_DEV_ID_ICH9_IGP_M_V: + case E1000_DEV_ID_ICH9_IGP_AMT: + case E1000_DEV_ID_ICH9_BM: + case E1000_DEV_ID_ICH9_IGP_C: + case 
E1000_DEV_ID_ICH10_R_BM_LM: + case E1000_DEV_ID_ICH10_R_BM_LF: + case E1000_DEV_ID_ICH10_R_BM_V: + mac->type = e1000_ich9lan; + break; + case E1000_DEV_ID_ICH10_D_BM_LM: + case E1000_DEV_ID_ICH10_D_BM_LF: + case E1000_DEV_ID_ICH10_D_BM_V: + mac->type = e1000_ich10lan; + break; + case E1000_DEV_ID_PCH_D_HV_DM: + case E1000_DEV_ID_PCH_D_HV_DC: + case E1000_DEV_ID_PCH_M_HV_LM: + case E1000_DEV_ID_PCH_M_HV_LC: + mac->type = e1000_pchlan; + break; + case E1000_DEV_ID_PCH2_LV_LM: + case E1000_DEV_ID_PCH2_LV_V: + mac->type = e1000_pch2lan; + break; + case E1000_DEV_ID_PCH_LPT_I217_LM: + case E1000_DEV_ID_PCH_LPT_I217_V: + case E1000_DEV_ID_PCH_LPTLP_I218_LM: + case E1000_DEV_ID_PCH_LPTLP_I218_V: + case E1000_DEV_ID_PCH_I218_LM2: + case E1000_DEV_ID_PCH_I218_V2: + case E1000_DEV_ID_PCH_I218_LM3: + case E1000_DEV_ID_PCH_I218_V3: + mac->type = e1000_pch_lpt; + break; + case E1000_DEV_ID_82575EB_COPPER: + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82575GB_QUAD_COPPER: + mac->type = e1000_82575; + break; + case E1000_DEV_ID_82576: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: + case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: + case E1000_DEV_ID_82576_NS: + case E1000_DEV_ID_82576_NS_SERDES: + case E1000_DEV_ID_82576_SERDES_QUAD: + mac->type = e1000_82576; + break; + case E1000_DEV_ID_82580_COPPER: + case E1000_DEV_ID_82580_FIBER: + case E1000_DEV_ID_82580_SERDES: + case E1000_DEV_ID_82580_SGMII: + case E1000_DEV_ID_82580_COPPER_DUAL: + case E1000_DEV_ID_82580_QUAD_FIBER: + case E1000_DEV_ID_DH89XXCC_SGMII: + case E1000_DEV_ID_DH89XXCC_SERDES: + case E1000_DEV_ID_DH89XXCC_BACKPLANE: + case E1000_DEV_ID_DH89XXCC_SFP: + mac->type = e1000_82580; + break; + case E1000_DEV_ID_I350_COPPER: + case E1000_DEV_ID_I350_FIBER: + case E1000_DEV_ID_I350_SERDES: + case E1000_DEV_ID_I350_SGMII: + case E1000_DEV_ID_I350_DA4: + mac->type = e1000_i350; + break; + case E1000_DEV_ID_I210_COPPER_FLASHLESS: + case E1000_DEV_ID_I210_SERDES_FLASHLESS: + case E1000_DEV_ID_I210_COPPER: + case E1000_DEV_ID_I210_COPPER_OEM1: + case E1000_DEV_ID_I210_COPPER_IT: + case E1000_DEV_ID_I210_FIBER: + case E1000_DEV_ID_I210_SERDES: + case E1000_DEV_ID_I210_SGMII: + mac->type = e1000_i210; + break; + case E1000_DEV_ID_I211_COPPER: + mac->type = e1000_i211; + break; + case E1000_DEV_ID_82576_VF: + case E1000_DEV_ID_82576_VF_HV: + mac->type = e1000_vfadapt; + break; + case E1000_DEV_ID_I350_VF: + case E1000_DEV_ID_I350_VF_HV: + mac->type = e1000_vfadapt_i350; + break; + + case E1000_DEV_ID_I354_BACKPLANE_1GBPS: + case E1000_DEV_ID_I354_SGMII: + case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS: + mac->type = e1000_i354; + break; + default: + /* Should never have loaded on this device */ + ret_val = -E1000_ERR_MAC_INIT; + break; + } + + return ret_val; +} + +/** + * e1000_setup_init_funcs - Initializes function pointers + * @hw: pointer to the HW structure + * @init_device: true will initialize the rest of the function pointers + * getting the device ready for use. false will only set + * MAC type and the function pointers for the other init + * functions. Passing false will not generate any hardware + * reads or writes. + * + * This function must be called by a driver in order to use the rest + * of the 'shared' code files. Called by drivers only. + **/ +s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device) +{ + s32 ret_val; + + /* Can't do much good without knowing the MAC type. 
*/ + ret_val = e1000_set_mac_type(hw); + if (ret_val) { + DEBUGOUT("ERROR: MAC type could not be set properly.\n"); + goto out; + } + + if (!hw->hw_addr) { + DEBUGOUT("ERROR: Registers not mapped\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* + * Init function pointers to generic implementations. We do this first + * allowing a driver module to override it afterward. + */ + e1000_init_mac_ops_generic(hw); + e1000_init_phy_ops_generic(hw); + e1000_init_nvm_ops_generic(hw); + e1000_init_mbx_ops_generic(hw); + + /* + * Set up the init function pointers. These are functions within the + * adapter family file that sets up function pointers for the rest of + * the functions in that family. + */ + switch (hw->mac.type) { + case e1000_82542: + e1000_init_function_pointers_82542(hw); + break; + case e1000_82543: + case e1000_82544: + e1000_init_function_pointers_82543(hw); + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + e1000_init_function_pointers_82540(hw); + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + e1000_init_function_pointers_82541(hw); + break; + case e1000_82571: + case e1000_82572: + case e1000_82573: + case e1000_82574: + case e1000_82583: + e1000_init_function_pointers_82571(hw); + break; + case e1000_80003es2lan: + e1000_init_function_pointers_80003es2lan(hw); + break; + case e1000_ich8lan: + case e1000_ich9lan: + case e1000_ich10lan: + case e1000_pchlan: + case e1000_pch2lan: + case e1000_pch_lpt: + e1000_init_function_pointers_ich8lan(hw); + break; + case e1000_82575: + case e1000_82576: + case e1000_82580: + case e1000_i350: + case e1000_i354: + e1000_init_function_pointers_82575(hw); + break; + case e1000_i210: + case e1000_i211: + e1000_init_function_pointers_i210(hw); + break; + case e1000_vfadapt: + e1000_init_function_pointers_vf(hw); + break; + case e1000_vfadapt_i350: + e1000_init_function_pointers_vf(hw); + break; + default: + DEBUGOUT("Hardware not supported\n"); + ret_val = -E1000_ERR_CONFIG; + break; + } + + /* + * Initialize the rest of the function pointers. These require some + * register reads/writes in some cases. + */ + if (!(ret_val) && init_device) { + ret_val = e1000_init_mac_params(hw); + if (ret_val) + goto out; + + ret_val = e1000_init_nvm_params(hw); + if (ret_val) + goto out; + + ret_val = e1000_init_phy_params(hw); + if (ret_val) + goto out; + + ret_val = e1000_init_mbx_params(hw); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_get_bus_info - Obtain bus information for adapter + * @hw: pointer to the HW structure + * + * This will obtain information about the HW bus for which the + * adapter is attached and stores it in the hw structure. This is a + * function pointer entry point called by drivers. + **/ +s32 e1000_get_bus_info(struct e1000_hw *hw) +{ + if (hw->mac.ops.get_bus_info) + return hw->mac.ops.get_bus_info(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_clear_vfta - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * This clears the VLAN filter table on the adapter. This is a function + * pointer entry point called by drivers. + **/ +void e1000_clear_vfta(struct e1000_hw *hw) +{ + if (hw->mac.ops.clear_vfta) + hw->mac.ops.clear_vfta(hw); +} + +/** + * e1000_write_vfta - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: the 32-bit offset in which to write the value to. + * @value: the 32-bit value to write at location offset. 
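+ *
+ * Hedged example of admitting one VLAN ID ("vlan_id" is an assumed
+ * 12-bit value; a real driver reads the table first so other VLANs
+ * hashed to the same register are preserved):
+ *
+ *	u32 index = (vlan_id >> 5) & 0x7F;	/* 128 regs x 32 bits */
+ *	u32 bit = vlan_id & 0x1F;
+ *	e1000_write_vfta(hw, index, 1U << bit);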
+ * + * This writes a 32-bit value to a 32-bit offset in the VLAN filter + * table. This is a function pointer entry point called by drivers. + **/ +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + if (hw->mac.ops.write_vfta) + hw->mac.ops.write_vfta(hw, offset, value); +} + +/** + * e1000_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates the Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count) +{ + if (hw->mac.ops.update_mc_addr_list) + hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, + mc_addr_count); +} + +/** + * e1000_force_mac_fc - Force MAC flow control + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Currently no func pointer exists + * and all implementations are handled in the generic version of this + * function. + **/ +s32 e1000_force_mac_fc(struct e1000_hw *hw) +{ + return e1000_force_mac_fc_generic(hw); +} + +/** + * e1000_check_for_link - Check/Store link connection + * @hw: pointer to the HW structure + * + * This checks the link condition of the adapter and stores the + * results in the hw->mac structure. This is a function pointer entry + * point called by drivers. + **/ +s32 e1000_check_for_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.check_for_link) + return hw->mac.ops.check_for_link(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_check_mng_mode - Check management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has manageability enabled. + * This is a function pointer entry point called by drivers. + **/ +bool e1000_check_mng_mode(struct e1000_hw *hw) +{ + if (hw->mac.ops.check_mng_mode) + return hw->mac.ops.check_mng_mode(hw); + + return false; +} + +/** + * e1000_mng_write_dhcp_info - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. + **/ +s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) +{ + return e1000_mng_write_dhcp_info_generic(hw, buffer, length); +} + +/** + * e1000_reset_hw - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a function pointer + * entry point called by drivers. + **/ +s32 e1000_reset_hw(struct e1000_hw *hw) +{ + if (hw->mac.ops.reset_hw) + return hw->mac.ops.reset_hw(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_init_hw - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. This is a function + * pointer entry point called by drivers. + **/ +s32 e1000_init_hw(struct e1000_hw *hw) +{ + if (hw->mac.ops.init_hw) + return hw->mac.ops.init_hw(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_setup_link - Configures link and flow control + * @hw: pointer to the HW structure + * + * This configures link and flow control settings for the adapter. This + * is a function pointer entry point called by drivers. While modules can + * also call this, they probably call their own version of this function. 
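+ *
+ * Hedged bring-up sketch built from these entry points ("speed" and
+ * "duplex" are assumed u16 locals; error handling trimmed, and many
+ * init_hw implementations already invoke setup_link themselves):
+ *
+ *	e1000_reset_hw(hw);
+ *	e1000_init_hw(hw);
+ *	e1000_setup_link(hw);
+ *	e1000_check_for_link(hw);	/* result lands in hw->mac */
+ *	e1000_get_speed_and_duplex(hw, &speed, &duplex);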
+ **/ +s32 e1000_setup_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.setup_link) + return hw->mac.ops.setup_link(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_get_speed_and_duplex - Returns current speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to a 16-bit value to store the speed + * @duplex: pointer to a 16-bit value to store the duplex. + * + * This returns the speed and duplex of the adapter in the two 'out' + * variables passed in. This is a function pointer entry point called + * by drivers. + **/ +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + if (hw->mac.ops.get_link_up_info) + return hw->mac.ops.get_link_up_info(hw, speed, duplex); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_setup_led - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. This is a function pointer entry + * point called by drivers. + **/ +s32 e1000_setup_led(struct e1000_hw *hw) +{ + if (hw->mac.ops.setup_led) + return hw->mac.ops.setup_led(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led - Restores SW controllable LED + * @hw: pointer to the HW structure + * + * This restores the SW controllable LED to the value saved off by + * e1000_setup_led. This is a function pointer entry point called by drivers. + **/ +s32 e1000_cleanup_led(struct e1000_hw *hw) +{ + if (hw->mac.ops.cleanup_led) + return hw->mac.ops.cleanup_led(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_blink_led - Blink SW controllable LED + * @hw: pointer to the HW structure + * + * This starts the adapter LED blinking. Request the LED to be setup first + * and cleaned up after. This is a function pointer entry point called by + * drivers. + **/ +s32 e1000_blink_led(struct e1000_hw *hw) +{ + if (hw->mac.ops.blink_led) + return hw->mac.ops.blink_led(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_id_led_init - store LED configurations in SW + * @hw: pointer to the HW structure + * + * Initializes the LED config in SW. This is a function pointer entry point + * called by drivers. + **/ +s32 e1000_id_led_init(struct e1000_hw *hw) +{ + if (hw->mac.ops.id_led_init) + return hw->mac.ops.id_led_init(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_led_on - Turn on SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED on. This is a function pointer entry point + * called by drivers. + **/ +s32 e1000_led_on(struct e1000_hw *hw) +{ + if (hw->mac.ops.led_on) + return hw->mac.ops.led_on(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off - Turn off SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED off. This is a function pointer entry point + * called by drivers. + **/ +s32 e1000_led_off(struct e1000_hw *hw) +{ + if (hw->mac.ops.led_off) + return hw->mac.ops.led_off(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_reset_adaptive - Reset adaptive IFS + * @hw: pointer to the HW structure + * + * Resets the adaptive IFS. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void e1000_reset_adaptive(struct e1000_hw *hw) +{ + e1000_reset_adaptive_generic(hw); +} + +/** + * e1000_update_adaptive - Update adaptive IFS + * @hw: pointer to the HW structure + * + * Updates adapter IFS. 
Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void e1000_update_adaptive(struct e1000_hw *hw) +{ + e1000_update_adaptive_generic(hw); +} + +/** + * e1000_disable_pcie_master - Disable PCI-Express master access + * @hw: pointer to the HW structure + * + * Disables PCI-Express master access and verifies there are no pending + * requests. Currently no func pointer exists and all implementations are + * handled in the generic version of this function. + **/ +s32 e1000_disable_pcie_master(struct e1000_hw *hw) +{ + return e1000_disable_pcie_master_generic(hw); +} + +/** + * e1000_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. + **/ +void e1000_config_collision_dist(struct e1000_hw *hw) +{ + if (hw->mac.ops.config_collision_dist) + hw->mac.ops.config_collision_dist(hw); +} + +/** + * e1000_rar_set - Sets a receive address register + * @hw: pointer to the HW structure + * @addr: address to set the RAR to + * @index: the RAR to set + * + * Sets a Receive Address Register (RAR) to the specified address. + **/ +int e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + if (hw->mac.ops.rar_set) + return hw->mac.ops.rar_set(hw, addr, index); + + return E1000_SUCCESS; +} + +/** + * e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state + * @hw: pointer to the HW structure + * + * Ensures that the MDI/MDIX SW state is valid. + **/ +s32 e1000_validate_mdi_setting(struct e1000_hw *hw) +{ + if (hw->mac.ops.validate_mdi_setting) + return hw->mac.ops.validate_mdi_setting(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_hash_mc_addr - Determines address location in multicast table + * @hw: pointer to the HW structure + * @mc_addr: Multicast address to hash. + * + * This hashes an address to determine its location in the multicast + * table. Currently no func pointer exists and all implementations + * are handled in the generic version of this function. + **/ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + return e1000_hash_mc_addr_generic(hw, mc_addr); +} + +/** + * e1000_enable_tx_pkt_filtering - Enable packet filtering on TX + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw) +{ + return e1000_enable_tx_pkt_filtering_generic(hw); +} + +/** + * e1000_mng_host_if_write - Writes to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the offset given on the host if. + * It also does alignment considerations to do the writes in most efficient + * way. Also fills up the sum of the buffer in *buffer parameter. 
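+ *
+ * Loose sketch of how the manageability helpers combine, modeled on the
+ * DHCP-info path above ("hdr", "buf" and "len" are assumed locals, with
+ * hdr.command_id and hdr.command_length filled in beforehand):
+ *
+ *	if (e1000_mng_enable_host_if(hw) == E1000_SUCCESS) {
+ *		hdr.checksum = 0;
+ *		e1000_mng_host_if_write(hw, buf, len, sizeof(hdr),
+ *					&hdr.checksum);
+ *		e1000_mng_write_cmd_header(hw, &hdr);
+ *	}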
+ **/ +s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, + u16 offset, u8 *sum) +{ + return e1000_mng_host_if_write_generic(hw, buffer, length, offset, sum); +} + +/** + * e1000_mng_write_cmd_header - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after does the checksum calculation. + **/ +s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr) +{ + return e1000_mng_write_cmd_header_generic(hw, hdr); +} + +/** + * e1000_mng_enable_host_if - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operation + * and also checks whether the previous command is completed. It busy waits + * in case of previous command is not completed. + **/ +s32 e1000_mng_enable_host_if(struct e1000_hw *hw) +{ + return e1000_mng_enable_host_if_generic(hw); +} + +/** + * e1000_check_reset_block - Verifies PHY can be reset + * @hw: pointer to the HW structure + * + * Checks if the PHY is in a state that can be reset or if manageability + * has it tied up. This is a function pointer entry point called by drivers. + **/ +s32 e1000_check_reset_block(struct e1000_hw *hw) +{ + if (hw->phy.ops.check_reset_block) + return hw->phy.ops.check_reset_block(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_read_phy_reg - Reads PHY register + * @hw: pointer to the HW structure + * @offset: the register to read + * @data: the buffer to store the 16-bit read. + * + * Reads the PHY register and returns the value in data. + * This is a function pointer entry point called by drivers. + **/ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + if (hw->phy.ops.read_reg) + return hw->phy.ops.read_reg(hw, offset, data); + + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg - Writes PHY register + * @hw: pointer to the HW structure + * @offset: the register to write + * @data: the value to write. + * + * Writes the PHY register at offset with the value in data. + * This is a function pointer entry point called by drivers. + **/ +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + if (hw->phy.ops.write_reg) + return hw->phy.ops.write_reg(hw, offset, data); + + return E1000_SUCCESS; +} + +/** + * e1000_release_phy - Generic release PHY + * @hw: pointer to the HW structure + * + * Return if silicon family does not require a semaphore when accessing the + * PHY. + **/ +void e1000_release_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.release) + hw->phy.ops.release(hw); +} + +/** + * e1000_acquire_phy - Generic acquire PHY + * @hw: pointer to the HW structure + * + * Return success if silicon family does not require a semaphore when + * accessing the PHY. + **/ +s32 e1000_acquire_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.acquire) + return hw->phy.ops.acquire(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_cfg_on_link_up - Configure PHY upon link up + * @hw: pointer to the HW structure + **/ +s32 e1000_cfg_on_link_up(struct e1000_hw *hw) +{ + if (hw->phy.ops.cfg_on_link_up) + return hw->phy.ops.cfg_on_link_up(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_read_kmrn_reg - Reads register using Kumeran interface + * @hw: pointer to the HW structure + * @offset: the register to read + * @data: the location to store the 16-bit value read. 
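+ *
+ * Hedged read-modify-write sketch ("kmrn_offset" and "some_bit" are
+ * placeholders, not real register names):
+ *
+ *	u16 val;
+ *	if (e1000_read_kmrn_reg(hw, kmrn_offset, &val) == E1000_SUCCESS) {
+ *		val |= some_bit;
+ *		e1000_write_kmrn_reg(hw, kmrn_offset, val);
+ *	}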
+ * + * Reads a register out of the Kumeran interface. Currently no func pointer + * exists and all implementations are handled in the generic version of + * this function. + **/ +s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return e1000_read_kmrn_reg_generic(hw, offset, data); +} + +/** + * e1000_write_kmrn_reg - Writes register using Kumeran interface + * @hw: pointer to the HW structure + * @offset: the register to write + * @data: the value to write. + * + * Writes a register to the Kumeran interface. Currently no func pointer + * exists and all implementations are handled in the generic version of + * this function. + **/ +s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + return e1000_write_kmrn_reg_generic(hw, offset, data); +} + +/** + * e1000_get_cable_length - Retrieves cable length estimation + * @hw: pointer to the HW structure + * + * This function estimates the cable length and stores them in + * hw->phy.min_length and hw->phy.max_length. This is a function pointer + * entry point called by drivers. + **/ +s32 e1000_get_cable_length(struct e1000_hw *hw) +{ + if (hw->phy.ops.get_cable_length) + return hw->phy.ops.get_cable_length(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_info - Retrieves PHY information from registers + * @hw: pointer to the HW structure + * + * This function gets some information from various PHY registers and + * populates hw->phy values with it. This is a function pointer entry + * point called by drivers. + **/ +s32 e1000_get_phy_info(struct e1000_hw *hw) +{ + if (hw->phy.ops.get_info) + return hw->phy.ops.get_info(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_phy_hw_reset - Hard PHY reset + * @hw: pointer to the HW structure + * + * Performs a hard PHY reset. This is a function pointer entry point called + * by drivers. + **/ +s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + if (hw->phy.ops.reset) + return hw->phy.ops.reset(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_phy_commit - Soft PHY reset + * @hw: pointer to the HW structure + * + * Performs a soft PHY reset on those that apply. This is a function pointer + * entry point called by drivers. + **/ +s32 e1000_phy_commit(struct e1000_hw *hw) +{ + if (hw->phy.ops.commit) + return hw->phy.ops.commit(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_set_d0_lplu_state - Sets low power link up state for D0 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D0 + * and SmartSpeed is disabled when active is true, else clear lplu for D0 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. This is a function pointer entry point called by drivers. 
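+ *
+ * Sketch of the usual power policy (hedged; the exact call sites are
+ * driver specific):
+ *
+ *	e1000_set_d0_lplu_state(hw, false);	/* link active: keep SmartSpeed */
+ *	e1000_set_d3_lplu_state(hw, true);	/* entering D3: favor power saving */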
+ **/ +s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) +{ + if (hw->phy.ops.set_d0_lplu_state) + return hw->phy.ops.set_d0_lplu_state(hw, active); + + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. This is a function pointer entry point called by drivers. + **/ +s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + if (hw->phy.ops.set_d3_lplu_state) + return hw->phy.ops.set_d3_lplu_state(hw, active); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr - Reads MAC address + * @hw: pointer to the HW structure + * + * Reads the MAC address out of the adapter and stores it in the HW structure. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +s32 e1000_read_mac_addr(struct e1000_hw *hw) +{ + if (hw->mac.ops.read_mac_addr) + return hw->mac.ops.read_mac_addr(hw); + + return e1000_read_mac_addr_generic(hw); +} + +/** + * e1000_read_pba_string - Read device part number string + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size) +{ + return e1000_read_pba_string_generic(hw, pba_num, pba_num_size); +} + +/** + * e1000_read_pba_length - Read device part number string length + * @hw: pointer to the HW structure + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number length from the EEPROM and + * stores the value in pba_num. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size) +{ + return e1000_read_pba_length_generic(hw, pba_num_size); +} + +/** + * e1000_read_pba_num - Read device part number + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *pba_num) +{ + return e1000_read_pba_num_generic(hw, pba_num); +} + +/** + * e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum + * @hw: pointer to the HW structure + * + * Validates the NVM checksum is correct. This is a function pointer entry + * point called by drivers. 
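+ *
+ * Minimal NVM sketch (word offset 0x0000 is only an example; a real
+ * caller consults the device's NVM word map first):
+ *
+ *	u16 word;
+ *	if (e1000_validate_nvm_checksum(hw) == E1000_SUCCESS)
+ *		e1000_read_nvm(hw, 0x0000, 1, &word);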
+ **/ +s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) +{ + if (hw->nvm.ops.validate) + return hw->nvm.ops.validate(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum + * @hw: pointer to the HW structure + * + * Updates the NVM checksum. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +s32 e1000_update_nvm_checksum(struct e1000_hw *hw) +{ + if (hw->nvm.ops.update) + return hw->nvm.ops.update(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_reload_nvm - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. + **/ +void e1000_reload_nvm(struct e1000_hw *hw) +{ + if (hw->nvm.ops.reload) + hw->nvm.ops.reload(hw); +} + +/** + * e1000_read_nvm - Reads NVM (EEPROM) + * @hw: pointer to the HW structure + * @offset: the word offset to read + * @words: number of 16-bit words to read + * @data: pointer to the properly sized buffer for the data. + * + * Reads 16-bit chunks of data from the NVM (EEPROM). This is a function + * pointer entry point called by drivers. + **/ +s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + if (hw->nvm.ops.read) + return hw->nvm.ops.read(hw, offset, words, data); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_write_nvm - Writes to NVM (EEPROM) + * @hw: pointer to the HW structure + * @offset: the word offset to read + * @words: number of 16-bit words to write + * @data: pointer to the properly sized buffer for the data. + * + * Writes 16-bit chunks of data to the NVM (EEPROM). This is a function + * pointer entry point called by drivers. + **/ +s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + if (hw->nvm.ops.write) + return hw->nvm.ops.write(hw, offset, words, data); + + return E1000_SUCCESS; +} + +/** + * e1000_write_8bit_ctrl_reg - Writes 8bit Control register + * @hw: pointer to the HW structure + * @reg: 32bit register offset + * @offset: the register to write + * @data: the value to write. + * + * Writes the PHY register at offset with the value in data. + * This is a function pointer entry point called by drivers. + **/ +s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset, + u8 data) +{ + return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data); +} + +/** + * e1000_power_up_phy - Restores link in case of PHY power down + * @hw: pointer to the HW structure + * + * The phy may be powered down to save power, to turn off link when the + * driver is unloaded, or wake on lan is not enabled (among others). + **/ +void e1000_power_up_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.power_up) + hw->phy.ops.power_up(hw); + + e1000_setup_link(hw); +} + +/** + * e1000_power_down_phy - Power down PHY + * @hw: pointer to the HW structure + * + * The phy may be powered down to save power, to turn off link when the + * driver is unloaded, or wake on lan is not enabled (among others). + **/ +void e1000_power_down_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.power_down) + hw->phy.ops.power_down(hw); +} + +/** + * e1000_power_up_fiber_serdes_link - Power up serdes link + * @hw: pointer to the HW structure + * + * Power on the optics and PCS. 
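+ *
+ * Hedged pairing for fiber/serdes parts (copper parts use the
+ * e1000_power_up_phy/e1000_power_down_phy pair instead):
+ *
+ *	e1000_power_up_fiber_serdes_link(hw);	/* resume / port up */
+ *	e1000_shutdown_fiber_serdes_link(hw);	/* unload / port down */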
+ **/ +void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.power_up_serdes) + hw->mac.ops.power_up_serdes(hw); +} + +/** + * e1000_shutdown_fiber_serdes_link - Remove link during power down + * @hw: pointer to the HW structure + * + * Shutdown the optics and PCS on driver unload. + **/ +void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.shutdown_serdes) + hw->mac.ops.shutdown_serdes(hw); +} + diff --git a/drivers/net/e1000/base/e1000_api.h b/drivers/net/e1000/base/e1000_api.h new file mode 100644 index 00000000..0bc471d9 --- /dev/null +++ b/drivers/net/e1000/base/e1000_api.h @@ -0,0 +1,167 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#ifndef _E1000_API_H_ +#define _E1000_API_H_ + +#include "e1000_hw.h" + +extern void e1000_init_function_pointers_82542(struct e1000_hw *hw); +extern void e1000_init_function_pointers_82543(struct e1000_hw *hw); +extern void e1000_init_function_pointers_82540(struct e1000_hw *hw); +extern void e1000_init_function_pointers_82571(struct e1000_hw *hw); +extern void e1000_init_function_pointers_82541(struct e1000_hw *hw); +extern void e1000_init_function_pointers_80003es2lan(struct e1000_hw *hw); +extern void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw); +extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); +extern void e1000_rx_fifo_flush_82575(struct e1000_hw *hw); +extern void e1000_init_function_pointers_vf(struct e1000_hw *hw); +extern void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw); +extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw); +extern void e1000_init_function_pointers_i210(struct e1000_hw *hw); + +s32 e1000_set_obff_timer(struct e1000_hw *hw, u32 itr); +s32 e1000_set_mac_type(struct e1000_hw *hw); +s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device); +s32 e1000_init_mac_params(struct e1000_hw *hw); +s32 e1000_init_nvm_params(struct e1000_hw *hw); +s32 e1000_init_phy_params(struct e1000_hw *hw); +s32 e1000_init_mbx_params(struct e1000_hw *hw); +s32 e1000_get_bus_info(struct e1000_hw *hw); +void e1000_clear_vfta(struct e1000_hw *hw); +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); +s32 e1000_force_mac_fc(struct e1000_hw *hw); +s32 e1000_check_for_link(struct e1000_hw *hw); +s32 e1000_reset_hw(struct e1000_hw *hw); +s32 e1000_init_hw(struct e1000_hw *hw); +s32 e1000_setup_link(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex); +s32 e1000_disable_pcie_master(struct e1000_hw *hw); +void e1000_config_collision_dist(struct e1000_hw *hw); +int e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr); +void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count); +s32 e1000_setup_led(struct e1000_hw *hw); +s32 e1000_cleanup_led(struct e1000_hw *hw); +s32 e1000_check_reset_block(struct e1000_hw *hw); +s32 e1000_blink_led(struct e1000_hw *hw); +s32 e1000_led_on(struct e1000_hw *hw); +s32 e1000_led_off(struct e1000_hw *hw); +s32 e1000_id_led_init(struct e1000_hw *hw); +void e1000_reset_adaptive(struct e1000_hw *hw); +void e1000_update_adaptive(struct e1000_hw *hw); +s32 e1000_get_cable_length(struct e1000_hw *hw); +s32 e1000_validate_mdi_setting(struct e1000_hw *hw); +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset, + u8 data); +s32 e1000_get_phy_info(struct e1000_hw *hw); +void e1000_release_phy(struct e1000_hw *hw); +s32 e1000_acquire_phy(struct e1000_hw *hw); +s32 e1000_cfg_on_link_up(struct e1000_hw *hw); +s32 e1000_phy_hw_reset(struct e1000_hw *hw); +s32 e1000_phy_commit(struct e1000_hw *hw); +void e1000_power_up_phy(struct e1000_hw *hw); +void e1000_power_down_phy(struct e1000_hw *hw); +s32 e1000_read_mac_addr(struct e1000_hw *hw); +s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *part_num); +s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size); +s32 e1000_read_pba_length(struct e1000_hw *hw, u32 
*pba_num_size); +void e1000_reload_nvm(struct e1000_hw *hw); +s32 e1000_update_nvm_checksum(struct e1000_hw *hw); +s32 e1000_validate_nvm_checksum(struct e1000_hw *hw); +s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); +s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); +bool e1000_check_mng_mode(struct e1000_hw *hw); +bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); +s32 e1000_mng_enable_host_if(struct e1000_hw *hw); +s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, + u16 offset, u8 *sum); +s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr); +s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length); +u32 e1000_translate_register_82542(u32 reg); + + + +/* + * TBI_ACCEPT macro definition: + * + * This macro requires: + * a = a pointer to struct e1000_hw + * status = the 8 bit status field of the Rx descriptor with EOP set + * errors = the 8 bit error field of the Rx descriptor with EOP set + * length = the sum of all the length fields of the Rx descriptors that + * make up the current frame + * last_byte = the last byte of the frame DMAed by the hardware + * min_frame_size = the minimum frame length we want to accept. + * max_frame_size = the maximum frame length we want to accept. + * + * This macro is a conditional that should be used in the interrupt + * handler's Rx processing routine when RxErrors have been detected. + * + * Typical use: + * ... + * if (TBI_ACCEPT) { + * accept_frame = true; + * e1000_tbi_adjust_stats(adapter, MacAddress); + * frame_length--; + * } else { + * accept_frame = false; + * } + * ... + */ + +/* The carrier extension symbol, as received by the NIC. */ +#define CARRIER_EXTENSION 0x0F + +#define TBI_ACCEPT(a, status, errors, length, last_byte, \ + min_frame_size, max_frame_size) \ + (e1000_tbi_sbp_enabled_82543(a) && \ + (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ + ((last_byte) == CARRIER_EXTENSION) && \ + (((status) & E1000_RXD_STAT_VP) ? \ + (((length) > ((min_frame_size) - VLAN_TAG_SIZE)) && \ + ((length) <= ((max_frame_size) + 1))) : \ + (((length) > (min_frame_size)) && \ + ((length) <= ((max_frame_size) + VLAN_TAG_SIZE + 1))))) + +#define E1000_MAX(a, b) ((a) > (b) ? (a) : (b)) +#define E1000_DIVIDE_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) /* ceil(a/b) */ +#endif /* _E1000_API_H_ */ diff --git a/drivers/net/e1000/base/e1000_defines.h b/drivers/net/e1000/base/e1000_defines.h new file mode 100644 index 00000000..69aa1f23 --- /dev/null +++ b/drivers/net/e1000/base/e1000_defines.h @@ -0,0 +1,1504 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC E1000_WUFC_LNKC +#define E1000_WUS_MAG E1000_WUFC_MAG +#define E1000_WUS_EX E1000_WUFC_EX +#define E1000_WUS_MC E1000_WUFC_MC +#define E1000_WUS_BC E1000_WUFC_BC + +/* Extended Device Control */ +#define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */ +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* SW Definable Pin 4 data */ +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* SW Definable Pin 6 data */ +#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* SW Definable Pin 3 data */ +/* SDP 4/5 (bits 8,9) are reserved in >= 82575 */ +#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ +#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ +#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */ +#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +/* Physical Func Reset Done Indication */ +#define E1000_CTRL_EXT_PFRSTD 0x00004000 +#define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low 
Power Enable */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clk Gating */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +/* Offset of the link mode field in Ctrl Ext register */ +#define E1000_CTRL_EXT_LINK_MODE_OFFSET 22 +#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_IRCA 0x00000001 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */ +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_CTRL_EXT_LSECCK 0x00001000 +#define E1000_CTRL_EXT_PHYPDEN 0x00100000 +#define E1000_I2CCMD_REG_ADDR_SHIFT 16 +#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 +#define E1000_I2CCMD_OPCODE_READ 0x08000000 +#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 +#define E1000_I2CCMD_READY 0x20000000 +#define E1000_I2CCMD_ERROR 0x80000000 +#define E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a)) +#define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a)) +#define E1000_MAX_SGMII_PHY_REG_ADDR 255 +#define E1000_I2CCMD_PHY_TIMEOUT 200 +#define E1000_IVAR_VALID 0x80 +#define E1000_GPIE_NSICR 0x00000001 +#define E1000_GPIE_MSIX_MODE 0x00000010 +#define E1000_GPIE_EIAME 0x40000000 +#define E1000_GPIE_PBA 0x80000000 + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ + +#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */ +#define E1000_RXDEXT_STATERR_LB 0x00040000 +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ + E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + 
E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +#if !defined(EXTERNAL_RELEASE) || defined(E1000E_MQ) +#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001 +#endif /* !EXTERNAL_RELEASE || E1000E_MQ */ +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +/* Enable MAC address filtering */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 +/* Enable MNG packets to host memory */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 + +#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */ +#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */ +#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */ +#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */ + +/* Receive Control */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promisc enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promisc enable */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */ +#define E1000_RCTL_RDMTS_HEX 0x00010000 +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> 
E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SWFW_SYNC Definitions */ +#define E1000_SWFW_EEP_SM 0x01 +#define E1000_SWFW_PHY0_SM 0x02 +#define E1000_SWFW_PHY1_SM 0x04 +#define E1000_SWFW_CSR_SM 0x08 +#define E1000_SWFW_PHY2_SM 0x20 +#define E1000_SWFW_PHY3_SM 0x40 +#define E1000_SWFW_SW_MNG_SM 0x400 + +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */ +#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */ +#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ +#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ +#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ + +#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2 +#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2 +#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3 +#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3 + +#define E1000_CONNSW_ENRGSRC 0x4 +#define E1000_CONNSW_PHYSD 0x400 +#define E1000_CONNSW_PHY_PDN 0x800 +#define E1000_CONNSW_SERDESD 0x200 +#define E1000_CONNSW_AUTOSENSE_CONF 0x2 +#define E1000_CONNSW_AUTOSENSE_EN 0x1 +#define E1000_PCS_CFG_PCS_EN 8 +#define 
E1000_PCS_LCTL_FLV_LINK_UP 1 +#define E1000_PCS_LCTL_FSV_10 0 +#define E1000_PCS_LCTL_FSV_100 2 +#define E1000_PCS_LCTL_FSV_1000 4 +#define E1000_PCS_LCTL_FDV_FULL 8 +#define E1000_PCS_LCTL_FSD 0x10 +#define E1000_PCS_LCTL_FORCE_LINK 0x20 +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 +#define E1000_PCS_LCTL_AN_ENABLE 0x10000 +#define E1000_PCS_LCTL_AN_RESTART 0x20000 +#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 +#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 + +#define E1000_PCS_LSTS_LINK_OK 1 +#define E1000_PCS_LSTS_SPEED_100 2 +#define E1000_PCS_LSTS_SPEED_1000 4 +#define E1000_PCS_LSTS_DUPLEX_FULL 8 +#define E1000_PCS_LSTS_SYNK_OK 0x10 +#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Duplex 0=half 1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Compltn by NVM */ +#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */ +#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ +#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ +#define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */ +#define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */ +#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ +#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ + +/* Constants used to interpret the masked PCI-X bus speed. */ +#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus spd 50-66MHz */ +#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus spd 66-100MHz */ +#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus spd 100-133MHz*/ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +#define PHY_FORCE_TIME 20 + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + +/* 1000/H is not supported, nor spec-compliant. 
*/ +#define E1000_ALL_SPEED_DUPLEX ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL | ADVERTISE_1000_FULL) +#define E1000_ALL_NOT_GIG ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL) +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX + +/* LED Control */ +#define E1000_PHY_LED0_MODE_MASK 0x00000007 +#define E1000_PHY_LED0_IVRT 0x00000008 +#define E1000_PHY_LED0_MASK 0x0000001F + +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 + +#define E1000_LEDCTL_MODE_LINK_UP 0x2 +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ +#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ + +/* Transmit Control */ +#define E1000_TCTL_EN 0x00000002 /* enable Tx */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ + +/* Transmit Arbitration Count */ +#define E1000_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */ + +/* SerDes Control */ +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 +#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410 + +/* Receive Checksum Control */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Header split receive */ +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 
0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 +#define E1000_RFCTL_LEF 0x00040000 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLD_SHIFT 12 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82542_TIPG_IPGT 10 +#define DEFAULT_82543_TIPG_IPGT_FIBER 9 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF + +#define DEFAULT_82542_TIPG_IPGR1 2 +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82542_TIPG_IPGR2 10 +#define DEFAULT_82543_TIPG_IPGR2 6 +#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 +#define E1000_TIPG_IPGR2_SHIFT 20 + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ + +#define ETHERNET_FCS_SIZE 4 +#define MAX_JUMBO_FRAME_SIZE 0x3F00 +#define E1000_TX_PTR_GAP 0x1F + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 +#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 + +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +/* Low Power IDLE Control */ +#define E1000_LPIC_LPIET_SHIFT 24 /* Low Power Idle Entry Time */ + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB */ +#define E1000_PBA_10K 0x000A /* 10KB */ +#define E1000_PBA_12K 0x000C /* 12KB */ +#define E1000_PBA_14K 0x000E /* 14KB */ +#define E1000_PBA_16K 0x0010 /* 16KB */ +#define E1000_PBA_18K 0x0012 +#define E1000_PBA_20K 0x0014 +#define E1000_PBA_22K 0x0016 +#define E1000_PBA_24K 0x0018 +#define E1000_PBA_26K 0x001A +#define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_35K 0x0023 +#define E1000_PBA_38K 0x0026 +#define E1000_PBA_40K 0x0028 +#define E1000_PBA_48K 0x0030 /* 48KB */ +#define E1000_PBA_64K 0x0040 /* 64KB */ + +#define E1000_PBA_RXA_MASK 0xFFFF + +#define E1000_PBS_16K E1000_PBA_16K + +/* Uncorrectable/correctable ECC Error counts and enable bits */ +#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF +#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00 +#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8 +#define E1000_PBECCSTS_ECC_ENABLE 0x00010000 + +#define IFS_MAX 80 +#define IFS_MIN 40 +#define IFS_RATIO 4 +#define IFS_STEP 10 +#define MIN_NUM_XMITS 1000 + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. 
threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* Rx overrun */ +#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ +#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ +#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */ +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define E1000_ICR_TXD_LOW 0x00008000 +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ +#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */ +#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ +/* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_INT_ASSERTED 0x80000000 +#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ +#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ +#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ +#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ +#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */ +#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */ +#define E1000_ICR_FER 0x00400000 /* Fatal Error */ + +#define E1000_ICR_THS 0x00800000 /* ICR.THS: Thermal Sensor Event*/ +#define E1000_ICR_MDDET 0x10000000 /* Malicious Driver Detect */ + +/* PBA ECC Register */ +#define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */ +#define E1000_PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */ +#define E1000_PBA_ECC_CORR_EN 0x00000001 /* Enable ECC error correction */ +#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */ +#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 on ECC error */ + +/* Extended Interrupt Cause Read */ +#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ +#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ +#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ +#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ +#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ +#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ +#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ +#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ +#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ +#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ +/* TCP Timer */ +#define E1000_TCPTIMER_KS 0x00000100 /* KickStart */ +#define E1000_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */ +#define E1000_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */ +#define E1000_TCPTIMER_LOOP 0x00000800 /* Loop */ + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. 
Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Tx desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* Rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */ +#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */ +#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ +#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ +#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */ +#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ +#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */ +#define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */ +#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */ +#define E1000_IMS_FER E1000_ICR_FER /* Fatal Error */ + +#define E1000_IMS_THS E1000_ICR_THS /* ICR.TS: Thermal Sensor Event*/ +#define E1000_IMS_MDDET E1000_ICR_MDDET /* Malicious Driver Detect */ +/* Extended Interrupt Mask Set */ +#define E1000_EIMS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ +#define E1000_EIMS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ +#define E1000_EIMS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ +#define E1000_EIMS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ +#define E1000_EIMS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ +#define E1000_EIMS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ +#define E1000_EIMS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ +#define E1000_EIMS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ +#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ +#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + +/* Interrupt Cause Set */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. 
threshold */ + +/* Extended Interrupt Cause Set */ +#define E1000_EICS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ +#define E1000_EICS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ +#define E1000_EICS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ +#define E1000_EICS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ +#define E1000_EICS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ +#define E1000_EICS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ +#define E1000_EICS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ +#define E1000_EICS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ +#define E1000_EICS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ +#define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + +#define E1000_EITR_ITR_INT_MASK 0x0000FFFF +/* E1000_EITR_CNT_IGNR is only for 82576 and newer */ +#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ +#define E1000_EITR_INTERVAL 0x00007FFC + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ +/* Enable the counting of descriptors still to be processed. */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* 802.1q VLAN Packet Size */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. 
+ */ +#define E1000_RAR_ENTRIES 15 +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define E1000_RAL_MAC_ADDR_LEN 4 +#define E1000_RAH_MAC_ADDR_LEN 2 +#define E1000_RAH_QUEUE_MASK_82575 0x000C0000 +#define E1000_RAH_POOL_1 0x00040000 + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_NVM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 +#define E1000_NOT_IMPLEMENTED 14 +#define E1000_ERR_MBX 15 +#define E1000_ERR_INVALID_ARGUMENT 16 +#define E1000_ERR_NO_SPACE 17 +#define E1000_ERR_NVM_PBA_SECTION 18 +#define E1000_ERR_I2C 19 +#define E1000_ERR_INVM_VALUE_NOT_FOUND 20 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define FIBER_LINK_UP_LIMIT 50 +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 +/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ +#define MDIO_OWNERSHIP_TIMEOUT 10 +/* Number of milliseconds for NVM auto read done after MAC reset. */ +#define AUTO_READ_DONE_TIMEOUT 10 + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ + +#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ +#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ + +#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ +#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 +#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ +#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ + +#define E1000_RXMTRL_PTP_V1_SYNC_MESSAGE 0x00000000 +#define E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE 0x00010000 + +#define E1000_RXMTRL_PTP_V2_SYNC_MESSAGE 0x00000000 +#define E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE 0x01000000 + +#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 +#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 +#define 
E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 + +#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 +#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 +#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 +#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 +#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 +#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 + +#define E1000_TIMINCA_16NS_SHIFT 24 +#define E1000_TIMINCA_INCPERIOD_SHIFT 24 +#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF + +#define E1000_TSICR_TXTS 0x00000002 +#define E1000_TSIM_TXTS 0x00000002 +/* TUPLE Filtering Configuration */ +#define E1000_TTQF_DISABLE_MASK 0xF0008000 /* TTQF Disable Mask */ +#define E1000_TTQF_QUEUE_ENABLE 0x100 /* TTQF Queue Enable Bit */ +#define E1000_TTQF_PROTOCOL_MASK 0xFF /* TTQF Protocol Mask */ +/* TTQF TCP Bit, shift with E1000_TTQF_PROTOCOL SHIFT */ +#define E1000_TTQF_PROTOCOL_TCP 0x0 +/* TTQF UDP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */ +#define E1000_TTQF_PROTOCOL_UDP 0x1 +/* TTQF SCTP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */ +#define E1000_TTQF_PROTOCOL_SCTP 0x2 +#define E1000_TTQF_PROTOCOL_SHIFT 5 /* TTQF Protocol Shift */ +#define E1000_TTQF_QUEUE_SHIFT 16 /* TTQF Queue Shfit */ +#define E1000_TTQF_RX_QUEUE_MASK 0x70000 /* TTQF Queue Mask */ +#define E1000_TTQF_MASK_ENABLE 0x10000000 /* TTQF Mask Enable Bit */ +#define E1000_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */ +#define E1000_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */ +#define E1000_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */ +#define E1000_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */ + +#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ +#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ +#define E1000_MDICNFG_PHY_MASK 0x03E00000 +#define E1000_MDICNFG_PHY_SHIFT 21 + +#define E1000_MEDIA_PORT_COPPER 1 +#define E1000_MEDIA_PORT_OTHER 2 +#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2 +#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3 +#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */ +#define E1000_M88E1112_MAC_CTRL_1 0x10 +#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */ +#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7 +#define E1000_M88E1112_PAGE_ADDR 0x16 +#define E1000_M88E1112_STATUS 0x01 + +#define E1000_THSTAT_LOW_EVENT 0x20000000 /* Low thermal threshold */ +#define E1000_THSTAT_MID_EVENT 0x00200000 /* Mid thermal threshold */ +#define E1000_THSTAT_HIGH_EVENT 0x00002000 /* High thermal threshold */ +#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ +#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Spd Throttle Event */ + +/* I350 EEE defines */ +#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */ +#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */ +#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */ +#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */ +#define E1000_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */ +/* EEE status */ +#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ +#define E1000_EEER_RX_LPI_STATUS 0x40000000 /* Rx in LPI state */ +#define E1000_EEER_TX_LPI_STATUS 
0x80000000 /* Tx in LPI state */ +#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */ +#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ +#define E1000_M88E1543_EEE_CTRL_1 0x0 +#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ +#define E1000_M88E1543_FIBER_CTRL 0x0 /* Fiber Control Register */ +#define E1000_EEE_ADV_DEV_I354 7 +#define E1000_EEE_ADV_ADDR_I354 60 +#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ +#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */ +#define E1000_PCS_STATUS_DEV_I354 3 +#define E1000_PCS_STATUS_ADDR_I354 1 +#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400 +#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800 +#define E1000_M88E1512_CFG_REG_1 0x0010 +#define E1000_M88E1512_CFG_REG_2 0x0011 +#define E1000_M88E1512_CFG_REG_3 0x0007 +#define E1000_M88E1512_MODE 0x0014 +#define E1000_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ +#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */ +#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */ +/* PCI Express Control */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 +#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 +#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define E1000_GCR_CAP_VER2 0x00040000 + +#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ + +/* mPHY address control and data registers */ +#define E1000_MPHY_ADDR_CTL 0x0024 /* Address Control Reg */ +#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000 +#define E1000_MPHY_DATA 0x0E10 /* Data Register */ + +/* AFE CSR Offset for PCS CLK */ +#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 +/* Override for near end digital loopback. */ +#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ +#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP 10T Half Dplx Capable */ +#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP 10T Full Dplx Capable */ +#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP 100TX Half Dplx Capable */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */ +#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asym Pause Direction bit */ +#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP detected Remote Fault */ +#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP rx'd link code word */ +#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Autoneg Expansion Register */ +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ +#define NWAY_ER_PAGE_RXD 0x0002 /* LP 10T Half Dplx Capable */ +#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP 10T Full Dplx Capable */ +#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP 100TX Half Dplx Capable */ +#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP 100TX Full Dplx Capable */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +/* 1=Repeater/switch device port 0=DTE device */ +#define CR_1000T_REPEATER_DTE 0x0400 +/* 1=Configure PHY as Master 0=Configure PHY as Slave */ +#define CR_1000T_MS_VALUE 0x0800 +/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */ +#define CR_1000T_MS_ENABLE 0x1000 +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle err since last rd */ +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asym pause direction bit */ +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define SR_1000T_LP_FD_CAPS 0x0800 /* 
LP is 1000T FD capable */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx Master, 0=Slave */ +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ + +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */ + +/* NVM Control */ +#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* NVM Present */ +#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ +#define E1000_EECD_BLOCKED 0x00008000 /* Bit banging access blocked flag */ +#define E1000_EECD_ABORT 0x00010000 /* NVM operation aborted flag */ +#define E1000_EECD_TIMEOUT 0x00020000 /* NVM read operation timeout flag */ +#define E1000_EECD_ERROR_CLR 0x00040000 /* NVM error status clear bit */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define E1000_EECD_ADDR_BITS 0x00000400 +#define E1000_EECD_TYPE 0x00002000 /* NVM Type (1-SPI, 0-Microwire) */ +#ifndef E1000_NVM_GRANT_ATTEMPTS +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#endif +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Ena Auto FLASH update */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) +#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ +#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done */ +#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */ +#define E1000_EECD_SEC1VAL_I210 0x02000000 /* Sector One Valid */ +#define E1000_FLUDONE_ATTEMPTS 20000 +#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ +#define E1000_I210_FIFO_SEL_RX 0x00 +#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) +#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) +#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 +#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 + +#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */ +/* Secure FLASH mode requires removing MSb */ +#define E1000_I210_FW_PTR_MASK 0x7FFF +/* Firmware code revision field word offset*/ +#define E1000_I210_FW_VER_OFFSET 328 + +#define E1000_NVM_RW_REG_DATA 16 /* 
Offset to data in NVM read/write regs */ +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define E1000_FLASH_UPDATES 2000 + +/* NVM Word Offsets */ +#define NVM_COMPAT 0x0003 +#define NVM_ID_LED_SETTINGS 0x0004 +#define NVM_VERSION 0x0005 +#define NVM_SERDES_AMPLITUDE 0x0006 /* SERDES output amplitude */ +#define NVM_PHY_CLASS_WORD 0x0007 +#define E1000_I210_NVM_FW_MODULE_PTR 0x0010 +#define E1000_I350_NVM_FW_MODULE_PTR 0x0051 +#define NVM_FUTURE_INIT_WORD1 0x0019 +#define NVM_ETRACK_WORD 0x0042 +#define NVM_ETRACK_HIWORD 0x0043 +#define NVM_COMB_VER_OFF 0x0083 +#define NVM_COMB_VER_PTR 0x003d + +/* NVM version defines */ +#define NVM_MAJOR_MASK 0xF000 +#define NVM_MINOR_MASK 0x0FF0 +#define NVM_IMAGE_ID_MASK 0x000F +#define NVM_COMB_VER_MASK 0x00FF +#define NVM_MAJOR_SHIFT 12 +#define NVM_MINOR_SHIFT 4 +#define NVM_COMB_VER_SHFT 8 +#define NVM_VER_INVALID 0xFFFF +#define NVM_ETRACK_SHIFT 16 +#define NVM_ETRACK_VALID 0x8000 +#define NVM_NEW_DEC_MASK 0x0F00 +#define NVM_HEX_CONV 16 +#define NVM_HEX_TENS 10 + +/* FW version defines */ +/* Offset of "Loader patch ptr" in Firmware Header */ +#define E1000_I350_NVM_FW_LOADER_PATCH_PTR_OFFSET 0x01 +/* Patch generation hour & minutes */ +#define E1000_I350_NVM_FW_VER_WORD1_OFFSET 0x04 +/* Patch generation month & day */ +#define E1000_I350_NVM_FW_VER_WORD2_OFFSET 0x05 +/* Patch generation year */ +#define E1000_I350_NVM_FW_VER_WORD3_OFFSET 0x06 +/* Patch major & minor numbers */ +#define E1000_I350_NVM_FW_VER_WORD4_OFFSET 0x07 + +#define NVM_MAC_ADDR 0x0000 +#define NVM_SUB_DEV_ID 0x000B +#define NVM_SUB_VEN_ID 0x000C +#define NVM_DEV_ID 0x000D +#define NVM_VEN_ID 0x000E +#define NVM_INIT_CTRL_2 0x000F +#define NVM_INIT_CTRL_4 0x0013 +#define NVM_LED_1_CFG 0x001C +#define NVM_LED_0_2_CFG 0x001F + +#define NVM_COMPAT_VALID_CSUM 0x0001 +#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040 + +#define NVM_INIT_CONTROL2_REG 0x000F +#define NVM_INIT_CONTROL3_PORT_B 0x0014 +#define NVM_INIT_3GIO_3 0x001A +#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define NVM_INIT_CONTROL3_PORT_A 0x0024 +#define NVM_CFG 0x0012 +#define NVM_ALT_MAC_ADDR_PTR 0x0037 +#define NVM_CHECKSUM_REG 0x003F +#define NVM_COMPATIBILITY_REG_3 0x0003 +#define NVM_COMPATIBILITY_BIT_MASK 0x8000 + +#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ +#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ +#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ +#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ + +#define NVM_82580_LAN_FUNC_OFFSET(a) ((a) ? 
(0x40 + (0x40 * (a))) : 0) + +/* Mask bits for fields in Word 0x24 of the NVM */ +#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ +#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed extrnl */ +/* Offset of Link Mode bits for 82575/82576 */ +#define NVM_WORD24_LNK_MODE_OFFSET 8 +/* Offset of Link Mode bits for 82580 up */ +#define NVM_WORD24_82580_LNK_MODE_OFFSET 4 + + +/* Mask bits for fields in Word 0x0f of the NVM */ +#define NVM_WORD0F_PAUSE_MASK 0x3000 +#define NVM_WORD0F_PAUSE 0x1000 +#define NVM_WORD0F_ASM_DIR 0x2000 +#define NVM_WORD0F_SWPDIO_EXT_MASK 0x00F0 + +/* Mask bits for fields in Word 0x1a of the NVM */ +#define NVM_WORD1A_ASPM_MASK 0x000C + +/* Mask bits for fields in Word 0x03 of the EEPROM */ +#define NVM_COMPAT_LOM 0x0800 + +/* length of string needed to store PBA number */ +#define E1000_PBANUM_LENGTH 11 + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ +#define NVM_SUM 0xBABA + +/* PBA (printed board assembly) number words */ +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 +#define NVM_PBA_PTR_GUARD 0xFAFA +#define NVM_RESERVED_WORD 0xFFFF +#define NVM_PHY_CLASS_A 0x8000 +#define NVM_SERDES_AMPLITUDE_MASK 0x000F +#define NVM_SIZE_MASK 0x1C00 +#define NVM_SIZE_SHIFT 10 +#define NVM_WORD_SIZE_BASE_SHIFT 6 +#define NVM_SWDPIO_EXT_SHIFT 4 + +/* NVM Commands - Microwire */ +#define NVM_READ_OPCODE_MICROWIRE 0x6 /* NVM read opcode */ +#define NVM_WRITE_OPCODE_MICROWIRE 0x5 /* NVM write opcode */ +#define NVM_ERASE_OPCODE_MICROWIRE 0x7 /* NVM erase opcode */ +#define NVM_EWEN_OPCODE_MICROWIRE 0x13 /* NVM erase/write enable */ +#define NVM_EWDS_OPCODE_MICROWIRE 0x10 /* NVM erase/write disable */ + +/* NVM Commands - SPI */ +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ + +/* SPI NVM Status Register */ +#define NVM_STATUS_RDY_SPI 0x01 + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* PCI/PCI-X/PCI-EX Config space */ +#define PCIX_COMMAND_REGISTER 0xE6 +#define PCIX_STATUS_REGISTER_LO 0xE8 +#define PCIX_STATUS_REGISTER_HI 0xEA +#define PCI_HEADER_TYPE_REGISTER 0x0E +#define PCIE_LINK_STATUS 0x12 +#define PCIE_DEVICE_CONTROL2 0x28 + +#define PCIX_COMMAND_MMRBC_MASK 0x000C +#define PCIX_COMMAND_MMRBC_SHIFT 0x2 +#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 +#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 +#define PCIX_STATUS_HI_MMRBC_4K 0x3 +#define PCIX_STATUS_HI_MMRBC_2K 0x2 +#define PCIX_STATUS_LO_FUNC_MASK 0x7 +#define PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define PCIE_LINK_WIDTH_MASK 0x3F0 +#define PCIE_LINK_WIDTH_SHIFT 4 +#define PCIE_LINK_SPEED_MASK 0x0F +#define PCIE_LINK_SPEED_2500 0x01 +#define PCIE_LINK_SPEED_5000 0x02 
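The checksum rule noted in the comment above (every NVM word from offset 0 through NVM_CHECKSUM_REG must sum to NVM_SUM, i.e. 0xBABA) is easy to miss among the defines, so here is a minimal sketch of how a driver might validate it. The read_word() accessor is a hypothetical stand-in for the driver's own NVM read ops; only the NVM_CHECKSUM_REG and NVM_SUM constants come from this header.

#include <stdint.h>

/* Hypothetical accessor: returns 0 on success and stores one NVM word.
 * The shipped driver goes through its own NVM read ops instead. */
typedef int (*nvm_read_word_fn)(uint16_t offset, uint16_t *data);

/* Validate the NVM image: the 16-bit sum of words 0..NVM_CHECKSUM_REG
 * (the last word being the stored checksum itself) must equal NVM_SUM. */
static int nvm_checksum_ok(nvm_read_word_fn read_word)
{
	uint16_t sum = 0, word;
	uint16_t i;

	for (i = 0; i <= NVM_CHECKSUM_REG; i++) {
		if (read_word(i, &word) != 0)
			return 0;		/* read failure */
		sum = (uint16_t)(sum + word);	/* 16-bit wraparound sum */
	}
	return sum == NVM_SUM;			/* expect 0xBABA */
}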
+#define PCIE_DEVICE_CONTROL2_16ms 0x0005 + +#ifndef ETH_ADDR_LEN +#define ETH_ADDR_LEN 6 +#endif + +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF + +/* Bit definitions for valid PHY IDs. + * I = Integrated + * E = External + */ +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1543_E_PHY_ID 0x01410EA0 +#define M88E1512_E_PHY_ID 0x01410DD0 +#define M88E1112_E_PHY_ID 0x01410C90 +#define I347AT4_E_PHY_ID 0x01410DC0 +#define M88E1340M_E_PHY_ID 0x01410DF0 +#define GG82563_E_PHY_ID 0x01410CA0 +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 +#define BME1000_E_PHY_ID 0x01410CB0 +#define BME1000_E_PHY_ID_R2 0x01410CB1 +#define I82577_E_PHY_ID 0x01540050 +#define I82578_E_PHY_ID 0x004DD040 +#define I82579_E_PHY_ID 0x01540090 +#define I217_E_PHY_ID 0x015400A0 +#define I82580_I_PHY_ID 0x015403A0 +#define I350_I_PHY_ID 0x015403B0 +#define I210_I_PHY_ID 0x01410C00 +#define IGP04E1000_E_PHY_ID 0x02A80391 +#define M88_VENDOR 0x0141 + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Reg */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Reg */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Cntrl */ +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for pg number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* meaning depends on reg 29 */ +#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ +#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */ +/* MDI Crossover Mode bits 6:5 Manual MDI configuration */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 +/* Auto crossover enabled all speeds */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */ + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +/* 0 = <50M + * 1 = 50-80M + * 2 = 80-110M + * 3 = 110-140M + * 4 = >140M + */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 +#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ +#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +/* Number of times we will attempt 
to autonegotiate before downshifting if we + * are the slave + */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ + +/* Intel I347AT4 Registers */ +#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */ +#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ +#define I347AT4_PAGE_SELECT 0x16 + +/* I347AT4 Extended PHY Specific Control Register */ + +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 +#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 +#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 +#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 +#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 +#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 +#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 +#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 +#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 +#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 + +/* I347AT4 PHY Cable Diagnostics Control */ +#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ + +/* M88E1112 only registers */ +#define M88E1112_VCT_DSP_DISTANCE 0x001A + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 + +#define I82578_EPSCR_DOWNSHIFT_ENABLE 0x0020 +#define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK 0x001C + +/* BME1000 PHY Specific Control Register */ +#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */ + +/* Bits... + * 15-5: page + * 4-0: register offset + */ +#define GG82563_PAGE_SHIFT 5 +#define GG82563_REG(page, reg) \ + (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) +#define GG82563_MIN_ALT_REG 30 + +/* GG82563 Specific Registers */ +#define GG82563_PHY_SPEC_CTRL GG82563_REG(0, 16) /* PHY Spec Cntrl */ +#define GG82563_PHY_PAGE_SELECT GG82563_REG(0, 22) /* Page Select */ +#define GG82563_PHY_SPEC_CTRL_2 GG82563_REG(0, 26) /* PHY Spec Cntrl2 */ +#define GG82563_PHY_PAGE_SELECT_ALT GG82563_REG(0, 29) /* Alt Page Select */ + +/* MAC Specific Control Register */ +#define GG82563_PHY_MAC_SPEC_CTRL GG82563_REG(2, 21) + +#define GG82563_PHY_DSP_DISTANCE GG82563_REG(5, 26) /* DSP Distance */ + +/* Page 193 - Port Control Registers */ +/* Kumeran Mode Control */ +#define GG82563_PHY_KMRN_MODE_CTRL GG82563_REG(193, 16) +#define GG82563_PHY_PWR_MGMT_CTRL GG82563_REG(193, 20) /* Pwr Mgt Ctrl */ + +/* Page 194 - KMRN Registers */ +#define GG82563_PHY_INBAND_CTRL GG82563_REG(194, 18) /* Inband Ctrl */ + +/* MDI Control */ +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_ERROR 0x40000000 +#define E1000_MDIC_DEST 0x80000000 + +/* SerDes Control */ +#define E1000_GEN_CTL_READY 0x80000000 +#define E1000_GEN_CTL_ADDRESS_SHIFT 8 +#define E1000_GEN_POLL_TIMEOUT 640 + +/* LinkSec register fields */ +#define E1000_LSECTXCAP_SUM_MASK 0x00FF0000 +#define E1000_LSECTXCAP_SUM_SHIFT 16 +#define E1000_LSECRXCAP_SUM_MASK 0x00FF0000 +#define E1000_LSECRXCAP_SUM_SHIFT 16 + +#define E1000_LSECTXCTRL_EN_MASK 0x00000003 +#define E1000_LSECTXCTRL_DISABLE 0x0 +#define E1000_LSECTXCTRL_AUTH 0x1 +#define E1000_LSECTXCTRL_AUTH_ENCRYPT 0x2 +#define E1000_LSECTXCTRL_AISCI 0x00000020 +#define E1000_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 
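To make the MDI Control field defines above concrete, the following simplified sketch shows how a PHY register read is typically composed from them: the register offset and PHY address are shifted into place, OP_READ is set, and the caller polls for READY (checking ERROR) before taking the result from the low 16 bits. The rd32()/wr32() helpers, the mdic_reg parameter, and the poll budget are assumptions for illustration; the shipped code wraps this in its own MMIO macros and register map.

#include <stdint.h>

/* Hypothetical MMIO helpers; the real code uses its own register
 * read/write macros on the mapped BAR. */
extern uint32_t rd32(uint32_t reg);
extern void wr32(uint32_t reg, uint32_t val);

#define MDIC_POLL_LIMIT 1920	/* illustrative poll budget, not from this header */

/* Read a PHY register over MDIO using the MDIC field layout above. */
static int mdic_read(uint32_t mdic_reg, uint8_t phy_addr, uint8_t offset,
		     uint16_t *data)
{
	uint32_t mdic = ((uint32_t)offset << E1000_MDIC_REG_SHIFT) |
			((uint32_t)phy_addr << E1000_MDIC_PHY_SHIFT) |
			E1000_MDIC_OP_READ;
	int i;

	wr32(mdic_reg, mdic);
	for (i = 0; i < MDIC_POLL_LIMIT; i++) {
		mdic = rd32(mdic_reg);
		if (mdic & E1000_MDIC_READY)
			break;
	}
	if (!(mdic & E1000_MDIC_READY) || (mdic & E1000_MDIC_ERROR))
		return -E1000_ERR_PHY;	/* timed out or PHY reported error */

	*data = (uint16_t)mdic;		/* result lives in the low 16 bits */
	return E1000_SUCCESS;
}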
+#define E1000_LSECTXCTRL_RSV_MASK 0x000000D8 + +#define E1000_LSECRXCTRL_EN_MASK 0x0000000C +#define E1000_LSECRXCTRL_EN_SHIFT 2 +#define E1000_LSECRXCTRL_DISABLE 0x0 +#define E1000_LSECRXCTRL_CHECK 0x1 +#define E1000_LSECRXCTRL_STRICT 0x2 +#define E1000_LSECRXCTRL_DROP 0x3 +#define E1000_LSECRXCTRL_PLSH 0x00000040 +#define E1000_LSECRXCTRL_RP 0x00000080 +#define E1000_LSECRXCTRL_RSV_MASK 0xFFFFFF33 + +/* Tx Rate-Scheduler Config fields */ +#define E1000_RTTBCNRC_RS_ENA 0x80000000 +#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define E1000_RTTBCNRC_RF_INT_SHIFT 14 +#define E1000_RTTBCNRC_RF_INT_MASK \ + (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT) + +/* DMA Coalescing register fields */ +/* DMA Coalescing Watchdog Timer */ +#define E1000_DMACR_DMACWT_MASK 0x00003FFF +/* DMA Coalescing Rx Threshold */ +#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 +#define E1000_DMACR_DMACTHR_SHIFT 16 +/* Lx when no PCIe transactions */ +#define E1000_DMACR_DMAC_LX_MASK 0x30000000 +#define E1000_DMACR_DMAC_LX_SHIFT 28 +#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ +/* DMA Coalescing BMC-to-OS Watchdog Enable */ +#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000 + +/* DMA Coalescing Transmit Threshold */ +#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF + +#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ + +/* Rx Traffic Rate Threshold */ +#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF +/* Rx packet rate in current window */ +#define E1000_DMCRTRH_LRPRCW 0x80000000 + +/* DMA Coal Rx Traffic Current Count */ +#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF + +/* Flow ctrl Rx Threshold High val */ +#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 +#define E1000_FCRTC_RTH_COAL_SHIFT 4 +/* Lx power decision based on DMA coal */ +#define E1000_PCIEMISC_LX_DECISION 0x00000080 + +#define E1000_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */ +#define E1000_RXPBS_SIZE_I210_MASK 0x0000003F /* Rx packet buffer size */ +#define E1000_TXPB0S_SIZE_I210_MASK 0x0000003F /* Tx packet buffer 0 size */ +#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ + + +/* Proxy Filter Control */ +#define E1000_PROXYFC_D0 0x00000001 /* Enable offload in D0 */ +#define E1000_PROXYFC_EX 0x00000004 /* Directed exact proxy */ +#define E1000_PROXYFC_MC 0x00000008 /* Directed MC Proxy */ +#define E1000_PROXYFC_BC 0x00000010 /* Broadcast Proxy Enable */ +#define E1000_PROXYFC_ARP_DIRECTED 0x00000020 /* Directed ARP Proxy Ena */ +#define E1000_PROXYFC_IPV4 0x00000040 /* Directed IPv4 Enable */ +#define E1000_PROXYFC_IPV6 0x00000080 /* Directed IPv6 Enable */ +#define E1000_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */ +#define E1000_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Ena */ +/* Proxy Status */ +#define E1000_PROXYS_CLEAR 0xFFFFFFFF /* Clear */ + +/* Firmware Status */ +#define E1000_FWSTS_FWRI 0x80000000 /* FW Reset Indication */ +/* VF Control */ +#define E1000_VTCTRL_RST 0x04000000 /* Reset VF */ + +#define E1000_STATUS_LAN_ID_MASK 0x00000000C /* Mask for Lan ID field */ +/* Lan ID bit field offset in status register */ +#define E1000_STATUS_LAN_ID_OFFSET 2 +#define E1000_VFTA_ENTRIES 128 +#ifndef E1000_UNUSEDARG +#define E1000_UNUSEDARG +#endif /* E1000_UNUSEDARG */ +#ifndef ERROR_REPORT +#define ERROR_REPORT(fmt) do { } while (0) +#endif /* ERROR_REPORT */ +#endif /* _E1000_DEFINES_H_ */ diff --git a/drivers/net/e1000/base/e1000_hw.h b/drivers/net/e1000/base/e1000_hw.h new file mode 100644 index 
00000000..e4e4f764 --- /dev/null +++ b/drivers/net/e1000/base/e1000_hw.h @@ -0,0 +1,1030 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include "e1000_osdep.h" +#include "e1000_regs.h" +#include "e1000_defines.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82542 0x1000 +#define E1000_DEV_ID_82543GC_FIBER 0x1001 +#define E1000_DEV_ID_82543GC_COPPER 0x1004 +#define E1000_DEV_ID_82544EI_COPPER 0x1008 +#define E1000_DEV_ID_82544EI_FIBER 0x1009 +#define E1000_DEV_ID_82544GC_COPPER 0x100C +#define E1000_DEV_ID_82544GC_LOM 0x100D +#define E1000_DEV_ID_82540EM 0x100E +#define E1000_DEV_ID_82540EM_LOM 0x1015 +#define E1000_DEV_ID_82540EP_LOM 0x1016 +#define E1000_DEV_ID_82540EP 0x1017 +#define E1000_DEV_ID_82540EP_LP 0x101E +#define E1000_DEV_ID_82545EM_COPPER 0x100F +#define E1000_DEV_ID_82545EM_FIBER 0x1011 +#define E1000_DEV_ID_82545GM_COPPER 0x1026 +#define E1000_DEV_ID_82545GM_FIBER 0x1027 +#define E1000_DEV_ID_82545GM_SERDES 0x1028 +#define E1000_DEV_ID_82546EB_COPPER 0x1010 +#define E1000_DEV_ID_82546EB_FIBER 0x1012 +#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D +#define E1000_DEV_ID_82546GB_COPPER 0x1079 +#define E1000_DEV_ID_82546GB_FIBER 0x107A +#define E1000_DEV_ID_82546GB_SERDES 0x107B +#define E1000_DEV_ID_82546GB_PCIE 0x108A +#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 +#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 +#define E1000_DEV_ID_82541EI 0x1013 +#define E1000_DEV_ID_82541EI_MOBILE 0x1018 +#define E1000_DEV_ID_82541ER_LOM 0x1014 +#define E1000_DEV_ID_82541ER 0x1078 +#define E1000_DEV_ID_82541GI 0x1076 +#define E1000_DEV_ID_82541GI_LF 0x107C +#define E1000_DEV_ID_82541GI_MOBILE 0x1077 +#define E1000_DEV_ID_82547EI 0x1019 +#define E1000_DEV_ID_82547EI_MOBILE 0x101A +#define E1000_DEV_ID_82547GI 0x1075 +#define E1000_DEV_ID_82571EB_COPPER 0x105E +#define E1000_DEV_ID_82571EB_FIBER 0x105F +#define 
E1000_DEV_ID_82571EB_SERDES 0x1060 +#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9 +#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA +#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 +#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5 +#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 +#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP 0x10BC +#define E1000_DEV_ID_82572EI_COPPER 0x107D +#define E1000_DEV_ID_82572EI_FIBER 0x107E +#define E1000_DEV_ID_82572EI_SERDES 0x107F +#define E1000_DEV_ID_82572EI 0x10B9 +#define E1000_DEV_ID_82573E 0x108B +#define E1000_DEV_ID_82573E_IAMT 0x108C +#define E1000_DEV_ID_82573L 0x109A +#define E1000_DEV_ID_82574L 0x10D3 +#define E1000_DEV_ID_82574LA 0x10F6 +#define E1000_DEV_ID_82583V 0x150C +#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 +#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 +#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA +#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB +#define E1000_DEV_ID_ICH8_82567V_3 0x1501 +#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 +#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A +#define E1000_DEV_ID_ICH8_IGP_C 0x104B +#define E1000_DEV_ID_ICH8_IFE 0x104C +#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4 +#define E1000_DEV_ID_ICH8_IFE_G 0x10C5 +#define E1000_DEV_ID_ICH8_IGP_M 0x104D +#define E1000_DEV_ID_ICH9_IGP_M 0x10BF +#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5 +#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB +#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD +#define E1000_DEV_ID_ICH9_BM 0x10E5 +#define E1000_DEV_ID_ICH9_IGP_C 0x294C +#define E1000_DEV_ID_ICH9_IFE 0x10C0 +#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3 +#define E1000_DEV_ID_ICH9_IFE_G 0x10C2 +#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC +#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD +#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE +#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE +#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF +#define E1000_DEV_ID_ICH10_D_BM_V 0x1525 +#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA +#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB +#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF +#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0 +#define E1000_DEV_ID_PCH2_LV_LM 0x1502 +#define E1000_DEV_ID_PCH2_LV_V 0x1503 +#define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A +#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B +#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A +#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559 +#define E1000_DEV_ID_PCH_I218_LM2 0x15A0 +#define E1000_DEV_ID_PCH_I218_V2 0x15A1 +#define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */ +#define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */ +#define E1000_DEV_ID_82576 0x10C9 +#define E1000_DEV_ID_82576_FIBER 0x10E6 +#define E1000_DEV_ID_82576_SERDES 0x10E7 +#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 +#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 +#define E1000_DEV_ID_82576_NS 0x150A +#define E1000_DEV_ID_82576_NS_SERDES 0x1518 +#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D +#define E1000_DEV_ID_82576_VF 0x10CA +#define E1000_DEV_ID_82576_VF_HV 0x152D +#define E1000_DEV_ID_I350_VF 0x1520 +#define E1000_DEV_ID_I350_VF_HV 0x152F +#define E1000_DEV_ID_82575EB_COPPER 0x10A7 +#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 +#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 +#define E1000_DEV_ID_82580_COPPER 0x150E +#define E1000_DEV_ID_82580_FIBER 0x150F +#define E1000_DEV_ID_82580_SERDES 0x1510 +#define E1000_DEV_ID_82580_SGMII 0x1511 +#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 +#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 +#define E1000_DEV_ID_I350_COPPER 0x1521 +#define E1000_DEV_ID_I350_FIBER 0x1522 +#define E1000_DEV_ID_I350_SERDES 0x1523 +#define 
E1000_DEV_ID_I350_SGMII 0x1524 +#define E1000_DEV_ID_I350_DA4 0x1546 +#define E1000_DEV_ID_I210_COPPER 0x1533 +#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534 +#define E1000_DEV_ID_I210_COPPER_IT 0x1535 +#define E1000_DEV_ID_I210_FIBER 0x1536 +#define E1000_DEV_ID_I210_SERDES 0x1537 +#define E1000_DEV_ID_I210_SGMII 0x1538 +#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B +#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C +#define E1000_DEV_ID_I211_COPPER 0x1539 +#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40 +#define E1000_DEV_ID_I354_SGMII 0x1F41 +#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45 +#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 +#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A +#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C +#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 + +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 +#define E1000_REVISION_4 4 + +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 +#define E1000_FUNC_2 2 +#define E1000_FUNC_3 3 + +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 + +enum e1000_mac_type { + e1000_undefined = 0, + e1000_82542, + e1000_82543, + e1000_82544, + e1000_82540, + e1000_82545, + e1000_82545_rev_3, + e1000_82546, + e1000_82546_rev_3, + e1000_82541, + e1000_82541_rev_2, + e1000_82547, + e1000_82547_rev_2, + e1000_82571, + e1000_82572, + e1000_82573, + e1000_82574, + e1000_82583, + e1000_80003es2lan, + e1000_ich8lan, + e1000_ich9lan, + e1000_ich10lan, + e1000_pchlan, + e1000_pch2lan, + e1000_pch_lpt, + e1000_82575, + e1000_82576, + e1000_82580, + e1000_i350, + e1000_i354, + e1000_i210, + e1000_i211, + e1000_vfadapt, + e1000_vfadapt_i350, + e1000_num_macs /* List is 1-based, so subtract 1 for true count. 
*/ +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none, + e1000_nvm_eeprom_spi, + e1000_nvm_eeprom_microwire, + e1000_nvm_flash_hw, + e1000_nvm_invm, + e1000_nvm_flash_sw +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small, + e1000_nvm_override_spi_large, + e1000_nvm_override_microwire_small, + e1000_nvm_override_microwire_large +}; + +enum e1000_phy_type { + e1000_phy_unknown = 0, + e1000_phy_none, + e1000_phy_m88, + e1000_phy_igp, + e1000_phy_igp_2, + e1000_phy_gg82563, + e1000_phy_igp_3, + e1000_phy_ife, + e1000_phy_bm, + e1000_phy_82578, + e1000_phy_82577, + e1000_phy_82579, + e1000_phy_i217, + e1000_phy_82580, + e1000_phy_vf, + e1000_phy_i210, +}; + +enum e1000_bus_type { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_pci_express, + e1000_bus_type_reserved +}; + +enum e1000_bus_speed { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_2500, + e1000_bus_speed_5000, + e1000_bus_speed_reserved +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1, + e1000_bus_width_pcie_x2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +}; + +enum e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +}; + +enum e1000_rev_polarity { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full, + e1000_fc_default = 0xFF +}; + +enum e1000_ffe_config { + e1000_ffe_config_enabled = 0, + e1000_ffe_config_active, + e1000_ffe_config_blocked +}; + +enum e1000_dsp_config { + e1000_dsp_config_disabled = 0, + e1000_dsp_config_enabled, + e1000_dsp_config_activated, + e1000_dsp_config_undefined = 0xFF +}; + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +}; + +enum e1000_serdes_link_state { + e1000_serdes_link_down = 0, + e1000_serdes_link_autoneg_progress, + e1000_serdes_link_autoneg_complete, + e1000_serdes_link_forced_up +}; + +#define __le16 u16 +#define __le32 u32 +#define __le64 u64 +/* Receive Descriptor */ +struct e1000_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 + 
+/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) + +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + /* length of buffers 1-3 */ + __le16 length[PS_PAGE_BUFFERS]; + } upper; + __le64 reserved; + } wb; /* writeback */ +}; + +/* Transmit Descriptor */ +struct e1000_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 special; + } fields; + } upper; +}; + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + __le64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 typ_len_ext; + u8 cmd; + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + __le16 special; + } fields; + } upper; +}; + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; + u64 cbtmpc; + u64 htdpmc; + u64 cbrdpc; + u64 cbrmpc; + u64 rpthc; + u64 hgptc; + u64 htcbdpc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; + u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 b2ospc; + u64 b2ogprc; +}; + +struct e1000_vf_stats { + u64 base_gprc; + u64 base_gptc; + u64 base_gorc; + 
u64 base_gotc; + u64 base_mprc; + u64 base_gotlbc; + u64 base_gptlbc; + u64 base_gorlbc; + u64 base_gprlbc; + + u32 last_gprc; + u32 last_gptc; + u32 last_gorc; + u32 last_gotc; + u32 last_mprc; + u32 last_gotlbc; + u32 last_gptlbc; + u32 last_gorlbc; + u32 last_gprlbc; + + u64 gprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 mprc; + u64 gotlbc; + u64 gptlbc; + u64 gorlbc; + u64 gprlbc; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +/* Host Interface "Rev 1" */ +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; + u8 checksum; +}; + +#define E1000_HI_MAX_DATA_LENGTH 252 +struct e1000_host_command_info { + struct e1000_host_command_header command_header; + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; +}; + +/* Host Interface "Rev 2" */ +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; +}; + +#include "e1000_mac.h" +#include "e1000_phy.h" +#include "e1000_nvm.h" +#include "e1000_manage.h" +#include "e1000_mbx.h" + +/* Function pointers for the MAC. */ +struct e1000_mac_operations { + s32 (*init_params)(struct e1000_hw *); + s32 (*id_led_init)(struct e1000_hw *); + s32 (*blink_led)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); + s32 (*check_for_link)(struct e1000_hw *); + s32 (*cleanup_led)(struct e1000_hw *); + void (*clear_hw_cntrs)(struct e1000_hw *); + void (*clear_vfta)(struct e1000_hw *); + s32 (*get_bus_info)(struct e1000_hw *); + void (*set_lan_id)(struct e1000_hw *); + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); + s32 (*led_on)(struct e1000_hw *); + s32 (*led_off)(struct e1000_hw *); + void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + void (*shutdown_serdes)(struct e1000_hw *); + void (*power_up_serdes)(struct e1000_hw *); + s32 (*setup_link)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + s32 (*setup_led)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); + void (*config_collision_dist)(struct e1000_hw *); + int (*rar_set)(struct e1000_hw *, u8*, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*validate_mdi_setting)(struct e1000_hw *); + s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); + void (*release_swfw_sync)(struct e1000_hw *, u16); +}; + +/* When to use various PHY register access functions: + * + * Func Caller + * Function Does Does When to use + * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * X_reg L,P,A n/a for simple PHY reg accesses + * X_reg_locked P,A L for multiple accesses of different regs + * on different pages + * X_reg_page A L,P for multiple accesses of different regs + * on the same page + * + * Where X=[read|write], L=locking, P=sets page, A=register access + * + */ +struct e1000_phy_operations { + s32 (*init_params)(struct e1000_hw *); + s32 (*acquire)(struct e1000_hw *); + s32 (*cfg_on_link_up)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*commit)(struct e1000_hw *); + s32 
(*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *hw); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_info)(struct e1000_hw *); + s32 (*set_page)(struct e1000_hw *, u16); + s32 (*read_reg)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); + s32 (*reset)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); + s32 (*write_reg_locked)(struct e1000_hw *, u32, u16); + s32 (*write_reg_page)(struct e1000_hw *, u32, u16); + void (*power_up)(struct e1000_hw *); + void (*power_down)(struct e1000_hw *); + s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8); +}; + +/* Function pointers for the NVM. */ +struct e1000_nvm_operations { + s32 (*init_params)(struct e1000_hw *); + s32 (*acquire)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); + void (*reload)(struct e1000_hw *); + s32 (*update)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); + s32 (*validate)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); +}; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + u8 addr[ETH_ADDR_LEN]; + u8 perm_addr[ETH_ADDR_LEN]; + + enum e1000_mac_type type; + + u32 collision_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 tx_packet_delta; + u32 txcw; + + u16 current_ifs_val; + u16 ifs_max_val; + u16 ifs_min_val; + u16 ifs_ratio; + u16 ifs_step_size; + u16 mta_reg_count; + u16 uta_reg_count; + + /* Maximum size of the MTA register table in all supported adapters */ +#define MAX_MTA_REG 128 + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; + bool has_fwsm; + bool arc_subsystem_valid; + bool asf_firmware_present; + bool autoneg; + bool autoneg_failed; + bool get_link_status; + bool in_ifs_mode; + bool report_tx_early; + enum e1000_serdes_link_state serdes_link_state; + bool serdes_has_link; + bool tx_pkt_filtering; +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + enum e1000_phy_type type; + + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum e1000_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + enum e1000_nvm_type type; + enum e1000_nvm_override override; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_type type; + enum e1000_bus_speed speed; + enum e1000_bus_width width; + + u16 func; + u16 pci_cmd_word; +}; + +struct e1000_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + 
u16 pause_time; /* Flow control pause timer */ + u16 refresh_time; /* Flow control refresh timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum e1000_fc_mode current_mode; /* FC mode in effect */ + enum e1000_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw *hw); + s32 (*read)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write)(struct e1000_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct e1000_hw *, u16); + s32 (*check_for_ack)(struct e1000_hw *, u16); + s32 (*check_for_rst)(struct e1000_hw *, u16); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_82541 { + enum e1000_dsp_config dsp_config; + enum e1000_ffe_config ffe_config; + u16 spd_default; + bool phy_init_script; +}; + +struct e1000_dev_spec_82542 { + bool dma_fairness; +}; + +struct e1000_dev_spec_82543 { + u32 tbi_compatibility; + bool dma_fairness; + bool init_phy_disabled; +}; + +struct e1000_dev_spec_82571 { + bool laa_is_present; + u32 smb_counter; + E1000_MUTEX swflag_mutex; +}; + +struct e1000_dev_spec_80003es2lan { + bool mdic_wa_enable; +}; + +struct e1000_shadow_ram { + u16 value; + bool modified; +}; + +#define E1000_SHADOW_RAM_WORDS 2048 + +#ifdef ULP_SUPPORT +/* I218 PHY Ultra Low Power (ULP) states */ +enum e1000_ulp_state { + e1000_ulp_state_unknown, + e1000_ulp_state_off, + e1000_ulp_state_on, +}; + +#endif /* ULP_SUPPORT */ +struct e1000_dev_spec_ich8lan { + bool kmrn_lock_loss_workaround_enabled; + struct e1000_shadow_ram shadow_ram[E1000_SHADOW_RAM_WORDS]; + E1000_MUTEX nvm_mutex; + E1000_MUTEX swflag_mutex; + bool nvm_k1_enabled; + bool eee_disable; + u16 eee_lp_ability; +#ifdef ULP_SUPPORT + enum e1000_ulp_state ulp_state; +#endif /* NAHUM6LP_HW && ULP_SUPPORT */ + u16 lat_enc; + u16 max_ltr_enc; + bool smbus_disable; +}; + +struct e1000_dev_spec_82575 { + bool sgmii_active; + bool global_device_reset; + bool eee_disable; + bool module_plugged; + bool clear_semaphore_once; + u32 mtu; + struct sfp_e1000_flags eth_flags; + u8 media_port; + bool media_changed; +}; + +struct e1000_dev_spec_vf { + u32 vf_number; + u32 v2p_mailbox; +}; + +struct e1000_hw { + void *back; + + u8 *hw_addr; + u8 *flash_address; + unsigned long io_base; + + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_mbx_info mbx; + struct e1000_host_mng_dhcp_cookie mng_cookie; + + union { + struct e1000_dev_spec_82541 _82541; + struct e1000_dev_spec_82542 _82542; + struct e1000_dev_spec_82543 _82543; + struct e1000_dev_spec_82571 _82571; + struct e1000_dev_spec_80003es2lan _80003es2lan; + struct e1000_dev_spec_ich8lan ich8lan; + struct e1000_dev_spec_82575 _82575; + struct e1000_dev_spec_vf vf; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +#include "e1000_82541.h" +#include "e1000_82543.h" +#include "e1000_82571.h" +#include "e1000_80003es2lan.h" +#include "e1000_ich8lan.h" +#include "e1000_82575.h" +#include "e1000_i210.h" + +/* These functions must be implemented by drivers */ +void 
e1000_pci_clear_mwi(struct e1000_hw *hw); +void e1000_pci_set_mwi(struct e1000_hw *hw); +s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); + +#endif diff --git a/drivers/net/e1000/base/e1000_i210.c b/drivers/net/e1000/base/e1000_i210.c new file mode 100644 index 00000000..277331c4 --- /dev/null +++ b/drivers/net/e1000/base/e1000_i210.c @@ -0,0 +1,1033 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#include "e1000_api.h" + + +STATIC s32 e1000_acquire_nvm_i210(struct e1000_hw *hw); +STATIC void e1000_release_nvm_i210(struct e1000_hw *hw); +STATIC s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw); +STATIC s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +STATIC s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw); +STATIC s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data); + +/** + * e1000_acquire_nvm_i210 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +STATIC s32 e1000_acquire_nvm_i210(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_acquire_nvm_i210"); + + ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); + + return ret_val; +} + +/** + * e1000_release_nvm_i210 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. 
+ **/ +STATIC void e1000_release_nvm_i210(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_release_nvm_i210"); + + e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); +} + +/** + * e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + **/ +s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = E1000_SUCCESS; + s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ + + DEBUGFUNC("e1000_acquire_swfw_sync_i210"); + + while (i < timeout) { + if (e1000_get_hw_semaphore_i210(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* + * Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + e1000_put_hw_semaphore_generic(hw); + msec_delay_irq(5); + i++; + } + + if (i == timeout) { + DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_release_swfw_sync_i210 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + DEBUGFUNC("e1000_release_swfw_sync_i210"); + + while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS) + ; /* Empty */ + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); +} + +/** + * e1000_get_hw_semaphore_i210 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +STATIC s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + DEBUGFUNC("e1000_get_hw_semaphore_i210"); + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usec_delay(50); + i++; + } + + if (i == timeout) { + /* In rare circumstances, the SW semaphore may already be held + * unintentionally. Clear the semaphore once before giving up. + */ + if (hw->dev_spec._82575.clear_semaphore_once) { + hw->dev_spec._82575.clear_semaphore_once = false; + e1000_put_hw_semaphore_generic(hw); + for (i = 0; i < timeout; i++) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usec_delay(50); + } + } + + /* If we do not have the semaphore here, we have to give up. */ + if (i == timeout) { + DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_NVM; + } + } + + /* Get the FW semaphore. 
*/ + for (i = 0; i < timeout; i++) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + usec_delay(50); + } + + if (i == timeout) { + /* Release semaphores */ + e1000_put_hw_semaphore_generic(hw); + DEBUGOUT("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read + * @data: word read from the Shadow Ram + * + * Reads a 16 bit word from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. + **/ +s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = E1000_SUCCESS; + u16 i, count; + + DEBUGFUNC("e1000_read_nvm_srrd_i210"); + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to read in bursts than synchronizing access for each word. */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? + E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + status = e1000_read_nvm_eerd(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status != E1000_SUCCESS) + break; + } + + return status; +} + +/** + * e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow RAM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow RAM + * + * Writes data to Shadow RAM at offset using EEWR register. + * + * If e1000_update_nvm_checksum is not called after this function , the + * data will not be committed to FLASH and also Shadow RAM will most likely + * contain an invalid checksum. + * + * If error code is returned, data and Shadow RAM may be inconsistent - buffer + * partially written. + **/ +s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = E1000_SUCCESS; + u16 i, count; + + DEBUGFUNC("e1000_write_nvm_srwr_i210"); + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to write in bursts than synchronizing access for each word. */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? + E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + status = e1000_write_nvm_srwr(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status != E1000_SUCCESS) + break; + } + + return status; +} + +/** + * e1000_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow Ram + * + * Writes data to Shadow Ram at offset using EEWR register. + * + * If e1000_update_nvm_checksum is not called after this function , the + * Shadow Ram will most likely contain an invalid checksum. 
+ **/ +STATIC s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, k, eewr = 0; + u32 attempts = 100000; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_nvm_srwr"); + + /* + * A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | + (data[i] << E1000_NVM_RW_REG_DATA) | + E1000_NVM_RW_REG_START; + + E1000_WRITE_REG(hw, E1000_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (E1000_NVM_RW_REG_DONE & + E1000_READ_REG(hw, E1000_SRWR)) { + ret_val = E1000_SUCCESS; + break; + } + usec_delay(5); + } + + if (ret_val != E1000_SUCCESS) { + DEBUGOUT("Shadow RAM write EEWR timed out\n"); + break; + } + } + +out: + return ret_val; +} + +/** e1000_read_invm_word_i210 - Reads OTP + * @hw: pointer to the HW structure + * @address: the word address (aka eeprom offset) to read + * @data: pointer to the data read + * + * Reads 16-bit words from the OTP. Return error when the word is not + * stored in OTP. + **/ +STATIC s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data) +{ + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u32 invm_dword; + u16 i; + u8 record_type, word_address; + + DEBUGFUNC("e1000_read_invm_word_i210"); + + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i)); + /* Get record type */ + record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); + if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE) + break; + if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE) + i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE) + i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) { + word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); + if (word_address == address) { + *data = INVM_DWORD_TO_WORD_DATA(invm_dword); + DEBUGOUT2("Read INVM Word 0x%02x = %x", + address, *data); + status = E1000_SUCCESS; + break; + } + } + } + if (status != E1000_SUCCESS) + DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address); + return status; +} + +/** e1000_read_invm_i210 - Read invm wrapper function for I210/I211 + * @hw: pointer to the HW structure + * @address: the word address (aka eeprom offset) to read + * @data: pointer to the data read + * + * Wrapper function to return data formerly found in the NVM. 
+ **/ +STATIC s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset, + u16 E1000_UNUSEDARG words, u16 *data) +{ + s32 ret_val = E1000_SUCCESS; + UNREFERENCED_1PARAMETER(words); + + DEBUGFUNC("e1000_read_invm_i210"); + + /* Only the MAC addr is required to be present in the iNVM */ + switch (offset) { + case NVM_MAC_ADDR: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]); + ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1, + &data[1]); + ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2, + &data[2]); + if (ret_val != E1000_SUCCESS) + DEBUGOUT("MAC Addr not found in iNVM\n"); + break; + case NVM_INIT_CTRL_2: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = NVM_INIT_CTRL_2_DEFAULT_I211; + ret_val = E1000_SUCCESS; + } + break; + case NVM_INIT_CTRL_4: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = NVM_INIT_CTRL_4_DEFAULT_I211; + ret_val = E1000_SUCCESS; + } + break; + case NVM_LED_1_CFG: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = NVM_LED_1_CFG_DEFAULT_I211; + ret_val = E1000_SUCCESS; + } + break; + case NVM_LED_0_2_CFG: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = NVM_LED_0_2_CFG_DEFAULT_I211; + ret_val = E1000_SUCCESS; + } + break; + case NVM_ID_LED_SETTINGS: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = ID_LED_RESERVED_FFFF; + ret_val = E1000_SUCCESS; + } + break; + case NVM_SUB_DEV_ID: + *data = hw->subsystem_device_id; + break; + case NVM_SUB_VEN_ID: + *data = hw->subsystem_vendor_id; + break; + case NVM_DEV_ID: + *data = hw->device_id; + break; + case NVM_VEN_ID: + *data = hw->vendor_id; + break; + default: + DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset); + *data = NVM_RESERVED_WORD; + break; + } + return ret_val; +} + +/** + * e1000_read_invm_version - Reads iNVM version and image type + * @hw: pointer to the HW structure + * @invm_ver: version structure for the version read + * + * Reads iNVM version and image type. 
+ **/ +s32 e1000_read_invm_version(struct e1000_hw *hw, + struct e1000_fw_version *invm_ver) +{ + u32 *record = NULL; + u32 *next_record = NULL; + u32 i = 0; + u32 invm_dword = 0; + u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE / + E1000_INVM_RECORD_SIZE_IN_BYTES); + u32 buffer[E1000_INVM_SIZE]; + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u16 version = 0; + + DEBUGFUNC("e1000_read_invm_version"); + + /* Read iNVM memory */ + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i)); + buffer[i] = invm_dword; + } + + /* Read version number */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have first version location used */ + if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) { + version = 0; + status = E1000_SUCCESS; + break; + } + /* Check if we have second version location used */ + else if ((i == 1) && + ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + status = E1000_SUCCESS; + break; + } + /* + * Check if we have odd version location + * used and it is the last one used + */ + else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) && + ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) && + (i != 1))) { + version = (*next_record & E1000_INVM_VER_FIELD_TWO) + >> 13; + status = E1000_SUCCESS; + break; + } + /* + * Check if we have even version location + * used and it is the last one used + */ + else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) && + ((*record & 0x3) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + status = E1000_SUCCESS; + break; + } + } + + if (status == E1000_SUCCESS) { + invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK) + >> E1000_INVM_MAJOR_SHIFT; + invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK; + } + /* Read Image Type */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have image type in first location used */ + if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) { + invm_ver->invm_img_type = 0; + status = E1000_SUCCESS; + break; + } + /* Check if we have image type in first location used */ + else if ((((*record & 0x3) == 0) && + ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) || + ((((*record & 0x3) != 0) && (i != 1)))) { + invm_ver->invm_img_type = + (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23; + status = E1000_SUCCESS; + break; + } + } + return status; +} + +/** + * e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 status = E1000_SUCCESS; + s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); + + DEBUGFUNC("e1000_validate_nvm_checksum_i210"); + + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + + /* + * Replace the read function with semaphore grabbing with + * the one that skips this for a while. + * We have semaphore taken already here. + */ + read_op_ptr = hw->nvm.ops.read; + hw->nvm.ops.read = e1000_read_nvm_eerd; + + status = e1000_validate_nvm_checksum_generic(hw); + + /* Revert original read operation. 
*/ + hw->nvm.ops.read = read_op_ptr; + + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + return status; +} + + +/** + * e1000_update_nvm_checksum_i210 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. Next commit EEPROM data onto the Flash. + **/ +s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_update_nvm_checksum_i210"); + + /* + * Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data); + if (ret_val != E1000_SUCCESS) { + DEBUGOUT("EEPROM read failed\n"); + goto out; + } + + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + /* + * Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. + */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); + DEBUGOUT("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); + if (ret_val != E1000_SUCCESS) { + hw->nvm.ops.release(hw); + DEBUGOUT("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + + ret_val = e1000_update_flash_i210(hw); + } else { + ret_val = E1000_ERR_SWFW_SYNC; + } +out: + return ret_val; +} + +/** + * e1000_get_flash_presence_i210 - Check if flash device is detected. + * @hw: pointer to the HW structure + * + **/ +bool e1000_get_flash_presence_i210(struct e1000_hw *hw) +{ + u32 eec = 0; + bool ret_val = false; + + DEBUGFUNC("e1000_get_flash_presence_i210"); + + eec = E1000_READ_REG(hw, E1000_EECD); + + if (eec & E1000_EECD_FLASH_DETECTED_I210) + ret_val = true; + + return ret_val; +} + +/** + * e1000_update_flash_i210 - Commit EEPROM to the flash + * @hw: pointer to the HW structure + * + **/ +s32 e1000_update_flash_i210(struct e1000_hw *hw) +{ + s32 ret_val; + u32 flup; + + DEBUGFUNC("e1000_update_flash_i210"); + + ret_val = e1000_pool_flash_update_done_i210(hw); + if (ret_val == -E1000_ERR_NVM) { + DEBUGOUT("Flash update time out\n"); + goto out; + } + + flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210; + E1000_WRITE_REG(hw, E1000_EECD, flup); + + ret_val = e1000_pool_flash_update_done_i210(hw); + if (ret_val == E1000_SUCCESS) + DEBUGOUT("Flash update complete\n"); + else + DEBUGOUT("Flash update time out\n"); + +out: + return ret_val; +} + +/** + * e1000_pool_flash_update_done_i210 - Pool FLUDONE status. + * @hw: pointer to the HW structure + * + **/ +s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_NVM; + u32 i, reg; + + DEBUGFUNC("e1000_pool_flash_update_done_i210"); + + for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { + reg = E1000_READ_REG(hw, E1000_EECD); + if (reg & E1000_EECD_FLUDONE_I210) { + ret_val = E1000_SUCCESS; + break; + } + usec_delay(5); + } + + return ret_val; +} + +/** + * e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers + * @hw: pointer to the HW structure + * + * Initialize the i210/i211 NVM parameters and function pointers. 
+ **/ +STATIC s32 e1000_init_nvm_params_i210(struct e1000_hw *hw) +{ + s32 ret_val; + struct e1000_nvm_info *nvm = &hw->nvm; + + DEBUGFUNC("e1000_init_nvm_params_i210"); + + ret_val = e1000_init_nvm_params_82575(hw); + nvm->ops.acquire = e1000_acquire_nvm_i210; + nvm->ops.release = e1000_release_nvm_i210; + nvm->ops.valid_led_default = e1000_valid_led_default_i210; + if (e1000_get_flash_presence_i210(hw)) { + hw->nvm.type = e1000_nvm_flash_hw; + nvm->ops.read = e1000_read_nvm_srrd_i210; + nvm->ops.write = e1000_write_nvm_srwr_i210; + nvm->ops.validate = e1000_validate_nvm_checksum_i210; + nvm->ops.update = e1000_update_nvm_checksum_i210; + } else { + hw->nvm.type = e1000_nvm_invm; + nvm->ops.read = e1000_read_invm_i210; + nvm->ops.write = e1000_null_write_nvm; + nvm->ops.validate = e1000_null_ops_generic; + nvm->ops.update = e1000_null_ops_generic; + } + return ret_val; +} + +/** + * e1000_init_function_pointers_i210 - Init func ptrs. + * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + **/ +void e1000_init_function_pointers_i210(struct e1000_hw *hw) +{ + e1000_init_function_pointers_82575(hw); + hw->nvm.ops.init_params = e1000_init_nvm_params_i210; + + return; +} + +/** + * e1000_valid_led_default_i210 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +STATIC s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_valid_led_default_i210"); + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_I210_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT_I210; + break; + } + } +out: + return ret_val; +} + +/** + * __e1000_access_xmdio_reg - Read/write XMDIO register + * @hw: pointer to the HW structure + * @address: XMDIO address to program + * @dev_addr: device address to program + * @data: pointer to value to read/write from/to the XMDIO address + * @read: boolean flag to indicate read or write + **/ +STATIC s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address, + u8 dev_addr, u16 *data, bool read) +{ + s32 ret_val; + + DEBUGFUNC("__e1000_access_xmdio_reg"); + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA | + dev_addr); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data); + else + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data); + if (ret_val) + return ret_val; + + /* Recalibrate the device back to 0 */ + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0); + if (ret_val) + return ret_val; + + return ret_val; +} + +/** + * e1000_read_xmdio_reg - Read XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be read from the EMI address + **/ +s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data) +{ + 
DEBUGFUNC("e1000_read_xmdio_reg"); + + return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, true); +} + +/** + * e1000_write_xmdio_reg - Write XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be written to the XMDIO address + **/ +s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) +{ + DEBUGFUNC("e1000_read_xmdio_reg"); + + return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, false); +} + +/** + * e1000_pll_workaround_i210 + * @hw: pointer to the HW structure + * + * Works around an errata in the PLL circuit where it occasionally + * provides the wrong clock frequency after power up. + **/ +STATIC s32 e1000_pll_workaround_i210(struct e1000_hw *hw) +{ + s32 ret_val; + u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val; + u16 nvm_word, phy_word, pci_word, tmp_nvm; + int i; + + /* Get and set needed register values */ + wuc = E1000_READ_REG(hw, E1000_WUC); + mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG); + reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO; + E1000_WRITE_REG(hw, E1000_MDICNFG, reg_val); + + /* Get data from NVM, or set default */ + ret_val = e1000_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD, + &nvm_word); + if (ret_val != E1000_SUCCESS) + nvm_word = E1000_INVM_DEFAULT_AL; + tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; + for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { + /* check current state directly from internal PHY */ + e1000_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE | + E1000_PHY_PLL_FREQ_REG), &phy_word); + if ((phy_word & E1000_PHY_PLL_UNCONF) + != E1000_PHY_PLL_UNCONF) { + ret_val = E1000_SUCCESS; + break; + } else { + ret_val = -E1000_ERR_PHY; + } + /* directly reset the internal PHY */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl|E1000_CTRL_PHY_RST); + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + + E1000_WRITE_REG(hw, E1000_WUC, 0); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16); + E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val); + + e1000_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + pci_word |= E1000_PCI_PMCSR_D3; + e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + msec_delay(1); + pci_word &= ~E1000_PCI_PMCSR_D3; + e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16); + E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val); + + /* restore WUC register */ + E1000_WRITE_REG(hw, E1000_WUC, wuc); + } + /* restore MDICNFG setting */ + E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); + return ret_val; +} + +/** + * e1000_get_cfg_done_i210 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns + * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. 
+ **/ +STATIC s32 e1000_get_cfg_done_i210(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + DEBUGFUNC("e1000_get_cfg_done_i210"); + + while (timeout) { + if (E1000_READ_REG(hw, E1000_EEMNGCTL_I210) & mask) + break; + msec_delay(1); + timeout--; + } + if (!timeout) + DEBUGOUT("MNG configuration cycle has not completed.\n"); + + return E1000_SUCCESS; +} + +/** + * e1000_init_hw_i210 - Init hw for I210/I211 + * @hw: pointer to the HW structure + * + * Called to initialize hw for i210 hw family. + **/ +s32 e1000_init_hw_i210(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_init_hw_i210"); + if ((hw->mac.type >= e1000_i210) && + !(e1000_get_flash_presence_i210(hw))) { + ret_val = e1000_pll_workaround_i210(hw); + if (ret_val != E1000_SUCCESS) + return ret_val; + } + hw->phy.ops.get_cfg_done = e1000_get_cfg_done_i210; + ret_val = e1000_init_hw_82575(hw); + return ret_val; +} diff --git a/drivers/net/e1000/base/e1000_i210.h b/drivers/net/e1000/base/e1000_i210.h new file mode 100644 index 00000000..1a6f1dd4 --- /dev/null +++ b/drivers/net/e1000/base/e1000_i210.h @@ -0,0 +1,110 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#ifndef _E1000_I210_H_ +#define _E1000_I210_H_ + +bool e1000_get_flash_presence_i210(struct e1000_hw *hw); +s32 e1000_update_flash_i210(struct e1000_hw *hw); +s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw); +s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw); +s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +s32 e1000_read_invm_version(struct e1000_hw *hw, + struct e1000_fw_version *invm_ver); +s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, + u16 *data); +s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, + u16 data); +s32 e1000_init_hw_i210(struct e1000_hw *hw); + +#define E1000_STM_OPCODE 0xDB00 +#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 + +#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \ + (u8)((invm_dword) & 0x7) +#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \ + (u8)(((invm_dword) & 0x0000FE00) >> 9) +#define INVM_DWORD_TO_WORD_DATA(invm_dword) \ + (u16)(((invm_dword) & 0xFFFF0000) >> 16) + +enum E1000_INVM_STRUCTURE_TYPE { + E1000_INVM_UNINITIALIZED_STRUCTURE = 0x00, + E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 0x01, + E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 0x02, + E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 0x03, + E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 0x04, + E1000_INVM_INVALIDATED_STRUCTURE = 0x0F, +}; + +#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8 +#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1 +#define E1000_INVM_ULT_BYTES_SIZE 8 +#define E1000_INVM_RECORD_SIZE_IN_BYTES 4 +#define E1000_INVM_VER_FIELD_ONE 0x1FF8 +#define E1000_INVM_VER_FIELD_TWO 0x7FE000 +#define E1000_INVM_IMGTYPE_FIELD 0x1F800000 + +#define E1000_INVM_MAJOR_MASK 0x3F0 +#define E1000_INVM_MINOR_MASK 0xF +#define E1000_INVM_MAJOR_SHIFT 4 + +#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_OFF2)) +#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) + +/* NVM offset defaults for I211 devices */ +#define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243 +#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1 +#define NVM_LED_1_CFG_DEFAULT_I211 0x0184 +#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C + +/* PLL Defines */ +#define E1000_PCI_PMCSR 0x44 +#define E1000_PCI_PMCSR_D3 0x03 +#define E1000_MAX_PLL_TRIES 5 +#define E1000_PHY_PLL_UNCONF 0xFF +#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000 +#define E1000_PHY_PLL_FREQ_REG 0x000E +#define E1000_INVM_DEFAULT_AL 0x202F +#define E1000_INVM_AUTOLOAD 0x0A +#define E1000_INVM_PLL_WO_VAL 0x0010 + +#endif diff --git a/drivers/net/e1000/base/e1000_ich8lan.c b/drivers/net/e1000/base/e1000_ich8lan.c new file mode 100644 index 00000000..89d07e90 --- /dev/null +++ b/drivers/net/e1000/base/e1000_ich8lan.c @@ -0,0 +1,5366 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +/* 82562G 10/100 Network Connection + * 82562G-2 10/100 Network Connection + * 82562GT 10/100 Network Connection + * 82562GT-2 10/100 Network Connection + * 82562V 10/100 Network Connection + * 82562V-2 10/100 Network Connection + * 82566DC-2 Gigabit Network Connection + * 82566DC Gigabit Network Connection + * 82566DM-2 Gigabit Network Connection + * 82566DM Gigabit Network Connection + * 82566MC Gigabit Network Connection + * 82566MM Gigabit Network Connection + * 82567LM Gigabit Network Connection + * 82567LF Gigabit Network Connection + * 82567V Gigabit Network Connection + * 82567LM-2 Gigabit Network Connection + * 82567LF-2 Gigabit Network Connection + * 82567V-2 Gigabit Network Connection + * 82567LF-3 Gigabit Network Connection + * 82567LM-3 Gigabit Network Connection + * 82567LM-4 Gigabit Network Connection + * 82577LM Gigabit Network Connection + * 82577LC Gigabit Network Connection + * 82578DM Gigabit Network Connection + * 82578DC Gigabit Network Connection + * 82579LM Gigabit Network Connection + * 82579V Gigabit Network Connection + * Ethernet Connection I217-LM + * Ethernet Connection I217-V + * Ethernet Connection I218-V + * Ethernet Connection I218-LM + * Ethernet Connection (2) I218-LM + * Ethernet Connection (2) I218-V + * Ethernet Connection (3) I218-LM + * Ethernet Connection (3) I218-V + */ + +#include "e1000_api.h" + +STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state); +STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw); +STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw); +STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw); +STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); +STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); +STATIC int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index); +STATIC int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index); +STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw); +#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT +STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw, + u8 *mc_addr_list, + u32 mc_addr_count); +#endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */ +STATIC s32 e1000_check_reset_block_ich8lan(struct 
e1000_hw *hw); +STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); +STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, + bool active); +STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, + bool active); +STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, + u16 *data); +STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw); +STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw); +STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, + u16 *speed, u16 *duplex); +STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); +STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw); +STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw); +STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw); +STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw); +STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); +STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, + u32 offset, u8 *data); +STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 *data); +STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, + u32 offset, u16 *data); +STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, + u32 offset, u8 byte); +STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw); +STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw); +STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); +STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw); +STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); + +/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ +/* Offset 04h HSFSTS */ +union ich8_hws_flash_status { + struct ich8_hsfsts { + u16 flcdone:1; /* bit 0 Flash Cycle Done */ + u16 flcerr:1; /* bit 1 Flash Cycle Error */ + u16 dael:1; /* bit 2 Direct Access error Log */ + u16 berasesz:2; /* bit 4:3 Sector Erase Size */ + u16 flcinprog:1; /* bit 5 flash cycle in Progress */ + u16 reserved1:2; /* bit 13:6 Reserved */ + u16 reserved2:6; /* bit 13:6 Reserved */ + u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */ + u16 flockdn:1; /* bit 15 Flash Config Lock-Down */ + } hsf_status; + u16 regval; +}; + +/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */ +/* Offset 06h FLCTL */ +union ich8_hws_flash_ctrl { + struct ich8_hsflctl { + u16 
flcgo:1; /* 0 Flash Cycle Go */ + u16 flcycle:2; /* 2:1 Flash Cycle */ + u16 reserved:5; /* 7:3 Reserved */ + u16 fldbcount:2; /* 9:8 Flash Data Byte Count */ + u16 flockdn:6; /* 15:10 Reserved */ + } hsf_ctrl; + u16 regval; +}; + +/* ICH Flash Region Access Permissions */ +union ich8_hws_flash_regacc { + struct ich8_flracc { + u32 grra:8; /* 0:7 GbE region Read Access */ + u32 grwa:8; /* 8:15 GbE region Write Access */ + u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */ + u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */ + } hsf_flregacc; + u16 regval; +}; + +/** + * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers + * @hw: pointer to the HW structure + * + * Test access to the PHY registers by reading the PHY ID registers. If + * the PHY ID is already known (e.g. resume path) compare it with known ID, + * otherwise assume the read PHY ID is correct if it is valid. + * + * Assumes the sw/fw/hw semaphore is already acquired. + **/ +STATIC bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) +{ + u16 phy_reg = 0; + u32 phy_id = 0; + s32 ret_val = 0; + u16 retry_count; + u32 mac_reg = 0; + + for (retry_count = 0; retry_count < 2; retry_count++) { + ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg); + if (ret_val || (phy_reg == 0xFFFF)) + continue; + phy_id = (u32)(phy_reg << 16); + + ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg); + if (ret_val || (phy_reg == 0xFFFF)) { + phy_id = 0; + continue; + } + phy_id |= (u32)(phy_reg & PHY_REVISION_MASK); + break; + } + + if (hw->phy.id) { + if (hw->phy.id == phy_id) + goto out; + } else if (phy_id) { + hw->phy.id = phy_id; + hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK); + goto out; + } + + /* In case the PHY needs to be in mdio slow mode, + * set slow mode and try to get the PHY id again. + */ + if (hw->mac.type < e1000_pch_lpt) { + hw->phy.ops.release(hw); + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (!ret_val) + ret_val = e1000_get_phy_id(hw); + hw->phy.ops.acquire(hw); + } + + if (ret_val) + return false; +out: + if (hw->mac.type == e1000_pch_lpt) { + /* Only unforce SMBus if ME is not active */ + if (!(E1000_READ_REG(hw, E1000_FWSM) & + E1000_ICH_FWSM_FW_VALID)) { + /* Unforce SMBus mode in PHY */ + hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg); + phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; + hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Unforce SMBus mode in MAC */ + mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); + } + } + + return true; +} + +/** + * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value + * @hw: pointer to the HW structure + * + * Toggling the LANPHYPC pin value fully power-cycles the PHY and is + * used to reset the PHY to a quiescent state when necessary. 
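For illustration only (not part of this change): the ich8_hws_flash_status union above overlays a bitfield view on the 16-bit HSFSTS register so the driver can load the raw value into .regval and then test individual flags. A self-contained sketch of that usage follows; the type names are local stand-ins, the reserved bits 13:6 are collapsed into one field, and the layout assumption matches what the driver itself relies on.

#include <stdint.h>

union hsfsts_sketch {
	struct {
		uint16_t flcdone:1;	/* flash cycle done */
		uint16_t flcerr:1;	/* flash cycle error */
		uint16_t dael:1;	/* direct access error log */
		uint16_t berasesz:2;	/* sector erase size */
		uint16_t flcinprog:1;	/* flash cycle in progress */
		uint16_t reserved:8;	/* bits 13:6 collapsed into one field */
		uint16_t fldesvalid:1;	/* flash descriptor valid */
		uint16_t flockdn:1;	/* flash config lock-down */
	} bits;
	uint16_t regval;
};

/* Returns non-zero when the last flash cycle finished without error. */
static int flash_cycle_ok_sketch(uint16_t hsfsts_raw)
{
	union hsfsts_sketch v;

	v.regval = hsfsts_raw;	/* raw value read from the HSFSTS flash register */
	return v.bits.flcdone && !v.bits.flcerr;
}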
+ **/ +STATIC void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw) +{ + u32 mac_reg; + + DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt"); + + /* Set Phy Config Counter to 50msec */ + mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3); + mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; + mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; + E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg); + + /* Toggle LANPHYPC Value bit */ + mac_reg = E1000_READ_REG(hw, E1000_CTRL); + mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE; + mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE; + E1000_WRITE_REG(hw, E1000_CTRL, mac_reg); + E1000_WRITE_FLUSH(hw); + usec_delay(10); + mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE; + E1000_WRITE_REG(hw, E1000_CTRL, mac_reg); + E1000_WRITE_FLUSH(hw); + + if (hw->mac.type < e1000_pch_lpt) { + msec_delay(50); + } else { + u16 count = 20; + + do { + msec_delay(5); + } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) & + E1000_CTRL_EXT_LPCD) && count--); + + msec_delay(30); + } +} + +/** + * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds + * @hw: pointer to the HW structure + * + * Workarounds/flow necessary for PHY initialization during driver load + * and resume paths. + **/ +STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) +{ + u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM); + s32 ret_val; + + DEBUGFUNC("e1000_init_phy_workarounds_pchlan"); + + /* Gate automatic PHY configuration by hardware on managed and + * non-managed 82579 and newer adapters. + */ + e1000_gate_hw_phy_config_ich8lan(hw, true); + +#ifdef ULP_SUPPORT + /* It is not possible to be certain of the current state of ULP + * so forcibly disable it. + */ + hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown; + +#endif /* ULP_SUPPORT */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + DEBUGOUT("Failed to initialize PHY flow\n"); + goto out; + } + + /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is + * inaccessible and resetting the PHY is not blocked, toggle the + * LANPHYPC Value bit to force the interconnect to PCIe mode. + */ + switch (hw->mac.type) { + case e1000_pch_lpt: + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + /* Before toggling LANPHYPC, see if PHY is accessible by + * forcing MAC to SMBus mode first. + */ + mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); + + /* Wait 50 milliseconds for MAC to finish any retries + * that it might be trying to perform from previous + * attempts to acknowledge any phy read requests. 
+ */ + msec_delay(50); + + /* fall-through */ + case e1000_pch2lan: + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + /* fall-through */ + case e1000_pchlan: + if ((hw->mac.type == e1000_pchlan) && + (fwsm & E1000_ICH_FWSM_FW_VALID)) + break; + + if (hw->phy.ops.check_reset_block(hw)) { + DEBUGOUT("Required LANPHYPC toggle blocked by ME\n"); + ret_val = -E1000_ERR_PHY; + break; + } + + /* Toggle LANPHYPC Value bit */ + e1000_toggle_lanphypc_pch_lpt(hw); + if (hw->mac.type >= e1000_pch_lpt) { + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + /* Toggling LANPHYPC brings the PHY out of SMBus mode + * so ensure that the MAC is also out of SMBus mode + */ + mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); + + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + ret_val = -E1000_ERR_PHY; + } + break; + default: + break; + } + + hw->phy.ops.release(hw); + if (!ret_val) { + + /* Check to see if able to reset PHY. Print error if not */ + if (hw->phy.ops.check_reset_block(hw)) { + ERROR_REPORT("Reset blocked by ME\n"); + goto out; + } + + /* Reset the PHY before any access to it. Doing so, ensures + * that the PHY is in a known good state before we read/write + * PHY registers. The generic reset is sufficient here, + * because we haven't determined the PHY type yet. + */ + ret_val = e1000_phy_hw_reset_generic(hw); + if (ret_val) + goto out; + + /* On a successful reset, possibly need to wait for the PHY + * to quiesce to an accessible state before returning control + * to the calling function. If the PHY does not quiesce, then + * return E1000E_BLK_PHY_RESET, as this is the condition that + * the PHY is in. + */ + ret_val = hw->phy.ops.check_reset_block(hw); + if (ret_val) + ERROR_REPORT("ME blocked access to PHY after reset\n"); + } + +out: + /* Ungate automatic PHY configuration on non-managed 82579 */ + if ((hw->mac.type == e1000_pch2lan) && + !(fwsm & E1000_ICH_FWSM_FW_VALID)) { + msec_delay(10); + e1000_gate_hw_phy_config_ich8lan(hw, false); + } + + return ret_val; +} + +/** + * e1000_init_phy_params_pchlan - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific PHY parameters and function pointers. 
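For illustration only (not part of this change): e1000_init_phy_params_pchlan() below, like its ich8lan counterpart, fills a table of function pointers once so the rest of the driver can call through hw->phy.ops without knowing the PHY family. The generic shape of that dispatch is sketched here; the struct and function names are illustrative, not driver API.

#include <stdint.h>

struct phy_ops_sketch {
	int32_t (*acquire)(void *hw);
	int32_t (*read_reg)(void *hw, uint32_t offset, uint16_t *data);
	void    (*release)(void *hw);
};

/* Generic caller: it never needs to know which PHY family filled the table. */
static int32_t phy_read_one_sketch(void *hw, const struct phy_ops_sketch *ops,
				   uint32_t offset, uint16_t *data)
{
	int32_t ret = ops->acquire(hw);

	if (ret)
		return ret;
	ret = ops->read_reg(hw, offset, data);
	ops->release(hw);
	return ret;
}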
+ **/ +STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + DEBUGFUNC("e1000_init_phy_params_pchlan"); + + phy->addr = 1; + phy->reset_delay_us = 100; + + phy->ops.acquire = e1000_acquire_swflag_ich8lan; + phy->ops.check_reset_block = e1000_check_reset_block_ich8lan; + phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan; + phy->ops.set_page = e1000_set_page_igp; + phy->ops.read_reg = e1000_read_phy_reg_hv; + phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; + phy->ops.read_reg_page = e1000_read_phy_reg_page_hv; + phy->ops.release = e1000_release_swflag_ich8lan; + phy->ops.reset = e1000_phy_hw_reset_ich8lan; + phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; + phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; + phy->ops.write_reg = e1000_write_phy_reg_hv; + phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; + phy->ops.write_reg_page = e1000_write_phy_reg_page_hv; + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + + phy->id = e1000_phy_unknown; + + ret_val = e1000_init_phy_workarounds_pchlan(hw); + if (ret_val) + return ret_val; + + if (phy->id == e1000_phy_unknown) + switch (hw->mac.type) { + default: + ret_val = e1000_get_phy_id(hw); + if (ret_val) + return ret_val; + if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) + break; + /* fall-through */ + case e1000_pch2lan: + case e1000_pch_lpt: + /* In case the PHY needs to be in mdio slow mode, + * set slow mode and try to get the PHY id again. + */ + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + return ret_val; + ret_val = e1000_get_phy_id(hw); + if (ret_val) + return ret_val; + break; + } + phy->type = e1000_get_phy_type_from_id(phy->id); + + switch (phy->type) { + case e1000_phy_82577: + case e1000_phy_82579: + case e1000_phy_i217: + phy->ops.check_polarity = e1000_check_polarity_82577; + phy->ops.force_speed_duplex = + e1000_phy_force_speed_duplex_82577; + phy->ops.get_cable_length = e1000_get_cable_length_82577; + phy->ops.get_info = e1000_get_phy_info_82577; + phy->ops.commit = e1000_phy_sw_reset_generic; + break; + case e1000_phy_82578: + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + phy->ops.get_cable_length = e1000_get_cable_length_m88; + phy->ops.get_info = e1000_get_phy_info_m88; + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + return ret_val; +} + +/** + * e1000_init_phy_params_ich8lan - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific PHY parameters and function pointers. 
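For illustration only (not part of this change): both e1000_phy_is_accessible_pchlan() above and the retry loop in e1000_init_phy_params_ich8lan() below build a 32-bit PHY id from the two 16-bit MII identifier registers. The sketch shows that composition; the 0xFFF0/0x000F masks are assumptions that reproduce the effect of PHY_REVISION_MASK on a 16-bit register value.

#include <stdint.h>

/* PHY_ID1 supplies the upper 16 bits; PHY_ID2 supplies the lower bits with
 * the 4-bit revision nibble masked off. The revision is that low nibble.
 */
static uint32_t compose_phy_id_sketch(uint16_t id1, uint16_t id2)
{
	return ((uint32_t)id1 << 16) | (uint32_t)(id2 & 0xFFF0u);
}

static uint32_t phy_revision_sketch(uint16_t id2)
{
	return (uint32_t)(id2 & 0x000Fu);
}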
+ **/ +STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 i = 0; + + DEBUGFUNC("e1000_init_phy_params_ich8lan"); + + phy->addr = 1; + phy->reset_delay_us = 100; + + phy->ops.acquire = e1000_acquire_swflag_ich8lan; + phy->ops.check_reset_block = e1000_check_reset_block_ich8lan; + phy->ops.get_cable_length = e1000_get_cable_length_igp_2; + phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan; + phy->ops.read_reg = e1000_read_phy_reg_igp; + phy->ops.release = e1000_release_swflag_ich8lan; + phy->ops.reset = e1000_phy_hw_reset_ich8lan; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan; + phy->ops.write_reg = e1000_write_phy_reg_igp; + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; + + /* We may need to do this twice - once for IGP and if that fails, + * we'll set BM func pointers and try again + */ + ret_val = e1000_determine_phy_address(hw); + if (ret_val) { + phy->ops.write_reg = e1000_write_phy_reg_bm; + phy->ops.read_reg = e1000_read_phy_reg_bm; + ret_val = e1000_determine_phy_address(hw); + if (ret_val) { + DEBUGOUT("Cannot determine PHY addr. Erroring out\n"); + return ret_val; + } + } + + phy->id = 0; + while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) && + (i++ < 100)) { + msec_delay(1); + ret_val = e1000_get_phy_id(hw); + if (ret_val) + return ret_val; + } + + /* Verify phy id */ + switch (phy->id) { + case IGP03E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked; + phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked; + phy->ops.get_info = e1000_get_phy_info_igp; + phy->ops.check_polarity = e1000_check_polarity_igp; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; + break; + case IFE_E_PHY_ID: + case IFE_PLUS_E_PHY_ID: + case IFE_C_E_PHY_ID: + phy->type = e1000_phy_ife; + phy->autoneg_mask = E1000_ALL_NOT_GIG; + phy->ops.get_info = e1000_get_phy_info_ife; + phy->ops.check_polarity = e1000_check_polarity_ife; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife; + break; + case BME1000_E_PHY_ID: + phy->type = e1000_phy_bm; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->ops.read_reg = e1000_read_phy_reg_bm; + phy->ops.write_reg = e1000_write_phy_reg_bm; + phy->ops.commit = e1000_phy_sw_reset_generic; + phy->ops.get_info = e1000_get_phy_info_m88; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + break; + default: + return -E1000_ERR_PHY; + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific NVM parameters and function + * pointers. + **/ +STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 gfpreg, sector_base_addr, sector_end_addr; + u16 i; + + DEBUGFUNC("e1000_init_nvm_params_ich8lan"); + + /* Can't read flash registers if the register set isn't mapped. 
*/ + nvm->type = e1000_nvm_flash_sw; + if (!hw->flash_address) { + DEBUGOUT("ERROR: Flash registers not mapped\n"); + return -E1000_ERR_CONFIG; + } + + gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG); + + /* sector_X_addr is a "sector"-aligned address (4096 bytes) + * Add 1 to sector_end_addr since this sector is included in + * the overall size. + */ + sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; + sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; + + /* flash_base_addr is byte-aligned */ + nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; + + /* find total size of the NVM, then cut in half since the total + * size represents two separate NVM banks. + */ + nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) + << FLASH_SECTOR_ADDR_SHIFT); + nvm->flash_bank_size /= 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + + nvm->word_size = E1000_SHADOW_RAM_WORDS; + + /* Clear shadow ram */ + for (i = 0; i < nvm->word_size; i++) { + dev_spec->shadow_ram[i].modified = false; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + + E1000_MUTEX_INIT(&dev_spec->nvm_mutex); + E1000_MUTEX_INIT(&dev_spec->swflag_mutex); + + /* Function Pointers */ + nvm->ops.acquire = e1000_acquire_nvm_ich8lan; + nvm->ops.release = e1000_release_nvm_ich8lan; + nvm->ops.read = e1000_read_nvm_ich8lan; + nvm->ops.update = e1000_update_nvm_checksum_ich8lan; + nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan; + nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan; + nvm->ops.write = e1000_write_nvm_ich8lan; + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_ich8lan - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific MAC parameters and function + * pointers. 
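For illustration only (not part of this change): a worked version of the GFPREG arithmetic in e1000_init_nvm_params_ich8lan() above, assuming the usual 4 KiB flash sectors (shift of 12) and a 13-bit sector field. The register value is hypothetical; the point is how the base address and per-bank word count fall out of it.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical GFPREG: base sector 0x001, end-sector field 0x020 */
	uint32_t gfpreg = (0x020u << 16) | 0x001u;
	uint32_t base = gfpreg & 0x1FFFu;		/* sector base field */
	uint32_t end  = ((gfpreg >> 16) & 0x1FFFu) + 1;	/* end sector is inclusive */
	uint32_t flash_base_addr = base << 12;		/* byte address, 4 KiB sectors */
	uint32_t bank_bytes = ((end - base) << 12) / 2;	/* two banks share the region */
	uint32_t bank_words = bank_bytes / 2;		/* sizeof(u16) per word */

	/* Prints: flash_base=0x1000 bank=65536 bytes (32768 words) */
	printf("flash_base=0x%X bank=%u bytes (%u words)\n",
	       (unsigned)flash_base_addr, (unsigned)bank_bytes,
	       (unsigned)bank_words);
	return 0;
}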
+ **/ +STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; +#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT) + u16 pci_cfg; +#endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */ + + DEBUGFUNC("e1000_init_mac_params_ich8lan"); + + /* Set media type function pointer */ + hw->phy.media_type = e1000_media_type_copper; + + /* Set mta register count */ + mac->mta_reg_count = 32; + /* Set rar entry count */ + mac->rar_entry_count = E1000_ICH_RAR_ENTRIES; + if (mac->type == e1000_ich8lan) + mac->rar_entry_count--; + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; + /* FWSM register */ + mac->has_fwsm = true; + /* ARC subsystem not supported */ + mac->arc_subsystem_valid = false; + /* Adaptive IFS supported */ + mac->adaptive_ifs = true; + + /* Function pointers */ + + /* bus type/speed/width */ + mac->ops.get_bus_info = e1000_get_bus_info_ich8lan; + /* function id */ + mac->ops.set_lan_id = e1000_set_lan_id_single_port; + /* reset */ + mac->ops.reset_hw = e1000_reset_hw_ich8lan; + /* hw initialization */ + mac->ops.init_hw = e1000_init_hw_ich8lan; + /* link setup */ + mac->ops.setup_link = e1000_setup_link_ich8lan; + /* physical interface setup */ + mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan; + /* check for link */ + mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan; + /* link info */ + mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; + /* clear hardware counters */ + mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan; + + /* LED and other operations */ + switch (mac->type) { + case e1000_ich8lan: + case e1000_ich9lan: + case e1000_ich10lan: + /* check management mode */ + mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan; + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_generic; + /* blink LED */ + mac->ops.blink_led = e1000_blink_led_generic; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_generic; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_ich8lan; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_ich8lan; + mac->ops.led_off = e1000_led_off_ich8lan; + break; + case e1000_pch2lan: + mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES; + mac->ops.rar_set = e1000_rar_set_pch2lan; + /* fall-through */ + case e1000_pch_lpt: +#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT + /* multicast address update for pch2 */ + mac->ops.update_mc_addr_list = + e1000_update_mc_addr_list_pch2lan; + /* fall-through */ +#endif + case e1000_pchlan: +#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT) + /* save PCH revision_id */ + e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg); + hw->revision_id = (u8)(pci_cfg &= 0x000F); +#endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */ + /* check management mode */ + mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_pchlan; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_pchlan; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_pchlan; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_pchlan; + mac->ops.led_off = e1000_led_off_pchlan; + break; + default: + break; + } + + if (mac->type == e1000_pch_lpt) { + mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; + mac->ops.rar_set = e1000_rar_set_pch_lpt; + mac->ops.setup_physical_interface = 
e1000_setup_copper_link_pch_lpt; + } + + /* Enable PCS Lock-loss workaround for ICH8 */ + if (mac->type == e1000_ich8lan) + e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true); + + return E1000_SUCCESS; +} + +/** + * __e1000_access_emi_reg_locked - Read/write EMI register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: pointer to value to read/write from/to the EMI address + * @read: boolean flag to indicate read or write + * + * This helper function assumes the SW/FW/HW Semaphore is already acquired. + **/ +STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address, + u16 *data, bool read) +{ + s32 ret_val; + + DEBUGFUNC("__e1000_access_emi_reg_locked"); + + ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA, + data); + else + ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, + *data); + + return ret_val; +} + +/** + * e1000_read_emi_reg_locked - Read Extended Management Interface register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: value to be read from the EMI address + * + * Assumes the SW/FW/HW Semaphore is already acquired. + **/ +s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data) +{ + DEBUGFUNC("e1000_read_emi_reg_locked"); + + return __e1000_access_emi_reg_locked(hw, addr, data, true); +} + +/** + * e1000_write_emi_reg_locked - Write Extended Management Interface register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: value to be written to the EMI address + * + * Assumes the SW/FW/HW Semaphore is already acquired. + **/ +s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data) +{ + DEBUGFUNC("e1000_read_emi_reg_locked"); + + return __e1000_access_emi_reg_locked(hw, addr, &data, false); +} + +/** + * e1000_set_eee_pchlan - Enable/disable EEE support + * @hw: pointer to the HW structure + * + * Enable/disable EEE based on setting in dev_spec structure, the duplex of + * the link and the EEE capabilities of the link partner. The LPI Control + * register bits will remain set only if/when link is up. + * + * EEE LPI must not be asserted earlier than one second after link is up. + * On 82579, EEE LPI should not be enabled until such time otherwise there + * can be link issues with some switches. Other devices can have EEE LPI + * enabled immediately upon link up since they have a timer in hardware which + * prevents LPI from being asserted too early. 
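For illustration only (not part of this change): __e1000_access_emi_reg_locked() above follows the classic indirect-register pattern, programming an address register and then moving the payload through a separate data register, all under the already-held semaphore. The sketch below abstracts that pattern; the callbacks and register fields are stand-ins for the locked PHY accessors and the I82579_EMI_ADDR/I82579_EMI_DATA offsets.

#include <stdint.h>

struct emi_io_sketch {
	int (*write_reg)(void *ctx, uint32_t reg, uint16_t val);
	int (*read_reg)(void *ctx, uint32_t reg, uint16_t *val);
	void *ctx;
	uint32_t addr_reg;	/* stand-in for the EMI address register */
	uint32_t data_reg;	/* stand-in for the EMI data register */
};

/* Program the EMI address, then read the payload through the data register. */
static int emi_read_sketch(const struct emi_io_sketch *io, uint16_t addr,
			   uint16_t *data)
{
	int ret = io->write_reg(io->ctx, io->addr_reg, addr);

	return ret ? ret : io->read_reg(io->ctx, io->data_reg, data);
}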
+ **/ +s32 e1000_set_eee_pchlan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + s32 ret_val; + u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data; + + DEBUGFUNC("e1000_set_eee_pchlan"); + + switch (hw->phy.type) { + case e1000_phy_82579: + lpa = I82579_EEE_LP_ABILITY; + pcs_status = I82579_EEE_PCS_STATUS; + adv_addr = I82579_EEE_ADVERTISEMENT; + break; + case e1000_phy_i217: + lpa = I217_EEE_LP_ABILITY; + pcs_status = I217_EEE_PCS_STATUS; + adv_addr = I217_EEE_ADVERTISEMENT; + break; + default: + return E1000_SUCCESS; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl); + if (ret_val) + goto release; + + /* Clear bits that enable EEE in various speeds */ + lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK; + + /* Enable EEE if not disabled by user */ + if (!dev_spec->eee_disable) { + /* Save off link partner's EEE ability */ + ret_val = e1000_read_emi_reg_locked(hw, lpa, + &dev_spec->eee_lp_ability); + if (ret_val) + goto release; + + /* Read EEE advertisement */ + ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv); + if (ret_val) + goto release; + + /* Enable EEE only for speeds in which the link partner is + * EEE capable and for which we advertise EEE. + */ + if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED) + lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE; + + if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) { + hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data); + if (data & NWAY_LPAR_100TX_FD_CAPS) + lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE; + else + /* EEE is not supported in 100Half, so ignore + * partner's EEE in 100 ability if full-duplex + * is not advertised. + */ + dev_spec->eee_lp_ability &= + ~I82579_EEE_100_SUPPORTED; + } + } + + if (hw->phy.type == e1000_phy_82579) { + ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, + &data); + if (ret_val) + goto release; + + data &= ~I82579_LPI_100_PLL_SHUT; + ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, + data); + } + + /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */ + ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data); + if (ret_val) + goto release; + + ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl); +release: + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP + * @hw: pointer to the HW structure + * @link: link up bool flag + * + * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications + * preventing further DMA write requests. Workaround the issue by disabling + * the de-assertion of the clock request when in 1Gpbs mode. + * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link + * speeds in order to avoid Tx hangs. 
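For illustration only (not part of this change): the per-speed decision in e1000_set_eee_pchlan() above enables LPI for a speed only when both the local advertisement and the link partner report EEE for it, and 100 Mb/s additionally requires the partner to offer 100BASE-TX full duplex. The sketch captures that logic; all bit values are illustrative stand-ins for the I82579/NWAY constants.

#include <stdint.h>

#define EEE_100_SKETCH		(1u << 1)	/* stand-in for 100 Mb/s EEE support  */
#define EEE_1000_SKETCH		(1u << 2)	/* stand-in for 1000 Mb/s EEE support */
#define LP_100TX_FD_SKETCH	(1u << 8)	/* stand-in for partner 100TX full duplex */
#define LPI_100_ENABLE_SKETCH	(1u << 1)	/* stand-in for the 100 Mb/s LPI enable bit  */
#define LPI_1000_ENABLE_SKETCH	(1u << 2)	/* stand-in for the 1000 Mb/s LPI enable bit */

static uint16_t eee_lpi_bits_sketch(uint16_t adv, uint16_t lp_eee,
				    uint16_t lp_base_page)
{
	uint16_t lpi = 0;

	if (adv & lp_eee & EEE_1000_SKETCH)
		lpi |= LPI_1000_ENABLE_SKETCH;
	/* EEE is not defined for 100 half, so require partner 100TX full duplex. */
	if ((adv & lp_eee & EEE_100_SKETCH) && (lp_base_page & LP_100TX_FD_SKETCH))
		lpi |= LPI_100_ENABLE_SKETCH;
	return lpi;
}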
+ **/ +STATIC s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link) +{ + u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6); + u32 status = E1000_READ_REG(hw, E1000_STATUS); + s32 ret_val = E1000_SUCCESS; + u16 reg; + + if (link && (status & E1000_STATUS_SPEED_1000)) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, + ®); + if (ret_val) + goto release; + + ret_val = + e1000_write_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + reg & + ~E1000_KMRNCTRLSTA_K1_ENABLE); + if (ret_val) + goto release; + + usec_delay(10); + + E1000_WRITE_REG(hw, E1000_FEXTNVM6, + fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK); + + ret_val = + e1000_write_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + reg); +release: + hw->phy.ops.release(hw); + } else { + /* clear FEXTNVM6 bit 8 on link down or 10/100 */ + fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK; + + if (!link || ((status & E1000_STATUS_SPEED_100) && + (status & E1000_STATUS_FD))) + goto update_fextnvm6; + + ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, ®); + if (ret_val) + return ret_val; + + /* Clear link status transmit timeout */ + reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK; + + if (status & E1000_STATUS_SPEED_100) { + /* Set inband Tx timeout to 5x10us for 100Half */ + reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; + + /* Do not extend the K1 entry latency for 100Half */ + fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; + } else { + /* Set inband Tx timeout to 50x10us for 10Full/Half */ + reg |= 50 << + I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; + + /* Extend the K1 entry latency for 10 Mbps */ + fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; + } + + ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg); + if (ret_val) + return ret_val; + +update_fextnvm6: + E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6); + } + + return ret_val; +} + +#ifdef ULP_SUPPORT +/** + * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP + * @hw: pointer to the HW structure + * @to_sx: boolean indicating a system power state transition to Sx + * + * When link is down, configure ULP mode to significantly reduce the power + * to the PHY. If on a Manageability Engine (ME) enabled system, tell the + * ME firmware to start the ULP configuration. If not on an ME enabled + * system, configure the ULP mode by software. + */ +s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) +{ + u32 mac_reg; + s32 ret_val = E1000_SUCCESS; + u16 phy_reg; + + if ((hw->mac.type < e1000_pch_lpt) || + (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) || + (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) || + (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) || + (hw->device_id == E1000_DEV_ID_PCH_I218_V2) || + (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on)) + return 0; + + if (!to_sx) { + int i = 0; + /* Poll up to 5 seconds for Cable Disconnected indication */ + while (!(E1000_READ_REG(hw, E1000_FEXT) & + E1000_FEXT_PHY_CABLE_DISCONNECTED)) { + /* Bail if link is re-acquired */ + if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU) + return -E1000_ERR_PHY; + if (i++ == 100) + break; + + msec_delay(50); + } + DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n", + (E1000_READ_REG(hw, E1000_FEXT) & + E1000_FEXT_PHY_CABLE_DISCONNECTED) ? 
"" : "not", + i * 50); + if (!(E1000_READ_REG(hw, E1000_FEXT) & + E1000_FEXT_PHY_CABLE_DISCONNECTED)) + return 0; + } + + if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) { + /* Request ME configure ULP mode in the PHY */ + mac_reg = E1000_READ_REG(hw, E1000_H2ME); + mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS; + E1000_WRITE_REG(hw, E1000_H2ME, mac_reg); + + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + /* During S0 Idle keep the phy in PCI-E mode */ + if (hw->dev_spec.ich8lan.smbus_disable) + goto skip_smbus; + + /* Force SMBus mode in PHY */ + ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); + if (ret_val) + goto release; + phy_reg |= CV_SMB_CTRL_FORCE_SMBUS; + e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Force SMBus mode in MAC */ + mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); + +skip_smbus: + if (!to_sx) { + /* Change the 'Link Status Change' interrupt to trigger + * on 'Cable Status Change' + */ + ret_val = e1000_read_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_OP_MODES, + &phy_reg); + if (ret_val) + goto release; + phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC; + e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, + phy_reg); + } + + /* Set Inband ULP Exit, Reset to SMBus mode and + * Disable SMBus Release on PERST# in PHY + */ + ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg); + if (ret_val) + goto release; + phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS | + I218_ULP_CONFIG1_DISABLE_SMB_PERST); + if (to_sx) { + if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC) + phy_reg |= I218_ULP_CONFIG1_WOL_HOST; + else + phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST; + + phy_reg |= I218_ULP_CONFIG1_STICKY_ULP; + phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT; + } else { + phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT; + phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP; + phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST; + } + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + /* Set Disable SMBus Release on PERST# in MAC */ + mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7); + mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST; + E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg); + + /* Commit ULP changes in PHY by starting auto ULP configuration */ + phy_reg |= I218_ULP_CONFIG1_START; + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + if (!to_sx) { + /* Disable Tx so that the MAC doesn't send any (buffered) + * packets to the PHY. + */ + mac_reg = E1000_READ_REG(hw, E1000_TCTL); + mac_reg &= ~E1000_TCTL_EN; + E1000_WRITE_REG(hw, E1000_TCTL, mac_reg); + } + +release: + hw->phy.ops.release(hw); +out: + if (ret_val) + DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val); + else + hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on; + + return ret_val; +} + +/** + * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP + * @hw: pointer to the HW structure + * @force: boolean indicating whether or not to force disabling ULP + * + * Un-configure ULP mode when link is up, the system is transitioned from + * Sx or the driver is unloaded. If on a Manageability Engine (ME) enabled + * system, poll for an indication from ME that ULP has been un-configured. + * If not on an ME enabled system, un-configure the ULP mode by software. 
+ * + * During nominal operation, this function is called when link is acquired + * to disable ULP mode (force=false); otherwise, for example when unloading + * the driver or during Sx->S0 transitions, this is called with force=true + * to forcibly disable ULP. + + * When the cable is plugged in while the device is in D0, a Cable Status + * Change interrupt is generated which causes this function to be called + * to partially disable ULP mode and restart autonegotiation. This function + * is then called again due to the resulting Link Status Change interrupt + * to finish cleaning up after the ULP flow. + */ +s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force) +{ + s32 ret_val = E1000_SUCCESS; + u32 mac_reg; + u16 phy_reg; + int i = 0; + + if ((hw->mac.type < e1000_pch_lpt) || + (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) || + (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) || + (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) || + (hw->device_id == E1000_DEV_ID_PCH_I218_V2) || + (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off)) + return 0; + + if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) { + if (force) { + /* Request ME un-configure ULP mode in the PHY */ + mac_reg = E1000_READ_REG(hw, E1000_H2ME); + mac_reg &= ~E1000_H2ME_ULP; + mac_reg |= E1000_H2ME_ENFORCE_SETTINGS; + E1000_WRITE_REG(hw, E1000_H2ME, mac_reg); + } + + /* Poll up to 100msec for ME to clear ULP_CFG_DONE */ + while (E1000_READ_REG(hw, E1000_FWSM) & + E1000_FWSM_ULP_CFG_DONE) { + if (i++ == 10) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + msec_delay(10); + } + DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10); + + if (force) { + mac_reg = E1000_READ_REG(hw, E1000_H2ME); + mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS; + E1000_WRITE_REG(hw, E1000_H2ME, mac_reg); + } else { + /* Clear H2ME.ULP after ME ULP configuration */ + mac_reg = E1000_READ_REG(hw, E1000_H2ME); + mac_reg &= ~E1000_H2ME_ULP; + E1000_WRITE_REG(hw, E1000_H2ME, mac_reg); + + /* Restore link speed advertisements and restart + * Auto-negotiation + */ + if (hw->mac.autoneg) { + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) + goto out; + } else { + ret_val = e1000_setup_copper_link_generic(hw); + if (ret_val) + goto out; + } + ret_val = e1000_oem_bits_config_ich8lan(hw, true); + } + + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + /* Revert the change to the 'Link Status Change' + * interrupt to trigger on 'Cable Status Change' + */ + ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, + &phy_reg); + if (ret_val) + goto release; + phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC; + e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg); + + if (force) + /* Toggle LANPHYPC Value bit */ + e1000_toggle_lanphypc_pch_lpt(hw); + + /* Unforce SMBus mode in PHY */ + ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); + if (ret_val) { + /* The MAC might be in PCIe mode, so temporarily force to + * SMBus mode in order to access the PHY. 
+ */ + mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); + + msec_delay(50); + + ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, + &phy_reg); + if (ret_val) + goto release; + } + phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; + e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Unforce SMBus mode in MAC */ + mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); + + /* When ULP mode was previously entered, K1 was disabled by the + * hardware. Re-Enable K1 in the PHY when exiting ULP. + */ + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg); + if (ret_val) + goto release; + phy_reg |= HV_PM_CTRL_K1_ENABLE; + e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg); + + /* Clear ULP enabled configuration */ + ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg); + if (ret_val) + goto release; + /* CSC interrupt received due to ULP Indication */ + if ((phy_reg & I218_ULP_CONFIG1_IND) || force) { + phy_reg &= ~(I218_ULP_CONFIG1_IND | + I218_ULP_CONFIG1_STICKY_ULP | + I218_ULP_CONFIG1_RESET_TO_SMBUS | + I218_ULP_CONFIG1_WOL_HOST | + I218_ULP_CONFIG1_INBAND_EXIT | + I218_ULP_CONFIG1_DISABLE_SMB_PERST); + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + /* Commit ULP changes by starting auto ULP configuration */ + phy_reg |= I218_ULP_CONFIG1_START; + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + /* Clear Disable SMBus Release on PERST# in MAC */ + mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7); + mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST; + E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg); + + if (!force) { + hw->phy.ops.release(hw); + + if (hw->mac.autoneg) + e1000_phy_setup_autoneg(hw); + + e1000_sw_lcd_config_ich8lan(hw); + + e1000_oem_bits_config_ich8lan(hw, true); + + /* Set ULP state to unknown and return non-zero to + * indicate no link (yet) and re-enter on the next LSC + * to finish disabling ULP flow. + */ + hw->dev_spec.ich8lan.ulp_state = + e1000_ulp_state_unknown; + + return 1; + } + } + + /* Re-enable Tx */ + mac_reg = E1000_READ_REG(hw, E1000_TCTL); + mac_reg |= E1000_TCTL_EN; + E1000_WRITE_REG(hw, E1000_TCTL, mac_reg); + +release: + hw->phy.ops.release(hw); + if (force) { + hw->phy.ops.reset(hw); + msec_delay(50); + } +out: + if (ret_val) + DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val); + else + hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off; + + return ret_val; +} + +#endif /* ULP_SUPPORT */ +/** + * e1000_check_for_copper_link_ich8lan - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val, tipg_reg = 0; + u16 emi_addr, emi_val = 0; + bool link = false; + u16 phy_reg; + + DEBUGFUNC("e1000_check_for_copper_link_ich8lan"); + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. 
+ */ + if (!mac->get_link_status) + return E1000_SUCCESS; + + if ((hw->mac.type < e1000_pch_lpt) || + (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) || + (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) { + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + } else { + /* Check the MAC's STATUS register to determine link state + * since the PHY could be inaccessible while in ULP mode. + */ + link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU); + if (link) + ret_val = e1000_disable_ulp_lpt_lp(hw, false); + else + ret_val = e1000_enable_ulp_lpt_lp(hw, false); + if (ret_val) + return ret_val; + } + + if (hw->mac.type == e1000_pchlan) { + ret_val = e1000_k1_gig_workaround_hv(hw, link); + if (ret_val) + return ret_val; + } + + /* When connected at 10Mbps half-duplex, some parts are excessively + * aggressive resulting in many collisions. To avoid this, increase + * the IPG and reduce Rx latency in the PHY. + */ + if (((hw->mac.type == e1000_pch2lan) || + (hw->mac.type == e1000_pch_lpt)) && link) { + u16 speed, duplex; + + e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex); + tipg_reg = E1000_READ_REG(hw, E1000_TIPG); + tipg_reg &= ~E1000_TIPG_IPGT_MASK; + + if (duplex == HALF_DUPLEX && speed == SPEED_10) { + tipg_reg |= 0xFF; + /* Reduce Rx latency in analog PHY */ + emi_val = 0; + } else { + /* Roll back the default values */ + tipg_reg |= 0x08; + emi_val = 1; + } + + E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + if (hw->mac.type == e1000_pch2lan) + emi_addr = I82579_RX_CONFIG; + else + emi_addr = I217_RX_CONFIG; + ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val); + + hw->phy.ops.release(hw); + + if (ret_val) + return ret_val; + } + + /* I217 Packet Loss issue: + * ensure that FEXTNVM4 Beacon Duration is set correctly + * on power up. + * Set the Beacon Duration for I217 to 8 usec + */ + if (hw->mac.type == e1000_pch_lpt) { + u32 mac_reg; + + mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4); + mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; + E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg); + } + + /* Work-around I218 hang issue */ + if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || + (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || + (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) || + (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) { + ret_val = e1000_k1_workaround_lpt_lp(hw, link); + if (ret_val) + return ret_val; + } + /* Clear link partner's EEE ability */ + hw->dev_spec.ich8lan.eee_lp_ability = 0; + + /* Configure K0s minimum time */ + if (hw->mac.type == e1000_pch_lpt) { + e1000_configure_k0s_lpt(hw, K1_ENTRY_LATENCY, K1_MIN_TIME); + } + + if (!link) + return E1000_SUCCESS; /* No link detected */ + + mac->get_link_status = false; + + switch (hw->mac.type) { + case e1000_pch2lan: + ret_val = e1000_k1_workaround_lv(hw); + if (ret_val) + return ret_val; + /* fall-thru */ + case e1000_pchlan: + if (hw->phy.type == e1000_phy_82578) { + ret_val = e1000_link_stall_workaround_hv(hw); + if (ret_val) + return ret_val; + } + + /* Workaround for PCHx parts in half-duplex: + * Set the number of preambles removed from the packet + * when it is passed from the PHY to the MAC to prevent + * the MAC from misinterpreting the packet type. 
+ */ + hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg); + phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK; + + if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) != + E1000_STATUS_FD) + phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT); + + hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg); + break; + default: + break; + } + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000_check_downshift_generic(hw); + + /* Enable/Disable EEE after link up */ + if (hw->phy.type > e1000_phy_82579) { + ret_val = e1000_set_eee_pchlan(hw); + if (ret_val) + return ret_val; + } + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) + return -E1000_ERR_CONFIG; + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + mac->ops.config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) + DEBUGOUT("Error configuring flow control\n"); + + return ret_val; +} + +/** + * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific function pointers for PHY, MAC, and NVM. + **/ +void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_ich8lan"); + + hw->mac.ops.init_params = e1000_init_mac_params_ich8lan; + hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan; + switch (hw->mac.type) { + case e1000_ich8lan: + case e1000_ich9lan: + case e1000_ich10lan: + hw->phy.ops.init_params = e1000_init_phy_params_ich8lan; + break; + case e1000_pchlan: + case e1000_pch2lan: + case e1000_pch_lpt: + hw->phy.ops.init_params = e1000_init_phy_params_pchlan; + break; + default: + break; + } +} + +/** + * e1000_acquire_nvm_ich8lan - Acquire NVM mutex + * @hw: pointer to the HW structure + * + * Acquires the mutex for performing NVM operations. + **/ +STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_acquire_nvm_ich8lan"); + + E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex); + + return E1000_SUCCESS; +} + +/** + * e1000_release_nvm_ich8lan - Release NVM mutex + * @hw: pointer to the HW structure + * + * Releases the mutex used while performing NVM operations. + **/ +STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_release_nvm_ich8lan"); + + E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex); + + return; +} + +/** + * e1000_acquire_swflag_ich8lan - Acquire software control flag + * @hw: pointer to the HW structure + * + * Acquires the software control flag for performing PHY and select + * MAC CSR accesses. 
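For illustration only (not part of this change): e1000_acquire_swflag_ich8lan() below performs a two-stage handshake on the EXTCNF_CTRL software flag, first waiting for the bit to be free, then setting it and confirming the hardware accepted the request, backing off on failure. A self-contained sketch of that handshake follows; the callbacks, flag mask and single shared timeout are assumptions for illustration.

#include <stdbool.h>
#include <stdint.h>

static bool acquire_hw_flag_sketch(uint32_t (*rd)(void *ctx),
				   void (*wr)(void *ctx, uint32_t val),
				   void *ctx, uint32_t flag, int timeout_ms,
				   void (*delay_1ms)(void))
{
	int t;

	/* Stage 1: wait for the flag to be free. */
	for (t = timeout_ms; t > 0 && (rd(ctx) & flag); t--)
		delay_1ms();
	if (t == 0)
		return false;		/* another agent never released it */

	/* Stage 2: request ownership and confirm the hardware accepted it. */
	wr(ctx, rd(ctx) | flag);
	for (t = timeout_ms; t > 0 && !(rd(ctx) & flag); t--)
		delay_1ms();
	if (t == 0) {
		wr(ctx, rd(ctx) & ~flag);	/* back off, as the driver does */
		return false;
	}
	return true;
}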
+ **/ +STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) +{ + u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_acquire_swflag_ich8lan"); + + E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex); + + while (timeout) { + extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); + if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) + break; + + msec_delay_irq(1); + timeout--; + } + + if (!timeout) { + DEBUGOUT("SW has already locked the resource.\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + timeout = SW_FLAG_TIMEOUT; + + extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; + E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); + + while (timeout) { + extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); + if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) + break; + + msec_delay_irq(1); + timeout--; + } + + if (!timeout) { + DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n", + E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl); + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; + E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + +out: + if (ret_val) + E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex); + + return ret_val; +} + +/** + * e1000_release_swflag_ich8lan - Release software control flag + * @hw: pointer to the HW structure + * + * Releases the software control flag for performing PHY and select + * MAC CSR accesses. + **/ +STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + + DEBUGFUNC("e1000_release_swflag_ich8lan"); + + extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); + + if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) { + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; + E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); + } else { + DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n"); + } + + E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex); + + return; +} + +/** + * e1000_check_mng_mode_ich8lan - Checks management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has any manageability enabled. + * This is a function pointer entry point only called by read/write + * routines for the PHY and NVM parts. + **/ +STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) +{ + u32 fwsm; + + DEBUGFUNC("e1000_check_mng_mode_ich8lan"); + + fwsm = E1000_READ_REG(hw, E1000_FWSM); + + return (fwsm & E1000_ICH_FWSM_FW_VALID) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); +} + +/** + * e1000_check_mng_mode_pchlan - Checks management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has iAMT enabled. + * This is a function pointer entry point only called by read/write + * routines for the PHY and NVM parts. + **/ +STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw) +{ + u32 fwsm; + + DEBUGFUNC("e1000_check_mng_mode_pchlan"); + + fwsm = E1000_READ_REG(hw, E1000_FWSM); + + return (fwsm & E1000_ICH_FWSM_FW_VALID) && + (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); +} + +/** + * e1000_rar_set_pch2lan - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. For 82579, RAR[0] is the base address register that is to + * contain the MAC address but RAR[1-6] are reserved for manageability (ME). 
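For illustration only (not part of this change): e1000_rar_set_pch2lan() and e1000_rar_set_pch_lpt() below both pack the six address bytes into RAL/RAH in little-endian order and set the Address Valid bit only for a non-zero address. The sketch reproduces that packing with a made-up MAC address; bit 31 stands in for E1000_RAH_AV.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical MAC address, in the order it appears on the wire */
	uint8_t addr[6] = { 0x00, 0x1B, 0x21, 0xAA, 0xBB, 0xCC };
	uint32_t rar_low = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
			   ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
	uint32_t rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);

	/* The Address Valid bit is set only for a non-zero address. */
	if (rar_low || rar_high)
		rar_high |= 1u << 31;	/* stand-in for E1000_RAH_AV */

	printf("RAL=0x%08X RAH=0x%08X\n", (unsigned)rar_low, (unsigned)rar_high);
	return 0;
}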
+ * Use SHRA[0-3] in place of those reserved for ME. + **/ +STATIC int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + DEBUGFUNC("e1000_rar_set_pch2lan"); + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | + ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + if (index == 0) { + E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); + E1000_WRITE_FLUSH(hw); + return E1000_SUCCESS; + } + + /* RAR[1-6] are owned by manageability. Skip those and program the + * next address into the SHRA register array. + */ + if (index < (u32) (hw->mac.rar_entry_count)) { + s32 ret_val; + + ret_val = e1000_acquire_swflag_ich8lan(hw); + if (ret_val) + goto out; + + E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high); + E1000_WRITE_FLUSH(hw); + + e1000_release_swflag_ich8lan(hw); + + /* verify the register updates */ + if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) && + (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high)) + return E1000_SUCCESS; + + DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n", + (index - 1), E1000_READ_REG(hw, E1000_FWSM)); + } + +out: + DEBUGOUT1("Failed to write receive address at index %d\n", index); + return -E1000_ERR_CONFIG; +} + +/** + * e1000_rar_set_pch_lpt - Set receive address registers + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address register array at index to the address passed + * in by addr. For LPT, RAR[0] is the base address register that is to + * contain the MAC address. SHRA[0-10] are the shared receive address + * registers that are shared between the Host and manageability engine (ME). + **/ +STATIC int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + u32 wlock_mac; + + DEBUGFUNC("e1000_rar_set_pch_lpt"); + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + if (index == 0) { + E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); + E1000_WRITE_FLUSH(hw); + return E1000_SUCCESS; + } + + /* The manageability engine (ME) can lock certain SHRAR registers that + * it is using - those registers are unavailable for use. 
+ */ + if (index < hw->mac.rar_entry_count) { + wlock_mac = E1000_READ_REG(hw, E1000_FWSM) & + E1000_FWSM_WLOCK_MAC_MASK; + wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT; + + /* Check if all SHRAR registers are locked */ + if (wlock_mac == 1) + goto out; + + if ((wlock_mac == 0) || (index <= wlock_mac)) { + s32 ret_val; + + ret_val = e1000_acquire_swflag_ich8lan(hw); + + if (ret_val) + goto out; + + E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1), + rar_low); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1), + rar_high); + E1000_WRITE_FLUSH(hw); + + e1000_release_swflag_ich8lan(hw); + + /* verify the register updates */ + if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) && + (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high)) + return E1000_SUCCESS; + } + } + +out: + DEBUGOUT1("Failed to write receive address at index %d\n", index); + return -E1000_ERR_CONFIG; +} + +#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT +/** + * e1000_update_mc_addr_list_pch2lan - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array of the PCH2 MAC and PHY. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw, + u8 *mc_addr_list, + u32 mc_addr_count) +{ + u16 phy_reg = 0; + int i; + s32 ret_val; + + DEBUGFUNC("e1000_update_mc_addr_list_pch2lan"); + + e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + + ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); + if (ret_val) + goto release; + + for (i = 0; i < hw->mac.mta_reg_count; i++) { + hw->phy.ops.write_reg_page(hw, BM_MTA(i), + (u16)(hw->mac.mta_shadow[i] & + 0xFFFF)); + hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1), + (u16)((hw->mac.mta_shadow[i] >> 16) & + 0xFFFF)); + } + + e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); + +release: + hw->phy.ops.release(hw); +} + +#endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */ +/** + * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Checks if firmware is blocking the reset of the PHY. + * This is a function pointer entry point only called by + * reset routines. + **/ +STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) +{ + u32 fwsm; + bool blocked = false; + int i = 0; + + DEBUGFUNC("e1000_check_reset_block_ich8lan"); + + do { + fwsm = E1000_READ_REG(hw, E1000_FWSM); + if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) { + blocked = true; + msec_delay(10); + continue; + } + blocked = false; + } while (blocked && (i++ < 30)); + return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS; +} + +/** + * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states + * @hw: pointer to the HW structure + * + * Assumes semaphore already acquired. 
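+ * Copies the SMBus address strap from the STRAP register into the HV_SMB_ADDR
+ * PHY register and, for the i217 PHY, restores the SMBus frequency strap too.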
+ * + **/ +STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw) +{ + u16 phy_data; + u32 strap = E1000_READ_REG(hw, E1000_STRAP); + u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >> + E1000_STRAP_SMT_FREQ_SHIFT; + s32 ret_val; + + strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; + + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~HV_SMB_ADDR_MASK; + phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); + phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; + + if (hw->phy.type == e1000_phy_i217) { + /* Restore SMBus frequency */ + if (freq--) { + phy_data &= ~HV_SMB_ADDR_FREQ_MASK; + phy_data |= (freq & (1 << 0)) << + HV_SMB_ADDR_FREQ_LOW_SHIFT; + phy_data |= (freq & (1 << 1)) << + (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1); + } else { + DEBUGOUT("Unsupported SMB frequency in PHY\n"); + } + } + + return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); +} + +/** + * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration + * @hw: pointer to the HW structure + * + * SW should configure the LCD from the NVM extended configuration region + * as a workaround for certain parts. + **/ +STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; + s32 ret_val = E1000_SUCCESS; + u16 word_addr, reg_data, reg_addr, phy_page = 0; + + DEBUGFUNC("e1000_sw_lcd_config_ich8lan"); + + /* Initialize the PHY from the NVM on ICH platforms. This + * is needed due to an issue where the NVM configuration is + * not properly autoloaded after power transitions. + * Therefore, after each PHY reset, we will load the + * configuration data out of the NVM manually. + */ + switch (hw->mac.type) { + case e1000_ich8lan: + if (phy->type != e1000_phy_igp_3) + return ret_val; + + if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) || + (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) { + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; + break; + } + /* Fall-thru */ + case e1000_pchlan: + case e1000_pch2lan: + case e1000_pch_lpt: + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; + break; + default: + return ret_val; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + data = E1000_READ_REG(hw, E1000_FEXTNVM); + if (!(data & sw_cfg_mask)) + goto release; + + /* Make sure HW does not configure LCD from PHY + * extended configuration before SW configuration + */ + data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); + if ((hw->mac.type < e1000_pch2lan) && + (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)) + goto release; + + cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE); + cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; + cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; + if (!cnf_size) + goto release; + + cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; + cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; + + if (((hw->mac.type == e1000_pchlan) && + !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) || + (hw->mac.type > e1000_pchlan)) { + /* HW configures the SMBus address and LEDs when the + * OEM and LCD Write Enable bits are set in the NVM. + * When both NVM bits are cleared, SW will configure + * them instead. + */ + ret_val = e1000_write_smbus_addr(hw); + if (ret_val) + goto release; + + data = E1000_READ_REG(hw, E1000_LEDCTL); + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG, + (u16)data); + if (ret_val) + goto release; + } + + /* Configure LCD from extended configuration region. 
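+	 * Each NVM entry is a (data, address) word pair that is written to the
+	 * PHY; IGP page-select values are cached in phy_page for later writes.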
*/ + + /* cnf_base_addr is in DWORD */ + word_addr = (u16)(cnf_base_addr << 1); + + for (i = 0; i < cnf_size; i++) { + ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1, + ®_data); + if (ret_val) + goto release; + + ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1), + 1, ®_addr); + if (ret_val) + goto release; + + /* Save off the PHY page for future writes. */ + if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { + phy_page = reg_data; + continue; + } + + reg_addr &= PHY_REG_MASK; + reg_addr |= phy_page; + + ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr, + reg_data); + if (ret_val) + goto release; + } + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_k1_gig_workaround_hv - K1 Si workaround + * @hw: pointer to the HW structure + * @link: link up bool flag + * + * If K1 is enabled for 1Gbps, the MAC might stall when transitioning + * from a lower speed. This workaround disables K1 whenever link is at 1Gig + * If link is down, the function will restore the default K1 setting located + * in the NVM. + **/ +STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) +{ + s32 ret_val = E1000_SUCCESS; + u16 status_reg = 0; + bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled; + + DEBUGFUNC("e1000_k1_gig_workaround_hv"); + + if (hw->mac.type != e1000_pchlan) + return E1000_SUCCESS; + + /* Wrap the whole flow with the sw flag */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ + if (link) { + if (hw->phy.type == e1000_phy_82578) { + ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS, + &status_reg); + if (ret_val) + goto release; + + status_reg &= (BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_MASK); + + if (status_reg == (BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_1000)) + k1_enable = false; + } + + if (hw->phy.type == e1000_phy_82577) { + ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS, + &status_reg); + if (ret_val) + goto release; + + status_reg &= (HV_M_STATUS_LINK_UP | + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_MASK); + + if (status_reg == (HV_M_STATUS_LINK_UP | + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_1000)) + k1_enable = false; + } + + /* Link stall fix for link up */ + ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), + 0x0100); + if (ret_val) + goto release; + + } else { + /* Link stall fix for link down */ + ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), + 0x4100); + if (ret_val) + goto release; + } + + ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); + +release: + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_configure_k1_ich8lan - Configure K1 power state + * @hw: pointer to the HW structure + * @enable: K1 state to configure + * + * Configure the K1 power state based on the provided parameter. + * Assumes semaphore already acquired. 
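+ * The K1 enable bit is updated over the KMRN interface, after which the MAC
+ * speed is briefly forced via CTRL/CTRL_EXT before the original values are
+ * restored.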
+ * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + **/ +s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) +{ + s32 ret_val; + u32 ctrl_reg = 0; + u32 ctrl_ext = 0; + u32 reg = 0; + u16 kmrn_reg = 0; + + DEBUGFUNC("e1000_configure_k1_ich8lan"); + + ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, + &kmrn_reg); + if (ret_val) + return ret_val; + + if (k1_enable) + kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE; + else + kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE; + + ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, + kmrn_reg); + if (ret_val) + return ret_val; + + usec_delay(20); + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_reg = E1000_READ_REG(hw, E1000_CTRL); + + reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + reg |= E1000_CTRL_FRCSPD; + E1000_WRITE_REG(hw, E1000_CTRL, reg); + + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); + E1000_WRITE_FLUSH(hw); + usec_delay(20); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + usec_delay(20); + + return E1000_SUCCESS; +} + +/** + * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration + * @hw: pointer to the HW structure + * @d0_state: boolean if entering d0 or d3 device state + * + * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are + * collectively called OEM bits. The OEM Write Enable bit and SW Config bit + * in NVM determines whether HW should configure LPLU and Gbe Disable. + **/ +STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) +{ + s32 ret_val = 0; + u32 mac_reg; + u16 oem_reg; + + DEBUGFUNC("e1000_oem_bits_config_ich8lan"); + + if (hw->mac.type < e1000_pchlan) + return ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + if (hw->mac.type == e1000_pchlan) { + mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); + if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) + goto release; + } + + mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM); + if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M)) + goto release; + + mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL); + + ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg); + if (ret_val) + goto release; + + oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU); + + if (d0_state) { + if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE) + oem_reg |= HV_OEM_BITS_GBE_DIS; + + if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) + oem_reg |= HV_OEM_BITS_LPLU; + } else { + if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE | + E1000_PHY_CTRL_NOND0A_GBE_DISABLE)) + oem_reg |= HV_OEM_BITS_GBE_DIS; + + if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU | + E1000_PHY_CTRL_NOND0A_LPLU)) + oem_reg |= HV_OEM_BITS_LPLU; + } + + /* Set Restart auto-neg to activate the bits */ + if ((d0_state || (hw->mac.type != e1000_pchlan)) && + !hw->phy.ops.check_reset_block(hw)) + oem_reg |= HV_OEM_BITS_RESTART_AN; + + ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); + +release: + hw->phy.ops.release(hw); + + return ret_val; +} + + +/** + * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw) +{ + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_set_mdio_slow_mode_hv"); + + ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data); + if (ret_val) + return ret_val; + + data |= HV_KMRN_MDIO_SLOW; + + ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data); + + return ret_val; +} + 
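The helpers above (e1000_set_mdio_slow_mode_hv, e1000_configure_k1_ich8lan,
e1000_oem_bits_config_ich8lan) all use the same read-modify-write idiom: read a
PHY or KMRN register, change only the bits of interest, and write the value
back. A minimal standalone sketch of that idiom follows; the register number,
bit position and the fake_phy_read/fake_phy_write accessors are hypothetical
stand-ins, not the driver's real hw->phy.ops interface.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical register file standing in for the PHY; the real driver goes
 * through hw->phy.ops.read_reg()/write_reg() with the semaphore acquired.
 */
static uint16_t phy_regs[32];

static int fake_phy_read(uint8_t reg, uint16_t *val)
{
	*val = phy_regs[reg & 0x1f];
	return 0;
}

static int fake_phy_write(uint8_t reg, uint16_t val)
{
	phy_regs[reg & 0x1f] = val;
	return 0;
}

/* Illustrative register number and bit position only. */
#define DEMO_MODE_CTRL_REG	0x10
#define DEMO_SLOW_MODE_BIT	(1u << 10)

/* Read-modify-write: flip one bit without disturbing the others. */
static int demo_set_slow_mode(int enable)
{
	uint16_t data;
	int ret = fake_phy_read(DEMO_MODE_CTRL_REG, &data);

	if (ret)
		return ret;

	if (enable)
		data |= DEMO_SLOW_MODE_BIT;
	else
		data &= (uint16_t)~DEMO_SLOW_MODE_BIT;

	return fake_phy_write(DEMO_MODE_CTRL_REG, data);
}

int main(void)
{
	demo_set_slow_mode(1);
	printf("reg 0x%02x = 0x%04x\n", DEMO_MODE_CTRL_REG,
	       (unsigned int)phy_regs[DEMO_MODE_CTRL_REG]);
	return 0;
}

In the driver this pattern is typically bracketed by acquiring and releasing
the software flag or PHY semaphore so that software and firmware accesses to
the shared registers do not interleave.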
+/** + * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be + * done after every PHY reset. + **/ +STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 phy_data; + + DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan"); + + if (hw->mac.type != e1000_pchlan) + return E1000_SUCCESS; + + /* Set MDIO slow mode before any other MDIO access */ + if (hw->phy.type == e1000_phy_82577) { + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + return ret_val; + } + + if (((hw->phy.type == e1000_phy_82577) && + ((hw->phy.revision == 1) || (hw->phy.revision == 2))) || + ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) { + /* Disable generation of early preamble */ + ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431); + if (ret_val) + return ret_val; + + /* Preamble tuning for SSC */ + ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, + 0xA204); + if (ret_val) + return ret_val; + } + + if (hw->phy.type == e1000_phy_82578) { + /* Return registers to default by doing a soft reset then + * writing 0x3140 to the control register. + */ + if (hw->phy.revision < 2) { + e1000_phy_sw_reset_generic(hw); + ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, + 0x3140); + } + } + + /* Select page 0 */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + hw->phy.addr = 1; + ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); + hw->phy.ops.release(hw); + if (ret_val) + return ret_val; + + /* Configure the K1 Si workaround during phy reset assuming there is + * link so that it disables K1 if link is in 1Gbps. + */ + ret_val = e1000_k1_gig_workaround_hv(hw, true); + if (ret_val) + return ret_val; + + /* Workaround for link disconnects on a busy hub in half duplex */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data); + if (ret_val) + goto release; + ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG, + phy_data & 0x00FF); + if (ret_val) + goto release; + + /* set MSE higher to enable link to stay up when noise is high */ + ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034); +release: + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY + * @hw: pointer to the HW structure + **/ +void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw) +{ + u32 mac_reg; + u16 i, phy_reg = 0; + s32 ret_val; + + DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); + if (ret_val) + goto release; + + /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */ + for (i = 0; i < (hw->mac.rar_entry_count); i++) { + mac_reg = E1000_READ_REG(hw, E1000_RAL(i)); + hw->phy.ops.write_reg_page(hw, BM_RAR_L(i), + (u16)(mac_reg & 0xFFFF)); + hw->phy.ops.write_reg_page(hw, BM_RAR_M(i), + (u16)((mac_reg >> 16) & 0xFFFF)); + + mac_reg = E1000_READ_REG(hw, E1000_RAH(i)); + hw->phy.ops.write_reg_page(hw, BM_RAR_H(i), + (u16)(mac_reg & 0xFFFF)); + hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i), + (u16)((mac_reg & E1000_RAH_AV) + >> 16)); + } + + e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); + +release: + hw->phy.ops.release(hw); +} + +#ifndef CRC32_OS_SUPPORT +STATIC u32 e1000_calc_rx_da_crc(u8 mac[]) +{ + u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */ + u32 i, j, mask, crc; + 
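+	/* Bit-wise, LSB-first CRC-32 over the 6-byte address: whenever the low
+	 * bit of crc is set, mask becomes all ones and the reflected polynomial
+	 * is XORed in.
+	 */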
+ DEBUGFUNC("e1000_calc_rx_da_crc"); + + crc = 0xffffffff; + for (i = 0; i < 6; i++) { + crc = crc ^ mac[i]; + for (j = 8; j > 0; j--) { + mask = (crc & 1) * (-1); + crc = (crc >> 1) ^ (poly & mask); + } + } + return ~crc; +} + +#endif /* CRC32_OS_SUPPORT */ +/** + * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation + * with 82579 PHY + * @hw: pointer to the HW structure + * @enable: flag to enable/disable workaround when enabling/disabling jumbos + **/ +s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) +{ + s32 ret_val = E1000_SUCCESS; + u16 phy_reg, data; + u32 mac_reg; + u16 i; + + DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan"); + + if (hw->mac.type < e1000_pch2lan) + return E1000_SUCCESS; + + /* disable Rx path while enabling/disabling workaround */ + hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg); + ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), + phy_reg | (1 << 14)); + if (ret_val) + return ret_val; + + if (enable) { + /* Write Rx addresses (rar_entry_count for RAL/H, and + * SHRAL/H) and initial CRC values to the MAC + */ + for (i = 0; i < hw->mac.rar_entry_count; i++) { + u8 mac_addr[ETH_ADDR_LEN] = {0}; + u32 addr_high, addr_low; + + addr_high = E1000_READ_REG(hw, E1000_RAH(i)); + if (!(addr_high & E1000_RAH_AV)) + continue; + addr_low = E1000_READ_REG(hw, E1000_RAL(i)); + mac_addr[0] = (addr_low & 0xFF); + mac_addr[1] = ((addr_low >> 8) & 0xFF); + mac_addr[2] = ((addr_low >> 16) & 0xFF); + mac_addr[3] = ((addr_low >> 24) & 0xFF); + mac_addr[4] = (addr_high & 0xFF); + mac_addr[5] = ((addr_high >> 8) & 0xFF); + +#ifndef CRC32_OS_SUPPORT + E1000_WRITE_REG(hw, E1000_PCH_RAICC(i), + e1000_calc_rx_da_crc(mac_addr)); +#else /* CRC32_OS_SUPPORT */ + E1000_WRITE_REG(hw, E1000_PCH_RAICC(i), + E1000_CRC32(ETH_ADDR_LEN, mac_addr)); +#endif /* CRC32_OS_SUPPORT */ + } + + /* Write Rx addresses to the PHY */ + e1000_copy_rx_addrs_to_phy_ich8lan(hw); + + /* Enable jumbo frame workaround in the MAC */ + mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG); + mac_reg &= ~(1 << 14); + mac_reg |= (7 << 15); + E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg); + + mac_reg = E1000_READ_REG(hw, E1000_RCTL); + mac_reg |= E1000_RCTL_SECRC; + E1000_WRITE_REG(hw, E1000_RCTL, mac_reg); + + ret_val = e1000_read_kmrn_reg_generic(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + &data); + if (ret_val) + return ret_val; + ret_val = e1000_write_kmrn_reg_generic(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + data | (1 << 0)); + if (ret_val) + return ret_val; + ret_val = e1000_read_kmrn_reg_generic(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + &data); + if (ret_val) + return ret_val; + data &= ~(0xF << 8); + data |= (0xB << 8); + ret_val = e1000_write_kmrn_reg_generic(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + data); + if (ret_val) + return ret_val; + + /* Enable jumbo frame workaround in the PHY */ + hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data); + data &= ~(0x7F << 5); + data |= (0x37 << 5); + ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data); + if (ret_val) + return ret_val; + hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data); + data &= ~(1 << 13); + ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data); + if (ret_val) + return ret_val; + hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data); + data &= ~(0x3FF << 2); + data |= (E1000_TX_PTR_GAP << 2); + ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data); + if (ret_val) + return ret_val; + ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100); + if (ret_val) + return ret_val; + hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data); + 
ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data | + (1 << 10)); + if (ret_val) + return ret_val; + } else { + /* Write MAC register values back to h/w defaults */ + mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG); + mac_reg &= ~(0xF << 14); + E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg); + + mac_reg = E1000_READ_REG(hw, E1000_RCTL); + mac_reg &= ~E1000_RCTL_SECRC; + E1000_WRITE_REG(hw, E1000_RCTL, mac_reg); + + ret_val = e1000_read_kmrn_reg_generic(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + &data); + if (ret_val) + return ret_val; + ret_val = e1000_write_kmrn_reg_generic(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + data & ~(1 << 0)); + if (ret_val) + return ret_val; + ret_val = e1000_read_kmrn_reg_generic(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + &data); + if (ret_val) + return ret_val; + data &= ~(0xF << 8); + data |= (0xB << 8); + ret_val = e1000_write_kmrn_reg_generic(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + data); + if (ret_val) + return ret_val; + + /* Write PHY register values back to h/w defaults */ + hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data); + data &= ~(0x7F << 5); + ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data); + if (ret_val) + return ret_val; + hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data); + data |= (1 << 13); + ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data); + if (ret_val) + return ret_val; + hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data); + data &= ~(0x3FF << 2); + data |= (0x8 << 2); + ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data); + if (ret_val) + return ret_val; + ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00); + if (ret_val) + return ret_val; + hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data); + ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data & + ~(1 << 10)); + if (ret_val) + return ret_val; + } + + /* re-enable Rx path after enabling/disabling workaround */ + return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg & + ~(1 << 14)); +} + +/** + * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be + * done after every PHY reset. 
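+ * Applies only to 82579 (pch2lan): enables MDIO slow mode and, via the EMI
+ * registers, raises the MSE threshold and the MSE link-down limit.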
+ **/ +STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan"); + + if (hw->mac.type != e1000_pch2lan) + return E1000_SUCCESS; + + /* Set MDIO slow mode before any other MDIO access */ + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + /* set MSE higher to enable link to stay up when noise is high */ + ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034); + if (ret_val) + goto release; + /* drop link after 5 times MSE threshold was reached */ + ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005); +release: + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_k1_gig_workaround_lv - K1 Si workaround + * @hw: pointer to the HW structure + * + * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps + * Disable K1 for 1000 and 100 speeds + **/ +STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 status_reg = 0; + + DEBUGFUNC("e1000_k1_workaround_lv"); + + if (hw->mac.type != e1000_pch2lan) + return E1000_SUCCESS; + + /* Set K1 beacon duration based on 10Mbs speed */ + ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg); + if (ret_val) + return ret_val; + + if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) + == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { + if (status_reg & + (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) { + u16 pm_phy_reg; + + /* LV 1G/100 Packet drop issue wa */ + ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL, + &pm_phy_reg); + if (ret_val) + return ret_val; + pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE; + ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, + pm_phy_reg); + if (ret_val) + return ret_val; + } else { + u32 mac_reg; + mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4); + mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; + E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg); + } + } + + return ret_val; +} + +/** + * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware + * @hw: pointer to the HW structure + * @gate: boolean set to true to gate, false to ungate + * + * Gate/ungate the automatic PHY configuration via hardware; perform + * the configuration via software instead. + **/ +STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate) +{ + u32 extcnf_ctrl; + + DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan"); + + if (hw->mac.type < e1000_pch2lan) + return; + + extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); + + if (gate) + extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; + else + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG; + + E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); +} + +/** + * e1000_lan_init_done_ich8lan - Check for PHY config completion + * @hw: pointer to the HW structure + * + * Check the appropriate indication the MAC has finished configuring the + * PHY after a software reset. 
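+ * Polls the LAN_INIT_DONE bit of the STATUS register and clears it again for
+ * the next initialization event.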
+ **/ +STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) +{ + u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT; + + DEBUGFUNC("e1000_lan_init_done_ich8lan"); + + /* Wait for basic configuration completes before proceeding */ + do { + data = E1000_READ_REG(hw, E1000_STATUS); + data &= E1000_STATUS_LAN_INIT_DONE; + usec_delay(100); + } while ((!data) && --loop); + + /* If basic configuration is incomplete before the above loop + * count reaches 0, loading the configuration from NVM will + * leave the PHY in a bad state possibly resulting in no link. + */ + if (loop == 0) + DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n"); + + /* Clear the Init Done bit for the next init event */ + data = E1000_READ_REG(hw, E1000_STATUS); + data &= ~E1000_STATUS_LAN_INIT_DONE; + E1000_WRITE_REG(hw, E1000_STATUS, data); +} + +/** + * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 reg; + + DEBUGFUNC("e1000_post_phy_reset_ich8lan"); + + if (hw->phy.ops.check_reset_block(hw)) + return E1000_SUCCESS; + + /* Allow time for h/w to get to quiescent state after reset */ + msec_delay(10); + + /* Perform any necessary post-reset workarounds */ + switch (hw->mac.type) { + case e1000_pchlan: + ret_val = e1000_hv_phy_workarounds_ich8lan(hw); + if (ret_val) + return ret_val; + break; + case e1000_pch2lan: + ret_val = e1000_lv_phy_workarounds_ich8lan(hw); + if (ret_val) + return ret_val; + break; + default: + break; + } + + /* Clear the host wakeup bit after lcd reset */ + if (hw->mac.type >= e1000_pchlan) { + hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, ®); + reg &= ~BM_WUC_HOST_WU_BIT; + hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg); + } + + /* Configure the LCD with the extended configuration region in NVM */ + ret_val = e1000_sw_lcd_config_ich8lan(hw); + if (ret_val) + return ret_val; + + /* Configure the LCD with the OEM bits in NVM */ + ret_val = e1000_oem_bits_config_ich8lan(hw, true); + + if (hw->mac.type == e1000_pch2lan) { + /* Ungate automatic PHY configuration on non-managed 82579 */ + if (!(E1000_READ_REG(hw, E1000_FWSM) & + E1000_ICH_FWSM_FW_VALID)) { + msec_delay(10); + e1000_gate_hw_phy_config_ich8lan(hw, false); + } + + /* Set EEE LPI Update Timer to 200usec */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = e1000_write_emi_reg_locked(hw, + I82579_LPI_UPDATE_TIMER, + 0x1387); + hw->phy.ops.release(hw); + } + + return ret_val; +} + +/** + * e1000_phy_hw_reset_ich8lan - Performs a PHY reset + * @hw: pointer to the HW structure + * + * Resets the PHY + * This is a function pointer entry point called by drivers + * or other shared routines. + **/ +STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_phy_hw_reset_ich8lan"); + + /* Gate automatic PHY configuration by hardware on non-managed 82579 */ + if ((hw->mac.type == e1000_pch2lan) && + !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) + e1000_gate_hw_phy_config_ich8lan(hw, true); + + ret_val = e1000_phy_hw_reset_generic(hw); + if (ret_val) + return ret_val; + + return e1000_post_phy_reset_ich8lan(hw); +} + +/** + * e1000_set_lplu_state_pchlan - Set Low Power Link Up state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU state according to the active flag. 
For PCH, if OEM write + * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set + * the phy speed. This function will manually set the LPLU bit and restart + * auto-neg as hw would do. D3 and D0 LPLU will call the same function + * since it configures the same bit. + **/ +STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active) +{ + s32 ret_val; + u16 oem_reg; + + DEBUGFUNC("e1000_set_lplu_state_pchlan"); + ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg); + if (ret_val) + return ret_val; + + if (active) + oem_reg |= HV_OEM_BITS_LPLU; + else + oem_reg &= ~HV_OEM_BITS_LPLU; + + if (!hw->phy.ops.check_reset_block(hw)) + oem_reg |= HV_OEM_BITS_RESTART_AN; + + return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg); +} + +/** + * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 phy_ctrl; + s32 ret_val = E1000_SUCCESS; + u16 data; + + DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan"); + + if (phy->type == e1000_phy_ife) + return E1000_SUCCESS; + + phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); + + if (active) { + phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; + E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return E1000_SUCCESS; + + /* Call gig speed drop workaround on LPLU before accessing + * any PHY registers + */ + if (hw->mac.type == e1000_ich8lan) + e1000_gig_downshift_workaround_ich8lan(hw); + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else { + phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; + E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return E1000_SUCCESS; + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D3 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 phy_ctrl; + s32 ret_val = E1000_SUCCESS; + u16 data; + + DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan"); + + phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); + + if (!active) { + phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; + E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return E1000_SUCCESS; + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; + E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return E1000_SUCCESS; + + /* Call gig speed drop workaround on LPLU before accessing + * any PHY registers + */ + if (hw->mac.type == e1000_ich8lan) + e1000_gig_downshift_workaround_ich8lan(hw); + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + } + + return ret_val; +} + +/** + * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1 + * @hw: pointer to the HW structure + * @bank: pointer to the variable that returns the active bank + * + * Reads signature byte from the NVM using the flash access registers. + * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank. + **/ +STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) +{ + u32 eecd; + struct e1000_nvm_info *nvm = &hw->nvm; + u32 bank1_offset = nvm->flash_bank_size * sizeof(u16); + u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1; + u32 nvm_dword = 0; + u8 sig_byte = 0; + s32 ret_val; + + DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan"); + + switch (hw->mac.type) { + case e1000_ich8lan: + case e1000_ich9lan: + eecd = E1000_READ_REG(hw, E1000_EECD); + if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) == + E1000_EECD_SEC1VAL_VALID_MASK) { + if (eecd & E1000_EECD_SEC1VAL) + *bank = 1; + else + *bank = 0; + + return E1000_SUCCESS; + } + DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n"); + /* fall-thru */ + default: + /* set bank to 0 in case flash read fails */ + *bank = 0; + + /* Check bank 0 */ + ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset, + &sig_byte); + if (ret_val) + return ret_val; + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 0; + return E1000_SUCCESS; + } + + /* Check bank 1 */ + ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset + + bank1_offset, + &sig_byte); + if (ret_val) + return ret_val; + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 1; + return E1000_SUCCESS; + } + + DEBUGOUT("ERROR: No valid NVM bank present\n"); + return -E1000_ERR_NVM; + } +} + +/** + * e1000_read_nvm_ich8lan - Read word(s) from the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to read. + * @words: Size of data to read in words + * @data: Pointer to the word(s) to read at offset. + * + * Reads a word(s) from the NVM using the flash access registers. 
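+ * Words that were modified through e1000_write_nvm_ich8lan are returned from
+ * the shadow RAM cache instead of being read from the flash.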
+ **/ +STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 act_offset; + s32 ret_val = E1000_SUCCESS; + u32 bank = 0; + u16 i, word; + + DEBUGFUNC("e1000_read_nvm_ich8lan"); + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + nvm->ops.acquire(hw); + + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val != E1000_SUCCESS) { + DEBUGOUT("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + act_offset = (bank) ? nvm->flash_bank_size : 0; + act_offset += offset; + + ret_val = E1000_SUCCESS; + for (i = 0; i < words; i++) { + if (dev_spec->shadow_ram[offset+i].modified) { + data[i] = dev_spec->shadow_ram[offset+i].value; + } else { + ret_val = e1000_read_flash_word_ich8lan(hw, + act_offset + i, + &word); + if (ret_val) + break; + data[i] = word; + } + } + + nvm->ops.release(hw); + +out: + if (ret_val) + DEBUGOUT1("NVM read error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_flash_cycle_init_ich8lan - Initialize flash + * @hw: pointer to the HW structure + * + * This function does initial flash setup so that a new read/write/erase cycle + * can be started. + **/ +STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) +{ + union ich8_hws_flash_status hsfsts; + s32 ret_val = -E1000_ERR_NVM; + + DEBUGFUNC("e1000_flash_cycle_init_ich8lan"); + + hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); + + /* Check if the flash descriptor is valid */ + if (!hsfsts.hsf_status.fldesvalid) { + DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.\n"); + return -E1000_ERR_NVM; + } + + /* Clear FCERR and DAEL in hw status by writing 1 */ + hsfsts.hsf_status.flcerr = 1; + hsfsts.hsf_status.dael = 1; + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval); + + /* Either we should have a hardware SPI cycle in progress + * bit to check against, in order to start a new cycle or + * FDONE bit should be changed in the hardware so that it + * is 1 after hardware reset, which can then be used as an + * indication whether a cycle is in progress or has been + * completed. + */ + + if (!hsfsts.hsf_status.flcinprog) { + /* There is no cycle running at present, + * so we can start a cycle. + * Begin by setting Flash Cycle Done. + */ + hsfsts.hsf_status.flcdone = 1; + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval); + ret_val = E1000_SUCCESS; + } else { + s32 i; + + /* Otherwise poll for sometime so the current + * cycle has a chance to end before giving up. + */ + for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { + hsfsts.regval = E1000_READ_FLASH_REG16(hw, + ICH_FLASH_HSFSTS); + if (!hsfsts.hsf_status.flcinprog) { + ret_val = E1000_SUCCESS; + break; + } + usec_delay(1); + } + if (ret_val == E1000_SUCCESS) { + /* Successful in waiting for previous cycle to timeout, + * now set the Flash Cycle Done. + */ + hsfsts.hsf_status.flcdone = 1; + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, + hsfsts.regval); + } else { + DEBUGOUT("Flash controller busy, cannot get access\n"); + } + } + + return ret_val; +} + +/** + * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase) + * @hw: pointer to the HW structure + * @timeout: maximum time to wait for completion + * + * This function starts a flash cycle and waits for its completion. 
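+ * Returns -E1000_ERR_NVM if the cycle reports an error or does not complete
+ * within the given timeout.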
+ **/ +STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) +{ + union ich8_hws_flash_ctrl hsflctl; + union ich8_hws_flash_status hsfsts; + u32 i = 0; + + DEBUGFUNC("e1000_flash_cycle_ich8lan"); + + /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ + hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcgo = 1; + + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); + + /* wait till FDONE bit is set to 1 */ + do { + hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcdone) + break; + usec_delay(1); + } while (i++ < timeout); + + if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr) + return E1000_SUCCESS; + + return -E1000_ERR_NVM; +} + +/** + * e1000_read_flash_word_ich8lan - Read word from flash + * @hw: pointer to the HW structure + * @offset: offset to data location + * @data: pointer to the location for storing the data + * + * Reads the flash word at offset into data. Offset is converted + * to bytes before read. + **/ +STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + DEBUGFUNC("e1000_read_flash_word_ich8lan"); + + if (!data) + return -E1000_ERR_NVM; + + /* Must convert offset into bytes. */ + offset <<= 1; + + return e1000_read_flash_data_ich8lan(hw, offset, 2, data); +} + +/** + * e1000_read_flash_byte_ich8lan - Read byte from flash + * @hw: pointer to the HW structure + * @offset: The offset of the byte to read. + * @data: Pointer to a byte to store the value read. + * + * Reads a single byte from the NVM using the flash access registers. + **/ +STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 *data) +{ + s32 ret_val; + u16 word = 0; + + ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); + + if (ret_val) + return ret_val; + + *data = (u8)word; + + return E1000_SUCCESS; +} + +/** + * e1000_read_flash_data_ich8lan - Read byte or word from NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the byte or word to read. + * @size: Size of data to read, 1=byte 2=word + * @data: Pointer to the word to store the value read. + * + * Reads a byte or word from the NVM using the flash access registers. + **/ +STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 *data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + u32 flash_data = 0; + s32 ret_val = -E1000_ERR_NVM; + u8 count = 0; + + DEBUGFUNC("e1000_read_flash_data_ich8lan"); + + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + + do { + usec_delay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val != E1000_SUCCESS) + break; + hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); + + /* 0b/1b corresponds to 1 or 2 byte size, respectively. 
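+		 * The FLDBCOUNT field is programmed with the transfer size
+		 * minus one.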
*/ + hsflctl.hsf_ctrl.fldbcount = size - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_READ_COMMAND_TIMEOUT); + + /* Check if FCERR is set to 1, if set to 1, clear it + * and try the whole sequence a few more times, else + * read in (shift in) the Flash Data0, the order is + * least significant byte first msb to lsb + */ + if (ret_val == E1000_SUCCESS) { + flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0); + if (size == 1) + *data = (u8)(flash_data & 0x000000FF); + else if (size == 2) + *data = (u16)(flash_data & 0x0000FFFF); + break; + } else { + /* If we've gotten here, then things are probably + * completely hosed, but if the error condition is + * detected, it won't hurt to give it another try... + * ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = E1000_READ_FLASH_REG16(hw, + ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr) { + /* Repeat for some time before giving up. */ + continue; + } else if (!hsfsts.hsf_status.flcdone) { + DEBUGOUT("Timeout error - flash cycle did not complete.\n"); + break; + } + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + + +/** + * e1000_write_nvm_ich8lan - Write word(s) to the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to write. + * @words: Size of data to write in words + * @data: Pointer to the word(s) to write at offset. + * + * Writes a byte or word to the NVM using the flash access registers. + **/ +STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u16 i; + + DEBUGFUNC("e1000_write_nvm_ich8lan"); + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + nvm->ops.acquire(hw); + + for (i = 0; i < words; i++) { + dev_spec->shadow_ram[offset+i].modified = true; + dev_spec->shadow_ram[offset+i].value = data[i]; + } + + nvm->ops.release(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM + * @hw: pointer to the HW structure + * + * The NVM checksum is updated by calling the generic update_nvm_checksum, + * which writes the checksum to the shadow ram. The changes in the shadow + * ram are then committed to the EEPROM by processing each bank at a time + * checking for the modified bit and writing only the pending changes. + * After a successful commit, the shadow ram is cleared and is ready for + * future writes. + **/ +STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 i, act_offset, new_bank_offset, old_bank_offset, bank; + s32 ret_val; + u16 data = 0; + + DEBUGFUNC("e1000_update_nvm_checksum_ich8lan"); + + ret_val = e1000_update_nvm_checksum_generic(hw); + if (ret_val) + goto out; + + if (nvm->type != e1000_nvm_flash_sw) + goto out; + + nvm->ops.acquire(hw); + + /* We're writing to the opposite bank so if we're on bank 1, + * write to bank 0 etc. 
We also need to erase the segment that + * is going to be written + */ + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val != E1000_SUCCESS) { + DEBUGOUT("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + if (bank == 0) { + new_bank_offset = nvm->flash_bank_size; + old_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); + if (ret_val) + goto release; + } else { + old_bank_offset = nvm->flash_bank_size; + new_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); + if (ret_val) + goto release; + } + for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { + if (dev_spec->shadow_ram[i].modified) { + data = dev_spec->shadow_ram[i].value; + } else { + ret_val = e1000_read_flash_word_ich8lan(hw, i + + old_bank_offset, + &data); + if (ret_val) + break; + } + /* If the word is 0x13, then make sure the signature bits + * (15:14) are 11b until the commit has completed. + * This will allow us to write 10b which indicates the + * signature is valid. We want to do this after the write + * has completed so that we don't mark the segment valid + * while the write is still in progress + */ + if (i == E1000_ICH_NVM_SIG_WORD) + data |= E1000_ICH_NVM_SIG_MASK; + + /* Convert offset to bytes. */ + act_offset = (i + new_bank_offset) << 1; + + usec_delay(100); + + /* Write the bytes to the new bank. */ + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset, + (u8)data); + if (ret_val) + break; + + usec_delay(100); + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset + 1, + (u8)(data >> 8)); + if (ret_val) + break; + } + + /* Don't bother writing the segment valid bits if sector + * programming failed. + */ + if (ret_val) { + DEBUGOUT("Flash commit failed.\n"); + goto release; + } + + /* Finally validate the new segment by setting bit 15:14 + * to 10b in word 0x13 , this can be done without an + * erase as well since these bits are 11 to start with + * and we need to change bit 14 to 0b + */ + act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; + ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); + if (ret_val) + goto release; + + data &= 0xBFFF; + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1, + (u8)(data >> 8)); + if (ret_val) + goto release; + + /* And invalidate the previously valid segment by setting + * its signature word (0x13) high_byte to 0b. This can be + * done without an erase because flash erase sets all bits + * to 1's. We can write 1's to 0's without an erase + */ + act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; + + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); + + if (ret_val) + goto release; + + /* Great! Everything worked, we can now clear the cached entries. */ + for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { + dev_spec->shadow_ram[i].modified = false; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + +release: + nvm->ops.release(hw); + + /* Reload the EEPROM, or else modifications will not appear + * until after the next adapter reset. + */ + if (!ret_val) { + nvm->ops.reload(hw); + msec_delay(10); + } + +out: + if (ret_val) + DEBUGOUT1("NVM update error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19. 
+ * If the bit is 0, that the EEPROM had been modified, but the checksum was not + * calculated, in which case we need to calculate the checksum and set bit 6. + **/ +STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 data; + u16 word; + u16 valid_csum_mask; + + DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan"); + + /* Read NVM and check Invalid Image CSUM bit. If this bit is 0, + * the checksum needs to be fixed. This bit is an indication that + * the NVM was prepared by OEM software and did not calculate + * the checksum...a likely scenario. + */ + switch (hw->mac.type) { + case e1000_pch_lpt: + word = NVM_COMPAT; + valid_csum_mask = NVM_COMPAT_VALID_CSUM; + break; + default: + word = NVM_FUTURE_INIT_WORD1; + valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM; + break; + } + + ret_val = hw->nvm.ops.read(hw, word, 1, &data); + if (ret_val) + return ret_val; + + if (!(data & valid_csum_mask)) { + data |= valid_csum_mask; + ret_val = hw->nvm.ops.write(hw, word, 1, &data); + if (ret_val) + return ret_val; + ret_val = hw->nvm.ops.update(hw); + if (ret_val) + return ret_val; + } + + return e1000_validate_nvm_checksum_generic(hw); +} + +/** + * e1000_write_flash_data_ich8lan - Writes bytes to the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the byte/word to read. + * @size: Size of data to read, 1=byte 2=word + * @data: The byte(s) to write to the NVM. + * + * Writes one/two bytes to the NVM using the flash access registers. + **/ +STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + u32 flash_data = 0; + s32 ret_val; + u8 count = 0; + + DEBUGFUNC("e1000_write_ich8_data"); + + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + + do { + usec_delay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val != E1000_SUCCESS) + break; + hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); + + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = size - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); + + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr); + + if (size == 1) + flash_data = (u32)data & 0x00FF; + else + flash_data = (u32)data; + + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data); + + /* check if FCERR is set to 1 , if set to 1, clear it + * and try the whole sequence a few more times else done + */ + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_WRITE_COMMAND_TIMEOUT); + if (ret_val == E1000_SUCCESS) + break; + + /* If we're here, then things are most likely + * completely hosed, but if the error condition + * is detected, it won't hurt to give it another + * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr) + /* Repeat for some time before giving up. 
*/ + continue; + if (!hsfsts.hsf_status.flcdone) { + DEBUGOUT("Timeout error - flash cycle did not complete.\n"); + break; + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + + +/** + * e1000_write_flash_byte_ich8lan - Write a single byte to NVM + * @hw: pointer to the HW structure + * @offset: The index of the byte to read. + * @data: The byte to write to the NVM. + * + * Writes a single byte to the NVM using the flash access registers. + **/ +STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 data) +{ + u16 word = (u16)data; + + DEBUGFUNC("e1000_write_flash_byte_ich8lan"); + + return e1000_write_flash_data_ich8lan(hw, offset, 1, word); +} + + + +/** + * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM + * @hw: pointer to the HW structure + * @offset: The offset of the byte to write. + * @byte: The byte to write to the NVM. + * + * Writes a single byte to the NVM using the flash access registers. + * Goes through a retry algorithm before giving up. + **/ +STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, + u32 offset, u8 byte) +{ + s32 ret_val; + u16 program_retries; + + DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan"); + + ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); + if (!ret_val) + return ret_val; + + for (program_retries = 0; program_retries < 100; program_retries++) { + DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset); + usec_delay(100); + ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); + if (ret_val == E1000_SUCCESS) + break; + } + if (program_retries == 100) + return -E1000_ERR_NVM; + + return E1000_SUCCESS; +} + +/** + * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM + * @hw: pointer to the HW structure + * @bank: 0 for first bank, 1 for second bank, etc. + * + * Erases the bank specified. Each bank is a 4k block. Banks are 0 based. + * bank N is 4096 * N + flash_reg_addr. + **/ +STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + /* bank size is in 16bit words - adjust to bytes */ + u32 flash_bank_size = nvm->flash_bank_size * 2; + s32 ret_val; + s32 count = 0; + s32 j, iteration, sector_size; + + DEBUGFUNC("e1000_erase_flash_bank_ich8lan"); + + hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); + + /* Determine HW Sector size: Read BERASE bits of hw flash status + * register + * 00: The Hw sector is 256 bytes, hence we need to erase 16 + * consecutive sectors. The start index for the nth Hw sector + * can be calculated as = bank * 4096 + n * 256 + * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. 
+ * The start index for the nth Hw sector can be calculated + * as = bank * 4096 + * 10: The Hw sector is 8K bytes, nth sector = bank * 8192 + * (ich9 only, otherwise error condition) + * 11: The Hw sector is 64K bytes, nth sector = bank * 65536 + */ + switch (hsfsts.hsf_status.berasesz) { + case 0: + /* Hw sector size 256 */ + sector_size = ICH_FLASH_SEG_SIZE_256; + iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256; + break; + case 1: + sector_size = ICH_FLASH_SEG_SIZE_4K; + iteration = 1; + break; + case 2: + sector_size = ICH_FLASH_SEG_SIZE_8K; + iteration = 1; + break; + case 3: + sector_size = ICH_FLASH_SEG_SIZE_64K; + iteration = 1; + break; + default: + return -E1000_ERR_NVM; + } + + /* Start with the base address, then add the sector offset. */ + flash_linear_addr = hw->nvm.flash_base_addr; + flash_linear_addr += (bank) ? flash_bank_size : 0; + + for (j = 0; j < iteration; j++) { + do { + u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT; + + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + return ret_val; + + /* Write a value 11 (block Erase) in Flash + * Cycle field in hw flash control + */ + hsflctl.regval = + E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); + + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, + hsflctl.regval); + + /* Write the last 24 bits of an index within the + * block into Flash Linear address field in Flash + * Address. + */ + flash_linear_addr += (j * sector_size); + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, + flash_linear_addr); + + ret_val = e1000_flash_cycle_ich8lan(hw, timeout); + if (ret_val == E1000_SUCCESS) + break; + + /* Check if FCERR is set to 1. If 1, + * clear it and try the whole sequence + * a few more times else Done + */ + hsfsts.regval = E1000_READ_FLASH_REG16(hw, + ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr) + /* repeat for some time before giving up */ + continue; + else if (!hsfsts.hsf_status.flcdone) + return ret_val; + } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); + } + + return E1000_SUCCESS; +} + +/** + * e1000_valid_led_default_ich8lan - Set the default LED settings + * @hw: pointer to the HW structure + * @data: Pointer to the LED settings + * + * Reads the LED default settings from the NVM to data. If the NVM LED + * settings is all 0's or F's, set the LED default to a valid LED default + * setting. + **/ +STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_valid_led_default_ich8lan"); + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT_ICH8LAN; + + return E1000_SUCCESS; +} + +/** + * e1000_id_led_init_pchlan - store LED configurations + * @hw: pointer to the HW structure + * + * PCH does not control LEDs via the LEDCTL register, rather it uses + * the PHY LED configuration register. + * + * PCH also does not have an "always on" or "always off" mode which + * complicates the ID feature. Instead of using the "on" mode to indicate + * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()), + * use "link_up" mode. The LEDs will still ID on request if there is no + * link based on logic in e1000_led_[on|off]_pchlan(). 
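+ *
+ * Each of the four nibbles in the NVM ID LED word selects the behaviour of one
+ * LED and is mapped onto the 5-bit PHY LED fields in ledctl_mode1/ledctl_mode2.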
+ **/ +STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP; + const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT; + u16 data, i, temp, shift; + + DEBUGFUNC("e1000_id_led_init_pchlan"); + + /* Get default ID LED modes */ + ret_val = hw->nvm.ops.valid_led_default(hw, &data); + if (ret_val) + return ret_val; + + mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK; + shift = (i * 5); + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); + mac->ledctl_mode1 |= (ledctl_on << shift); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); + mac->ledctl_mode1 |= (ledctl_off << shift); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); + mac->ledctl_mode2 |= (ledctl_on << shift); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); + mac->ledctl_mode2 |= (ledctl_off << shift); + break; + default: + /* Do nothing */ + break; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_get_bus_info_ich8lan - Get/Set the bus type and width + * @hw: pointer to the HW structure + * + * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability + * register, so the the bus width is hard coded. + **/ +STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + s32 ret_val; + + DEBUGFUNC("e1000_get_bus_info_ich8lan"); + + ret_val = e1000_get_bus_info_pcie_generic(hw); + + /* ICH devices are "PCI Express"-ish. They have + * a configuration space, but do not contain + * PCI Express Capability registers, so bus width + * must be hardcoded. + */ + if (bus->width == e1000_bus_width_unknown) + bus->width = e1000_bus_width_pcie_x1; + + return ret_val; +} + +/** + * e1000_reset_hw_ich8lan - Reset the hardware + * @hw: pointer to the HW structure + * + * Does a full reset of the hardware which includes a reset of the PHY and + * MAC. + **/ +STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u16 kum_cfg; + u32 ctrl, reg; + s32 ret_val; + + DEBUGFUNC("e1000_reset_hw_ich8lan"); + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000_disable_pcie_master_generic(hw); + if (ret_val) + DEBUGOUT("PCI-E Master disable polling has failed.\n"); + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + + /* Disable the Transmit and Receive units. Then delay to allow + * any pending transactions to complete before we hit the MAC + * with the global reset. + */ + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + msec_delay(10); + + /* Workaround for ICH8 bit corruption issue in FIFO memory */ + if (hw->mac.type == e1000_ich8lan) { + /* Set Tx and Rx buffer allocation to 8k apiece. 
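In e1000_id_led_init_pchlan above, the NVM word packs one 4-bit mode per LED, while each LED's destination field in ledctl_mode1/ledctl_mode2 is 5 bits wide (hence the i * 5 shift). A sketch of just the NVM-side extraction, with a hypothetical helper name:

static u16 e1000_nvm_led_mode_sketch(u16 nvm_word, u16 led)
{
	/* four bits per LED in the NVM word: LED n uses bits [4n+3:4n] */
	return (u16)((nvm_word >> (led << 2)) & E1000_LEDCTL_LED0_MODE_MASK);
}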
*/ + E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K); + /* Set Packet Buffer Size to 16k. */ + E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K); + } + + if (hw->mac.type == e1000_pchlan) { + /* Save the NVM K1 bit setting*/ + ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg); + if (ret_val) + return ret_val; + + if (kum_cfg & E1000_NVM_K1_ENABLE) + dev_spec->nvm_k1_enabled = true; + else + dev_spec->nvm_k1_enabled = false; + } + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + if (!hw->phy.ops.check_reset_block(hw)) { + /* Full-chip reset requires MAC and PHY reset at the same + * time to make sure the interface between MAC and the + * external PHY is reset. + */ + ctrl |= E1000_CTRL_PHY_RST; + + /* Gate automatic PHY configuration by hardware on + * non-managed 82579 + */ + if ((hw->mac.type == e1000_pch2lan) && + !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) + e1000_gate_hw_phy_config_ich8lan(hw, true); + } + ret_val = e1000_acquire_swflag_ich8lan(hw); + DEBUGOUT("Issuing a global reset to ich8lan\n"); + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST)); + /* cannot issue a flush here because it hangs the hardware */ + msec_delay(20); + + /* Set Phy Config Counter to 50msec */ + if (hw->mac.type == e1000_pch2lan) { + reg = E1000_READ_REG(hw, E1000_FEXTNVM3); + reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; + reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; + E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg); + } + + if (!ret_val) + E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex); + + if (ctrl & E1000_CTRL_PHY_RST) { + ret_val = hw->phy.ops.get_cfg_done(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_post_phy_reset_ich8lan(hw); + if (ret_val) + return ret_val; + } + + /* For PCH, this write will make sure that any noise + * will be detected as a CRC error and be dropped rather than show up + * as a bad packet to the DMA engine. + */ + if (hw->mac.type == e1000_pchlan) + E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565); + + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); + + reg = E1000_READ_REG(hw, E1000_KABGTXD); + reg |= E1000_KABGTXD_BGSQLBIAS; + E1000_WRITE_REG(hw, E1000_KABGTXD, reg); + + return E1000_SUCCESS; +} + +/** + * e1000_init_hw_ich8lan - Initialize the hardware + * @hw: pointer to the HW structure + * + * Prepares the hardware for transmit and receive by doing the following: + * - initialize hardware bits + * - initialize LED identification + * - setup receive address registers + * - setup flow control + * - setup transmit descriptors + * - clear statistics + **/ +STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl_ext, txdctl, snoop; + s32 ret_val; + u16 i; + + DEBUGFUNC("e1000_init_hw_ich8lan"); + + e1000_initialize_hw_bits_ich8lan(hw); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + /* An error is not fatal and we should not stop init due to this */ + if (ret_val) + DEBUGOUT("Error initializing identification LED\n"); + + /* Setup the receive address. */ + e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* The 82578 Rx buffer will stall if wakeup is enabled in host and + * the ME. Disable wakeup by clearing the host wakeup bit. + * Reset the phy after disabling host wakeup to reset the Rx buffer. 
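e1000_reset_hw_ich8lan and e1000_init_hw_ich8lan are STATIC and are normally reached through the hw->mac.ops table rather than called directly. A hedged sketch of the usual reset-then-init ordering; the wrapper name is invented, and which routines the ops actually dispatch to depends on how the table was initialized for the part:

static s32 e1000_reset_and_init_sketch(struct e1000_hw *hw)
{
	s32 ret_val;

	/* on ICH8-family parts these ops are expected to dispatch to the
	 * e1000_reset_hw_ich8lan()/e1000_init_hw_ich8lan() routines here
	 */
	ret_val = hw->mac.ops.reset_hw(hw);
	if (ret_val)
		return ret_val;

	return hw->mac.ops.init_hw(hw);
}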
+ */ + if (hw->phy.type == e1000_phy_82578) { + hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i); + i &= ~BM_WUC_HOST_WU_BIT; + hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i); + ret_val = e1000_phy_hw_reset_ich8lan(hw); + if (ret_val) + return ret_val; + } + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + + /* Set the transmit descriptor write-back policy for both queues */ + txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0)); + txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB); + txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH); + E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl); + txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1)); + txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB); + txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH); + E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl); + + /* ICH8 has opposite polarity of no_snoop bits. + * By default, we should use snoop behavior. + */ + if (mac->type == e1000_ich8lan) + snoop = PCIE_ICH8_SNOOP_ALL; + else + snoop = (u32) ~(PCIE_NO_SNOOP_ALL); + e1000_set_pcie_no_snoop_generic(hw, snoop); + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_ich8lan(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits + * @hw: pointer to the HW structure + * + * Sets/Clears required hardware bits necessary for correctly setting up the + * hardware for transmit and receive. 
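The TXDCTL programming in e1000_init_hw_ich8lan above applies the same read-modify-write to queue 0 and queue 1. Purely as an illustration (not a proposed change), the same write-back policy expressed as a loop:

static void e1000_set_txdctl_wb_policy_sketch(struct e1000_hw *hw)
{
	u32 txdctl;
	int q;

	for (q = 0; q < 2; q++) {
		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(q));
		txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
			 E1000_TXDCTL_FULL_TX_DESC_WB;
		txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
			 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
		E1000_WRITE_REG(hw, E1000_TXDCTL(q), txdctl);
	}
}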
+ **/ +STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) +{ + u32 reg; + + DEBUGFUNC("e1000_initialize_hw_bits_ich8lan"); + + /* Extended Device Control */ + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg |= (1 << 22); + /* Enable PHY low-power state when MAC is at D3 w/o WoL */ + if (hw->mac.type >= e1000_pchlan) + reg |= E1000_CTRL_EXT_PHYPDEN; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + + /* Transmit Descriptor Control 0 */ + reg = E1000_READ_REG(hw, E1000_TXDCTL(0)); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = E1000_READ_REG(hw, E1000_TXDCTL(1)); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = E1000_READ_REG(hw, E1000_TARC(0)); + if (hw->mac.type == e1000_ich8lan) + reg |= (1 << 28) | (1 << 29); + reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); + E1000_WRITE_REG(hw, E1000_TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = E1000_READ_REG(hw, E1000_TARC(1)); + if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + reg |= (1 << 24) | (1 << 26) | (1 << 30); + E1000_WRITE_REG(hw, E1000_TARC(1), reg); + + /* Device Status */ + if (hw->mac.type == e1000_ich8lan) { + reg = E1000_READ_REG(hw, E1000_STATUS); + reg &= ~(1 << 31); + E1000_WRITE_REG(hw, E1000_STATUS, reg); + } + + /* work-around descriptor data corruption issue during nfs v2 udp + * traffic, just disable the nfs filtering capability + */ + reg = E1000_READ_REG(hw, E1000_RFCTL); + reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); + + /* Disable IPv6 extension header parsing because some malformed + * IPv6 headers can hang the Rx. + */ + if (hw->mac.type == e1000_ich8lan) + reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); + E1000_WRITE_REG(hw, E1000_RFCTL, reg); + + /* Enable ECC on Lynxpoint */ + if (hw->mac.type == e1000_pch_lpt) { + reg = E1000_READ_REG(hw, E1000_PBECCSTS); + reg |= E1000_PBECCSTS_ECC_ENABLE; + E1000_WRITE_REG(hw, E1000_PBECCSTS, reg); + + reg = E1000_READ_REG(hw, E1000_CTRL); + reg |= E1000_CTRL_MEHE; + E1000_WRITE_REG(hw, E1000_CTRL, reg); + } + + return; +} + +/** + * e1000_setup_link_ich8lan - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_setup_link_ich8lan"); + + if (hw->phy.ops.check_reset_block(hw)) + return E1000_SUCCESS; + + /* ICH parts do not have a word in the NVM to determine + * the default flow control setting, so we explicitly + * set it to full. + */ + if (hw->fc.requested_mode == e1000_fc_default) + hw->fc.requested_mode = e1000_fc_full; + + /* Save off the requested flow control mode for use later. Depending + * on the link partner's capabilities, we may or may not use this mode. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + DEBUGOUT1("After fix-ups FlowControl is now = %x\n", + hw->fc.current_mode); + + /* Continue to configure the copper link. 
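As the comments in e1000_setup_link_ich8lan note, ICH parts have no NVM word for the default flow-control mode and fall back to e1000_fc_full. A caller that wants a specific mode would set fc.requested_mode before link setup; a minimal, hypothetical sketch:

static s32 e1000_request_full_fc_sketch(struct e1000_hw *hw)
{
	/* ask for symmetric (Rx+Tx pause) flow control, then (re)configure */
	hw->fc.requested_mode = e1000_fc_full;

	return hw->mac.ops.setup_link(hw);
}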
*/ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + return ret_val; + + E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); + if ((hw->phy.type == e1000_phy_82578) || + (hw->phy.type == e1000_phy_82579) || + (hw->phy.type == e1000_phy_i217) || + (hw->phy.type == e1000_phy_82577)) { + E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time); + + ret_val = hw->phy.ops.write_reg(hw, + PHY_REG(BM_PORT_CTRL_PAGE, 27), + hw->fc.pause_time); + if (ret_val) + return ret_val; + } + + return e1000_set_fc_watermarks_generic(hw); +} + +/** + * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface + * @hw: pointer to the HW structure + * + * Configures the kumeran interface to the PHY to wait the appropriate time + * when polling the PHY, then call the generic setup_copper_link to finish + * configuring the copper link. + **/ +STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 reg_data; + + DEBUGFUNC("e1000_setup_copper_link_ich8lan"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Set the mac to wait the maximum time between each iteration + * and increase the max iterations when polling the phy; + * this fixes erroneous timeouts at 10Mbps. + */ + ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS, + 0xFFFF); + if (ret_val) + return ret_val; + ret_val = e1000_read_kmrn_reg_generic(hw, + E1000_KMRNCTRLSTA_INBAND_PARAM, + ®_data); + if (ret_val) + return ret_val; + reg_data |= 0x3F; + ret_val = e1000_write_kmrn_reg_generic(hw, + E1000_KMRNCTRLSTA_INBAND_PARAM, + reg_data); + if (ret_val) + return ret_val; + + switch (hw->phy.type) { + case e1000_phy_igp_3: + ret_val = e1000_copper_link_setup_igp(hw); + if (ret_val) + return ret_val; + break; + case e1000_phy_bm: + case e1000_phy_82578: + ret_val = e1000_copper_link_setup_m88(hw); + if (ret_val) + return ret_val; + break; + case e1000_phy_82577: + case e1000_phy_82579: + ret_val = e1000_copper_link_setup_82577(hw); + if (ret_val) + return ret_val; + break; + case e1000_phy_ife: + ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, + ®_data); + if (ret_val) + return ret_val; + + reg_data &= ~IFE_PMC_AUTO_MDIX; + + switch (hw->phy.mdix) { + case 1: + reg_data &= ~IFE_PMC_FORCE_MDIX; + break; + case 2: + reg_data |= IFE_PMC_FORCE_MDIX; + break; + case 0: + default: + reg_data |= IFE_PMC_AUTO_MDIX; + break; + } + ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, + reg_data); + if (ret_val) + return ret_val; + break; + default: + break; + } + + return e1000_setup_copper_link_generic(hw); +} + +/** + * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface + * @hw: pointer to the HW structure + * + * Calls the PHY specific link setup function and then calls the + * generic setup_copper_link to finish configuring the link for + * Lynxpoint PCH devices + **/ +STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + DEBUGFUNC("e1000_setup_copper_link_pch_lpt"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + ret_val = e1000_copper_link_setup_82577(hw); + if (ret_val) + return ret_val; + + return e1000_setup_copper_link_generic(hw); +} + +/** + * e1000_get_link_up_info_ich8lan - Get current link speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to 
store current link speed + * @duplex: pointer to store the current link duplex + * + * Calls the generic get_speed_and_duplex to retrieve the current link + * information and then calls the Kumeran lock loss workaround for links at + * gigabit speeds. + **/ +STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + DEBUGFUNC("e1000_get_link_up_info_ich8lan"); + + ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex); + if (ret_val) + return ret_val; + + if ((hw->mac.type == e1000_ich8lan) && + (hw->phy.type == e1000_phy_igp_3) && + (*speed == SPEED_1000)) { + ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw); + } + + return ret_val; +} + +/** + * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround + * @hw: pointer to the HW structure + * + * Work-around for 82566 Kumeran PCS lock loss: + * On link status change (i.e. PCI reset, speed change) and link is up and + * speed is gigabit- + * 0) if workaround is optionally disabled do nothing + * 1) wait 1ms for Kumeran link to come up + * 2) check Kumeran Diagnostic register PCS lock loss bit + * 3) if not set the link is locked (all is good), otherwise... + * 4) reset the PHY + * 5) repeat up to 10 times + * Note: this is only called for IGP3 copper when speed is 1gb. + **/ +STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 phy_ctrl; + s32 ret_val; + u16 i, data; + bool link; + + DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan"); + + if (!dev_spec->kmrn_lock_loss_workaround_enabled) + return E1000_SUCCESS; + + /* Make sure link is up before proceeding. If not just return. + * Attempting this while link is negotiating fouled up link + * stability + */ + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (!link) + return E1000_SUCCESS; + + for (i = 0; i < 10; i++) { + /* read once to clear */ + ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data); + if (ret_val) + return ret_val; + /* and again to get new status */ + ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data); + if (ret_val) + return ret_val; + + /* check for PCS lock */ + if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) + return E1000_SUCCESS; + + /* Issue PHY reset */ + hw->phy.ops.reset(hw); + msec_delay_irq(5); + } + /* Disable GigE link negotiation */ + phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); + phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE | + E1000_PHY_CTRL_NOND0A_GBE_DISABLE); + E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); + + /* Call gig speed drop workaround on Gig disable before accessing + * any PHY registers + */ + e1000_gig_downshift_workaround_ich8lan(hw); + + /* unable to acquire PCS lock */ + return -E1000_ERR_PHY; +} + +/** + * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state + * @hw: pointer to the HW structure + * @state: boolean value used to set the current Kumeran workaround state + * + * If ICH8, set the current Kumeran workaround state (enabled - true + * /disabled - false). 
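The switch for this workaround is the setter documented above (and defined just below), so a caller can turn it off explicitly. A hypothetical wrapper, shown only for illustration:

static void e1000_disable_kmrn_workaround_sketch(struct e1000_hw *hw)
{
	/* ignored (bar a debug message) on anything other than ICH8 */
	e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, false);
}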
+ **/ +void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, + bool state) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + + DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan"); + + if (hw->mac.type != e1000_ich8lan) { + DEBUGOUT("Workaround applies to ICH8 only.\n"); + return; + } + + dev_spec->kmrn_lock_loss_workaround_enabled = state; + + return; +} + +/** + * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3 + * @hw: pointer to the HW structure + * + * Workaround for 82566 power-down on D3 entry: + * 1) disable gigabit link + * 2) write VR power-down enable + * 3) read it back + * Continue if successful, else issue LCD reset and repeat + **/ +void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw) +{ + u32 reg; + u16 data; + u8 retry = 0; + + DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan"); + + if (hw->phy.type != e1000_phy_igp_3) + return; + + /* Try the workaround twice (if needed) */ + do { + /* Disable link */ + reg = E1000_READ_REG(hw, E1000_PHY_CTRL); + reg |= (E1000_PHY_CTRL_GBE_DISABLE | + E1000_PHY_CTRL_NOND0A_GBE_DISABLE); + E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg); + + /* Call gig speed drop workaround on Gig disable before + * accessing any PHY registers + */ + if (hw->mac.type == e1000_ich8lan) + e1000_gig_downshift_workaround_ich8lan(hw); + + /* Write VR power-down enable */ + hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data); + data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; + hw->phy.ops.write_reg(hw, IGP3_VR_CTRL, + data | IGP3_VR_CTRL_MODE_SHUTDOWN); + + /* Read it back and test */ + hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data); + data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; + if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry) + break; + + /* Issue PHY reset and repeat at most one more time */ + reg = E1000_READ_REG(hw, E1000_CTRL); + E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST); + retry++; + } while (retry); +} + +/** + * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working + * @hw: pointer to the HW structure + * + * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC), + * LPLU, Gig disable, MDIC PHY reset): + * 1) Set Kumeran Near-end loopback + * 2) Clear Kumeran Near-end loopback + * Should only be called for ICH8[m] devices with any 1G Phy. + **/ +void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 reg_data; + + DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan"); + + if ((hw->mac.type != e1000_ich8lan) || + (hw->phy.type == e1000_phy_ife)) + return; + + ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, + ®_data); + if (ret_val) + return; + reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK; + ret_val = e1000_write_kmrn_reg_generic(hw, + E1000_KMRNCTRLSTA_DIAG_OFFSET, + reg_data); + if (ret_val) + return; + reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK; + e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, + reg_data); +} + +/** + * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx + * @hw: pointer to the HW structure + * + * During S0 to Sx transition, it is possible the link remains at gig + * instead of negotiating to a lower speed. Before going to Sx, set + * 'Gig Disable' to force link speed negotiation to a lower speed based on + * the LPLU setting in the NVM or custom setting. For PCH and newer parts, + * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also + * needs to be written. 
+ * Parts that support (and are linked to a partner which support) EEE in + * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power + * than 10Mbps w/o EEE. + **/ +void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 phy_ctrl; + s32 ret_val; + + DEBUGFUNC("e1000_suspend_workarounds_ich8lan"); + + phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); + phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; + + if (hw->phy.type == e1000_phy_i217) { + u16 phy_reg, device_id = hw->device_id; + + if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || + (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || + (device_id == E1000_DEV_ID_PCH_I218_LM3) || + (device_id == E1000_DEV_ID_PCH_I218_V3)) { + u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6); + + E1000_WRITE_REG(hw, E1000_FEXTNVM6, + fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (!dev_spec->eee_disable) { + u16 eee_advert; + + ret_val = + e1000_read_emi_reg_locked(hw, + I217_EEE_ADVERTISEMENT, + &eee_advert); + if (ret_val) + goto release; + + /* Disable LPLU if both link partners support 100BaseT + * EEE and 100Full is advertised on both ends of the + * link, and enable Auto Enable LPI since there will + * be no driver to enable LPI while in Sx. + */ + if ((eee_advert & I82579_EEE_100_SUPPORTED) && + (dev_spec->eee_lp_ability & + I82579_EEE_100_SUPPORTED) && + (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) { + phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU | + E1000_PHY_CTRL_NOND0A_LPLU); + + /* Set Auto Enable LPI after link up */ + hw->phy.ops.read_reg_locked(hw, + I217_LPI_GPIO_CTRL, + &phy_reg); + phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI; + hw->phy.ops.write_reg_locked(hw, + I217_LPI_GPIO_CTRL, + phy_reg); + } + } + + /* For i217 Intel Rapid Start Technology support, + * when the system is going into Sx and no manageability engine + * is present, the driver must configure proxy to reset only on + * power good. LPI (Low Power Idle) state must also reset only + * on power good, as well as the MTA (Multicast table array). + * The SMBus release must also be disabled on LCD reset. + */ + if (!(E1000_READ_REG(hw, E1000_FWSM) & + E1000_ICH_FWSM_FW_VALID)) { + /* Enable proxy to reset only on power good. */ + hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL, + &phy_reg); + phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE; + hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, + phy_reg); + + /* Set bit enable LPI (EEE) to reset only on + * power good. + */ + hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg); + phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET; + hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg); + + /* Disable the SMB release on LCD reset. 
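The LPLU decision above can be read as a single predicate: keep LPLU disabled for Sx only when both link partners support 100BaseT EEE and 100Full is advertised locally. A sketch of that test, assuming a hypothetical helper name:

static bool e1000_keep_lplu_off_in_sx_sketch(struct e1000_hw *hw,
					     u16 eee_advert)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;

	/* mirrors the condition used above before clearing the LPLU bits */
	return (eee_advert & I82579_EEE_100_SUPPORTED) &&
	       (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) &&
	       (hw->phy.autoneg_advertised & ADVERTISE_100_FULL);
}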
*/ + hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg); + phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE; + hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg); + } + + /* Enable MTA to reset for Intel Rapid Start Technology + * Support + */ + hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg); + phy_reg |= I217_CGFREG_ENABLE_MTA_RESET; + hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg); + +release: + hw->phy.ops.release(hw); + } +out: + E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); + + if (hw->mac.type == e1000_ich8lan) + e1000_gig_downshift_workaround_ich8lan(hw); + + if (hw->mac.type >= e1000_pchlan) { + e1000_oem_bits_config_ich8lan(hw, false); + + /* Reset PHY to activate OEM bits on 82577/8 */ + if (hw->mac.type == e1000_pchlan) + e1000_phy_hw_reset_generic(hw); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + e1000_write_smbus_addr(hw); + hw->phy.ops.release(hw); + } + + return; +} + +/** + * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0 + * @hw: pointer to the HW structure + * + * During Sx to S0 transitions on non-managed devices or managed devices + * on which PHY resets are not blocked, if the PHY registers cannot be + * accessed properly by the s/w toggle the LANPHYPC value to power cycle + * the PHY. + * On i217, setup Intel Rapid Start Technology. + **/ +u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_resume_workarounds_pchlan"); + if (hw->mac.type < e1000_pch2lan) + return E1000_SUCCESS; + + ret_val = e1000_init_phy_workarounds_pchlan(hw); + if (ret_val) { + DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val); + return ret_val; + } + + /* For i217 Intel Rapid Start Technology support when the system + * is transitioning from Sx and no manageability engine is present + * configure SMBus to restore on reset, disable proxy, and enable + * the reset on MTA (Multicast table array). + */ + if (hw->phy.type == e1000_phy_i217) { + u16 phy_reg; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + DEBUGOUT("Failed to setup iRST\n"); + return ret_val; + } + + /* Clear Auto Enable LPI after link up */ + hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg); + phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI; + hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg); + + if (!(E1000_READ_REG(hw, E1000_FWSM) & + E1000_ICH_FWSM_FW_VALID)) { + /* Restore clear on SMB if no manageability engine + * is present + */ + ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, + &phy_reg); + if (ret_val) + goto release; + phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE; + hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg); + + /* Disable Proxy */ + hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0); + } + /* Enable reset on MTA */ + ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG, + &phy_reg); + if (ret_val) + goto release; + phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET; + hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg); +release: + if (ret_val) + DEBUGOUT1("Error %d in resume workarounds\n", ret_val); + hw->phy.ops.release(hw); + return ret_val; + } + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led_ich8lan - Restore the default LED operation + * @hw: pointer to the HW structure + * + * Return the LED back to the default configuration. 
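e1000_suspend_workarounds_ich8lan and e1000_resume_workarounds_pchlan are the exported pair for the Sx entry/exit paths. A hedged sketch of how a power-management path might pair them; the wrapper is hypothetical:

static u32 e1000_sx_transition_sketch(struct e1000_hw *hw, bool entering_sx)
{
	if (entering_sx) {
		/* apply Gig-disable/LPLU settings before the platform sleeps */
		e1000_suspend_workarounds_ich8lan(hw);
		return E1000_SUCCESS;
	}

	/* re-run the PHY init flow (and i217 iRST setup) after resume */
	return e1000_resume_workarounds_pchlan(hw);
}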
+ **/ +STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_cleanup_led_ich8lan"); + + if (hw->phy.type == e1000_phy_ife) + return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, + 0); + + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default); + return E1000_SUCCESS; +} + +/** + * e1000_led_on_ich8lan - Turn LEDs on + * @hw: pointer to the HW structure + * + * Turn on the LEDs. + **/ +STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_led_on_ich8lan"); + + if (hw->phy.type == e1000_phy_ife) + return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, + (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); + + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2); + return E1000_SUCCESS; +} + +/** + * e1000_led_off_ich8lan - Turn LEDs off + * @hw: pointer to the HW structure + * + * Turn off the LEDs. + **/ +STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_led_off_ich8lan"); + + if (hw->phy.type == e1000_phy_ife) + return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, + (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF)); + + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); + return E1000_SUCCESS; +} + +/** + * e1000_setup_led_pchlan - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use. + **/ +STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_setup_led_pchlan"); + + return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, + (u16)hw->mac.ledctl_mode1); +} + +/** + * e1000_cleanup_led_pchlan - Restore the default LED operation + * @hw: pointer to the HW structure + * + * Return the LED back to the default configuration. + **/ +STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_cleanup_led_pchlan"); + + return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, + (u16)hw->mac.ledctl_default); +} + +/** + * e1000_led_on_pchlan - Turn LEDs on + * @hw: pointer to the HW structure + * + * Turn on the LEDs. + **/ +STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw) +{ + u16 data = (u16)hw->mac.ledctl_mode2; + u32 i, led; + + DEBUGFUNC("e1000_led_on_pchlan"); + + /* If no link, then turn LED on by setting the invert bit + * for each LED that's mode is "link_up" in ledctl_mode2. + */ + if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { + for (i = 0; i < 3; i++) { + led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; + if ((led & E1000_PHY_LED0_MODE_MASK) != + E1000_LEDCTL_MODE_LINK_UP) + continue; + if (led & E1000_PHY_LED0_IVRT) + data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); + else + data |= (E1000_PHY_LED0_IVRT << (i * 5)); + } + } + + return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); +} + +/** + * e1000_led_off_pchlan - Turn LEDs off + * @hw: pointer to the HW structure + * + * Turn off the LEDs. + **/ +STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw) +{ + u16 data = (u16)hw->mac.ledctl_mode1; + u32 i, led; + + DEBUGFUNC("e1000_led_off_pchlan"); + + /* If no link, then turn LED off by clearing the invert bit + * for each LED that's mode is "link_up" in ledctl_mode1. 
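e1000_led_on_pchlan above and the matching e1000_led_off_pchlan flip the per-LED invert bit when there is no link; each LED occupies a 5-bit field in the HV_LED_CONFIG value, which is where the i * 5 shifts come from. A small sketch isolating that bit manipulation, under a hypothetical name:

static u16 e1000_phy_led_toggle_invert_sketch(u16 led_config, u32 i)
{
	/* LED i lives in bits [5i+4:5i]; XOR flips only its invert bit */
	return led_config ^ (u16)(E1000_PHY_LED0_IVRT << (i * 5));
}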
+ */ + if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { + for (i = 0; i < 3; i++) { + led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; + if ((led & E1000_PHY_LED0_MODE_MASK) != + E1000_LEDCTL_MODE_LINK_UP) + continue; + if (led & E1000_PHY_LED0_IVRT) + data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); + else + data |= (E1000_PHY_LED0_IVRT << (i * 5)); + } + } + + return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); +} + +/** + * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset + * @hw: pointer to the HW structure + * + * Read appropriate register for the config done bit for completion status + * and configure the PHY through s/w for EEPROM-less parts. + * + * NOTE: some silicon which is EEPROM-less will fail trying to read the + * config done bit, so only an error is logged and continues. If we were + * to return with error, EEPROM-less silicon would not be able to be reset + * or change link. + **/ +STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u32 bank = 0; + u32 status; + + DEBUGFUNC("e1000_get_cfg_done_ich8lan"); + + e1000_get_cfg_done_generic(hw); + + /* Wait for indication from h/w that it has completed basic config */ + if (hw->mac.type >= e1000_ich10lan) { + e1000_lan_init_done_ich8lan(hw); + } else { + ret_val = e1000_get_auto_rd_done_generic(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + DEBUGOUT("Auto Read Done did not complete\n"); + ret_val = E1000_SUCCESS; + } + } + + /* Clear PHY Reset Asserted bit */ + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_PHYRA) + E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA); + else + DEBUGOUT("PHY Reset Asserted not set - needs delay\n"); + + /* If EEPROM is not marked present, init the IGP 3 PHY manually */ + if (hw->mac.type <= e1000_ich9lan) { + if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) && + (hw->phy.type == e1000_phy_igp_3)) { + e1000_phy_init_script_igp3(hw); + } + } else { + if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { + /* Maybe we should do a basic PHY config */ + DEBUGOUT("EEPROM not present\n"); + ret_val = -E1000_ERR_CONFIG; + } + } + + return ret_val; +} + +/** + * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(hw->mac.ops.check_mng_mode(hw) || + hw->phy.ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); + + return; +} + +/** + * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters + * @hw: pointer to the HW structure + * + * Clears hardware counters specific to the silicon family and calls + * clear_hw_cntrs_generic to clear all general purpose counters. 
+ **/ +STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) +{ + u16 phy_data; + s32 ret_val; + + DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan"); + + e1000_clear_hw_cntrs_base_generic(hw); + + E1000_READ_REG(hw, E1000_ALGNERRC); + E1000_READ_REG(hw, E1000_RXERRC); + E1000_READ_REG(hw, E1000_TNCRS); + E1000_READ_REG(hw, E1000_CEXTERR); + E1000_READ_REG(hw, E1000_TSCTC); + E1000_READ_REG(hw, E1000_TSCTFC); + + E1000_READ_REG(hw, E1000_MGTPRC); + E1000_READ_REG(hw, E1000_MGTPDC); + E1000_READ_REG(hw, E1000_MGTPTC); + + E1000_READ_REG(hw, E1000_IAC); + E1000_READ_REG(hw, E1000_ICRXOC); + + /* Clear PHY statistics registers */ + if ((hw->phy.type == e1000_phy_82578) || + (hw->phy.type == e1000_phy_82579) || + (hw->phy.type == e1000_phy_i217) || + (hw->phy.type == e1000_phy_82577)) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + ret_val = hw->phy.ops.set_page(hw, + HV_STATS_PAGE << IGP_PAGE_SHIFT); + if (ret_val) + goto release; + hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); +release: + hw->phy.ops.release(hw); + } +} + +/** + * e1000_configure_k0s_lpt - Configure K0s power state + * @hw: pointer to the HW structure + * @entry_latency: Tx idle period for entering K0s - valid values are 0 to 3. + * 0 corresponds to 128ns, each value over 0 doubles the duration. + * @min_time: Minimum Tx idle period allowed - valid values are 0 to 4. + * 0 corresponds to 128ns, each value over 0 doubles the duration. + * + * Configure the K1 power state based on the provided parameter. + * Assumes semaphore already acquired. + * + * Success returns 0, Failure returns: + * -E1000_ERR_PHY (-2) in case of access error + * -E1000_ERR_PARAM (-4) in case of parameters error + **/ +s32 e1000_configure_k0s_lpt(struct e1000_hw *hw, u8 entry_latency, u8 min_time) +{ + s32 ret_val; + u16 kmrn_reg = 0; + + DEBUGFUNC("e1000_configure_k0s_lpt"); + + if (entry_latency > 3 || min_time > 4) + return -E1000_ERR_PARAM; + + ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL, + &kmrn_reg); + if (ret_val) + return ret_val; + + /* for now don't touch the latency */ + kmrn_reg &= ~(E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_MASK); + kmrn_reg |= ((min_time << E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT)); + + ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL, + kmrn_reg); + if (ret_val) + return ret_val; + + return E1000_SUCCESS; +} diff --git a/drivers/net/e1000/base/e1000_ich8lan.h b/drivers/net/e1000/base/e1000_ich8lan.h new file mode 100644 index 00000000..33e77fb8 --- /dev/null +++ b/drivers/net/e1000/base/e1000_ich8lan.h @@ -0,0 +1,320 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. 
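For e1000_configure_k0s_lpt above, both parameters are log2 multipliers on a 128 ns base, and the current code rewrites only the minimum-idle-time field. A sketch of the encoding, with a hypothetical helper name:

static u32 e1000_k0s_period_ns_sketch(u8 encoded)
{
	/* 0 -> 128 ns; each increment doubles the period (1 -> 256 ns, ...) */
	return 128u << encoded;
}

For example, e1000_configure_k0s_lpt(hw, 0, 1) would request a 256 ns minimum Tx idle time while leaving the entry latency untouched.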
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _E1000_ICH8LAN_H_ +#define _E1000_ICH8LAN_H_ + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 + +/* Requires up to 10 seconds when MNG might be accessing part. 
*/ +#define ICH_FLASH_READ_COMMAND_TIMEOUT 10000000 +#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 10000000 +#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 10000000 +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 + +#define ICH_CYCLE_READ 0 +#define ICH_CYCLE_WRITE 2 +#define ICH_CYCLE_ERASE 3 + +#define FLASH_GFPREG_BASE_MASK 0x1FFF +#define FLASH_SECTOR_ADDR_SHIFT 12 + +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_8K 8192 +#define ICH_FLASH_SEG_SIZE_64K 65536 + +#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */ +/* FW established a valid mode */ +#define E1000_ICH_FWSM_FW_VALID 0x00008000 +#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */ +#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000 + +#define E1000_ICH_MNG_IAMT_MODE 0x2 + +#define E1000_FWSM_WLOCK_MAC_MASK 0x0380 +#define E1000_FWSM_WLOCK_MAC_SHIFT 7 +#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT) +#define E1000_FWSM_ULP_CFG_DONE 0x00000400 /* Low power cfg done */ +#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */ + +/* Shared Receive Address Registers */ +#define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8)) +#define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8)) + +#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT) +#define E1000_H2ME 0x05B50 /* Host to ME */ +#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */ +#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT) +#define E1000_H2ME_ULP 0x00000800 /* ULP Indication Bit */ +#define E1000_H2ME_ENFORCE_SETTINGS 0x00001000 /* Enforce Settings */ + +#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */ +#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_OFF1_ON2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC000 +#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0 +#define E1000_ICH_NVM_SIG_VALUE 0x80 + +#define E1000_ICH8_LAN_INIT_TIMEOUT 1500 + +#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT) +/* FEXT register bit definition */ +#define E1000_FEXT_PHY_CABLE_DISCONNECTED 0x00000004 + +#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */ +#define E1000_FEXTNVM_SW_CONFIG 1 +#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */ + +#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000 +#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000 + +#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7 +#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 +#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 + +#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100 +#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200 +#define E1000_FEXTNVM6_K1_OFF_ENABLE 0x80000000 +/* bit for disabling packet buffer read */ +#define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000 +#define E1000_FEXTNVM7_SIDE_CLK_UNGATE 0x00000004 +#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT) +#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020 +#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */ +#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL + +#define E1000_ICH_RAR_ENTRIES 7 +#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */ +#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */ + +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ + ((reg) & MAX_PHY_REG_ADDRESS)) +#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */ +#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */ + +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 +#define 
IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300 +#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200 + +/* PHY Wakeup Registers and defines */ +#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17) +#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0) +#define BM_WUC PHY_REG(BM_WUC_PAGE, 1) +#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) +#define BM_WUS PHY_REG(BM_WUC_PAGE, 3) +#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2))) +#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2))) +#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2))) +#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2))) +#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1))) + +#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */ +#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */ +#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */ +#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */ +#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */ +#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */ +#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */ + +#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */ +#define HV_MUX_DATA_CTRL PHY_REG(776, 16) +#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400 +#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004 +#define HV_STATS_PAGE 778 +/* Half-duplex collision counts */ +#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision */ +#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17) +#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. */ +#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19) +#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Collision */ +#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21) +#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision */ +#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24) +#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision */ +#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26) +#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */ +#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28) +#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Tx with no CRS */ +#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30) + +#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */ + +#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */ +#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */ +#define K1_ENTRY_LATENCY 0 +#define K1_MIN_TIME 1 + +/* SMBus Control Phy Register */ +#define CV_SMB_CTRL PHY_REG(769, 23) +#define CV_SMB_CTRL_FORCE_SMBUS 0x0001 + +#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT) +/* I218 Ultra Low Power Configuration 1 Register */ +#define I218_ULP_CONFIG1 PHY_REG(779, 16) +#define I218_ULP_CONFIG1_START 0x0001 /* Start auto ULP config */ +#define I218_ULP_CONFIG1_IND 0x0004 /* Pwr up from ULP indication */ +#define I218_ULP_CONFIG1_STICKY_ULP 0x0010 /* Set sticky ULP mode */ +#define I218_ULP_CONFIG1_INBAND_EXIT 0x0020 /* Inband on ULP exit */ +#define I218_ULP_CONFIG1_WOL_HOST 0x0040 /* WoL Host on ULP exit */ +#define I218_ULP_CONFIG1_RESET_TO_SMBUS 0x0100 /* Reset to SMBus mode */ +#define I218_ULP_CONFIG1_DISABLE_SMB_PERST 0x1000 /* Disable on PERST# */ + +#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */ +/* SMBus Address Phy Register */ +#define HV_SMB_ADDR PHY_REG(768, 26) +#define HV_SMB_ADDR_MASK 0x007F +#define HV_SMB_ADDR_PEC_EN 0x0200 +#define HV_SMB_ADDR_VALID 0x0080 +#define HV_SMB_ADDR_FREQ_MASK 0x1100 +#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8 +#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12 + +/* 
Strapping Option Register - RO */ +#define E1000_STRAP 0x0000C +#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 +#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17 +#define E1000_STRAP_SMT_FREQ_MASK 0x00003000 +#define E1000_STRAP_SMT_FREQ_SHIFT 12 + +/* OEM Bits Phy Register */ +#define HV_OEM_BITS PHY_REG(768, 25) +#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */ +#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */ +#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */ + +/* KMRN Mode Control */ +#define HV_KMRN_MODE_CTRL PHY_REG(769, 16) +#define HV_KMRN_MDIO_SLOW 0x0400 + +/* KMRN FIFO Control and Status */ +#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16) +#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000 +#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12 + +/* PHY Power Management Control */ +#define HV_PM_CTRL PHY_REG(770, 17) +#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100 +#define HV_PM_CTRL_K1_ENABLE 0x4000 + +#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */ + +/* Inband Control */ +#define I217_INBAND_CTRL PHY_REG(770, 18) +#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK 0x3F00 +#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT 8 + +/* Low Power Idle GPIO Control */ +#define I217_LPI_GPIO_CTRL PHY_REG(772, 18) +#define I217_LPI_GPIO_CTRL_AUTO_EN_LPI 0x0800 + +/* PHY Low Power Idle Control */ +#define I82579_LPI_CTRL PHY_REG(772, 20) +#define I82579_LPI_CTRL_100_ENABLE 0x2000 +#define I82579_LPI_CTRL_1000_ENABLE 0x4000 +#define I82579_LPI_CTRL_ENABLE_MASK 0x6000 + +/* 82579 DFT Control */ +#define I82579_DFT_CTRL PHY_REG(769, 20) +#define I82579_DFT_CTRL_GATE_PHY_RESET 0x0040 /* Gate PHY Reset on MAC Reset */ + +/* Extended Management Interface (EMI) Registers */ +#define I82579_EMI_ADDR 0x10 +#define I82579_EMI_DATA 0x11 +#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */ +#define I82579_MSE_THRESHOLD 0x084F /* 82579 Mean Square Error Threshold */ +#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */ +#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */ +#define I82579_RX_CONFIG 0x3412 /* Receive configuration */ +#define I82579_LPI_PLL_SHUT 0x4412 /* LPI PLL Shut Enable */ +#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */ +#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */ +#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */ +#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */ +#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */ +#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */ +#define I82579_LPI_100_PLL_SHUT (1 << 2) /* 100M LPI PLL Shut Enabled */ +#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */ +#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */ +#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */ +#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */ +#define I217_RX_CONFIG 0xB20C /* Receive configuration */ + +#define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */ +#define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */ + +/* Intel Rapid Start Technology Support */ +#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70) +#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080 +#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28) +#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000 +#define I217_CGFREG PHY_REG(772, 29) +#define I217_CGFREG_ENABLE_MTA_RESET 0x0002 +#define I217_MEMPWR PHY_REG(772, 26) +#define 
I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010 + +/* Receive Address Initial CRC Calculation */ +#define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4)) + +#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT) +#define E1000_PCI_REVISION_ID_REG 0x08 +#endif /* defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT) */ +void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, + bool state); +void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); +void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); +void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw); +u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw); +s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable); +s32 e1000_configure_k0s_lpt(struct e1000_hw *hw, u8 entry_latency, u8 min_time); +void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw); +s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable); +s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data); +s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data); +s32 e1000_set_eee_pchlan(struct e1000_hw *hw); +#ifdef ULP_SUPPORT +s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx); +s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force); +#endif /* ULP_SUPPORT */ +#endif /* _E1000_ICH8LAN_H_ */ +void e1000_demote_ltr(struct e1000_hw *hw, bool demote, bool link); diff --git a/drivers/net/e1000/base/e1000_mac.c b/drivers/net/e1000/base/e1000_mac.c new file mode 100644 index 00000000..a0f3a999 --- /dev/null +++ b/drivers/net/e1000/base/e1000_mac.c @@ -0,0 +1,2249 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#include "e1000_api.h" + +STATIC s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw); +STATIC void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw); +STATIC void e1000_config_collision_dist_generic(struct e1000_hw *hw); +STATIC int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index); + +/** + * e1000_init_mac_ops_generic - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * Setups up the function pointers to no-op functions + **/ +void e1000_init_mac_ops_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + DEBUGFUNC("e1000_init_mac_ops_generic"); + + /* General Setup */ + mac->ops.init_params = e1000_null_ops_generic; + mac->ops.init_hw = e1000_null_ops_generic; + mac->ops.reset_hw = e1000_null_ops_generic; + mac->ops.setup_physical_interface = e1000_null_ops_generic; + mac->ops.get_bus_info = e1000_null_ops_generic; + mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pcie; + mac->ops.read_mac_addr = e1000_read_mac_addr_generic; + mac->ops.config_collision_dist = e1000_config_collision_dist_generic; + mac->ops.clear_hw_cntrs = e1000_null_mac_generic; + /* LED */ + mac->ops.cleanup_led = e1000_null_ops_generic; + mac->ops.setup_led = e1000_null_ops_generic; + mac->ops.blink_led = e1000_null_ops_generic; + mac->ops.led_on = e1000_null_ops_generic; + mac->ops.led_off = e1000_null_ops_generic; + /* LINK */ + mac->ops.setup_link = e1000_null_ops_generic; + mac->ops.get_link_up_info = e1000_null_link_info; + mac->ops.check_for_link = e1000_null_ops_generic; + /* Management */ + mac->ops.check_mng_mode = e1000_null_mng_mode; + /* VLAN, MC, etc. */ + mac->ops.update_mc_addr_list = e1000_null_update_mc; + mac->ops.clear_vfta = e1000_null_mac_generic; + mac->ops.write_vfta = e1000_null_write_vfta; + mac->ops.rar_set = e1000_rar_set_generic; + mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic; +} + +/** + * e1000_null_ops_generic - No-op function, returns 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_ops_generic(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_null_ops_generic"); + UNREFERENCED_1PARAMETER(hw); + return E1000_SUCCESS; +} + +/** + * e1000_null_mac_generic - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_mac_generic(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_null_mac_generic"); + UNREFERENCED_1PARAMETER(hw); + return; +} + +/** + * e1000_null_link_info - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_link_info(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG *s, u16 E1000_UNUSEDARG *d) +{ + DEBUGFUNC("e1000_null_link_info"); + UNREFERENCED_3PARAMETER(hw, s, d); + return E1000_SUCCESS; +} + +/** + * e1000_null_mng_mode - No-op function, return false + * @hw: pointer to the HW structure + **/ +bool e1000_null_mng_mode(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_null_mng_mode"); + UNREFERENCED_1PARAMETER(hw); + return false; +} + +/** + * e1000_null_update_mc - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_update_mc(struct e1000_hw E1000_UNUSEDARG *hw, + u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a) +{ + DEBUGFUNC("e1000_null_update_mc"); + UNREFERENCED_3PARAMETER(hw, h, a); + return; +} + +/** + * e1000_null_write_vfta - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_write_vfta(struct e1000_hw 
E1000_UNUSEDARG *hw, + u32 E1000_UNUSEDARG a, u32 E1000_UNUSEDARG b) +{ + DEBUGFUNC("e1000_null_write_vfta"); + UNREFERENCED_3PARAMETER(hw, a, b); + return; +} + +/** + * e1000_null_rar_set - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +int e1000_null_rar_set(struct e1000_hw E1000_UNUSEDARG *hw, + u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a) +{ + DEBUGFUNC("e1000_null_rar_set"); + UNREFERENCED_3PARAMETER(hw, h, a); + return E1000_SUCCESS; +} + +/** + * e1000_get_bus_info_pci_generic - Get PCI(x) bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCI/PCIx), and PCI(-x) function. + **/ +s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_bus_info *bus = &hw->bus; + u32 status = E1000_READ_REG(hw, E1000_STATUS); + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_get_bus_info_pci_generic"); + + /* PCI or PCI-X? */ + bus->type = (status & E1000_STATUS_PCIX_MODE) + ? e1000_bus_type_pcix + : e1000_bus_type_pci; + + /* Bus speed */ + if (bus->type == e1000_bus_type_pci) { + bus->speed = (status & E1000_STATUS_PCI66) + ? e1000_bus_speed_66 + : e1000_bus_speed_33; + } else { + switch (status & E1000_STATUS_PCIX_SPEED) { + case E1000_STATUS_PCIX_SPEED_66: + bus->speed = e1000_bus_speed_66; + break; + case E1000_STATUS_PCIX_SPEED_100: + bus->speed = e1000_bus_speed_100; + break; + case E1000_STATUS_PCIX_SPEED_133: + bus->speed = e1000_bus_speed_133; + break; + default: + bus->speed = e1000_bus_speed_reserved; + break; + } + } + + /* Bus width */ + bus->width = (status & E1000_STATUS_BUS64) + ? e1000_bus_width_64 + : e1000_bus_width_32; + + /* Which PCI(-X) function? */ + mac->ops.set_lan_id(hw); + + return ret_val; +} + +/** + * e1000_get_bus_info_pcie_generic - Get PCIe bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCIe), and PCIe function. + **/ +s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_bus_info *bus = &hw->bus; + s32 ret_val; + u16 pcie_link_status; + + DEBUGFUNC("e1000_get_bus_info_pcie_generic"); + + bus->type = e1000_bus_type_pci_express; + + ret_val = e1000_read_pcie_cap_reg(hw, PCIE_LINK_STATUS, + &pcie_link_status); + if (ret_val) { + bus->width = e1000_bus_width_unknown; + bus->speed = e1000_bus_speed_unknown; + } else { + switch (pcie_link_status & PCIE_LINK_SPEED_MASK) { + case PCIE_LINK_SPEED_2500: + bus->speed = e1000_bus_speed_2500; + break; + case PCIE_LINK_SPEED_5000: + bus->speed = e1000_bus_speed_5000; + break; + default: + bus->speed = e1000_bus_speed_unknown; + break; + } + + bus->width = (enum e1000_bus_width)((pcie_link_status & + PCIE_LINK_WIDTH_MASK) >> PCIE_LINK_WIDTH_SHIFT); + } + + mac->ops.set_lan_id(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading memory-mapped registers + * and swaps the port value if requested. 
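+ * + * For example, assuming the usual E1000_STATUS_FUNC_MASK (0x0000000C) and + * E1000_STATUS_FUNC_SHIFT (2) definitions, a STATUS read with bits 3:2 set + * to 01b would yield bus->func = 1, i.e. the second LAN function.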
+ **/ +STATIC void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + u32 reg; + + /* The status register reports the correct function number + * for the device regardless of function swap state. + */ + reg = E1000_READ_REG(hw, E1000_STATUS); + bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; +} + +/** + * e1000_set_lan_id_multi_port_pci - Set LAN id for PCI multiple port devices + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading PCI config space. + **/ +void e1000_set_lan_id_multi_port_pci(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + u16 pci_header_type; + u32 status; + + e1000_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type); + if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) { + status = E1000_READ_REG(hw, E1000_STATUS); + bus->func = (status & E1000_STATUS_FUNC_MASK) + >> E1000_STATUS_FUNC_SHIFT; + } else { + bus->func = 0; + } +} + +/** + * e1000_set_lan_id_single_port - Set LAN id for a single port device + * @hw: pointer to the HW structure + * + * Sets the LAN function id to zero for a single port device. + **/ +void e1000_set_lan_id_single_port(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + + bus->func = 0; +} + +/** + * e1000_clear_vfta_generic - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void e1000_clear_vfta_generic(struct e1000_hw *hw) +{ + u32 offset; + + DEBUGFUNC("e1000_clear_vfta_generic"); + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); + E1000_WRITE_FLUSH(hw); + } +} + +/** + * e1000_write_vfta_generic - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) +{ + DEBUGFUNC("e1000_write_vfta_generic"); + + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_init_rx_addrs_generic - Initialize receive address's + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setup the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. + **/ +void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count) +{ + u32 i; + u8 mac_addr[ETH_ADDR_LEN] = {0}; + + DEBUGFUNC("e1000_init_rx_addrs_generic"); + + /* Setup the receive address */ + DEBUGOUT("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr + * @hw: pointer to the HW structure + * + * Checks the nvm for an alternate MAC address. An alternate MAC address + * can be setup by pre-boot software and must be treated like a permanent + * address and must override the actual permanent MAC address. 
If an + * alternate MAC address is found it is programmed into RAR0, replacing + * the permanent address that was installed into RAR0 by the Si on reset. + * This function will return SUCCESS unless it encounters an error while + * reading the EEPROM. + **/ +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) +{ + u32 i; + s32 ret_val; + u16 offset, nvm_alt_mac_addr_offset, nvm_data; + u8 alt_mac_addr[ETH_ADDR_LEN]; + + DEBUGFUNC("e1000_check_alt_mac_addr_generic"); + + ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data); + if (ret_val) + return ret_val; + + /* not supported on older hardware or 82573 */ + if ((hw->mac.type < e1000_82571) || (hw->mac.type == e1000_82573)) + return E1000_SUCCESS; + + /* Alternate MAC address is handled by the option ROM for 82580 + * and newer. SW support not required. + */ + if (hw->mac.type >= e1000_82580) + return E1000_SUCCESS; + + ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1, + &nvm_alt_mac_addr_offset); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if ((nvm_alt_mac_addr_offset == 0xFFFF) || + (nvm_alt_mac_addr_offset == 0x0000)) + /* There is no Alternate MAC Address */ + return E1000_SUCCESS; + + if (hw->bus.func == E1000_FUNC_1) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; + if (hw->bus.func == E1000_FUNC_2) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2; + + if (hw->bus.func == E1000_FUNC_3) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3; + for (i = 0; i < ETH_ADDR_LEN; i += 2) { + offset = nvm_alt_mac_addr_offset + (i >> 1); + ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + alt_mac_addr[i] = (u8)(nvm_data & 0xFF); + alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); + } + + /* if multicast bit is set, the alternate address will not be used */ + if (alt_mac_addr[0] & 0x01) { + DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n"); + return E1000_SUCCESS; + } + + /* We have a valid alternate MAC address, and we want to treat it the + * same as the normal permanent MAC address stored by the HW into the + * RAR. Do this by mapping this address into RAR0. + */ + hw->mac.ops.rar_set(hw, alt_mac_addr, 0); + + return E1000_SUCCESS; +} + +/** + * e1000_rar_set_generic - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + **/ +STATIC int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + DEBUGFUNC("e1000_rar_set_generic"); + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. 
+ */ + E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); + E1000_WRITE_FLUSH(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_hash_mc_addr_generic - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. + **/ +u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + DEBUGFUNC("e1000_hash_mc_addr_generic"); + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * e1000_update_mc_addr_list_generic - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. 
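+ * For example, with two addresses the list is expected to be 12 consecutive + * bytes, addr0[0..5] immediately followed by addr1[0..5], and mc_addr_count + * would be 2.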
+ **/ +void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + DEBUGFUNC("e1000_update_mc_addr_list_generic"); + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32) i < mc_addr_count; i++) { + hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); + mc_addr_list += (ETH_ADDR_LEN); + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_pcix_mmrbc_workaround_generic - Fix incorrect MMRBC value + * @hw: pointer to the HW structure + * + * In certain situations, a system BIOS may report that the PCIx maximum + * memory read byte count (MMRBC) value is higher than the actual + * value. We check the PCIx command register against the current PCIx status + * register. + **/ +void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw) +{ + u16 cmd_mmrbc; + u16 pcix_cmd; + u16 pcix_stat_hi_word; + u16 stat_mmrbc; + + DEBUGFUNC("e1000_pcix_mmrbc_workaround_generic"); + + /* Workaround for PCI-X issue when BIOS sets MMRBC incorrectly */ + if (hw->bus.type != e1000_bus_type_pcix) + return; + + e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd); + e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, &pcix_stat_hi_word); + cmd_mmrbc = (pcix_cmd & PCIX_COMMAND_MMRBC_MASK) >> + PCIX_COMMAND_MMRBC_SHIFT; + stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >> + PCIX_STATUS_HI_MMRBC_SHIFT; + if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K) + stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K; + if (cmd_mmrbc > stat_mmrbc) { + pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK; + pcix_cmd |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT; + e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd); + } +} + +/** + * e1000_clear_hw_cntrs_base_generic - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers.
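+ * These statistics registers are clear-on-read, so reading each one is + * enough to zero it.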
+ **/ +void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_base_generic"); + + E1000_READ_REG(hw, E1000_CRCERRS); + E1000_READ_REG(hw, E1000_SYMERRS); + E1000_READ_REG(hw, E1000_MPC); + E1000_READ_REG(hw, E1000_SCC); + E1000_READ_REG(hw, E1000_ECOL); + E1000_READ_REG(hw, E1000_MCC); + E1000_READ_REG(hw, E1000_LATECOL); + E1000_READ_REG(hw, E1000_COLC); + E1000_READ_REG(hw, E1000_DC); + E1000_READ_REG(hw, E1000_SEC); + E1000_READ_REG(hw, E1000_RLEC); + E1000_READ_REG(hw, E1000_XONRXC); + E1000_READ_REG(hw, E1000_XONTXC); + E1000_READ_REG(hw, E1000_XOFFRXC); + E1000_READ_REG(hw, E1000_XOFFTXC); + E1000_READ_REG(hw, E1000_FCRUC); + E1000_READ_REG(hw, E1000_GPRC); + E1000_READ_REG(hw, E1000_BPRC); + E1000_READ_REG(hw, E1000_MPRC); + E1000_READ_REG(hw, E1000_GPTC); + E1000_READ_REG(hw, E1000_GORCL); + E1000_READ_REG(hw, E1000_GORCH); + E1000_READ_REG(hw, E1000_GOTCL); + E1000_READ_REG(hw, E1000_GOTCH); + E1000_READ_REG(hw, E1000_RNBC); + E1000_READ_REG(hw, E1000_RUC); + E1000_READ_REG(hw, E1000_RFC); + E1000_READ_REG(hw, E1000_ROC); + E1000_READ_REG(hw, E1000_RJC); + E1000_READ_REG(hw, E1000_TORL); + E1000_READ_REG(hw, E1000_TORH); + E1000_READ_REG(hw, E1000_TOTL); + E1000_READ_REG(hw, E1000_TOTH); + E1000_READ_REG(hw, E1000_TPR); + E1000_READ_REG(hw, E1000_TPT); + E1000_READ_REG(hw, E1000_MPTC); + E1000_READ_REG(hw, E1000_BPTC); +} + +/** + * e1000_check_for_copper_link_generic - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see if the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + DEBUGFUNC("e1000_check_for_copper_link"); + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) + return E1000_SUCCESS; + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) + return E1000_SUCCESS; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift; it must be checked + * immediately after link-up + */ + e1000_check_downshift_generic(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) + return -E1000_ERR_CONFIG; + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + mac->ops.config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) + DEBUGOUT("Error configuring flow control\n"); + + return ret_val; +} + +/** + * e1000_check_for_fiber_link_generic - Check for link (Fiber) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware.
If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + DEBUGFUNC("e1000_check_for_fiber_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + status = E1000_READ_REG(hw, E1000_STATUS); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), the cable is plugged in (we have signal), + * and our link partner is not trying to auto-negotiate with us (we + * are receiving idles or data), we need to force link up. We also + * need to give auto-negotiation time to complete, in case the cable + * was just plugged in. The autoneg_failed flag does this. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) && + !(rxcw & E1000_RXCW_C)) { + if (!mac->autoneg_failed) { + mac->autoneg_failed = true; + return E1000_SUCCESS; + } + DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = true; + } + + return E1000_SUCCESS; +} + +/** + * e1000_check_for_serdes_link_generic - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + DEBUGFUNC("e1000_check_for_serdes_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + status = E1000_READ_REG(hw, E1000_STATUS); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) { + if (!mac->autoneg_failed) { + mac->autoneg_failed = true; + return E1000_SUCCESS; + } + DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. 
*/ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = true; + } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) { + /* If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + usec_delay(10); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = true; + DEBUGOUT("SERDES: Link up - forced.\n"); + } + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - force failed.\n"); + } + } + + if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) { + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_LU) { + /* SYNCH bit and IV bit are sticky, so reread rxcw. */ + usec_delay(10); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = true; + DEBUGOUT("SERDES: Link up - autoneg completed successfully.\n"); + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - invalid codewords detected in autoneg.\n"); + } + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - no sync.\n"); + } + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - autoneg failed\n"); + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_default_fc_generic - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. + **/ +s32 e1000_set_default_fc_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 nvm_data; + u16 nvm_offset = 0; + + DEBUGFUNC("e1000_set_default_fc_generic"); + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. 
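+ * As the code below illustrates, if neither pause-related bit in that word + * is set the requested mode resolves to e1000_fc_none, if only the ASM_DIR + * bit is set it resolves to e1000_fc_tx_pause, and otherwise it resolves to + * e1000_fc_full.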
+ */ + if (hw->mac.type == e1000_i350) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func); + ret_val = hw->nvm.ops.read(hw, + NVM_INIT_CONTROL2_REG + + nvm_offset, + 1, &nvm_data); + } else { + ret_val = hw->nvm.ops.read(hw, + NVM_INIT_CONTROL2_REG, + 1, &nvm_data); + } + + + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (!(nvm_data & NVM_WORD0F_PAUSE_MASK)) + hw->fc.requested_mode = e1000_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == + NVM_WORD0F_ASM_DIR) + hw->fc.requested_mode = e1000_fc_tx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + + return E1000_SUCCESS; +} + +/** + * e1000_setup_link_generic - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +s32 e1000_setup_link_generic(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_setup_link_generic"); + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) + return E1000_SUCCESS; + + /* If requested flow control is set to default, set flow control + * based on the EEPROM flow control settings. + */ + if (hw->fc.requested_mode == e1000_fc_default) { + ret_val = e1000_set_default_fc_generic(hw); + if (ret_val) + return ret_val; + } + + /* Save off the requested flow control mode for use later. Depending + * on the link partner's capabilities, we may or may not use this mode. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + DEBUGOUT1("After fix-ups FlowControl is now = %x\n", + hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + return ret_val; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + DEBUGOUT("Initializing the Flow Control address, type and timer regs\n"); + E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE); + E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); + + return e1000_set_fc_watermarks_generic(hw); +} + +/** + * e1000_commit_fc_settings_generic - Configure flow control + * @hw: pointer to the HW structure + * + * Write the flow control settings to the Transmit Config Word Register (TXCW) + * base on the flow control settings in e1000_mac_info. + **/ +s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 txcw; + + DEBUGFUNC("e1000_commit_fc_settings_generic"); + + /* Check for a software override of the flow control settings, and + * setup the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Transmit Config Word Register (TXCW) and re-start auto- + * negotiation. 
However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we + * do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control completely disabled by a software over-ride. */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case e1000_fc_rx_pause: + /* Rx Flow control is enabled and Tx Flow control is disabled + * by a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric Rx + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case e1000_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case e1000_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + break; + } + + E1000_WRITE_REG(hw, E1000_TXCW, txcw); + mac->txcw = txcw; + + return E1000_SUCCESS; +} + +/** + * e1000_poll_fiber_serdes_link_generic - Poll for link up + * @hw: pointer to the HW structure + * + * Polls for link up by reading the status register; if link fails to come + * up with auto-negotiation, then the link is forced if a signal is detected. + **/ +s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 i, status; + s32 ret_val; + + DEBUGFUNC("e1000_poll_fiber_serdes_link_generic"); + + /* If we have a signal (the cable is plugged in, or assumed true for + * serdes media) then poll for a "Link-Up" indication in the Device + * Status Register. Time-out if a link isn't seen in 500 + * milliseconds (Auto-negotiation should complete in less than 500 + * milliseconds even if the other end is doing it in SW). + */ + for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { + msec_delay(10); + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == FIBER_LINK_UP_LIMIT) { + DEBUGOUT("Never got a valid link from auto-neg!!!\n"); + mac->autoneg_failed = true; + /* AutoNeg failed to achieve a link, so we'll call + * mac->check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. + */ + ret_val = mac->ops.check_for_link(hw); + if (ret_val) { + DEBUGOUT("Error while checking for link\n"); + return ret_val; + } + mac->autoneg_failed = false; + } else { + mac->autoneg_failed = false; + DEBUGOUT("Valid Link Found\n"); + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes + * links. Upon successful setup, poll for link.
+ **/ +s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + DEBUGFUNC("e1000_setup_fiber_serdes_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* Take the link out of reset */ + ctrl &= ~E1000_CTRL_LRST; + + hw->mac.ops.config_collision_dist(hw); + + ret_val = e1000_commit_fc_settings_generic(hw); + if (ret_val) + return ret_val; + + /* Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation. If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + DEBUGOUT("Auto-negotiation enabled\n"); + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + E1000_WRITE_FLUSH(hw); + msec_delay(1); + + /* For these adapters, the SW definable pin 1 is set when the optics + * detect a signal. If we have a signal, then poll for a "Link-Up" + * indication. + */ + if (hw->phy.media_type == e1000_media_type_internal_serdes || + (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) { + ret_val = e1000_poll_fiber_serdes_link_generic(hw); + } else { + DEBUGOUT("No signal detected\n"); + } + + return ret_val; +} + +/** + * e1000_config_collision_dist_generic - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. + **/ +STATIC void e1000_config_collision_dist_generic(struct e1000_hw *hw) +{ + u32 tctl; + + DEBUGFUNC("e1000_config_collision_dist_generic"); + + tctl = E1000_READ_REG(hw, E1000_TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; + + E1000_WRITE_REG(hw, E1000_TCTL, tctl); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_set_fc_watermarks_generic - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * transmission as well. + **/ +s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + DEBUGFUNC("e1000_set_fc_watermarks_generic"); + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & e1000_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= E1000_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl); + E1000_WRITE_REG(hw, E1000_FCRTH, fcrth); + + return E1000_SUCCESS; +} + +/** + * e1000_force_mac_fc_generic - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. 
+ **/ +s32 e1000_force_mac_fc_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + DEBUGFUNC("e1000_force_mac_fc_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit and + * receive flow control. + * + * The "Case" statement below enables/disables flow control + * according to the "hw->fc.current_mode" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not receive pause frames). + * 3: Both Rx and Tx flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case e1000_fc_none: + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); + break; + case e1000_fc_rx_pause: + ctrl &= (~E1000_CTRL_TFCE); + ctrl |= E1000_CTRL_RFCE; + break; + case e1000_fc_tx_pause: + ctrl &= (~E1000_CTRL_RFCE); + ctrl |= E1000_CTRL_TFCE; + break; + case e1000_fc_full: + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_config_fc_after_link_up_generic - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + **/ +s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = E1000_SUCCESS; + u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg; + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + u16 speed, duplex; + + DEBUGFUNC("e1000_config_fc_after_link_up_generic"); + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) { + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) + ret_val = e1000_force_mac_fc_generic(hw); + } else { + if (hw->phy.media_type == e1000_media_type_copper) + ret_val = e1000_force_mac_fc_generic(hw); + } + + if (ret_val) { + DEBUGOUT("Error forcing flow control settings\n"); + return ret_val; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits.
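+ * (The link-status bit, for instance, is latched low, so the first read can + * return a stale link-down indication that the second read clears.)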
+ */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + DEBUGOUT("Copper PHY and Auto Neg has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | E1000_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + DEBUGOUT("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + DEBUGOUT("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + DEBUGOUT("Flow Control = NONE.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); + if (ret_val) { + DEBUGOUT("Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = e1000_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000_force_mac_fc_generic(hw); + if (ret_val) { + DEBUGOUT("Error forcing flow control settings\n"); + return ret_val; + } + } + + /* Check for the case where we have SerDes media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_internal_serdes) && + mac->autoneg) { + /* Read the PCS_LSTS and check to see if AutoNeg + * has completed. + */ + pcs_status_reg = E1000_READ_REG(hw, E1000_PCS_LSTAT); + + if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) { + DEBUGOUT("PCS Auto Neg has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (PCS_ANADV) and the Auto_Negotiation Base + * Page Ability Register (PCS_LPAB) to determine how + * flow control was negotiated. + */ + pcs_adv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV); + pcs_lp_ability_reg = E1000_READ_REG(hw, E1000_PCS_LPAB); + + /* Two bits in the Auto Negotiation Advertisement Register + * (PCS_ANADV) and two bits in the Auto Negotiation Base + * Page Ability Register (PCS_LPAB) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. 
+ * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | e1000_fc_full + * + */ + if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) { + /* Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + DEBUGOUT("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + DEBUGOUT("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + DEBUGOUT("Flow Control = NONE.\n"); + } + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + pcs_ctrl_reg = E1000_READ_REG(hw, E1000_PCS_LCTL); + pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL; + E1000_WRITE_REG(hw, E1000_PCS_LCTL, pcs_ctrl_reg); + + ret_val = e1000_force_mac_fc_generic(hw); + if (ret_val) { + DEBUGOUT("Error forcing flow control settings\n"); + return ret_val; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. 
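+ * + * For example, a STATUS value with the 1000 Mb/s speed bit and the FD bit set + * is reported by the code below as SPEED_1000 and FULL_DUPLEX.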
+ **/ +s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic"); + + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + DEBUGOUT("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + DEBUGOUT("100 Mbs, "); + } else { + *speed = SPEED_10; + DEBUGOUT("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + DEBUGOUT("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + DEBUGOUT("Half Duplex\n"); + } + + return E1000_SUCCESS; +} + +/** + * e1000_get_speed_and_duplex_fiber_generic - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Sets the speed and duplex to gigabit full duplex (the only possible option) + * for fiber/serdes links. + **/ +s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw E1000_UNUSEDARG *hw, + u16 *speed, u16 *duplex) +{ + DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic"); + UNREFERENCED_1PARAMETER(hw); + + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + + return E1000_SUCCESS; +} + +/** + * e1000_get_hw_semaphore_generic - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + DEBUGFUNC("e1000_get_hw_semaphore_generic"); + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usec_delay(50); + i++; + } + + if (i == timeout) { + DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_NVM; + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + usec_delay(50); + } + + if (i == timeout) { + /* Release semaphores */ + e1000_put_hw_semaphore_generic(hw); + DEBUGOUT("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_put_hw_semaphore_generic - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +void e1000_put_hw_semaphore_generic(struct e1000_hw *hw) +{ + u32 swsm; + + DEBUGFUNC("e1000_put_hw_semaphore_generic"); + + swsm = E1000_READ_REG(hw, E1000_SWSM); + + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + + E1000_WRITE_REG(hw, E1000_SWSM, swsm); +} + +/** + * e1000_get_auto_rd_done_generic - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. 
+ **/ +s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw) +{ + s32 i = 0; + + DEBUGFUNC("e1000_get_auto_rd_done_generic"); + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD) + break; + msec_delay(1); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + DEBUGOUT("Auto read by HW from NVM has not completed.\n"); + return -E1000_ERR_RESET; + } + + return E1000_SUCCESS; +} + +/** + * e1000_valid_led_default_generic - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_valid_led_default_generic"); + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; + + return E1000_SUCCESS; +} + +/** + * e1000_id_led_init_generic - + * @hw: pointer to the HW structure + * + **/ +s32 e1000_id_led_init_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 data, i, temp; + const u16 led_mask = 0x0F; + + DEBUGFUNC("e1000_id_led_init_generic"); + + ret_val = hw->nvm.ops.valid_led_default(hw, &data); + if (ret_val) + return ret_val; + + mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_led_generic - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. 
+ **/ +s32 e1000_setup_led_generic(struct e1000_hw *hw) +{ + u32 ledctl; + + DEBUGFUNC("e1000_setup_led_generic"); + + if (hw->mac.ops.setup_led != e1000_setup_led_generic) + return -E1000_ERR_CONFIG; + + if (hw->phy.media_type == e1000_media_type_fiber) { + ledctl = E1000_READ_REG(hw, E1000_LEDCTL); + hw->mac.ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); + } else if (hw->phy.media_type == e1000_media_type_copper) { + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); + } + + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led_generic - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. + **/ +s32 e1000_cleanup_led_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_cleanup_led_generic"); + + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default); + return E1000_SUCCESS; +} + +/** + * e1000_blink_led_generic - Blink LED + * @hw: pointer to the HW structure + * + * Blink the LEDs which are set to be on. + **/ +s32 e1000_blink_led_generic(struct e1000_hw *hw) +{ + u32 ledctl_blink = 0; + u32 i; + + DEBUGFUNC("e1000_blink_led_generic"); + + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* Set the blink bit for each LED that's "on" (0x0E) + * (or "off" if inverted) in ledctl_mode2. The blink + * logic in hardware only works when mode is set to "on" + * so it must be changed accordingly when the mode is + * "off" and inverted. + */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 32; i += 8) { + u32 mode = (hw->mac.ledctl_mode2 >> i) & + E1000_LEDCTL_LED0_MODE_MASK; + u32 led_default = hw->mac.ledctl_default >> i; + + if ((!(led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_ON)) || + ((led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_OFF))) { + ledctl_blink &= + ~(E1000_LEDCTL_LED0_MODE_MASK << i); + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_MODE_LED_ON) << i; + } + } + } + + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink); + + return E1000_SUCCESS; +} + +/** + * e1000_led_on_generic - Turn LED on + * @hw: pointer to the HW structure + * + * Turn LED on. + **/ +s32 e1000_led_on_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + DEBUGFUNC("e1000_led_on_generic"); + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + break; + case e1000_media_type_copper: + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2); + break; + default: + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_led_off_generic - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. 
+ **/ +s32 e1000_led_off_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + DEBUGFUNC("e1000_led_off_generic"); + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + break; + case e1000_media_type_copper: + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities + * @hw: pointer to the HW structure + * @no_snoop: bitmap of snoop events + * + * Set the PCI-express register to snoop for events enabled in 'no_snoop'. + **/ +void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop) +{ + u32 gcr; + + DEBUGFUNC("e1000_set_pcie_no_snoop_generic"); + + if (hw->bus.type != e1000_bus_type_pci_express) + return; + + if (no_snoop) { + gcr = E1000_READ_REG(hw, E1000_GCR); + gcr &= ~(PCIE_NO_SNOOP_ALL); + gcr |= no_snoop; + E1000_WRITE_REG(hw, E1000_GCR, gcr); + } +} + +/** + * e1000_disable_pcie_master_generic - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns E1000_SUCCESS if successful, else returns -10 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + **/ +s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw) +{ + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; + + DEBUGFUNC("e1000_disable_pcie_master_generic"); + + if (hw->bus.type != e1000_bus_type_pci_express) + return E1000_SUCCESS; + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + while (timeout) { + if (!(E1000_READ_REG(hw, E1000_STATUS) & + E1000_STATUS_GIO_MASTER_ENABLE) || + E1000_REMOVED(hw->hw_addr)) + break; + usec_delay(100); + timeout--; + } + + if (!timeout) { + DEBUGOUT("Master requests are pending.\n"); + return -E1000_ERR_MASTER_REQUESTS_PENDING; + } + + return E1000_SUCCESS; +} + +/** + * e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Reset the Adaptive Interframe Spacing throttle to default values. + **/ +void e1000_reset_adaptive_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_reset_adaptive_generic"); + + if (!mac->adaptive_ifs) { + DEBUGOUT("Not in Adaptive IFS mode!\n"); + return; + } + + mac->current_ifs_val = 0; + mac->ifs_min_val = IFS_MIN; + mac->ifs_max_val = IFS_MAX; + mac->ifs_step_size = IFS_STEP; + mac->ifs_ratio = IFS_RATIO; + + mac->in_ifs_mode = false; + E1000_WRITE_REG(hw, E1000_AIT, 0); +} + +/** + * e1000_update_adaptive_generic - Update Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Update the Adaptive Interframe Spacing Throttle value based on the + * time between transmitted packets and time between collisions. 
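Note that collision_delta and tx_packet_delta are not maintained by this base code; the driver's periodic watchdog/statistics path is expected to refresh them and then call this function. A hedged sketch of such a caller, assuming the usual COLC and TPT statistics registers (which clear on read) are the source of the deltas:

#include "e1000_api.h"
#include "e1000_mac.h"

/* Illustrative only: refresh the adaptive-IFS inputs from the clear-on-read
 * statistics counters once per watchdog tick, then let the throttle adjust. */
static void watchdog_update_ifs(struct e1000_hw *hw)
{
	hw->mac.collision_delta = E1000_READ_REG(hw, E1000_COLC);
	hw->mac.tx_packet_delta = E1000_READ_REG(hw, E1000_TPT);

	if (hw->mac.adaptive_ifs)
		e1000_update_adaptive_generic(hw);
}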
+ **/ +void e1000_update_adaptive_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_update_adaptive_generic"); + + if (!mac->adaptive_ifs) { + DEBUGOUT("Not in Adaptive IFS mode!\n"); + return; + } + + if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { + if (mac->tx_packet_delta > MIN_NUM_XMITS) { + mac->in_ifs_mode = true; + if (mac->current_ifs_val < mac->ifs_max_val) { + if (!mac->current_ifs_val) + mac->current_ifs_val = mac->ifs_min_val; + else + mac->current_ifs_val += + mac->ifs_step_size; + E1000_WRITE_REG(hw, E1000_AIT, + mac->current_ifs_val); + } + } + } else { + if (mac->in_ifs_mode && + (mac->tx_packet_delta <= MIN_NUM_XMITS)) { + mac->current_ifs_val = 0; + mac->in_ifs_mode = false; + E1000_WRITE_REG(hw, E1000_AIT, 0); + } + } +} + +/** + * e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings + * @hw: pointer to the HW structure + * + * Verify that when not using auto-negotiation that MDI/MDIx is correctly + * set, which is forced to MDI mode only. + **/ +STATIC s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_validate_mdi_setting_generic"); + + if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { + DEBUGOUT("Invalid MDI setting detected\n"); + hw->phy.mdix = 1; + return -E1000_ERR_CONFIG; + } + + return E1000_SUCCESS; +} + +/** + * e1000_validate_mdi_setting_crossover_generic - Verify MDI/MDIx settings + * @hw: pointer to the HW structure + * + * Validate the MDI/MDIx setting, allowing for auto-crossover during forced + * operation. + **/ +s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_validate_mdi_setting_crossover_generic"); + UNREFERENCED_1PARAMETER(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_write_8bit_ctrl_reg_generic - Write a 8bit CTRL register + * @hw: pointer to the HW structure + * @reg: 32bit register offset such as E1000_SCTL + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes an address/data control type register. There are several of these + * and they all have the format address << 8 | data and bit 31 is polled for + * completion. + **/ +s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data) +{ + u32 i, regvalue = 0; + + DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic"); + + /* Set up the address and data */ + regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT); + E1000_WRITE_REG(hw, reg, regvalue); + + /* Poll the ready bit to see if the MDI read completed */ + for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) { + usec_delay(5); + regvalue = E1000_READ_REG(hw, reg); + if (regvalue & E1000_GEN_CTL_READY) + break; + } + if (!(regvalue & E1000_GEN_CTL_READY)) { + DEBUGOUT1("Reg %08x did not indicate ready\n", reg); + return -E1000_ERR_PHY; + } + + return E1000_SUCCESS; +} diff --git a/drivers/net/e1000/base/e1000_mac.h b/drivers/net/e1000/base/e1000_mac.h new file mode 100644 index 00000000..96a260c3 --- /dev/null +++ b/drivers/net/e1000/base/e1000_mac.h @@ -0,0 +1,95 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. 
Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _E1000_MAC_H_ +#define _E1000_MAC_H_ + +void e1000_init_mac_ops_generic(struct e1000_hw *hw); +#ifndef E1000_REMOVED +#define E1000_REMOVED(a) (0) +#endif /* E1000_REMOVED */ +void e1000_null_mac_generic(struct e1000_hw *hw); +s32 e1000_null_ops_generic(struct e1000_hw *hw); +s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d); +bool e1000_null_mng_mode(struct e1000_hw *hw); +void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a); +void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b); +int e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a); +s32 e1000_blink_led_generic(struct e1000_hw *hw); +s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw); +s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw); +s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw); +s32 e1000_cleanup_led_generic(struct e1000_hw *hw); +s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw); +s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw); +s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw); +s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw); +s32 e1000_force_mac_fc_generic(struct e1000_hw *hw); +s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw); +s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw); +s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw); +void e1000_set_lan_id_single_port(struct e1000_hw *hw); +void e1000_set_lan_id_multi_port_pci(struct e1000_hw *hw); +s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw, + u16 *speed, u16 *duplex); +s32 e1000_id_led_init_generic(struct e1000_hw *hw); +s32 e1000_led_on_generic(struct e1000_hw *hw); +s32 e1000_led_off_generic(struct e1000_hw *hw); +void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); +s32 e1000_set_default_fc_generic(struct e1000_hw *hw); +s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw); +s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw); +s32 
e1000_setup_led_generic(struct e1000_hw *hw); +s32 e1000_setup_link_generic(struct e1000_hw *hw); +s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw *hw); +s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data); + +u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr); + +void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw); +void e1000_clear_vfta_generic(struct e1000_hw *hw); +void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count); +void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw); +void e1000_put_hw_semaphore_generic(struct e1000_hw *hw); +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); +void e1000_reset_adaptive_generic(struct e1000_hw *hw); +void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop); +void e1000_update_adaptive_generic(struct e1000_hw *hw); +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); + +#endif diff --git a/drivers/net/e1000/base/e1000_manage.c b/drivers/net/e1000/base/e1000_manage.c new file mode 100644 index 00000000..8564a7f8 --- /dev/null +++ b/drivers/net/e1000/base/e1000_manage.c @@ -0,0 +1,576 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#include "e1000_api.h" + +/** + * e1000_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. 
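In other words, the value returned is the 8-bit two's complement of the byte sum, chosen so that summing the buffer together with its stored checksum wraps to zero. A stand-alone illustration in plain C (the buffer contents are arbitrary test data, not a real cookie):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as e1000_calculate_checksum(): byte sum, then negate. */
static uint8_t calc_checksum(const uint8_t *buffer, uint32_t length)
{
	uint8_t sum = 0;
	uint32_t i;

	for (i = 0; i < length; i++)
		sum += buffer[i];

	return (uint8_t)(0 - sum);
}

int main(void)
{
	uint8_t block[8] = { 0x49, 0x41, 0x4d, 0x54, 0x12, 0x34, 0x56, 0x00 };
	uint8_t verify = 0;
	unsigned int i;

	/* Store the checksum in the byte reserved for it (here, the last). */
	block[7] = calc_checksum(block, sizeof(block));

	for (i = 0; i < sizeof(block); i++)
		verify += block[i];

	printf("checksum=0x%02x, byte sum of block=0x%02x\n", block[7], verify);
	return verify == 0 ? 0 : 1;	/* a valid block sums to zero */
}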
+ **/ +u8 e1000_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + DEBUGFUNC("e1000_calculate_checksum"); + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8) (0 - sum); +} + +/** + * e1000_mng_enable_host_if_generic - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operation + * and also checks whether the previous command is completed. It busy waits + * in case of previous command is not completed. + **/ +s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw) +{ + u32 hicr; + u8 i; + + DEBUGFUNC("e1000_mng_enable_host_if_generic"); + + if (!hw->mac.arc_subsystem_valid) { + DEBUGOUT("ARC subsystem not valid.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Check that the host interface is enabled. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_EN)) { + DEBUGOUT("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + /* check the previous command is completed */ + for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_C)) + break; + msec_delay_irq(1); + } + + if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { + DEBUGOUT("Previous command timeout failed .\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + return E1000_SUCCESS; +} + +/** + * e1000_check_mng_mode_generic - Generic check management mode + * @hw: pointer to the HW structure + * + * Reads the firmware semaphore register and returns true (>0) if + * manageability is enabled, else false (0). + **/ +bool e1000_check_mng_mode_generic(struct e1000_hw *hw) +{ + u32 fwsm = E1000_READ_REG(hw, E1000_FWSM); + + DEBUGFUNC("e1000_check_mng_mode_generic"); + + + return (fwsm & E1000_FWSM_MODE_MASK) == + (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); +} + +/** + * e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on Tx + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. + **/ +bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw) +{ + struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; + u32 *buffer = (u32 *)&hw->mng_cookie; + u32 offset; + s32 ret_val, hdr_csum, csum; + u8 i, len; + + DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic"); + + hw->mac.tx_pkt_filtering = true; + + /* No manageability, no filtering */ + if (!hw->mac.ops.check_mng_mode(hw)) { + hw->mac.tx_pkt_filtering = false; + return hw->mac.tx_pkt_filtering; + } + + /* If we can't read from the host interface for whatever + * reason, disable filtering. + */ + ret_val = e1000_mng_enable_host_if_generic(hw); + if (ret_val != E1000_SUCCESS) { + hw->mac.tx_pkt_filtering = false; + return hw->mac.tx_pkt_filtering; + } + + /* Read in the header. Length and offset are in dwords. */ + len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; + offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; + for (i = 0; i < len; i++) + *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, + offset + i); + hdr_csum = hdr->checksum; + hdr->checksum = 0; + csum = e1000_calculate_checksum((u8 *)hdr, + E1000_MNG_DHCP_COOKIE_LENGTH); + /* If either the checksums or signature don't match, then + * the cookie area isn't considered valid, in which case we + * take the safe route of assuming Tx filtering is enabled. 
+ */ + if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { + hw->mac.tx_pkt_filtering = true; + return hw->mac.tx_pkt_filtering; + } + + /* Cookie area is valid, make the final check for filtering. */ + if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) + hw->mac.tx_pkt_filtering = false; + + return hw->mac.tx_pkt_filtering; +} + +/** + * e1000_mng_write_cmd_header_generic - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after does the checksum calculation. + **/ +s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr) +{ + u16 i, length = sizeof(struct e1000_host_mng_command_header); + + DEBUGFUNC("e1000_mng_write_cmd_header_generic"); + + /* Write the whole command header structure with new checksum. */ + + hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length); + + length >>= 2; + /* Write the relevant command block into the ram area. */ + for (i = 0; i < length; i++) { + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i, + *((u32 *) hdr + i)); + E1000_WRITE_FLUSH(hw); + } + + return E1000_SUCCESS; +} + +/** + * e1000_mng_host_if_write_generic - Write to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the offset given on the host if. + * It also does alignment considerations to do the writes in most efficient + * way. Also fills up the sum of the buffer in *buffer parameter. + **/ +s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum) +{ + u8 *tmp; + u8 *bufptr = buffer; + u32 data = 0; + u16 remaining, i, j, prev_bytes; + + DEBUGFUNC("e1000_mng_host_if_write_generic"); + + /* sum = only sum of the data and it is not checksum */ + + if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) + return -E1000_ERR_PARAM; + + tmp = (u8 *)&data; + prev_bytes = offset & 0x3; + offset >>= 2; + + if (prev_bytes) { + data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset); + for (j = prev_bytes; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data); + length -= j - prev_bytes; + offset++; + } + + remaining = length & 0x3; + length -= remaining; + + /* Calculate length in DWORDs */ + length >>= 2; + + /* The device driver writes the relevant command block into the + * ram area. + */ + for (i = 0; i < length; i++) { + for (j = 0; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, + data); + } + if (remaining) { + for (j = 0; j < sizeof(u32); j++) { + if (j < remaining) + *(tmp + j) = *bufptr++; + else + *(tmp + j) = 0; + + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, + data); + } + + return E1000_SUCCESS; +} + +/** + * e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. 
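Callers typically reach this from the transmit path when manageability is active: if tx_pkt_filtering is set (see e1000_enable_tx_pkt_filtering_generic above), frames identified as DHCP are mirrored to the firmware. A hedged sketch of such a call site; packet_is_dhcp is a hypothetical classifier standing in for the driver's own parsing, and oversize frames are rejected inside e1000_mng_host_if_write_generic so no extra length check is shown:

#include "e1000_api.h"
#include "e1000_manage.h"

/* Hypothetical classifier standing in for the driver's DHCP detection. */
extern bool packet_is_dhcp(const u8 *frame, u16 len);

/* Illustrative only: mirror outgoing DHCP frames to manageability firmware. */
static void maybe_pass_dhcp_to_fw(struct e1000_hw *hw, u8 *frame, u16 len)
{
	if (!hw->mac.tx_pkt_filtering)
		return;

	if (packet_is_dhcp(frame, len))
		(void)e1000_mng_write_dhcp_info_generic(hw, frame, len);
}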
+ **/ +s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer, + u16 length) +{ + struct e1000_host_mng_command_header hdr; + s32 ret_val; + u32 hicr; + + DEBUGFUNC("e1000_mng_write_dhcp_info_generic"); + + hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; + hdr.command_length = length; + hdr.reserved1 = 0; + hdr.reserved2 = 0; + hdr.checksum = 0; + + /* Enable the host interface */ + ret_val = e1000_mng_enable_host_if_generic(hw); + if (ret_val) + return ret_val; + + /* Populate the host interface with the contents of "buffer". */ + ret_val = e1000_mng_host_if_write_generic(hw, buffer, length, + sizeof(hdr), &(hdr.checksum)); + if (ret_val) + return ret_val; + + /* Write the manageability command header */ + ret_val = e1000_mng_write_cmd_header_generic(hw, &hdr); + if (ret_val) + return ret_val; + + /* Tell the ARC a new command is pending. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); + + return E1000_SUCCESS; +} + +/** + * e1000_enable_mng_pass_thru - Check if management passthrough is needed + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + **/ +bool e1000_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + + DEBUGFUNC("e1000_enable_mng_pass_thru"); + + if (!hw->mac.asf_firmware_present) + return false; + + manc = E1000_READ_REG(hw, E1000_MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN)) + return false; + + if (hw->mac.has_fwsm) { + fwsm = E1000_READ_REG(hw, E1000_FWSM); + factps = E1000_READ_REG(hw, E1000_FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) + return true; + } else if ((hw->mac.type == e1000_82574) || + (hw->mac.type == e1000_82583)) { + u16 data; + s32 ret_val; + + factps = E1000_READ_REG(hw, E1000_FACTPS); + ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); + if (ret_val) + return false; + + if (!(factps & E1000_FACTPS_MNGCG) && + ((data & E1000_NVM_INIT_CTRL2_MNGM) == + (e1000_mng_mode_pt << 13))) + return true; + } else if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + return true; + } + + return false; +} + +/** + * e1000_host_interface_command - Writes buffer to host interface + * @hw: pointer to the HW structure + * @buffer: contains a command to write + * @length: the byte length of the buffer, must be multiple of 4 bytes + * + * Writes a buffer to the Host Interface. Upon success, returns E1000_SUCCESS + * else returns E1000_ERR_HOST_INTERFACE_COMMAND. + **/ +s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length) +{ + u32 hicr, i; + + DEBUGFUNC("e1000_host_interface_command"); + + if (!(hw->mac.arc_subsystem_valid)) { + DEBUGOUT("Hardware doesn't support host interface command.\n"); + return E1000_SUCCESS; + } + + if (!hw->mac.asf_firmware_present) { + DEBUGOUT("Firmware is not present.\n"); + return E1000_SUCCESS; + } + + if (length == 0 || length & 0x3 || + length > E1000_HI_MAX_BLOCK_BYTE_LENGTH) { + DEBUGOUT("Buffer length failure.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Check that the host interface is enabled. 
*/ + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_EN)) { + DEBUGOUT("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Calculate length in DWORDs */ + length >>= 2; + + /* The device driver writes the relevant command block + * into the ram area. + */ + for (i = 0; i < length; i++) + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i, + *((u32 *)buffer + i)); + + /* Setting this bit tells the ARC that a new command is pending. */ + E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); + + for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_C)) + break; + msec_delay(1); + } + + /* Check command successful completion. */ + if (i == E1000_HI_COMMAND_TIMEOUT || + (!(E1000_READ_REG(hw, E1000_HICR) & E1000_HICR_SV))) { + DEBUGOUT("Command has failed with no status valid.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + for (i = 0; i < length; i++) + *((u32 *)buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, + E1000_HOST_IF, + i); + + return E1000_SUCCESS; +} +/** + * e1000_load_firmware - Writes proxy FW code buffer to host interface + * and execute. + * @hw: pointer to the HW structure + * @buffer: contains a firmware to write + * @length: the byte length of the buffer, must be multiple of 4 bytes + * + * Upon success returns E1000_SUCCESS, returns E1000_ERR_CONFIG if not enabled + * in HW else returns E1000_ERR_HOST_INTERFACE_COMMAND. + **/ +s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length) +{ + u32 hicr, hibba, fwsm, icr, i; + + DEBUGFUNC("e1000_load_firmware"); + + if (hw->mac.type < e1000_i210) { + DEBUGOUT("Hardware doesn't support loading FW by the driver\n"); + return -E1000_ERR_CONFIG; + } + + /* Check that the host interface is enabled. 
*/ + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_EN)) { + DEBUGOUT("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_CONFIG; + } + if (!(hicr & E1000_HICR_MEMORY_BASE_EN)) { + DEBUGOUT("E1000_HICR_MEMORY_BASE_EN bit disabled.\n"); + return -E1000_ERR_CONFIG; + } + + if (length == 0 || length & 0x3 || length > E1000_HI_FW_MAX_LENGTH) { + DEBUGOUT("Buffer length failure.\n"); + return -E1000_ERR_INVALID_ARGUMENT; + } + + /* Clear notification from ROM-FW by reading ICR register */ + icr = E1000_READ_REG(hw, E1000_ICR_V2); + + /* Reset ROM-FW */ + hicr = E1000_READ_REG(hw, E1000_HICR); + hicr |= E1000_HICR_FW_RESET_ENABLE; + E1000_WRITE_REG(hw, E1000_HICR, hicr); + hicr |= E1000_HICR_FW_RESET; + E1000_WRITE_REG(hw, E1000_HICR, hicr); + E1000_WRITE_FLUSH(hw); + + /* Wait till MAC notifies about its readiness after ROM-FW reset */ + for (i = 0; i < (E1000_HI_COMMAND_TIMEOUT * 2); i++) { + icr = E1000_READ_REG(hw, E1000_ICR_V2); + if (icr & E1000_ICR_MNG) + break; + msec_delay(1); + } + + /* Check for timeout */ + if (i == E1000_HI_COMMAND_TIMEOUT) { + DEBUGOUT("FW reset failed.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Wait till MAC is ready to accept new FW code */ + for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { + fwsm = E1000_READ_REG(hw, E1000_FWSM); + if ((fwsm & E1000_FWSM_FW_VALID) && + ((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT == + E1000_FWSM_HI_EN_ONLY_MODE)) + break; + msec_delay(1); + } + + /* Check for timeout */ + if (i == E1000_HI_COMMAND_TIMEOUT) { + DEBUGOUT("FW reset failed.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Calculate length in DWORDs */ + length >>= 2; + + /* The device driver writes the relevant FW code block + * into the ram area in DWORDs via 1kB ram addressing window. + */ + for (i = 0; i < length; i++) { + if (!(i % E1000_HI_FW_BLOCK_DWORD_LENGTH)) { + /* Point to correct 1kB ram window */ + hibba = E1000_HI_FW_BASE_ADDRESS + + ((E1000_HI_FW_BLOCK_DWORD_LENGTH << 2) * + (i / E1000_HI_FW_BLOCK_DWORD_LENGTH)); + + E1000_WRITE_REG(hw, E1000_HIBBA, hibba); + } + + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, + i % E1000_HI_FW_BLOCK_DWORD_LENGTH, + *((u32 *)buffer + i)); + } + + /* Setting this bit tells the ARC that a new FW is ready to execute. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); + + for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_C)) + break; + msec_delay(1); + } + + /* Check for successful FW start. */ + if (i == E1000_HI_COMMAND_TIMEOUT) { + DEBUGOUT("New FW did not start within timeout period.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + return E1000_SUCCESS; +} + + diff --git a/drivers/net/e1000/base/e1000_manage.h b/drivers/net/e1000/base/e1000_manage.h new file mode 100644 index 00000000..25be1156 --- /dev/null +++ b/drivers/net/e1000/base/e1000_manage.h @@ -0,0 +1,95 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _E1000_MANAGE_H_ +#define _E1000_MANAGE_H_ + +bool e1000_check_mng_mode_generic(struct e1000_hw *hw); +bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw); +s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw); +s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum); +s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr); +s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, + u8 *buffer, u16 length); +bool e1000_enable_mng_pass_thru(struct e1000_hw *hw); +u8 e1000_calculate_checksum(u8 *buffer, u32 length); +s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length); +s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length); + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_if_only +}; + +#define E1000_FACTPS_MNGCG 0x20000000 + +#define E1000_FWSM_MODE_MASK 0xE +#define E1000_FWSM_MODE_SHIFT 1 +#define E1000_FWSM_FW_VALID 0x00008000 +#define E1000_FWSM_HI_EN_ONLY_MODE 0x4 + +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 + +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ +#define E1000_HI_COMMAND_TIMEOUT 500 /* Process HI cmd limit */ +#define E1000_HI_FW_BASE_ADDRESS 0x10000 +#define E1000_HI_FW_MAX_LENGTH (64 * 1024) /* Num of bytes */ +#define E1000_HI_FW_BLOCK_DWORD_LENGTH 256 /* Num of DWORDs per page */ +#define E1000_HICR_MEMORY_BASE_EN 0x200 /* MB Enable bit - RO */ +#define E1000_HICR_EN 0x01 /* Enable bit - RO */ +/* Driver sets this bit when done to put command in RAM */ +#define E1000_HICR_C 0x02 +#define E1000_HICR_SV 0x04 /* Status Validity */ +#define E1000_HICR_FW_RESET_ENABLE 0x40 +#define E1000_HICR_FW_RESET 0x80 + +/* Intel(R) 
Active Management Technology signature */ +#define E1000_IAMT_SIGNATURE 0x544D4149 + +#endif diff --git a/drivers/net/e1000/base/e1000_mbx.c b/drivers/net/e1000/base/e1000_mbx.c new file mode 100644 index 00000000..6daf16b0 --- /dev/null +++ b/drivers/net/e1000/base/e1000_mbx.c @@ -0,0 +1,777 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#include "e1000_mbx.h" + +/** + * e1000_null_mbx_check_for_flag - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_null_mbx_check_for_flag(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG mbx_id) +{ + DEBUGFUNC("e1000_null_mbx_check_flag"); + UNREFERENCED_2PARAMETER(hw, mbx_id); + + return E1000_SUCCESS; +} + +/** + * e1000_null_mbx_transact - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_null_mbx_transact(struct e1000_hw E1000_UNUSEDARG *hw, + u32 E1000_UNUSEDARG *msg, + u16 E1000_UNUSEDARG size, + u16 E1000_UNUSEDARG mbx_id) +{ + DEBUGFUNC("e1000_null_mbx_rw_msg"); + UNREFERENCED_4PARAMETER(hw, msg, size, mbx_id); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfully read message from buffer + **/ +s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_read_mbx"); + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + if (mbx->ops.read) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * e1000_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_mbx"); + + if (size > mbx->size) + ret_val = -E1000_ERR_MBX; + + else if (mbx->ops.write) + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * e1000_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_msg"); + + if (mbx->ops.check_for_msg) + ret_val = mbx->ops.check_for_msg(hw, mbx_id); + + return ret_val; +} + +/** + * e1000_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_ack"); + + if (mbx->ops.check_for_ack) + ret_val = mbx->ops.check_for_ack(hw, mbx_id); + + return ret_val; +} + +/** + * e1000_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_rst"); + + if (mbx->ops.check_for_rst) + ret_val = mbx->ops.check_for_rst(hw, mbx_id); + + return ret_val; +} + +/** + * 
e1000_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +STATIC s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + DEBUGFUNC("e1000_poll_for_msg"); + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + usec_delay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; +} + +/** + * e1000_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +STATIC s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + DEBUGFUNC("e1000_poll_for_ack"); + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + usec_delay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; +} + +/** + * e1000_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. 
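Together with e1000_write_posted_mbx below, this gives the VF a simple blocking request/response primitive. A hedged sketch of how a VF-side caller might use it, modeled on the reset handshake and the message conventions documented in e1000_mbx.h (in-tree sketch only; assumes the mailbox parameters are already initialized):

#include "e1000_mbx.h"

/* Illustrative only: ask the PF to reset this VF and wait for its reply.
 * Assumes e1000_init_mbx_params_vf() has run and hw->mbx.timeout has been
 * set non-zero (e.g. E1000_VF_MBX_INIT_TIMEOUT); otherwise the posted ops
 * bail out immediately. */
static s32 vf_request_reset(struct e1000_hw *hw)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	u32 msgbuf[1];
	s32 ret_val;

	msgbuf[0] = E1000_VF_RESET;
	ret_val = mbx->ops.write_posted(hw, msgbuf, 1, 0);
	if (ret_val)
		return ret_val;

	ret_val = mbx->ops.read_posted(hw, msgbuf, 1, 0);
	if (ret_val)
		return ret_val;

	/* per e1000_mbx.h, the PF echoes the opcode OR'd with ACK or NACK */
	if (msgbuf[0] != (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK))
		return -E1000_ERR_MBX;

	return E1000_SUCCESS;
}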
+ **/ +s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_read_posted_mbx"); + + if (!mbx->ops.read) + goto out; + + ret_val = e1000_poll_for_msg(hw, mbx_id); + + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); +out: + return ret_val; +} + +/** + * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_write_posted_mbx"); + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg */ + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = e1000_poll_for_ack(hw, mbx_id); +out: + return ret_val; +} + +/** + * e1000_init_mbx_ops_generic - Initialize mbx function pointers + * @hw: pointer to the HW structure + * + * Sets the function pointers to no-op functions + **/ +void e1000_init_mbx_ops_generic(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + mbx->ops.init_params = e1000_null_ops_generic; + mbx->ops.read = e1000_null_mbx_transact; + mbx->ops.write = e1000_null_mbx_transact; + mbx->ops.check_for_msg = e1000_null_mbx_check_for_flag; + mbx->ops.check_for_ack = e1000_null_mbx_check_for_flag; + mbx->ops.check_for_rst = e1000_null_mbx_check_for_flag; + mbx->ops.read_posted = e1000_read_posted_mbx; + mbx->ops.write_posted = e1000_write_posted_mbx; +} + +/** + * e1000_read_v2p_mailbox - read v2p mailbox + * @hw: pointer to the HW structure + * + * This function is used to read the v2p mailbox without losing the read to + * clear status bits. + **/ +STATIC u32 e1000_read_v2p_mailbox(struct e1000_hw *hw) +{ + u32 v2p_mailbox = E1000_READ_REG(hw, E1000_V2PMAILBOX(0)); + + v2p_mailbox |= hw->dev_spec.vf.v2p_mailbox; + hw->dev_spec.vf.v2p_mailbox |= v2p_mailbox & E1000_V2PMAILBOX_R2C_BITS; + + return v2p_mailbox; +} + +/** + * e1000_check_for_bit_vf - Determine if a status bit was set + * @hw: pointer to the HW structure + * @mask: bitmask for bits to be tested and cleared + * + * This function is used to check for the read to clear bits within + * the V2P mailbox. 
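The software copy matters because the E1000_V2PMAILBOX_R2C_BITS (PFSTS, PFACK and RSTD) clear in hardware on the first read; caching them in hw->dev_spec.vf.v2p_mailbox keeps a bit visible until the check that cares about it consumes it. A small stand-alone illustration of that caching pattern using a fake register (plain C; the names and the 0x90 test value are illustrative):

#include <stdint.h>
#include <stdio.h>

#define R2C_BITS	0x000000B0u	/* PFSTS | PFACK | RSTD, as in e1000_mbx.h */

static uint32_t fake_reg = 0x00000090;	/* pretend the PF raised PFSTS and RSTD */
static uint32_t cached;			/* plays the role of vf.v2p_mailbox */

static uint32_t read_mailbox(void)
{
	uint32_t val = fake_reg;

	fake_reg &= ~R2C_BITS;		/* hardware clears these on read... */
	cached |= val & R2C_BITS;	/* ...so remember them in software */
	return val | cached;
}

static int check_for_bit(uint32_t mask)
{
	int set = (read_mailbox() & mask) != 0;

	cached &= ~mask;		/* the interested caller consumes the bit */
	return set;
}

int main(void)
{
	/* Both checks succeed even though the second register read returns 0:
	 * the cached copy preserved the RSTD bit across the first read. */
	printf("PFSTS seen: %d\n", check_for_bit(0x10));
	printf("RSTD  seen: %d\n", check_for_bit(0x80));
	return 0;
}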
+ **/ +STATIC s32 e1000_check_for_bit_vf(struct e1000_hw *hw, u32 mask) +{ + u32 v2p_mailbox = e1000_read_v2p_mailbox(hw); + s32 ret_val = -E1000_ERR_MBX; + + if (v2p_mailbox & mask) + ret_val = E1000_SUCCESS; + + hw->dev_spec.vf.v2p_mailbox &= ~mask; + + return ret_val; +} + +/** + * e1000_check_for_msg_vf - checks to see if the PF has sent mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the Status bit or else ERR_MBX + **/ +STATIC s32 e1000_check_for_msg_vf(struct e1000_hw *hw, + u16 E1000_UNUSEDARG mbx_id) +{ + s32 ret_val = -E1000_ERR_MBX; + + UNREFERENCED_1PARAMETER(mbx_id); + DEBUGFUNC("e1000_check_for_msg_vf"); + + if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFSTS)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * e1000_check_for_ack_vf - checks to see if the PF has ACK'd + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX + **/ +STATIC s32 e1000_check_for_ack_vf(struct e1000_hw *hw, + u16 E1000_UNUSEDARG mbx_id) +{ + s32 ret_val = -E1000_ERR_MBX; + + UNREFERENCED_1PARAMETER(mbx_id); + DEBUGFUNC("e1000_check_for_ack_vf"); + + if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFACK)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * e1000_check_for_rst_vf - checks to see if the PF has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns true if the PF has set the reset done bit or else false + **/ +STATIC s32 e1000_check_for_rst_vf(struct e1000_hw *hw, + u16 E1000_UNUSEDARG mbx_id) +{ + s32 ret_val = -E1000_ERR_MBX; + + UNREFERENCED_1PARAMETER(mbx_id); + DEBUGFUNC("e1000_check_for_rst_vf"); + + if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD | + E1000_V2PMAILBOX_RSTI))) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * e1000_obtain_mbx_lock_vf - obtain mailbox lock + * @hw: pointer to the HW structure + * + * return SUCCESS if we obtained the mailbox lock + **/ +STATIC s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_obtain_mbx_lock_vf"); + + /* Take ownership of the buffer */ + E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); + + /* reserve mailbox for vf use */ + if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) + ret_val = E1000_SUCCESS; + + return ret_val; +} + +/** + * e1000_write_mbx_vf - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +STATIC s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 E1000_UNUSEDARG mbx_id) +{ + s32 ret_val; + u16 i; + + UNREFERENCED_1PARAMETER(mbx_id); + + DEBUGFUNC("e1000_write_mbx_vf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = e1000_obtain_mbx_lock_vf(hw); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + e1000_check_for_msg_vf(hw, 0); + e1000_check_for_ack_vf(hw, 0); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(0), i, msg[i]); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + /* Drop VFU and interrupt the PF to tell it a message has been sent 
*/ + E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_REQ); + +out_no_write: + return ret_val; +} + +/** + * e1000_read_mbx_vf - Reads a message from the inbox intended for vf + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfully read message from buffer + **/ +STATIC s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 E1000_UNUSEDARG mbx_id) +{ + s32 ret_val = E1000_SUCCESS; + u16 i; + + DEBUGFUNC("e1000_read_mbx_vf"); + UNREFERENCED_1PARAMETER(mbx_id); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = e1000_obtain_mbx_lock_vf(hw); + if (ret_val) + goto out_no_read; + + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(0), i); + + /* Acknowledge receipt and release mailbox, then we're done */ + E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * e1000_init_mbx_params_vf - set initial values for vf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for vf mailbox + */ +s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + + /* start mailbox as timed out and let the reset_hw call set the timeout + * value to begin communications */ + mbx->timeout = 0; + mbx->usec_delay = E1000_VF_MBX_INIT_DELAY; + + mbx->size = E1000_VFMAILBOX_SIZE; + + mbx->ops.read = e1000_read_mbx_vf; + mbx->ops.write = e1000_write_mbx_vf; + mbx->ops.read_posted = e1000_read_posted_mbx; + mbx->ops.write_posted = e1000_write_posted_mbx; + mbx->ops.check_for_msg = e1000_check_for_msg_vf; + mbx->ops.check_for_ack = e1000_check_for_ack_vf; + mbx->ops.check_for_rst = e1000_check_for_rst_vf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + + return E1000_SUCCESS; +} + +STATIC s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask) +{ + u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR); + s32 ret_val = -E1000_ERR_MBX; + + if (mbvficr & mask) { + ret_val = E1000_SUCCESS; + E1000_WRITE_REG(hw, E1000_MBVFICR, mask); + } + + return ret_val; +} + +/** + * e1000_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +STATIC s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_msg_pf"); + + if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * e1000_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +STATIC s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_ack_pf"); + + if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * e1000_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns 
SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +STATIC s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) +{ + u32 vflre = E1000_READ_REG(hw, E1000_VFLRE); + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_rst_pf"); + + if (vflre & (1 << vf_number)) { + ret_val = E1000_SUCCESS; + E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number)); + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * e1000_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +STATIC s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + u32 p2v_mailbox; + + DEBUGFUNC("e1000_obtain_mbx_lock_pf"); + + /* Take ownership of the buffer */ + E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); + + /* reserve mailbox for vf use */ + p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) + ret_val = E1000_SUCCESS; + + return ret_val; +} + +/** + * e1000_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +STATIC s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + DEBUGFUNC("e1000_write_mbx_pf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + e1000_check_for_msg_pf(hw, vf_number); + e1000_check_for_ack_pf(hw, vf_number); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out_no_write: + return ret_val; + +} + +/** + * e1000_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. 
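A typical PF caller therefore looks like the sketch below: triggered by the VF mailbox interrupt, it checks for a pending request, reads it, and writes back the opcode OR'd with an ACK or NACK. Hedged, in-tree-only sketch; handle_vf_request is a hypothetical helper standing in for the PF driver's own dispatch code:

#include "e1000_mbx.h"

/* Hypothetical dispatch helper provided by the PF driver. */
extern s32 handle_vf_request(struct e1000_hw *hw, u32 *msg, u16 vf);

/* Illustrative only: service one VF's mailbox, typically from the PF's
 * mailbox interrupt handler. */
static void pf_service_vf_mbx(struct e1000_hw *hw, u16 vf)
{
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	u32 reply;

	if (e1000_check_for_msg(hw, vf))
		return;			/* no request pending from this VF */

	if (e1000_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf))
		return;

	reply = handle_vf_request(hw, msgbuf, vf) ?
		E1000_VT_MSGTYPE_NACK : E1000_VT_MSGTYPE_ACK;

	/* echo the opcode back to the VF with the ACK/NACK flag set */
	msgbuf[0] |= reply;
	e1000_write_mbx(hw, msgbuf, 1, vf);
}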
+ **/ +STATIC s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + DEBUGFUNC("e1000_read_mbx_pf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i); + + /* Acknowledge the message and release buffer */ + E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * e1000_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +s32 e1000_init_mbx_params_pf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: + case e1000_i354: + mbx->timeout = 0; + mbx->usec_delay = 0; + + mbx->size = E1000_VFMAILBOX_SIZE; + + mbx->ops.read = e1000_read_mbx_pf; + mbx->ops.write = e1000_write_mbx_pf; + mbx->ops.read_posted = e1000_read_posted_mbx; + mbx->ops.write_posted = e1000_write_posted_mbx; + mbx->ops.check_for_msg = e1000_check_for_msg_pf; + mbx->ops.check_for_ack = e1000_check_for_ack_pf; + mbx->ops.check_for_rst = e1000_check_for_rst_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + default: + return E1000_SUCCESS; + } +} + diff --git a/drivers/net/e1000/base/e1000_mbx.h b/drivers/net/e1000/base/e1000_mbx.h new file mode 100644 index 00000000..563dcb9d --- /dev/null +++ b/drivers/net/e1000/base/e1000_mbx.h @@ -0,0 +1,105 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#ifndef _E1000_MBX_H_ +#define _E1000_MBX_H_ + +#include "e1000_api.h" + +/* Define mailbox register bits */ +#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ + +#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ +#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ +#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + +#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ + +/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is E1000_PF_*. + * Message ACK's are the value or'd with 0xF0000000 + */ +/* Msgs below or'd with this are the ACK */ +#define E1000_VT_MSGTYPE_ACK 0x80000000 +/* Msgs below or'd with this are the NACK */ +#define E1000_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define E1000_VT_MSGTYPE_CTS 0x20000000 +#define E1000_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) + +#define E1000_VF_RESET 0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ +#define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_MULTICAST_OVERFLOW (0x80 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ +#define E1000_VF_SET_VLAN_ADD (0x01 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_LPE 0x05 /* reqs to set VMOLR.LPE */ +#define E1000_VF_SET_PROMISC 0x06 /* reqs to clear VMOLR.ROPE/MPME*/ +#define E1000_VF_SET_PROMISC_UNICAST (0x01 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) + +#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ + +#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define E1000_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_check_for_msg(struct e1000_hw *, u16); +s32 e1000_check_for_ack(struct e1000_hw *, u16); +s32 e1000_check_for_rst(struct e1000_hw *, 
u16); +void e1000_init_mbx_ops_generic(struct e1000_hw *hw); +s32 e1000_init_mbx_params_vf(struct e1000_hw *); +s32 e1000_init_mbx_params_pf(struct e1000_hw *); + +#endif /* _E1000_MBX_H_ */ diff --git a/drivers/net/e1000/base/e1000_nvm.c b/drivers/net/e1000/base/e1000_nvm.c new file mode 100644 index 00000000..762acd16 --- /dev/null +++ b/drivers/net/e1000/base/e1000_nvm.c @@ -0,0 +1,1384 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#include "e1000_api.h" + +STATIC void e1000_reload_nvm_generic(struct e1000_hw *hw); + +/** + * e1000_init_nvm_ops_generic - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * Setups up the function pointers to no-op functions + **/ +void e1000_init_nvm_ops_generic(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + DEBUGFUNC("e1000_init_nvm_ops_generic"); + + /* Initialize function pointers */ + nvm->ops.init_params = e1000_null_ops_generic; + nvm->ops.acquire = e1000_null_ops_generic; + nvm->ops.read = e1000_null_read_nvm; + nvm->ops.release = e1000_null_nvm_generic; + nvm->ops.reload = e1000_reload_nvm_generic; + nvm->ops.update = e1000_null_ops_generic; + nvm->ops.valid_led_default = e1000_null_led_default; + nvm->ops.validate = e1000_null_ops_generic; + nvm->ops.write = e1000_null_write_nvm; +} + +/** + * e1000_null_nvm_read - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_read_nvm(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b, + u16 E1000_UNUSEDARG *c) +{ + DEBUGFUNC("e1000_null_read_nvm"); + UNREFERENCED_4PARAMETER(hw, a, b, c); + return E1000_SUCCESS; +} + +/** + * e1000_null_nvm_generic - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_nvm_generic(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_null_nvm_generic"); + UNREFERENCED_1PARAMETER(hw); + return; +} + +/** + * e1000_null_led_default - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_led_default(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG *data) +{ + DEBUGFUNC("e1000_null_led_default"); + UNREFERENCED_2PARAMETER(hw, data); + return E1000_SUCCESS; +} + +/** + * e1000_null_write_nvm - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_write_nvm(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b, + u16 E1000_UNUSEDARG *c) +{ + DEBUGFUNC("e1000_null_write_nvm"); + UNREFERENCED_4PARAMETER(hw, a, b, c); + return E1000_SUCCESS; +} + +/** + * e1000_raise_eec_clk - Raise EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Enable/Raise the EEPROM clock bit. + **/ +STATIC void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd | E1000_EECD_SK; + E1000_WRITE_REG(hw, E1000_EECD, *eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(hw->nvm.delay_usec); +} + +/** + * e1000_lower_eec_clk - Lower EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Clear/Lower the EEPROM clock bit. + **/ +STATIC void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd & ~E1000_EECD_SK; + E1000_WRITE_REG(hw, E1000_EECD, *eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(hw->nvm.delay_usec); +} + +/** + * e1000_shift_out_eec_bits - Shift data bits our to the EEPROM + * @hw: pointer to the HW structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out + * + * We need to shift 'count' bits out to the EEPROM. So, the value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. 
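 *
 * As a usage sketch (this mirrors how e1000_read_nvm_spi() later in this
 * file frames a SPI read; the local variables exist only for the example
 * and the acquire/ready/standby handling is omitted):
 *
 *      u16 word, word_offset = 0;      /* example word offset */
 *
 *      e1000_shift_out_eec_bits(hw, NVM_READ_OPCODE_SPI, hw->nvm.opcode_bits);
 *      e1000_shift_out_eec_bits(hw, (u16)(word_offset * 2),
 *                               hw->nvm.address_bits);
 *      word = e1000_shift_in_eec_bits(hw, 16); /* clock 16 data bits back in */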
+ **/ +STATIC void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u32 mask; + + DEBUGFUNC("e1000_shift_out_eec_bits"); + + mask = 0x01 << (count - 1); + if (nvm->type == e1000_nvm_eeprom_microwire) + eecd &= ~E1000_EECD_DO; + else + if (nvm->type == e1000_nvm_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + + usec_delay(nvm->delay_usec); + + e1000_raise_eec_clk(hw, &eecd); + e1000_lower_eec_clk(hw, &eecd); + + mask >>= 1; + } while (mask); + + eecd &= ~E1000_EECD_DI; + E1000_WRITE_REG(hw, E1000_EECD, eecd); +} + +/** + * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM + * @hw: pointer to the HW structure + * @count: number of bits to shift in + * + * In order to read a register from the EEPROM, we need to shift 'count' bits + * in from the EEPROM. Bits are "shifted in" by raising the clock input to + * the EEPROM (setting the SK bit), and then reading the value of the data out + * "DO" bit. During this "shifting in" process the data in "DI" bit should + * always be clear. + **/ +STATIC u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + DEBUGFUNC("e1000_shift_in_eec_bits"); + + eecd = E1000_READ_REG(hw, E1000_EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; + e1000_raise_eec_clk(hw, &eecd); + + eecd = E1000_READ_REG(hw, E1000_EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_eec_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + **/ +s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + + DEBUGFUNC("e1000_poll_eerd_eewr_done"); + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) + reg = E1000_READ_REG(hw, E1000_EERD); + else + reg = E1000_READ_REG(hw, E1000_EEWR); + + if (reg & E1000_NVM_RW_REG_DONE) + return E1000_SUCCESS; + + usec_delay(5); + } + + return -E1000_ERR_NVM; +} + +/** + * e1000_acquire_nvm_generic - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +s32 e1000_acquire_nvm_generic(struct e1000_hw *hw) +{ + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; + + DEBUGFUNC("e1000_acquire_nvm_generic"); + + E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ); + eecd = E1000_READ_REG(hw, E1000_EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; + usec_delay(5); + eecd = E1000_READ_REG(hw, E1000_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + DEBUGOUT("Could not acquire NVM grant\n"); + return -E1000_ERR_NVM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. 
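 *
 * For example, e1000_write_nvm_spi() later in this file relies on this CS
 * toggle to delimit consecutive opcodes (sketch, surrounding acquire/ready
 * steps omitted):
 *
 *      e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, hw->nvm.opcode_bits);
 *      e1000_standby_nvm(hw);  /* CS toggle: the WREN opcode is now latched */
 *      e1000_shift_out_eec_bits(hw, NVM_WRITE_OPCODE_SPI, hw->nvm.opcode_bits);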
+ **/ +STATIC void e1000_standby_nvm(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + + DEBUGFUNC("e1000_standby_nvm"); + + if (nvm->type == e1000_nvm_eeprom_microwire) { + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + + e1000_raise_eec_clk(hw, &eecd); + + /* Select EEPROM */ + eecd |= E1000_EECD_CS; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + + e1000_lower_eec_clk(hw, &eecd); + } else if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + } +} + +/** + * e1000_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. + **/ +void e1000_stop_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + DEBUGFUNC("e1000_stop_nvm"); + + eecd = E1000_READ_REG(hw, E1000_EECD); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= E1000_EECD_CS; + e1000_lower_eec_clk(hw, &eecd); + } else if (hw->nvm.type == e1000_nvm_eeprom_microwire) { + /* CS on Microwire is active-high */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + e1000_raise_eec_clk(hw, &eecd); + e1000_lower_eec_clk(hw, &eecd); + } +} + +/** + * e1000_release_nvm_generic - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +void e1000_release_nvm_generic(struct e1000_hw *hw) +{ + u32 eecd; + + DEBUGFUNC("e1000_release_nvm_generic"); + + e1000_stop_nvm(hw); + + eecd = E1000_READ_REG(hw, E1000_EECD); + eecd &= ~E1000_EECD_REQ; + E1000_WRITE_REG(hw, E1000_EECD, eecd); +} + +/** + * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Setups the EEPROM for reading and writing. + **/ +STATIC s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u8 spi_stat_reg; + + DEBUGFUNC("e1000_ready_nvm_eeprom"); + + if (nvm->type == e1000_nvm_eeprom_microwire) { + /* Clear SK and DI */ + eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + /* Set CS */ + eecd |= E1000_EECD_CS; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + } else if (nvm->type == e1000_nvm_eeprom_spi) { + u16 timeout = NVM_MAX_RETRY_SPI; + + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(1); + + /* Read "Status Register" repeatedly until the LSB is cleared. + * The EEPROM will signal that the command has been completed + * by clearing bit 0 of the internal status register. If it's + * not cleared within 'timeout', then error out. 
+ */ + while (timeout) { + e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, + hw->nvm.opcode_bits); + spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) + break; + + usec_delay(5); + e1000_standby_nvm(hw); + timeout--; + } + + if (!timeout) { + DEBUGOUT("SPI NVM Status error\n"); + return -E1000_ERR_NVM; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_nvm_spi - Read EEPROM's using SPI + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM. + **/ +s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u16 word_in; + u8 read_opcode = NVM_READ_OPCODE_SPI; + + DEBUGFUNC("e1000_read_nvm_spi"); + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + e1000_standby_nvm(hw); + + if ((nvm->address_bits == 8) && (offset >= 128)) + read_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); + + /* Read the data. SPI NVMs increment the address with each byte + * read and will roll over if reading beyond the end. This allows + * us to read the whole NVM from any offset + */ + for (i = 0; i < words; i++) { + word_in = e1000_shift_in_eec_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + +release: + nvm->ops.release(hw); + + return ret_val; +} + +/** + * e1000_read_nvm_microwire - Reads EEPROM's using microwire + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM. + **/ +s32 e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u8 read_opcode = NVM_READ_OPCODE_MICROWIRE; + + DEBUGFUNC("e1000_read_nvm_microwire"); + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + for (i = 0; i < words; i++) { + /* Send the READ command (opcode + addr) */ + e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)(offset + i), + nvm->address_bits); + + /* Read the data. For microwire, each word requires the + * overhead of setup and tear-down. 
+ */ + data[i] = e1000_shift_in_eec_bits(hw, 16); + e1000_standby_nvm(hw); + } + +release: + nvm->ops.release(hw); + + return ret_val; +} + +/** + * e1000_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_read_nvm_eerd"); + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + + E1000_NVM_RW_REG_START; + + E1000_WRITE_REG(hw, E1000_EERD, eerd); + ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (E1000_READ_REG(hw, E1000_EERD) >> + E1000_NVM_RW_REG_DATA); + } + + if (ret_val) + DEBUGOUT1("NVM read error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_write_nvm_spi - Write to EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using SPI interface. + * + * If e1000_update_nvm_checksum is not called after this function , the + * EEPROM will most likely contain an invalid checksum. + **/ +s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val = -E1000_ERR_NVM; + u16 widx = 0; + + DEBUGFUNC("e1000_write_nvm_spi"); + + /* A check for invalid values: offset too large, too many words, + * and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + while (widx < words) { + u8 write_opcode = NVM_WRITE_OPCODE_SPI; + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release(hw); + return ret_val; + } + + e1000_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + + e1000_standby_nvm(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((nvm->address_bits == 8) && (offset >= 128)) + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to whole page write of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { + e1000_standby_nvm(hw); + break; + } + } + msec_delay(10); + nvm->ops.release(hw); + } + + return ret_val; +} + +/** + * e1000_write_nvm_microwire - Writes EEPROM using microwire + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using microwire interface. + * + * If e1000_update_nvm_checksum is not called after this function , the + * EEPROM will most likely contain an invalid checksum. + **/ +s32 e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val; + u32 eecd; + u16 words_written = 0; + u16 widx = 0; + + DEBUGFUNC("e1000_write_nvm_microwire"); + + /* A check for invalid values: offset too large, too many words, + * and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + e1000_shift_out_eec_bits(hw, NVM_EWEN_OPCODE_MICROWIRE, + (u16)(nvm->opcode_bits + 2)); + + e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2)); + + e1000_standby_nvm(hw); + + while (words_written < words) { + e1000_shift_out_eec_bits(hw, NVM_WRITE_OPCODE_MICROWIRE, + nvm->opcode_bits); + + e1000_shift_out_eec_bits(hw, (u16)(offset + words_written), + nvm->address_bits); + + e1000_shift_out_eec_bits(hw, data[words_written], 16); + + e1000_standby_nvm(hw); + + for (widx = 0; widx < 200; widx++) { + eecd = E1000_READ_REG(hw, E1000_EECD); + if (eecd & E1000_EECD_DO) + break; + usec_delay(50); + } + + if (widx == 200) { + DEBUGOUT("NVM Write did not complete\n"); + ret_val = -E1000_ERR_NVM; + goto release; + } + + e1000_standby_nvm(hw); + + words_written++; + } + + e1000_shift_out_eec_bits(hw, NVM_EWDS_OPCODE_MICROWIRE, + (u16)(nvm->opcode_bits + 2)); + + e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2)); + +release: + nvm->ops.release(hw); + + return ret_val; +} + +/** + * e1000_read_pba_string_generic - Read device part number + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + **/ +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pba_ptr; + u16 offset; + u16 length; + + DEBUGFUNC("e1000_read_pba_string_generic"); + + if ((hw->mac.type >= e1000_i210) && + !e1000_get_flash_presence_i210(hw)) { + DEBUGOUT("Flashless no PBA string\n"); + return -E1000_ERR_NVM_PBA_SECTION; + } + + if (pba_num == NULL) { + DEBUGOUT("PBA string buffer was null\n"); + return -E1000_ERR_INVALID_ARGUMENT; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + /* if nvm_data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + DEBUGOUT("NVM PBA number is not stored as string\n"); + + /* make sure callers buffer is big enough to store the PBA */ + if (pba_num_size < E1000_PBANUM_LENGTH) { + DEBUGOUT("PBA string buffer too small\n"); + return E1000_ERR_NO_SPACE; + } + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (nvm_data >> 12) & 0xF; + pba_num[1] = (nvm_data >> 8) & 0xF; + pba_num[2] = (nvm_data >> 4) & 0xF; + pba_num[3] = nvm_data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + 
return E1000_SUCCESS; + } + + ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + DEBUGOUT("NVM PBA number section invalid length\n"); + return -E1000_ERR_NVM_PBA_SECTION; + } + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + DEBUGOUT("PBA string buffer too small\n"); + return -E1000_ERR_NO_SPACE; + } + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + pba_num[offset * 2] = (u8)(nvm_data >> 8); + pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + } + pba_num[offset * 2] = '\0'; + + return E1000_SUCCESS; +} + +/** + * e1000_read_pba_length_generic - Read device part number length + * @hw: pointer to the HW structure + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number length from the EEPROM and + * stores the value in pba_num_size. + **/ +s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pba_ptr; + u16 length; + + DEBUGFUNC("e1000_read_pba_length_generic"); + + if (pba_num_size == NULL) { + DEBUGOUT("PBA buffer size was null\n"); + return -E1000_ERR_INVALID_ARGUMENT; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + /* if data is not ptr guard the PBA must be in legacy format */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + *pba_num_size = E1000_PBANUM_LENGTH; + return E1000_SUCCESS; + } + + ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + DEBUGOUT("NVM PBA number section invalid length\n"); + return -E1000_ERR_NVM_PBA_SECTION; + } + + /* Convert from length in u16 values to u8 chars, add 1 for NULL, + * and subtract 2 because length field is included in length. + */ + *pba_num_size = ((u32)length * 2) - 1; + + return E1000_SUCCESS; +} + +/** + * e1000_read_pba_num_generic - Read device part number + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. 
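 *
 * A caller sketch for the string helpers above (the numeric variant below
 * needs only a single u32 out-parameter); the buffer size is an example
 * value only:
 *
 *      u32 len;
 *      u8 pba[32];
 *
 *      if (!e1000_read_pba_length_generic(hw, &len) && len <= sizeof(pba))
 *              (void)e1000_read_pba_string_generic(hw, pba, len);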
+ **/ +s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num) +{ + s32 ret_val; + u16 nvm_data; + + DEBUGFUNC("e1000_read_pba_num_generic"); + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } else if (nvm_data == NVM_PBA_PTR_GUARD) { + DEBUGOUT("NVM Not Supported\n"); + return -E1000_NOT_IMPLEMENTED; + } + *pba_num = (u32)(nvm_data << 16); + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + *pba_num |= nvm_data; + + return E1000_SUCCESS; +} + + +/** + * e1000_read_pba_raw + * @hw: pointer to the HW structure + * @eeprom_buf: optional pointer to EEPROM image + * @eeprom_buf_size: size of EEPROM image in words + * @max_pba_block_size: PBA block size limit + * @pba: pointer to output PBA structure + * + * Reads PBA from EEPROM image when eeprom_buf is not NULL. + * Reads PBA from physical EEPROM device when eeprom_buf is NULL. + * + **/ +s32 e1000_read_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, u16 max_pba_block_size, + struct e1000_pba *pba) +{ + s32 ret_val; + u16 pba_block_size; + + if (pba == NULL) + return -E1000_ERR_PARAM; + + if (eeprom_buf == NULL) { + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 2, + &pba->word[0]); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > NVM_PBA_OFFSET_1) { + pba->word[0] = eeprom_buf[NVM_PBA_OFFSET_0]; + pba->word[1] = eeprom_buf[NVM_PBA_OFFSET_1]; + } else { + return -E1000_ERR_PARAM; + } + } + + if (pba->word[0] == NVM_PBA_PTR_GUARD) { + if (pba->pba_block == NULL) + return -E1000_ERR_PARAM; + + ret_val = e1000_get_pba_block_size(hw, eeprom_buf, + eeprom_buf_size, + &pba_block_size); + if (ret_val) + return ret_val; + + if (pba_block_size > max_pba_block_size) + return -E1000_ERR_PARAM; + + if (eeprom_buf == NULL) { + ret_val = e1000_read_nvm(hw, pba->word[1], + pba_block_size, + pba->pba_block); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > (u32)(pba->word[1] + + pba_block_size)) { + memcpy(pba->pba_block, + &eeprom_buf[pba->word[1]], + pba_block_size * sizeof(u16)); + } else { + return -E1000_ERR_PARAM; + } + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_write_pba_raw + * @hw: pointer to the HW structure + * @eeprom_buf: optional pointer to EEPROM image + * @eeprom_buf_size: size of EEPROM image in words + * @pba: pointer to PBA structure + * + * Writes PBA to EEPROM image when eeprom_buf is not NULL. + * Writes PBA to physical EEPROM device when eeprom_buf is NULL. 
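 *
 * The raw read/write helpers are symmetric; a read-side sketch (32 words is
 * an arbitrary example limit, a NULL eeprom_buf selects the physical
 * device):
 *
 *      u16 block[32];
 *      struct e1000_pba pba;
 *
 *      pba.pba_block = block;
 *      if (e1000_read_pba_raw(hw, NULL, 0, 32, &pba) == E1000_SUCCESS) {
 *              /* pba.word[] holds the two PBA words; in the pointer format
 *               * block[] now also holds the PBA block itself */
 *      }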
+ * + **/ +s32 e1000_write_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, struct e1000_pba *pba) +{ + s32 ret_val; + + if (pba == NULL) + return -E1000_ERR_PARAM; + + if (eeprom_buf == NULL) { + ret_val = e1000_write_nvm(hw, NVM_PBA_OFFSET_0, 2, + &pba->word[0]); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > NVM_PBA_OFFSET_1) { + eeprom_buf[NVM_PBA_OFFSET_0] = pba->word[0]; + eeprom_buf[NVM_PBA_OFFSET_1] = pba->word[1]; + } else { + return -E1000_ERR_PARAM; + } + } + + if (pba->word[0] == NVM_PBA_PTR_GUARD) { + if (pba->pba_block == NULL) + return -E1000_ERR_PARAM; + + if (eeprom_buf == NULL) { + ret_val = e1000_write_nvm(hw, pba->word[1], + pba->pba_block[0], + pba->pba_block); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > (u32)(pba->word[1] + + pba->pba_block[0])) { + memcpy(&eeprom_buf[pba->word[1]], + pba->pba_block, + pba->pba_block[0] * sizeof(u16)); + } else { + return -E1000_ERR_PARAM; + } + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_get_pba_block_size + * @hw: pointer to the HW structure + * @eeprom_buf: optional pointer to EEPROM image + * @eeprom_buf_size: size of EEPROM image in words + * @pba_data_size: pointer to output variable + * + * Returns the size of the PBA block in words. Function operates on EEPROM + * image if the eeprom_buf pointer is not NULL otherwise it accesses physical + * EEPROM device. + * + **/ +s32 e1000_get_pba_block_size(struct e1000_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, u16 *pba_block_size) +{ + s32 ret_val; + u16 pba_word[2]; + u16 length; + + DEBUGFUNC("e1000_get_pba_block_size"); + + if (eeprom_buf == NULL) { + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 2, &pba_word[0]); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > NVM_PBA_OFFSET_1) { + pba_word[0] = eeprom_buf[NVM_PBA_OFFSET_0]; + pba_word[1] = eeprom_buf[NVM_PBA_OFFSET_1]; + } else { + return -E1000_ERR_PARAM; + } + } + + if (pba_word[0] == NVM_PBA_PTR_GUARD) { + if (eeprom_buf == NULL) { + ret_val = e1000_read_nvm(hw, pba_word[1] + 0, 1, + &length); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > pba_word[1]) + length = eeprom_buf[pba_word[1] + 0]; + else + return -E1000_ERR_PARAM; + } + + if (length == 0xFFFF || length == 0) + return -E1000_ERR_NVM_PBA_SECTION; + } else { + /* PBA number in legacy format, there is no PBA Block. */ + length = 0; + } + + if (pba_block_size != NULL) + *pba_block_size = length; + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr_generic - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. 
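 *
 * Usage sketch:
 *
 *      if (e1000_read_mac_addr_generic(hw) == E1000_SUCCESS) {
 *              /* hw->mac.perm_addr[] now holds the address read from
 *               * RAL(0)/RAH(0) and hw->mac.addr[] the active copy */
 *      }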
+ **/ +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = E1000_READ_REG(hw, E1000_RAH(0)); + rar_low = E1000_READ_REG(hw, E1000_RAL(0)); + + for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); + + for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); + + for (i = 0; i < ETH_ADDR_LEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return E1000_SUCCESS; +} + +/** + * e1000_validate_nvm_checksum_generic - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_validate_nvm_checksum_generic"); + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + DEBUGOUT("NVM Checksum Invalid\n"); + return -E1000_ERR_NVM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_update_nvm_checksum_generic - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_update_nvm_checksum"); + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error while updating checksum.\n"); + return ret_val; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + DEBUGOUT("NVM Write Error while updating checksum.\n"); + + return ret_val; +} + +/** + * e1000_reload_nvm_generic - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. 
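 *
 * The checksum helpers above bracket any word update; the usual flow is
 * (the offset 0x20 and the value are placeholders for the example):
 *
 *      u16 word = 0x1234;
 *
 *      if (hw->nvm.ops.write(hw, 0x20, 1, &word) == E1000_SUCCESS)
 *              (void)e1000_update_nvm_checksum_generic(hw);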
+ **/ +STATIC void e1000_reload_nvm_generic(struct e1000_hw *hw) +{ + u32 ctrl_ext; + + DEBUGFUNC("e1000_reload_nvm_generic"); + + usec_delay(10); + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_get_fw_version - Get firmware version information + * @hw: pointer to the HW structure + * @fw_vers: pointer to output version structure + * + * unsupported/not present features return 0 in version structure + **/ +void e1000_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) +{ + u16 eeprom_verh, eeprom_verl, etrack_test, fw_version; + u8 q, hval, rem, result; + u16 comb_verh, comb_verl, comb_offset; + + memset(fw_vers, 0, sizeof(struct e1000_fw_version)); + + /* basic eeprom version numbers, bits used vary by part and by tool + * used to create the nvm images */ + /* Check which data format we have */ + switch (hw->mac.type) { + case e1000_i211: + e1000_read_invm_version(hw, fw_vers); + return; + case e1000_82575: + case e1000_82576: + case e1000_82580: + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + /* Use this format, unless EETRACK ID exists, + * then use alternate format + */ + if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK); + goto etrack_id; + } + break; + case e1000_i210: + if (!(e1000_get_flash_presence_i210(hw))) { + e1000_read_invm_version(hw, fw_vers); + return; + } + /* fall through */ + case e1000_i350: + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + /* find combo image version */ + hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); + if ((comb_offset != 0x0) && + (comb_offset != NVM_VER_INVALID)) { + + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset + + 1), 1, &comb_verh); + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset), + 1, &comb_verl); + + /* get Option Rom version if it exists and is valid */ + if ((comb_verh && comb_verl) && + ((comb_verh != NVM_VER_INVALID) && + (comb_verl != NVM_VER_INVALID))) { + + fw_vers->or_valid = true; + fw_vers->or_major = + comb_verl >> NVM_COMB_VER_SHFT; + fw_vers->or_build = + (comb_verl << NVM_COMB_VER_SHFT) + | (comb_verh >> NVM_COMB_VER_SHFT); + fw_vers->or_patch = + comb_verh & NVM_COMB_VER_MASK; + } + } + break; + default: + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + return; + } + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + + /* check for old style version format in newer images*/ + if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) { + eeprom_verl = (fw_version & NVM_COMB_VER_MASK); + } else { + eeprom_verl = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + } + /* Convert minor value to hex before assigning to output struct + * Val to be converted will not be higher than 99, per tool output + */ + q = eeprom_verl / NVM_HEX_CONV; + hval = q * NVM_HEX_TENS; + rem = eeprom_verl % NVM_HEX_CONV; + result = hval + rem; + fw_vers->eep_minor = result; + +etrack_id: + if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl); + hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh); + fw_vers->etrack_id = (eeprom_verh << 
NVM_ETRACK_SHIFT) + | eeprom_verl; + } else if ((etrack_test & NVM_ETRACK_VALID) == 0) { + hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh); + hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl); + fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | + eeprom_verl; + } +} + + diff --git a/drivers/net/e1000/base/e1000_nvm.h b/drivers/net/e1000/base/e1000_nvm.h new file mode 100644 index 00000000..c400dc3a --- /dev/null +++ b/drivers/net/e1000/base/e1000_nvm.h @@ -0,0 +1,98 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
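The e1000_fw_version structure declared just below is filled in by e1000_get_fw_version() above; a consumption sketch (the helper name and the printf are illustrative only; fields the hardware cannot report are left at zero by the memset in e1000_get_fw_version()):

    static void show_fw_version(struct e1000_hw *hw)
    {
            struct e1000_fw_version fw;

            e1000_get_fw_version(hw, &fw);
            printf("EEPROM %u.%u, eTrack 0x%08x%s\n",
                   (unsigned)fw.eep_major, (unsigned)fw.eep_minor,
                   (unsigned)fw.etrack_id,
                   fw.or_valid ? " (+option ROM)" : "");
    }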
+ +***************************************************************************/ + +#ifndef _E1000_NVM_H_ +#define _E1000_NVM_H_ + +struct e1000_pba { + u16 word[2]; + u16 *pba_block; +}; + +struct e1000_fw_version { + u32 etrack_id; + u16 eep_major; + u16 eep_minor; + u16 eep_build; + + u8 invm_major; + u8 invm_minor; + u8 invm_img_type; + + bool or_valid; + u16 or_major; + u16 or_build; + u16 or_patch; +}; + + +void e1000_init_nvm_ops_generic(struct e1000_hw *hw); +s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c); +void e1000_null_nvm_generic(struct e1000_hw *hw); +s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data); +s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c); +s32 e1000_acquire_nvm_generic(struct e1000_hw *hw); + +s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw); +s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num); +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size); +s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size); +s32 e1000_read_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, u16 max_pba_block_size, + struct e1000_pba *pba); +s32 e1000_write_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, struct e1000_pba *pba); +s32 e1000_get_pba_block_size(struct e1000_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, u16 *pba_block_size); +s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data); +s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw); +s32 e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw); +void e1000_stop_nvm(struct e1000_hw *hw); +void e1000_release_nvm_generic(struct e1000_hw *hw); +void e1000_get_fw_version(struct e1000_hw *hw, + struct e1000_fw_version *fw_vers); + +#define E1000_STM_OPCODE 0xDB00 + +#endif diff --git a/drivers/net/e1000/base/e1000_osdep.c b/drivers/net/e1000/base/e1000_osdep.c new file mode 100644 index 00000000..7270edfa --- /dev/null +++ b/drivers/net/e1000/base/e1000_osdep.c @@ -0,0 +1,83 @@ +/****************************************************************************** + + Copyright (c) 2001-2014, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#include "e1000_api.h" + +/* + * NOTE: the following routines using the e1000 + * naming style are provided to the shared + * code but are OS specific + */ + +void +e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + return; +} + +void +e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + *value = 0; + return; +} + +void +e1000_pci_set_mwi(struct e1000_hw *hw) +{ +} + +void +e1000_pci_clear_mwi(struct e1000_hw *hw) +{ +} + + +/* + * Read the PCI Express capabilities + */ +int32_t +e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + return E1000_NOT_IMPLEMENTED; +} + +/* + * Write the PCI Express capabilities + */ +int32_t +e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + return E1000_NOT_IMPLEMENTED; +} diff --git a/drivers/net/e1000/base/e1000_osdep.h b/drivers/net/e1000/base/e1000_osdep.h new file mode 100644 index 00000000..47a19481 --- /dev/null +++ b/drivers/net/e1000/base/e1000_osdep.h @@ -0,0 +1,196 @@ +/****************************************************************************** + + Copyright (c) 2001-2014, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _E1000_OSDEP_H_ +#define _E1000_OSDEP_H_ + +#include <stdint.h> +#include <stdio.h> +#include <stdarg.h> +#include <string.h> +#include <rte_common.h> +#include <rte_cycles.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_byteorder.h> + +#include "../e1000_logs.h" + +#define DELAY(x) rte_delay_us(x) +#define usec_delay(x) DELAY(x) +#define usec_delay_irq(x) DELAY(x) +#define msec_delay(x) DELAY(1000*(x)) +#define msec_delay_irq(x) DELAY(1000*(x)) + +#define DEBUGFUNC(F) DEBUGOUT(F "\n"); +#define DEBUGOUT(S, args...) PMD_DRV_LOG_RAW(DEBUG, S, ##args) +#define DEBUGOUT1(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT2(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT3(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT6(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT7(S, args...) DEBUGOUT(S, ##args) + +#define UNREFERENCED_PARAMETER(_p) +#define UNREFERENCED_1PARAMETER(_p) +#define UNREFERENCED_2PARAMETER(_p, _q) +#define UNREFERENCED_3PARAMETER(_p, _q, _r) +#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) + +#define FALSE 0 +#define TRUE 1 + +#define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */ + +/* Mutex used in the shared code */ +#define E1000_MUTEX uintptr_t +#define E1000_MUTEX_INIT(mutex) (*(mutex) = 0) +#define E1000_MUTEX_LOCK(mutex) (*(mutex) = 1) +#define E1000_MUTEX_UNLOCK(mutex) (*(mutex) = 0) + +typedef uint64_t u64; +typedef uint32_t u32; +typedef uint16_t u16; +typedef uint8_t u8; +typedef int64_t s64; +typedef int32_t s32; +typedef int16_t s16; +typedef int8_t s8; +typedef int bool; + +#define __le16 u16 +#define __le32 u32 +#define __le64 u64 + +#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS) + +#define E1000_PCI_REG(reg) (*((volatile uint32_t *)(reg))) + +#define E1000_PCI_REG16(reg) (*((volatile uint16_t *)(reg))) + +#define E1000_PCI_REG_WRITE(reg, value) do { \ + E1000_PCI_REG((reg)) = (rte_cpu_to_le_32(value)); \ +} while (0) + +#define E1000_PCI_REG_WRITE16(reg, value) do { \ + E1000_PCI_REG16((reg)) = (rte_cpu_to_le_16(value)); \ +} while (0) + +#define E1000_PCI_REG_ADDR(hw, reg) \ + ((volatile uint32_t *)((char *)(hw)->hw_addr + (reg))) + +#define E1000_PCI_REG_ARRAY_ADDR(hw, reg, index) \ + E1000_PCI_REG_ADDR((hw), (reg) + ((index) << 2)) + +#define E1000_PCI_REG_FLASH_ADDR(hw, reg) \ + ((volatile uint32_t *)((char *)(hw)->flash_address + (reg))) + +static inline uint32_t e1000_read_addr(volatile void *addr) +{ + return rte_le_to_cpu_32(E1000_PCI_REG(addr)); +} + +static inline uint16_t e1000_read_addr16(volatile void *addr) +{ + return rte_le_to_cpu_16(E1000_PCI_REG16(addr)); +} + +/* Necessary defines */ +#define E1000_MRQC_ENABLE_MASK 0x00000007 +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define E1000_ALL_FULL_DUPLEX ( \ + ADVERTISE_10_FULL | ADVERTISE_100_FULL | ADVERTISE_1000_FULL) + +#define M88E1543_E_PHY_ID 0x01410EA0 +#define ULP_SUPPORT + +#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 + +/* Register READ/WRITE macros */ + +#define E1000_READ_REG(hw, reg) \ + e1000_read_addr(E1000_PCI_REG_ADDR((hw), (reg))) + +#define E1000_WRITE_REG(hw, reg, value) \ + E1000_PCI_REG_WRITE(E1000_PCI_REG_ADDR((hw), (reg)), (value)) + +#define E1000_READ_REG_ARRAY(hw, reg, index) \ + E1000_PCI_REG(E1000_PCI_REG_ARRAY_ADDR((hw), (reg), (index))) + +#define E1000_WRITE_REG_ARRAY(hw, reg, index, value) \ + E1000_PCI_REG_WRITE(E1000_PCI_REG_ARRAY_ADDR((hw), (reg), 
(index)), (value)) + +#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY +#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY + +#define E1000_ACCESS_PANIC(x, hw, reg, value) \ + rte_panic("%s:%u\t" RTE_STR(x) "(%p, 0x%x, 0x%x)", \ + __FILE__, __LINE__, (hw), (reg), (unsigned int)(value)) + +/* + * To be able to do IO write, we need to map IO BAR + * (bar 2/4 depending on device). + * Right now mapping multiple BARs is not supported by DPDK. + * Fortunatelly we need it only for legacy hw support. + */ + +#define E1000_WRITE_REG_IO(hw, reg, value) \ + E1000_WRITE_REG(hw, reg, value) + +/* + * Tested on I217/I218 chipset. + */ + +#define E1000_READ_FLASH_REG(hw, reg) \ + e1000_read_addr(E1000_PCI_REG_FLASH_ADDR((hw), (reg))) + +#define E1000_READ_FLASH_REG16(hw, reg) \ + e1000_read_addr16(E1000_PCI_REG_FLASH_ADDR((hw), (reg))) + +#define E1000_WRITE_FLASH_REG(hw, reg, value) \ + E1000_PCI_REG_WRITE(E1000_PCI_REG_FLASH_ADDR((hw), (reg)), (value)) + +#define E1000_WRITE_FLASH_REG16(hw, reg, value) \ + E1000_PCI_REG_WRITE16(E1000_PCI_REG_FLASH_ADDR((hw), (reg)), (value)) + +#define STATIC static + +#ifndef ETH_ADDR_LEN +#define ETH_ADDR_LEN 6 +#endif + +#define false FALSE +#define true TRUE + +#endif /* _E1000_OSDEP_H_ */ diff --git a/drivers/net/e1000/base/e1000_phy.c b/drivers/net/e1000/base/e1000_phy.c new file mode 100644 index 00000000..d43b7ce0 --- /dev/null +++ b/drivers/net/e1000/base/e1000_phy.c @@ -0,0 +1,4258 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
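The register wrappers defined in e1000_osdep.h above are what every E1000_READ_REG/E1000_WRITE_REG in the shared code expands to; a read-modify-write sketch (the helper name is illustrative, while the register and bit are the same ones e1000_reload_nvm_generic() uses):

    static void request_eeprom_reload(struct e1000_hw *hw)
    {
            u32 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);

            ctrl_ext |= E1000_CTRL_EXT_EE_RST;
            E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
            E1000_WRITE_FLUSH(hw);  /* flush the posted write with a STATUS read */
    }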
+ +***************************************************************************/ + +#include "e1000_api.h" + +STATIC s32 e1000_wait_autoneg(struct e1000_hw *hw); +STATIC s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, + u16 *data, bool read, bool page_set); +STATIC u32 e1000_get_phy_addr_for_hv_page(u32 page); +STATIC s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, + u16 *data, bool read); + +/* Cable length tables */ +STATIC const u16 e1000_m88_cable_length_table[] = { + 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; +#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(e1000_m88_cable_length_table) / \ + sizeof(e1000_m88_cable_length_table[0])) + +STATIC const u16 e1000_igp_2_cable_length_table[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, + 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, + 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, + 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, + 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, + 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, + 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, + 124}; +#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(e1000_igp_2_cable_length_table) / \ + sizeof(e1000_igp_2_cable_length_table[0])) + +/** + * e1000_init_phy_ops_generic - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Setups up the function pointers to no-op functions + **/ +void e1000_init_phy_ops_generic(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + DEBUGFUNC("e1000_init_phy_ops_generic"); + + /* Initialize function pointers */ + phy->ops.init_params = e1000_null_ops_generic; + phy->ops.acquire = e1000_null_ops_generic; + phy->ops.check_polarity = e1000_null_ops_generic; + phy->ops.check_reset_block = e1000_null_ops_generic; + phy->ops.commit = e1000_null_ops_generic; + phy->ops.force_speed_duplex = e1000_null_ops_generic; + phy->ops.get_cfg_done = e1000_null_ops_generic; + phy->ops.get_cable_length = e1000_null_ops_generic; + phy->ops.get_info = e1000_null_ops_generic; + phy->ops.set_page = e1000_null_set_page; + phy->ops.read_reg = e1000_null_read_reg; + phy->ops.read_reg_locked = e1000_null_read_reg; + phy->ops.read_reg_page = e1000_null_read_reg; + phy->ops.release = e1000_null_phy_generic; + phy->ops.reset = e1000_null_ops_generic; + phy->ops.set_d0_lplu_state = e1000_null_lplu_state; + phy->ops.set_d3_lplu_state = e1000_null_lplu_state; + phy->ops.write_reg = e1000_null_write_reg; + phy->ops.write_reg_locked = e1000_null_write_reg; + phy->ops.write_reg_page = e1000_null_write_reg; + phy->ops.power_up = e1000_null_phy_generic; + phy->ops.power_down = e1000_null_phy_generic; + phy->ops.read_i2c_byte = e1000_read_i2c_byte_null; + phy->ops.write_i2c_byte = e1000_write_i2c_byte_null; + phy->ops.cfg_on_link_up = e1000_null_ops_generic; +} + +/** + * e1000_null_set_page - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_set_page(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG data) +{ + DEBUGFUNC("e1000_null_set_page"); + UNREFERENCED_2PARAMETER(hw, data); + return E1000_SUCCESS; +} + +/** + * e1000_null_read_reg - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_read_reg(struct e1000_hw E1000_UNUSEDARG *hw, + u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG *data) +{ + DEBUGFUNC("e1000_null_read_reg"); + UNREFERENCED_3PARAMETER(hw, 
offset, data); + return E1000_SUCCESS; +} + +/** + * e1000_null_phy_generic - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_phy_generic(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_null_phy_generic"); + UNREFERENCED_1PARAMETER(hw); + return; +} + +/** + * e1000_null_lplu_state - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_lplu_state(struct e1000_hw E1000_UNUSEDARG *hw, + bool E1000_UNUSEDARG active) +{ + DEBUGFUNC("e1000_null_lplu_state"); + UNREFERENCED_2PARAMETER(hw, active); + return E1000_SUCCESS; +} + +/** + * e1000_null_write_reg - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_write_reg(struct e1000_hw E1000_UNUSEDARG *hw, + u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG data) +{ + DEBUGFUNC("e1000_null_write_reg"); + UNREFERENCED_3PARAMETER(hw, offset, data); + return E1000_SUCCESS; +} + +/** + * e1000_read_i2c_byte_null - No-op function, return 0 + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: data value read + * + **/ +s32 e1000_read_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw, + u8 E1000_UNUSEDARG byte_offset, + u8 E1000_UNUSEDARG dev_addr, + u8 E1000_UNUSEDARG *data) +{ + DEBUGFUNC("e1000_read_i2c_byte_null"); + UNREFERENCED_4PARAMETER(hw, byte_offset, dev_addr, data); + return E1000_SUCCESS; +} + +/** + * e1000_write_i2c_byte_null - No-op function, return 0 + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: data value to write + * + **/ +s32 e1000_write_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw, + u8 E1000_UNUSEDARG byte_offset, + u8 E1000_UNUSEDARG dev_addr, + u8 E1000_UNUSEDARG data) +{ + DEBUGFUNC("e1000_write_i2c_byte_null"); + UNREFERENCED_4PARAMETER(hw, byte_offset, dev_addr, data); + return E1000_SUCCESS; +} + +/** + * e1000_check_reset_block_generic - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return E1000_SUCCESS, otherwise + * return E1000_BLK_PHY_RESET (12). + **/ +s32 e1000_check_reset_block_generic(struct e1000_hw *hw) +{ + u32 manc; + + DEBUGFUNC("e1000_check_reset_block"); + + manc = E1000_READ_REG(hw, E1000_MANC); + + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? + E1000_BLK_PHY_RESET : E1000_SUCCESS; +} + +/** + * e1000_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. 
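 *
 * Caller sketch (the PHY ID compared against is just an example, reusing a
 * constant already defined in e1000_osdep.h):
 *
 *      if (e1000_get_phy_id(hw) == E1000_SUCCESS &&
 *          hw->phy.id == M88E1543_E_PHY_ID) {
 *              /* Marvell 88E1543 attached; hw->phy.revision is valid too */
 *      }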
+ **/ +s32 e1000_get_phy_id(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 phy_id; + u16 retry_count = 0; + + DEBUGFUNC("e1000_get_phy_id"); + + if (!phy->ops.read_reg) + return E1000_SUCCESS; + + while (retry_count < 2) { + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + return ret_val; + + phy->id = (u32)(phy_id << 16); + usec_delay(20); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + return ret_val; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + + if (phy->id != 0 && phy->id != PHY_REVISION_MASK) + return E1000_SUCCESS; + + retry_count++; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_reset_dsp_generic - Reset PHY DSP + * @hw: pointer to the HW structure + * + * Reset the digital signal processor. + **/ +s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_phy_reset_dsp_generic"); + + if (!hw->phy.ops.write_reg) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); + if (ret_val) + return ret_val; + + return hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); +} + +/** + * e1000_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + + DEBUGFUNC("e1000_read_phy_reg_mdic"); + + if (offset > MAX_PHY_REG_ADDRESS) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = ((offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + E1000_WRITE_REG(hw, E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + usec_delay_irq(50); + mdic = E1000_READ_REG(hw, E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + DEBUGOUT("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + DEBUGOUT("MDI Error\n"); + return -E1000_ERR_PHY; + } + if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { + DEBUGOUT2("MDI Read offset error - requested %d, returned %d\n", + offset, + (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); + return -E1000_ERR_PHY; + } + *data = (u16) mdic; + + /* Allow some time after each MDIC transaction to avoid + * reading duplicate data in the next MDIC transaction. + */ + if (hw->mac.type == e1000_pch2lan) + usec_delay_irq(100); + + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. 
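+ *
+ * Illustrative read-modify-write sketch (editorial example, not part of
+ * this driver); the MDIC helpers do not take the PHY semaphore, so any
+ * required locking is assumed to be handled by the caller:
+ *
+ *    u16 phy_ctrl;
+ *
+ *    ret_val = e1000_read_phy_reg_mdic(hw, PHY_CONTROL, &phy_ctrl);
+ *    if (!ret_val)
+ *        ret_val = e1000_write_phy_reg_mdic(hw, PHY_CONTROL,
+ *                                           phy_ctrl |
+ *                                           MII_CR_RESTART_AUTO_NEG);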
+ **/ +s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + + DEBUGFUNC("e1000_write_phy_reg_mdic"); + + if (offset > MAX_PHY_REG_ADDRESS) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = (((u32)data) | + (offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + E1000_WRITE_REG(hw, E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + usec_delay_irq(50); + mdic = E1000_READ_REG(hw, E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + DEBUGOUT("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + DEBUGOUT("MDI Error\n"); + return -E1000_ERR_PHY; + } + if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { + DEBUGOUT2("MDI Write offset error - requested %d, returned %d\n", + offset, + (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); + return -E1000_ERR_PHY; + } + + /* Allow some time after each MDIC transaction to avoid + * reading duplicate data in the next MDIC transaction. + */ + if (hw->mac.type == e1000_pch2lan) + usec_delay_irq(100); + + return E1000_SUCCESS; +} + +/** + * e1000_read_phy_reg_i2c - Read PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the i2c interface and stores the + * retrieved information in data. + **/ +s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + + DEBUGFUNC("e1000_read_phy_reg_i2c"); + + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + (E1000_I2CCMD_OPCODE_READ)); + + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + /* Need to byte-swap the 16-bit value. */ + *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00); + + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg_i2c - Write PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the i2c interface. 
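+ *
+ * Editorial note: the 16-bit value is byte-swapped before it is placed in
+ * the I2CCMD data field, so, for example, writing 0x1234 places 0x3412 in
+ * the low 16 bits of I2CCMD, and e1000_read_phy_reg_i2c() swaps it back on
+ * the read path. The call below is an illustrative sketch only, with
+ * phy_data standing in for the caller's value; phy->addr must already hold
+ * a valid I2C PHY address (1-7) or -E1000_ERR_CONFIG is returned:
+ *
+ *    ret_val = e1000_write_phy_reg_i2c(hw, PHY_CONTROL, phy_data);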
+ **/ +s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + u16 phy_data_swapped; + + DEBUGFUNC("e1000_write_phy_reg_i2c"); + + /* Prevent overwritting SFP I2C EEPROM which is at A0 address.*/ + if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) { + DEBUGOUT1("PHY I2C Address %d is out of range.\n", + hw->phy.addr); + return -E1000_ERR_CONFIG; + } + + /* Swap the data bytes for the I2C interface */ + phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); + + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_WRITE | + phy_data_swapped); + + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_sfp_data_byte - Reads SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to be read + * @data: read data buffer pointer + * + * Reads one byte from SFP module data stored + * in SFP resided EEPROM memory or SFP diagnostic area. + * Function should be called with + * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access + * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters + * access + **/ +s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + DEBUGFUNC("e1000_read_sfp_data_byte"); + + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { + DEBUGOUT("I2CCMD command address exceeds upper limit\n"); + return -E1000_ERR_PHY; + } + + /* Set up Op-code, EEPROM Address,in the I2CCMD + * register. The MAC will take care of interfacing with the + * EEPROM to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + data_local = E1000_READ_REG(hw, E1000_I2CCMD); + if (data_local & E1000_I2CCMD_READY) + break; + } + if (!(data_local & E1000_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (data_local & E1000_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + *data = (u8) data_local & 0xFF; + + return E1000_SUCCESS; +} + +/** + * e1000_write_sfp_data_byte - Writes SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to write to + * @data: data to write + * + * Writes one byte to SFP module data stored + * in SFP resided EEPROM memory or SFP diagnostic area. 
+ * Function should be called with + * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access + * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters + * access + **/ +s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + DEBUGFUNC("e1000_write_sfp_data_byte"); + + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { + DEBUGOUT("I2CCMD command address exceeds upper limit\n"); + return -E1000_ERR_PHY; + } + /* The programming interface is 16 bits wide + * so we need to read the whole word first + * then update appropriate byte lane and write + * the updated word back. + */ + /* Set up Op-code, EEPROM Address,in the I2CCMD + * register. The MAC will take care of interfacing + * with an EEPROM to write the data given. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + /* Set a command to read single word */ + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + /* Poll the ready bit to see if lastly + * launched I2C operation completed + */ + i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) { + /* Check if this is READ or WRITE phase */ + if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) == + E1000_I2CCMD_OPCODE_READ) { + /* Write the selected byte + * lane and update whole word + */ + data_local = i2ccmd & 0xFF00; + data_local |= data; + i2ccmd = ((offset << + E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_WRITE | data_local); + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + } else { + break; + } + } + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + return E1000_SUCCESS; +} + +/** + * e1000_read_phy_reg_m88 - Read m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_read_phy_reg_m88"); + + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_write_phy_reg_m88 - Write m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
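+ *
+ * Illustrative read-modify-write sketch (editorial example, not part of
+ * this driver); unlike the raw MDIC helpers, the m88 accessors acquire
+ * and release the semaphore themselves:
+ *
+ *    u16 spec_ctrl;
+ *
+ *    ret_val = e1000_read_phy_reg_m88(hw, M88E1000_PHY_SPEC_CTRL,
+ *                                     &spec_ctrl);
+ *    if (!ret_val)
+ *        ret_val = e1000_write_phy_reg_m88(hw, M88E1000_PHY_SPEC_CTRL,
+ *                                          spec_ctrl |
+ *                                          M88E1000_PSCR_ASSERT_CRS_ON_TX);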
+ **/ +s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_write_phy_reg_m88"); + + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_set_page_igp - Set page as on IGP-like PHY(s) + * @hw: pointer to the HW structure + * @page: page to set (shifted left when necessary) + * + * Sets PHY page required for PHY register access. Assumes semaphore is + * already acquired. Note, this function sets phy.addr to 1 so the caller + * must set it appropriately (if necessary) after this function returns. + **/ +s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page) +{ + DEBUGFUNC("e1000_set_page_igp"); + + DEBUGOUT1("Setting page 0x%x\n", page); + + hw->phy.addr = 1; + + return e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page); +} + +/** + * __e1000_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and stores the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +STATIC s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("__e1000_read_phy_reg_igp"); + + if (!locked) { + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + if (offset > MAX_PHY_MULTI_PAGE_REG) + ret_val = e1000_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (!ret_val) + ret_val = e1000_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset and stores the + * retrieved information in data. + * Release the acquired semaphore before exiting. + **/ +s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_igp(hw, offset, data, false); +} + +/** + * e1000_read_phy_reg_igp_locked - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired. + **/ +s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_igp(hw, offset, data, true); +} + +/** + * e1000_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
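+ *
+ * Editorial sketch of the locked/unlocked relationship (illustration
+ * only): the unlocked wrapper behaves roughly like bracketing the
+ * _locked variant with the acquire/release ops,
+ *
+ *    ret_val = hw->phy.ops.acquire(hw);
+ *    if (!ret_val) {
+ *        ret_val = e1000_write_phy_reg_igp_locked(hw, offset, data);
+ *        hw->phy.ops.release(hw);
+ *    }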
+ **/ +STATIC s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_phy_reg_igp"); + + if (!locked) { + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + if (offset > MAX_PHY_MULTI_PAGE_REG) + ret_val = e1000_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (!ret_val) + ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & + offset, + data); + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_igp(hw, offset, data, false); +} + +/** + * e1000_write_phy_reg_igp_locked - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. + * Assumes semaphore already acquired. + **/ +s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_igp(hw, offset, data, true); +} + +/** + * __e1000_read_kmrn_reg - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary. Then reads the PHY register at offset + * using the kumeran interface. The information retrieved is stored in data. + * Release any acquired semaphores before exiting. + **/ +STATIC s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) +{ + u32 kmrnctrlsta; + + DEBUGFUNC("__e1000_read_kmrn_reg"); + + if (!locked) { + s32 ret_val = E1000_SUCCESS; + + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; + E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); + E1000_WRITE_FLUSH(hw); + + usec_delay(2); + + kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA); + *data = (u16)kmrnctrlsta; + + if (!locked) + hw->phy.ops.release(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_read_kmrn_reg_generic - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset using the + * kumeran interface. The information retrieved is stored in data. + * Release the acquired semaphore before exiting. + **/ +s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, false); +} + +/** + * e1000_read_kmrn_reg_locked - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the kumeran interface. The + * information retrieved is stored in data. + * Assumes semaphore already acquired. 
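+ *
+ * Illustrative sketch (editorial example): several kumeran accesses can be
+ * grouped under one semaphore acquisition, with kmrn_offset standing in
+ * for a caller-chosen register offset:
+ *
+ *    u16 kmrn_data;
+ *
+ *    ret_val = hw->phy.ops.acquire(hw);
+ *    if (!ret_val) {
+ *        ret_val = e1000_read_kmrn_reg_locked(hw, kmrn_offset, &kmrn_data);
+ *        if (!ret_val)
+ *            ret_val = e1000_write_kmrn_reg_locked(hw, kmrn_offset,
+ *                                                  kmrn_data);
+ *        hw->phy.ops.release(hw);
+ *    }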
+ **/ +s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, true); +} + +/** + * __e1000_write_kmrn_reg - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary. Then write the data to PHY register + * at the offset using the kumeran interface. Release any acquired semaphores + * before exiting. + **/ +STATIC s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) +{ + u32 kmrnctrlsta; + + DEBUGFUNC("e1000_write_kmrn_reg_generic"); + + if (!locked) { + s32 ret_val = E1000_SUCCESS; + + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | data; + E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); + E1000_WRITE_FLUSH(hw); + + usec_delay(2); + + if (!locked) + hw->phy.ops.release(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_write_kmrn_reg_generic - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to the PHY register at the offset + * using the kumeran interface. Release the acquired semaphore before exiting. + **/ +s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, false); +} + +/** + * e1000_write_kmrn_reg_locked - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Write the data to PHY register at the offset using the kumeran interface. + * Assumes semaphore already acquired. + **/ +s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, true); +} + +/** + * e1000_set_master_slave_mode - Setup PHY for Master/slave mode + * @hw: pointer to the HW structure + * + * Sets up Master/slave mode + **/ +STATIC s32 e1000_set_master_slave_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Resolve Master/Slave mode */ + ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (hw->phy.ms_type) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + /* fall-through */ + default: + break; + } + + return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data); +} + +/** + * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link + * @hw: pointer to the HW structure + * + * Sets up Carrier-sense on Transmit and downshift values. 
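+ *
+ * Illustrative sketch of a hypothetical setup path (editorial example):
+ * with hw->phy.mdix set to 0 the routine requests automatic MDI/MDI-X,
+ * and link bring-up is then left to the generic copper setup:
+ *
+ *    hw->phy.mdix = 0;
+ *    ret_val = e1000_copper_link_setup_82577(hw);
+ *    if (!ret_val)
+ *        ret_val = e1000_setup_copper_link_generic(hw);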
+ **/ +s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + DEBUGFUNC("e1000_copper_link_setup_82577"); + + if (hw->phy.type == e1000_phy_82580) { + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + DEBUGOUT("Error resetting the PHY.\n"); + return ret_val; + } + } + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; + + /* Enable downshift */ + phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; + + ret_val = hw->phy.ops.write_reg(hw, I82577_CFG_REG, phy_data); + if (ret_val) + return ret_val; + + /* Set MDI/MDIX mode */ + ret_val = hw->phy.ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK; + /* Options: + * 0 - Auto (default) + * 1 - MDI mode + * 2 - MDI-X mode + */ + switch (hw->phy.mdix) { + case 1: + break; + case 2: + phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX; + break; + case 0: + default: + phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX; + break; + } + ret_val = hw->phy.ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data); + if (ret_val) + return ret_val; + + return e1000_set_master_slave_mode(hw); +} + +/** + * e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + **/ +s32 e1000_copper_link_setup_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + DEBUGFUNC("e1000_copper_link_setup_m88"); + + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* For BM PHY this bit is downshift enable */ + if (phy->type != e1000_phy_bm) + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + /* Enable downshift on BM (disabled by default) */ + if (phy->type == e1000_phy_bm) { + /* For 82574/82583, first disable then enable downshift */ + if (phy->id == BME1000_E_PHY_ID_R2) { + phy_data &= ~BME1000_PSCR_ENABLE_DOWNSHIFT; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + /* Commit the changes. 
*/ + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + } + + phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; + } + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if ((phy->type == e1000_phy_m88) && + (phy->revision < E1000_REVISION_4) && + (phy->id != BME1000_E_PHY_ID_R2)) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((phy->revision == E1000_REVISION_2) && + (phy->id == M88E1111_I_PHY_ID)) { + /* 82573L PHY - set the downshift counter to 5x. */ + phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } + + if ((phy->type == e1000_phy_bm) && (phy->id == BME1000_E_PHY_ID_R2)) { + /* Set PHY page 0, register 29 to 0x0003 */ + ret_val = phy->ops.write_reg(hw, 29, 0x0003); + if (ret_val) + return ret_val; + + /* Set PHY page 0, register 30 to 0x0000 */ + ret_val = phy->ops.write_reg(hw, 30, 0x0000); + if (ret_val) + return ret_val; + } + + /* Commit the changes. */ + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + + if (phy->type == e1000_phy_82578) { + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + /* 82578 PHY - set the downshift count to 1x. */ + phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; + phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's. + * Also enables and sets the downshift parameters. + **/ +s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + DEBUGFUNC("e1000_copper_link_setup_m88_gen2"); + + + /* Enable CRS on Tx. This must be set for half-duplex operation. 
*/ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + /* M88E1112 does not support this mode) */ + if (phy->id != M88E1112_E_PHY_ID) { + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + } + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + /* Enable downshift and setting it to X6 */ + if (phy->id == M88E1543_E_PHY_ID) { + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE; + ret_val = + phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + } + + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK; + phy_data |= I347AT4_PSCR_DOWNSHIFT_6X; + phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Commit the changes. */ + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + + ret_val = e1000_set_master_slave_mode(hw); + if (ret_val) + return ret_val; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. + **/ +s32 e1000_copper_link_setup_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_copper_link_setup_igp"); + + + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + DEBUGOUT("Error resetting the PHY.\n"); + return ret_val; + } + + /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid + * timeout issues when LFS is enabled. + */ + msec_delay(100); + + /* The NVM settings will configure LPLU in D3 for + * non-IGP1 PHYs. 
+ */ + if (phy->type == e1000_phy_igp) { + /* disable lplu d3 during driver init */ + ret_val = hw->phy.ops.set_d3_lplu_state(hw, false); + if (ret_val) { + DEBUGOUT("Error Disabling LPLU D3\n"); + return ret_val; + } + } + + /* disable lplu d0 during driver init */ + if (hw->phy.ops.set_d0_lplu_state) { + ret_val = hw->phy.ops.set_d0_lplu_state(hw, false); + if (ret_val) { + DEBUGOUT("Error Disabling LPLU D0\n"); + return ret_val; + } + } + /* Configure mdi-mdix settings */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (phy->mdix) { + case 1: + data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. + */ + if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + + /* Set auto Master/Slave resolution process */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~CR_1000T_MS_ENABLE; + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_set_master_slave_mode(hw); + } + + return ret_val; +} + +/** + * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + **/ +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + + DEBUGFUNC("e1000_phy_setup_autoneg"); + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). 
+ */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + DEBUGOUT("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + DEBUGOUT("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + DEBUGOUT("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + DEBUGOUT("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + DEBUGOUT("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + DEBUGOUT("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_rx_pause: + /* Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of Rx Pause ONLY, we will advertise that we + * support both symmetric and asymmetric Rx PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case e1000_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. 
+ */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + + return ret_val; +} + +/** + * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + **/ +s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_ctrl; + + DEBUGFUNC("e1000_copper_link_autoneg"); + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (!phy->autoneg_advertised) + phy->autoneg_advertised = phy->autoneg_mask; + + DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + DEBUGOUT("Error Setting up Auto-Negotiation\n"); + return ret_val; + } + DEBUGOUT("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + return ret_val; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + DEBUGOUT("Error while waiting for autoneg to complete\n"); + return ret_val; + } + } + + hw->mac.get_link_status = true; + + return ret_val; +} + +/** + * e1000_setup_copper_link_generic - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +s32 e1000_setup_copper_link_generic(struct e1000_hw *hw) +{ + s32 ret_val; + bool link; + + DEBUGFUNC("e1000_setup_copper_link_generic"); + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + DEBUGOUT("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + DEBUGOUT("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. 
+ */ + ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10, + &link); + if (ret_val) + return ret_val; + + if (link) { + DEBUGOUT("Valid link established!!!\n"); + hw->mac.ops.config_collision_dist(hw); + ret_val = e1000_config_fc_after_link_up_generic(hw); + } else { + DEBUGOUT("Unable to establish link!!!\n"); + } + + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). + **/ +s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_igp"); + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + DEBUGOUT1("IGP PSCR: %X\n", phy_data); + + usec_delay(1); + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n"); + + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) + DEBUGOUT("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + } + + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Resets the PHY to commit the + * changes. If time expires while waiting for link up, we reset the DSP. + * After reset, TX_CLK and CRS on Tx must be set. Return successful upon + * successful completion, else return corresponding error code. + **/ +s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_m88"); + + /* I210 and I211 devices support Auto-Crossover in forced operation. */ + if (phy->type != e1000_phy_i210) { + /* Clear Auto-Crossover to force MDI manually. M88E1000 + * requires MDI forced whenever speed and duplex are forced. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + + DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data); + } + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Reset the phy to commit changes. 
*/ + ret_val = hw->phy.ops.commit(hw); + if (ret_val) + return ret_val; + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n"); + + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) { + bool reset_dsp = true; + + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1340M_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I210_I_PHY_ID: + reset_dsp = false; + break; + default: + if (hw->phy.type != e1000_phy_m88) + reset_dsp = false; + break; + } + + if (!reset_dsp) { + DEBUGOUT("Link taking longer than expected.\n"); + } else { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = phy->ops.write_reg(hw, + M88E1000_PHY_PAGE_SELECT, + 0x001d); + if (ret_val) + return ret_val; + ret_val = e1000_phy_reset_dsp_generic(hw); + if (ret_val) + return ret_val; + } + } + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + if (hw->phy.type != e1000_phy_m88) + return E1000_SUCCESS; + + if (hw->phy.id == I347AT4_E_PHY_ID || + hw->phy.id == M88E1340M_E_PHY_ID || + hw->phy.id == M88E1112_E_PHY_ID) + return E1000_SUCCESS; + if (hw->phy.id == I210_I_PHY_ID) + return E1000_SUCCESS; + if ((hw->phy.id == M88E1543_E_PHY_ID) || + (hw->phy.id == M88E1512_E_PHY_ID)) + return E1000_SUCCESS; + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Resetting the phy means we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock from + * the reset value of 2.5MHz. + */ + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex + * @hw: pointer to the HW structure + * + * Forces the speed and duplex settings of the PHY. + * This is a function pointer entry point only called by + * PHY setup routines. 
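+ *
+ * Illustrative sketch (editorial example): with autonegotiation disabled,
+ * e1000_setup_copper_link_generic() reaches this routine through the
+ * force_speed_duplex pointer; the assignment below is normally made by
+ * the PHY-family init code and is shown only for illustration:
+ *
+ *    hw->phy.ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
+ *    hw->mac.autoneg = false;
+ *    ret_val = e1000_setup_copper_link_generic(hw);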
+ **/ +s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_ife"); + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data); + if (ret_val) + return ret_val; + + e1000_phy_force_speed_duplex_setup(hw, &data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data); + if (ret_val) + return ret_val; + + /* Disable MDI-X support for 10/100 */ + ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + return ret_val; + + data &= ~IFE_PMC_AUTO_MDIX; + data &= ~IFE_PMC_FORCE_MDIX; + + ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data); + if (ret_val) + return ret_val; + + DEBUGOUT1("IFE PMC: %X\n", data); + + usec_delay(1); + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n"); + + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) + DEBUGOUT("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex + * @hw: pointer to the HW structure + * @phy_ctrl: pointer to current value of PHY_CONTROL + * + * Forces speed and duplex on the PHY by doing the following: disable flow + * control, force speed/duplex on the MAC, disable auto speed detection, + * disable auto-negotiation, configure duplex, configure speed, configure + * the collision distance, write configuration to CTRL register. The + * caller must write to the PHY_CONTROL register for these settings to + * take affect. + **/ +void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl; + + DEBUGFUNC("e1000_phy_force_speed_duplex_setup"); + + /* Turn off flow control when forcing speed/duplex */ + hw->fc.current_mode = e1000_fc_none; + + /* Force speed/duplex on the mac */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~E1000_CTRL_SPD_SEL; + + /* Disable Auto Speed Detection */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Disable autoneg on the phy */ + *phy_ctrl &= ~MII_CR_AUTO_NEG_EN; + + /* Forcing Full or Half Duplex? */ + if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { + ctrl &= ~E1000_CTRL_FD; + *phy_ctrl &= ~MII_CR_FULL_DUPLEX; + DEBUGOUT("Half Duplex\n"); + } else { + ctrl |= E1000_CTRL_FD; + *phy_ctrl |= MII_CR_FULL_DUPLEX; + DEBUGOUT("Full Duplex\n"); + } + + /* Forcing 10mb or 100mb? */ + if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { + ctrl |= E1000_CTRL_SPD_100; + *phy_ctrl |= MII_CR_SPEED_100; + *phy_ctrl &= ~MII_CR_SPEED_1000; + DEBUGOUT("Forcing 100mb\n"); + } else { + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + DEBUGOUT("Forcing 10mb\n"); + } + + hw->mac.ops.config_collision_dist(hw); + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); +} + +/** + * e1000_set_d3_lplu_state_generic - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. 
LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_set_d3_lplu_state_generic"); + + if (!hw->phy.ops.read_reg) + return E1000_SUCCESS; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + return ret_val; + + if (!active) { + data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + return ret_val; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + return ret_val; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + } + + return ret_val; +} + +/** + * e1000_check_downshift_generic - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. + **/ +s32 e1000_check_downshift_generic(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + DEBUGFUNC("e1000_check_downshift_generic"); + + switch (phy->type) { + case e1000_phy_i210: + case e1000_phy_m88: + case e1000_phy_gg82563: + case e1000_phy_bm: + case e1000_phy_82578: + offset = M88E1000_PHY_SPEC_STATUS; + mask = M88E1000_PSSR_DOWNSHIFT; + break; + case e1000_phy_igp: + case e1000_phy_igp_2: + case e1000_phy_igp_3: + offset = IGP01E1000_PHY_LINK_HEALTH; + mask = IGP01E1000_PLHR_SS_DOWNGRADE; + break; + default: + /* speed downshift not supported */ + phy->speed_downgraded = false; + return E1000_SUCCESS; + } + + ret_val = phy->ops.read_reg(hw, offset, &phy_data); + + if (!ret_val) + phy->speed_downgraded = !!(phy_data & mask); + + return ret_val; +} + +/** + * e1000_check_polarity_m88 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. 
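+ *
+ * Illustrative sketch of consuming the result (editorial example):
+ *
+ *    ret_val = e1000_check_polarity_m88(hw);
+ *    if (!ret_val &&
+ *        hw->phy.cable_polarity == e1000_rev_polarity_reversed)
+ *        DEBUGOUT("Cable polarity is reversed\n");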
+ **/ +s32 e1000_check_polarity_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_check_polarity_m88"); + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data); + + if (!ret_val) + phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + + return ret_val; +} + +/** + * e1000_check_polarity_igp - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY port status register, and the + * current speed (since there is no polarity at 100Mbps). + **/ +s32 e1000_check_polarity_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data, offset, mask; + + DEBUGFUNC("e1000_check_polarity_igp"); + + /* Polarity is determined based on the speed of + * our connection. + */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + return ret_val; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + offset = IGP01E1000_PHY_PCS_INIT_REG; + mask = IGP01E1000_PHY_POLARITY_MASK; + } else { + /* This really only applies to 10Mbps since + * there is no polarity for 100Mbps (always 0). + */ + offset = IGP01E1000_PHY_PORT_STATUS; + mask = IGP01E1000_PSSR_POLARITY_REVERSED; + } + + ret_val = phy->ops.read_reg(hw, offset, &data); + + if (!ret_val) + phy->cable_polarity = ((data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + + return ret_val; +} + +/** + * e1000_check_polarity_ife - Check cable polarity for IFE PHY + * @hw: pointer to the HW structure + * + * Polarity is determined on the polarity reversal feature being enabled. + **/ +s32 e1000_check_polarity_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + DEBUGFUNC("e1000_check_polarity_ife"); + + /* Polarity is determined based on the reversal feature being enabled. + */ + if (phy->polarity_correction) { + offset = IFE_PHY_EXTENDED_STATUS_CONTROL; + mask = IFE_PESC_POLARITY_REVERSED; + } else { + offset = IFE_PHY_SPECIAL_CONTROL; + mask = IFE_PSC_FORCE_POLARITY; + } + + ret_val = phy->ops.read_reg(hw, offset, &phy_data); + + if (!ret_val) + phy->cable_polarity = ((phy_data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + + return ret_val; +} + +/** + * e1000_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. + **/ +STATIC s32 e1000_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 i, phy_status; + + DEBUGFUNC("e1000_wait_autoneg"); + + if (!hw->phy.ops.read_reg) + return E1000_SUCCESS; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msec_delay(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. 
+ */ + return ret_val; +} + +/** + * e1000_phy_has_link_generic - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + **/ +s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + s32 ret_val = E1000_SUCCESS; + u16 i, phy_status; + + DEBUGFUNC("e1000_phy_has_link_generic"); + + if (!hw->phy.ops.read_reg) + return E1000_SUCCESS; + + for (i = 0; i < iterations; i++) { + /* Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) { + /* If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. + */ + if (usec_interval >= 1000) + msec_delay(usec_interval/1000); + else + usec_delay(usec_interval); + } + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + msec_delay(usec_interval/1000); + else + usec_delay(usec_interval); + } + + *success = (i < iterations); + + return ret_val; +} + +/** + * e1000_get_cable_length_m88 - Determine cable length for m88 PHY + * @hw: pointer to the HW structure + * + * Reads the PHY specific status register to retrieve the cable length + * information. The cable length is determined by averaging the minimum and + * maximum values to get the "average" cable length. The m88 PHY has four + * possible cable length values, which are: + * Register Value Cable Length + * 0 < 50 meters + * 1 50 - 80 meters + * 2 80 - 110 meters + * 3 110 - 140 meters + * 4 > 140 meters + **/ +s32 e1000_get_cable_length_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + + DEBUGFUNC("e1000_get_cable_length_m88"); + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT); + + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) + return -E1000_ERR_PHY; + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return E1000_SUCCESS; +} + +s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, phy_data2, is_cm; + u16 index, default_page; + + DEBUGFUNC("e1000_get_cable_length_m88_gen2"); + + switch (hw->phy.id) { + case I210_I_PHY_ID: + /* Get cable length from PHY Cable Diagnostics Control Reg */ + ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) + + (I347AT4_PCDL + phy->addr), + &phy_data); + if (ret_val) + return ret_val; + + /* Check if the unit of cable length is meters or cm */ + ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) + + I347AT4_PCDC, &phy_data2); + if (ret_val) + return ret_val; + + is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + + /* Populate the phy structure with cable length in meters */ + phy->min_cable_length = phy_data / (is_cm ? 
100 : 1); + phy->max_cable_length = phy_data / (is_cm ? 100 : 1); + phy->cable_length = phy_data / (is_cm ? 100 : 1); + break; + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case M88E1340M_E_PHY_ID: + case I347AT4_E_PHY_ID: + /* Remember the original page select and set it to 7 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + return ret_val; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); + if (ret_val) + return ret_val; + + /* Get cable length from PHY Cable Diagnostics Control Reg */ + ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr), + &phy_data); + if (ret_val) + return ret_val; + + /* Check if the unit of cable length is meters or cm */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); + if (ret_val) + return ret_val; + + is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + + /* Populate the phy structure with cable length in meters */ + phy->min_cable_length = phy_data / (is_cm ? 100 : 1); + phy->max_cable_length = phy_data / (is_cm ? 100 : 1); + phy->cable_length = phy_data / (is_cm ? 100 : 1); + + /* Reset the page select to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + return ret_val; + break; + + case M88E1112_E_PHY_ID: + /* Remember the original page select and set it to 5 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + return ret_val; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE, + &phy_data); + if (ret_val) + return ret_val; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) + return -E1000_ERR_PHY; + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + + phy->max_cable_length) / 2; + + /* Reset the page select to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + return ret_val; + + break; + default: + return -E1000_ERR_PHY; + } + + return ret_val; +} + +/** + * e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the + * received signal, adjusting for the attenuation produced by the + * cable. By reading the AGC registers, which represent the + * combination of coarse and fine gain value, the value can be put + * into a lookup table to obtain the approximate cable length + * for each channel. + **/ +s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, i, agc_value = 0; + u16 cur_agc_index, max_agc_index = 0; + u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; + static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { + IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D + }; + + DEBUGFUNC("e1000_get_cable_length_igp_2"); + + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + /* Getting bits 15:9, which represent the combination of + * coarse and fine gain values. 
The result is a number + * that can be put into the lookup table to obtain the + * approximate cable length. + */ + cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK); + + /* Array index bound check. */ + if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || + (cur_agc_index == 0)) + return -E1000_ERR_PHY; + + /* Remove min & max AGC values from calculation. */ + if (e1000_igp_2_cable_length_table[min_agc_index] > + e1000_igp_2_cable_length_table[cur_agc_index]) + min_agc_index = cur_agc_index; + if (e1000_igp_2_cable_length_table[max_agc_index] < + e1000_igp_2_cable_length_table[cur_agc_index]) + max_agc_index = cur_agc_index; + + agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; + } + + agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + + e1000_igp_2_cable_length_table[max_agc_index]); + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters. */ + phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ? + (agc_value - IGP02E1000_AGC_RANGE) : 0); + phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_info_m88 - Retrieve PHY information + * @hw: pointer to the HW structure + * + * Valid for only copper links. Read the PHY status register (sticky read) + * to verify that link is up. Read the PHY special control register to + * determine the polarity and 10base-T extended distance. Read the PHY + * special status register to determine MDI/MDIx and current speed. If + * speed is 1000, then determine cable length, local and remote receiver. + **/ +s32 e1000_get_phy_info_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_get_phy_info_m88"); + + if (phy->media_type != e1000_media_type_copper) { + DEBUGOUT("Phy info is only valid for copper media\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + DEBUGOUT("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy->polarity_correction = !!(phy_data & + M88E1000_PSCR_POLARITY_REVERSAL); + + ret_val = e1000_check_polarity_m88(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX); + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + /* Set values to "undefined" */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + + return ret_val; +} + +/** + * e1000_get_phy_info_igp - Retrieve igp PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. 
If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +s32 e1000_get_phy_info_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("e1000_get_phy_info_igp"); + + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + DEBUGOUT("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + phy->polarity_correction = true; + + ret_val = e1000_check_polarity_igp(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX); + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + return ret_val; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + + return ret_val; +} + +/** + * e1000_get_phy_info_ife - Retrieves various IFE PHY states + * @hw: pointer to the HW structure + * + * Populates "phy" structure with various feature states. + **/ +s32 e1000_get_phy_info_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("e1000_get_phy_info_ife"); + + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + DEBUGOUT("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data); + if (ret_val) + return ret_val; + phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE); + + if (phy->polarity_correction) { + ret_val = e1000_check_polarity_ife(hw); + if (ret_val) + return ret_val; + } else { + /* Polarity is forced */ + phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + } + + ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS); + + /* The following parameters are undefined for 10/100 operation. */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + + return E1000_SUCCESS; +} + +/** + * e1000_phy_sw_reset_generic - PHY software reset + * @hw: pointer to the HW structure + * + * Does a software reset of the PHY by reading the PHY control register and + * setting/write the control register reset bit to the PHY. 
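+ *
+ * Note: this is the standard MII soft reset (MII_CR_RESET in PHY_CONTROL,
+ * which the PHY clears on its own once the reset completes); a full
+ * hardware reset through the device CTRL register is performed by
+ * e1000_phy_hw_reset_generic() below.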
+ **/ +s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_ctrl; + + DEBUGFUNC("e1000_phy_sw_reset_generic"); + + if (!hw->phy.ops.read_reg) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + return ret_val; + + phy_ctrl |= MII_CR_RESET; + ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + return ret_val; + + usec_delay(1); + + return ret_val; +} + +/** + * e1000_phy_hw_reset_generic - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + **/ +s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 ctrl; + + DEBUGFUNC("e1000_phy_hw_reset_generic"); + + if (phy->ops.check_reset_block) { + ret_val = phy->ops.check_reset_block(hw); + if (ret_val) + return E1000_SUCCESS; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) + return ret_val; + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST); + E1000_WRITE_FLUSH(hw); + + usec_delay(phy->reset_delay_us); + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + E1000_WRITE_FLUSH(hw); + + usec_delay(150); + + phy->ops.release(hw); + + return phy->ops.get_cfg_done(hw); +} + +/** + * e1000_get_cfg_done_generic - Generic configuration done + * @hw: pointer to the HW structure + * + * Generic function to wait 10 milli-seconds for configuration to complete + * and return success. + **/ +s32 e1000_get_cfg_done_generic(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_get_cfg_done_generic"); + UNREFERENCED_1PARAMETER(hw); + + msec_delay_irq(10); + + return E1000_SUCCESS; +} + +/** + * e1000_phy_init_script_igp3 - Inits the IGP3 PHY + * @hw: pointer to the HW structure + * + * Initializes a Intel Gigabit PHY3 when an EEPROM is not present. 
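+ *
+ * Note: the register/value pairs below are opaque vendor tuning constants;
+ * they are written unconditionally and the individual write return codes
+ * are not checked.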
+ **/ +s32 e1000_phy_init_script_igp3(struct e1000_hw *hw) +{ + DEBUGOUT("Running IGP 3 PHY init script\n"); + + /* PHY init IGP 3 */ + /* Enable rise/fall, 10-mode work in class-A */ + hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018); + /* Remove all caps from Replica path filter */ + hw->phy.ops.write_reg(hw, 0x2F52, 0x0000); + /* Bias trimming for ADC, AFE and Driver (Default) */ + hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24); + /* Increase Hybrid poly bias */ + hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0); + /* Add 4% to Tx amplitude in Gig mode */ + hw->phy.ops.write_reg(hw, 0x2010, 0x10B0); + /* Disable trimming (TTT) */ + hw->phy.ops.write_reg(hw, 0x2011, 0x0000); + /* Poly DC correction to 94.6% + 2% for all channels */ + hw->phy.ops.write_reg(hw, 0x20DD, 0x249A); + /* ABS DC correction to 95.9% */ + hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3); + /* BG temp curve trim */ + hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE); + /* Increasing ADC OPAMP stage 1 currents to max */ + hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4); + /* Force 1000 ( required for enabling PHY regs configuration) */ + hw->phy.ops.write_reg(hw, 0x0000, 0x0140); + /* Set upd_freq to 6 */ + hw->phy.ops.write_reg(hw, 0x1F30, 0x1606); + /* Disable NPDFE */ + hw->phy.ops.write_reg(hw, 0x1F31, 0xB814); + /* Disable adaptive fixed FFE (Default) */ + hw->phy.ops.write_reg(hw, 0x1F35, 0x002A); + /* Enable FFE hysteresis */ + hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067); + /* Fixed FFE for short cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F54, 0x0065); + /* Fixed FFE for medium cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F55, 0x002A); + /* Fixed FFE for long cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F56, 0x002A); + /* Enable Adaptive Clip Threshold */ + hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0); + /* AHT reset limit to 1 */ + hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF); + /* Set AHT master delay to 127 msec */ + hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC); + /* Set scan bits for AHT */ + hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF); + /* Set AHT Preset bits */ + hw->phy.ops.write_reg(hw, 0x1F79, 0x0210); + /* Change integ_factor of channel A to 3 */ + hw->phy.ops.write_reg(hw, 0x1895, 0x0003); + /* Change prop_factor of channels BCD to 8 */ + hw->phy.ops.write_reg(hw, 0x1796, 0x0008); + /* Change cg_icount + enable integbp for channels BCD */ + hw->phy.ops.write_reg(hw, 0x1798, 0xD008); + /* Change cg_icount + enable integbp + change prop_factor_master + * to 8 for channel A + */ + hw->phy.ops.write_reg(hw, 0x1898, 0xD918); + /* Disable AHT in Slave mode on channel A */ + hw->phy.ops.write_reg(hw, 0x187A, 0x0800); + /* Enable LPLU and disable AN to 1000 in non-D0a states, + * Enable SPD+B2B + */ + hw->phy.ops.write_reg(hw, 0x0019, 0x008D); + /* Enable restart AN on an1000_dis change */ + hw->phy.ops.write_reg(hw, 0x001B, 0x2080); + /* Enable wh_fifo read clock in 10/100 modes */ + hw->phy.ops.write_reg(hw, 0x0014, 0x0045); + /* Restart AN, Speed selection is 1000 */ + hw->phy.ops.write_reg(hw, 0x0000, 0x1340); + + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_type_from_id - Get PHY type from id + * @phy_id: phy_id read from the phy + * + * Returns the phy type from the id. 
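+ *
+ * Usage sketch (illustrative only): e1000_determine_phy_address() below
+ * probes candidate PHY addresses with exactly this test,
+ *
+ *   if (e1000_get_phy_type_from_id(hw->phy.id) != e1000_phy_unknown)
+ *           return E1000_SUCCESS;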
+ **/ +enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id) +{ + enum e1000_phy_type phy_type = e1000_phy_unknown; + + switch (phy_id) { + case M88E1000_I_PHY_ID: + case M88E1000_E_PHY_ID: + case M88E1111_I_PHY_ID: + case M88E1011_I_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1340M_E_PHY_ID: + phy_type = e1000_phy_m88; + break; + case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ + phy_type = e1000_phy_igp_2; + break; + case GG82563_E_PHY_ID: + phy_type = e1000_phy_gg82563; + break; + case IGP03E1000_E_PHY_ID: + phy_type = e1000_phy_igp_3; + break; + case IFE_E_PHY_ID: + case IFE_PLUS_E_PHY_ID: + case IFE_C_E_PHY_ID: + phy_type = e1000_phy_ife; + break; + case BME1000_E_PHY_ID: + case BME1000_E_PHY_ID_R2: + phy_type = e1000_phy_bm; + break; + case I82578_E_PHY_ID: + phy_type = e1000_phy_82578; + break; + case I82577_E_PHY_ID: + phy_type = e1000_phy_82577; + break; + case I82579_E_PHY_ID: + phy_type = e1000_phy_82579; + break; + case I217_E_PHY_ID: + phy_type = e1000_phy_i217; + break; + case I82580_I_PHY_ID: + phy_type = e1000_phy_82580; + break; + case I210_I_PHY_ID: + phy_type = e1000_phy_i210; + break; + default: + phy_type = e1000_phy_unknown; + break; + } + return phy_type; +} + +/** + * e1000_determine_phy_address - Determines PHY address. + * @hw: pointer to the HW structure + * + * This uses a trial and error method to loop through possible PHY + * addresses. It tests each by reading the PHY ID registers and + * checking for a match. + **/ +s32 e1000_determine_phy_address(struct e1000_hw *hw) +{ + u32 phy_addr = 0; + u32 i; + enum e1000_phy_type phy_type = e1000_phy_unknown; + + hw->phy.id = phy_type; + + for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) { + hw->phy.addr = phy_addr; + i = 0; + + do { + e1000_get_phy_id(hw); + phy_type = e1000_get_phy_type_from_id(hw->phy.id); + + /* If phy_type is valid, break - we found our + * PHY address + */ + if (phy_type != e1000_phy_unknown) + return E1000_SUCCESS; + + msec_delay(1); + i++; + } while (i < 10); + } + + return -E1000_ERR_PHY_TYPE; +} + +/** + * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address + * @page: page to access + * + * Returns the phy address for the page requested. + **/ +STATIC u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg) +{ + u32 phy_addr = 2; + + if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31)) + phy_addr = 1; + + return phy_addr; +} + +/** + * e1000_write_phy_reg_bm - Write BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u32 page = offset >> IGP_PAGE_SHIFT; + + DEBUGFUNC("e1000_write_phy_reg_bm"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, + false, false); + goto release; + } + + hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + u32 page_shift, page_select; + + /* Page select is register 31 for phy address 1 and 22 for + * phy address 2 and 3. Page select is shifted only for + * phy address 1. 
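+	 * In other words, address 1 follows the IGP convention and gets
+	 * (page << IGP_PAGE_SHIFT) written to IGP01E1000_PHY_PAGE_SELECT, while
+	 * addresses 2 and 3 get the raw page number written to
+	 * BM_PHY_PAGE_SELECT.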
+ */ + if (hw->phy.addr == 1) { + page_shift = IGP_PAGE_SHIFT; + page_select = IGP01E1000_PHY_PAGE_SELECT; + } else { + page_shift = 0; + page_select = BM_PHY_PAGE_SELECT; + } + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000_write_phy_reg_mdic(hw, page_select, + (page << page_shift)); + if (ret_val) + goto release; + } + + ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_read_phy_reg_bm - Read BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 e1000_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u32 page = offset >> IGP_PAGE_SHIFT; + + DEBUGFUNC("e1000_read_phy_reg_bm"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, + true, false); + goto release; + } + + hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + u32 page_shift, page_select; + + /* Page select is register 31 for phy address 1 and 22 for + * phy address 2 and 3. Page select is shifted only for + * phy address 1. + */ + if (hw->phy.addr == 1) { + page_shift = IGP_PAGE_SHIFT; + page_select = IGP01E1000_PHY_PAGE_SELECT; + } else { + page_shift = 0; + page_select = BM_PHY_PAGE_SELECT; + } + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000_write_phy_reg_mdic(hw, page_select, + (page << page_shift)); + if (ret_val) + goto release; + } + + ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_read_phy_reg_bm2 - Read BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 e1000_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u16 page = (u16)(offset >> IGP_PAGE_SHIFT); + + DEBUGFUNC("e1000_read_phy_reg_bm2"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, + true, false); + goto release; + } + + hw->phy.addr = 1; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, + page); + + if (ret_val) + goto release; + } + + ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_write_phy_reg_bm2 - Write BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
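+ *
+ * Note: the page is carried in the upper bits of @offset, so a caller
+ * addressing register R on page P passes ((P << IGP_PAGE_SHIFT) | R);
+ * plain register numbers therefore address page 0.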
+ **/ +s32 e1000_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u16 page = (u16)(offset >> IGP_PAGE_SHIFT); + + DEBUGFUNC("e1000_write_phy_reg_bm2"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, + false, false); + goto release; + } + + hw->phy.addr = 1; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, + page); + + if (ret_val) + goto release; + } + + ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers + * @hw: pointer to the HW structure + * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG + * + * Assumes semaphore already acquired and phy_reg points to a valid memory + * address to store contents of the BM_WUC_ENABLE_REG register. + **/ +s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) +{ + s32 ret_val; + u16 temp; + + DEBUGFUNC("e1000_enable_phy_wakeup_reg_access_bm"); + + if (!phy_reg) + return -E1000_ERR_PARAM; + + /* All page select, port ctrl and wakeup registers use phy address 1 */ + hw->phy.addr = 1; + + /* Select Port Control Registers page */ + ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); + if (ret_val) { + DEBUGOUT("Could not set Port Control page\n"); + return ret_val; + } + + ret_val = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); + if (ret_val) { + DEBUGOUT2("Could not read PHY register %d.%d\n", + BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); + return ret_val; + } + + /* Enable both PHY wakeup mode and Wakeup register page writes. + * Prevent a power state change by disabling ME and Host PHY wakeup. + */ + temp = *phy_reg; + temp |= BM_WUC_ENABLE_BIT; + temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT); + + ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, temp); + if (ret_val) { + DEBUGOUT2("Could not write PHY register %d.%d\n", + BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); + return ret_val; + } + + /* Select Host Wakeup Registers page - caller now able to write + * registers on the Wakeup registers page + */ + return e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT)); +} + +/** + * e1000_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs + * @hw: pointer to the HW structure + * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG + * + * Restore BM_WUC_ENABLE_REG to its original value. + * + * Assumes semaphore already acquired and *phy_reg is the contents of the + * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by + * caller. 
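+ *
+ * Pairing sketch (illustrative; 'saved' is a hypothetical local):
+ *
+ *   u16 saved;
+ *   e1000_enable_phy_wakeup_reg_access_bm(hw, &saved);
+ *   ... access registers on BM_WUC_PAGE ...
+ *   e1000_disable_phy_wakeup_reg_access_bm(hw, &saved);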
+ **/ +s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) +{ + s32 ret_val; + + DEBUGFUNC("e1000_disable_phy_wakeup_reg_access_bm"); + + if (!phy_reg) + return -E1000_ERR_PARAM; + + /* Select Port Control Registers page */ + ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); + if (ret_val) { + DEBUGOUT("Could not set Port Control page\n"); + return ret_val; + } + + /* Restore 769.17 to its original value */ + ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, *phy_reg); + if (ret_val) + DEBUGOUT2("Could not restore PHY register %d.%d\n", + BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); + + return ret_val; +} + +/** + * e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register + * @hw: pointer to the HW structure + * @offset: register offset to be read or written + * @data: pointer to the data to read or write + * @read: determines if operation is read or write + * @page_set: BM_WUC_PAGE already set and access enabled + * + * Read the PHY register at offset and store the retrieved information in + * data, or write data to PHY register at offset. Note the procedure to + * access the PHY wakeup registers is different than reading the other PHY + * registers. It works as such: + * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1 + * 2) Set page to 800 for host (801 if we were manageability) + * 3) Write the address using the address opcode (0x11) + * 4) Read or write the data using the data opcode (0x12) + * 5) Restore 769.17.2 to its original value + * + * Steps 1 and 2 are done by e1000_enable_phy_wakeup_reg_access_bm() and + * step 5 is done by e1000_disable_phy_wakeup_reg_access_bm(). + * + * Assumes semaphore is already acquired. When page_set==true, assumes + * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack + * is responsible for calls to e1000_[enable|disable]_phy_wakeup_reg_bm()). 
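+ *
+ * Expressed with the helpers used below, a read boils down to:
+ *
+ *   e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);       steps 1-2
+ *   e1000_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);  step 3
+ *   e1000_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, data);     step 4
+ *   e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);      step 5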
+ **/ +STATIC s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, + u16 *data, bool read, bool page_set) +{ + s32 ret_val; + u16 reg = BM_PHY_REG_NUM(offset); + u16 page = BM_PHY_REG_PAGE(offset); + u16 phy_reg = 0; + + DEBUGFUNC("e1000_access_phy_wakeup_reg_bm"); + + /* Gig must be disabled for MDIO accesses to Host Wakeup reg page */ + if ((hw->mac.type == e1000_pchlan) && + (!(E1000_READ_REG(hw, E1000_PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) + DEBUGOUT1("Attempting to access page %d while gig enabled.\n", + page); + + if (!page_set) { + /* Enable access to PHY wakeup registers */ + ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); + if (ret_val) { + DEBUGOUT("Could not enable PHY wakeup reg access\n"); + return ret_val; + } + } + + DEBUGOUT2("Accessing PHY page %d reg 0x%x\n", page, reg); + + /* Write the Wakeup register page offset value using opcode 0x11 */ + ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg); + if (ret_val) { + DEBUGOUT1("Could not write address opcode to page %d\n", page); + return ret_val; + } + + if (read) { + /* Read the Wakeup register page value using opcode 0x12 */ + ret_val = e1000_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, + data); + } else { + /* Write the Wakeup register page value using opcode 0x12 */ + ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, + *data); + } + + if (ret_val) { + DEBUGOUT2("Could not access PHY reg %d.%d\n", page, reg); + return ret_val; + } + + if (!page_set) + ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); + + return ret_val; +} + +/** + * e1000_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, restore the link to previous + * settings. + **/ +void e1000_power_up_phy_copper(struct e1000_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); +} + +/** + * e1000_power_down_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, restore the link to previous + * settings. + **/ +void e1000_power_down_phy_copper(struct e1000_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); + msec_delay(1); +} + +/** + * __e1000_read_phy_reg_hv - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and stores the retrieved information in data. Release any acquired + * semaphore before exiting. 
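+ *
+ * Note: @offset is expected in the BM_PHY_REG(page, reg) encoding defined
+ * in e1000_phy.h; the wrappers below select the locked/page_set
+ * combination on behalf of callers.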
+ **/ +STATIC s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked, bool page_set) +{ + s32 ret_val; + u16 page = BM_PHY_REG_PAGE(offset); + u16 reg = BM_PHY_REG_NUM(offset); + u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); + + DEBUGFUNC("__e1000_read_phy_reg_hv"); + + if (!locked) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, + true, page_set); + goto out; + } + + if (page > 0 && page < HV_INTC_FC_PAGE_START) { + ret_val = e1000_access_phy_debug_regs_hv(hw, offset, + data, true); + goto out; + } + + if (!page_set) { + if (page == HV_INTC_FC_PAGE_START) + page = 0; + + if (reg > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000_set_page_igp(hw, + (page << IGP_PAGE_SHIFT)); + + hw->phy.addr = phy_addr; + + if (ret_val) + goto out; + } + } + + DEBUGOUT3("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page, + page << IGP_PAGE_SHIFT, reg); + + ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, + data); +out: + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_read_phy_reg_hv - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset and stores + * the retrieved information in data. Release the acquired semaphore + * before exiting. + **/ +s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_hv(hw, offset, data, false, false); +} + +/** + * e1000_read_phy_reg_hv_locked - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired. + **/ +s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_hv(hw, offset, data, true, false); +} + +/** + * e1000_read_phy_reg_page_hv - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired and page already set. + **/ +s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_hv(hw, offset, data, true, true); +} + +/** + * __e1000_write_phy_reg_hv - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
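+ *
+ * Note: besides the page handling shared with the read path, the body below
+ * also works around the 82578 (revision >= 1, PHY address 2) losing MDIO
+ * access when the IEEE power-down bit (bit 11) of PHY_CONTROL is written,
+ * by first writing 0x7EFF to a vendor debug register.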
+ **/ +STATIC s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, + bool locked, bool page_set) +{ + s32 ret_val; + u16 page = BM_PHY_REG_PAGE(offset); + u16 reg = BM_PHY_REG_NUM(offset); + u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); + + DEBUGFUNC("__e1000_write_phy_reg_hv"); + + if (!locked) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, + false, page_set); + goto out; + } + + if (page > 0 && page < HV_INTC_FC_PAGE_START) { + ret_val = e1000_access_phy_debug_regs_hv(hw, offset, + &data, false); + goto out; + } + + if (!page_set) { + if (page == HV_INTC_FC_PAGE_START) + page = 0; + + /* Workaround MDIO accesses being disabled after entering IEEE + * Power Down (when bit 11 of the PHY Control register is set) + */ + if ((hw->phy.type == e1000_phy_82578) && + (hw->phy.revision >= 1) && + (hw->phy.addr == 2) && + !(MAX_PHY_REG_ADDRESS & reg) && + (data & (1 << 11))) { + u16 data2 = 0x7EFF; + ret_val = e1000_access_phy_debug_regs_hv(hw, + (1 << 6) | 0x3, + &data2, false); + if (ret_val) + goto out; + } + + if (reg > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000_set_page_igp(hw, + (page << IGP_PAGE_SHIFT)); + + hw->phy.addr = phy_addr; + + if (ret_val) + goto out; + } + } + + DEBUGOUT3("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page, + page << IGP_PAGE_SHIFT, reg); + + ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, + data); + +out: + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_write_phy_reg_hv - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to PHY register at the offset. + * Release the acquired semaphores before exiting. + **/ +s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_hv(hw, offset, data, false, false); +} + +/** + * e1000_write_phy_reg_hv_locked - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. Assumes semaphore + * already acquired. + **/ +s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_hv(hw, offset, data, true, false); +} + +/** + * e1000_write_phy_reg_page_hv - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. Assumes semaphore + * already acquired and page already set. 
+ **/ +s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_hv(hw, offset, data, true, true); +} + +/** + * e1000_get_phy_addr_for_hv_page - Get PHY adrress based on page + * @page: page to be accessed + **/ +STATIC u32 e1000_get_phy_addr_for_hv_page(u32 page) +{ + u32 phy_addr = 2; + + if (page >= HV_INTC_FC_PAGE_START) + phy_addr = 1; + + return phy_addr; +} + +/** + * e1000_access_phy_debug_regs_hv - Read HV PHY vendor specific high registers + * @hw: pointer to the HW structure + * @offset: register offset to be read or written + * @data: pointer to the data to be read or written + * @read: determines if operation is read or write + * + * Reads the PHY register at offset and stores the retreived information + * in data. Assumes semaphore already acquired. Note that the procedure + * to access these regs uses the address port and data port to read/write. + * These accesses done with PHY address 2 and without using pages. + **/ +STATIC s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, + u16 *data, bool read) +{ + s32 ret_val; + u32 addr_reg; + u32 data_reg; + + DEBUGFUNC("e1000_access_phy_debug_regs_hv"); + + /* This takes care of the difference with desktop vs mobile phy */ + addr_reg = ((hw->phy.type == e1000_phy_82578) ? + I82578_ADDR_REG : I82577_ADDR_REG); + data_reg = addr_reg + 1; + + /* All operations in this function are phy address 2 */ + hw->phy.addr = 2; + + /* masking with 0x3F to remove the page from offset */ + ret_val = e1000_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F); + if (ret_val) { + DEBUGOUT("Could not write the Address Offset port register\n"); + return ret_val; + } + + /* Read or write the data value next */ + if (read) + ret_val = e1000_read_phy_reg_mdic(hw, data_reg, data); + else + ret_val = e1000_write_phy_reg_mdic(hw, data_reg, *data); + + if (ret_val) + DEBUGOUT("Could not access the Data port register\n"); + + return ret_val; +} + +/** + * e1000_link_stall_workaround_hv - Si workaround + * @hw: pointer to the HW structure + * + * This function works around a Si bug where the link partner can get + * a link up indication before the PHY does. If small packets are sent + * by the link partner they can be placed in the packet buffer without + * being properly accounted for by the PHY and will stall preventing + * further packets from being received. The workaround is to clear the + * packet buffer after the PHY detects link up. 
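+ *
+ * The workaround is skipped unless the PHY is an 82578, loopback is off,
+ * and the link has resolved to 1000 Mb/s (BM_CS_STATUS_LINK_UP |
+ * BM_CS_STATUS_RESOLVED | BM_CS_STATUS_SPEED_1000); the flush itself is
+ * two writes to HV_MUX_DATA_CTRL.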
+ **/ +s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 data; + + DEBUGFUNC("e1000_link_stall_workaround_hv"); + + if (hw->phy.type != e1000_phy_82578) + return E1000_SUCCESS; + + /* Do not apply workaround if in PHY loopback bit 14 set */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &data); + if (data & PHY_CONTROL_LB) + return E1000_SUCCESS; + + /* check if link is up and at 1Gbps */ + ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data); + if (ret_val) + return ret_val; + + data &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_MASK); + + if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_1000)) + return E1000_SUCCESS; + + msec_delay(200); + + /* flush the packets in the fifo buffer */ + ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, + (HV_MUX_DATA_CTRL_GEN_TO_MAC | + HV_MUX_DATA_CTRL_FORCE_SPEED)); + if (ret_val) + return ret_val; + + return hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, + HV_MUX_DATA_CTRL_GEN_TO_MAC); +} + +/** + * e1000_check_polarity_82577 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +s32 e1000_check_polarity_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_check_polarity_82577"); + + ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); + + if (!ret_val) + phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. + **/ +s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_82577"); + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + usec_delay(1); + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on 82577 phy\n"); + + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) + DEBUGOUT("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + } + + return ret_val; +} + +/** + * e1000_get_phy_info_82577 - Retrieve I82577 PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. 
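+ *
+ * Note: cable length and local/remote receiver status are only meaningful
+ * at 1000 Mb/s; at lower speeds they are set to E1000_CABLE_LENGTH_UNDEFINED
+ * and e1000_1000t_rx_status_undefined below.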
+ **/ +s32 e1000_get_phy_info_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("e1000_get_phy_info_82577"); + + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + DEBUGOUT("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + phy->polarity_correction = true; + + ret_val = e1000_check_polarity_82577(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX); + + if ((data & I82577_PHY_STATUS2_SPEED_MASK) == + I82577_PHY_STATUS2_SPEED_1000MBPS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + return ret_val; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + + return E1000_SUCCESS; +} + +/** + * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY + * @hw: pointer to the HW structure + * + * Reads the diagnostic status register and verifies result is valid before + * placing it in the phy_cable_length field. + **/ +s32 e1000_get_cable_length_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, length; + + DEBUGFUNC("e1000_get_cable_length_82577"); + + ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data); + if (ret_val) + return ret_val; + + length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >> + I82577_DSTATUS_CABLE_LENGTH_SHIFT); + + if (length == E1000_CABLE_LENGTH_UNDEFINED) + return -E1000_ERR_PHY; + + phy->cable_length = length; + + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg_gs40g - Write GS40G PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u16 page = offset >> GS40G_PAGE_SHIFT; + + DEBUGFUNC("e1000_write_phy_reg_gs40g"); + + offset = offset & GS40G_OFFSET_MASK; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); + if (ret_val) + goto release; + ret_val = e1000_write_phy_reg_mdic(hw, offset, data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_read_phy_reg_gs40g - Read GS40G PHY register + * @hw: pointer to the HW structure + * @offset: lower half is register offset to read to + * upper half is page to use. + * @data: data to read at register offset + * + * Acquires semaphore, if necessary, then reads the data in the PHY register + * at the offset. Release any acquired semaphores before exiting. 
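+ *
+ * Encoding sketch (illustrative only): with GS40G_PAGE_SHIFT == 16, an
+ * offset such as (GS40G_PAGE_2 | GS40G_MAC_REG2) selects page 2,
+ * register 0x15 of the I210 internal PHY.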
+ **/ +s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u16 page = offset >> GS40G_PAGE_SHIFT; + + DEBUGFUNC("e1000_read_phy_reg_gs40g"); + + offset = offset & GS40G_OFFSET_MASK; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); + if (ret_val) + goto release; + ret_val = e1000_read_phy_reg_mdic(hw, offset, data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_read_phy_reg_mphy - Read mPHY control register + * @hw: pointer to the HW structure + * @address: address to be read + * @data: pointer to the read data + * + * Reads the mPHY control register in the PHY at offset and stores the + * information read to data. + **/ +s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data) +{ + u32 mphy_ctrl = 0; + bool locked = false; + bool ready; + + DEBUGFUNC("e1000_read_phy_reg_mphy"); + + /* Check if mPHY is ready to read/write operations */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + + /* Check if mPHY access is disabled and enable it if so */ + mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL); + if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) { + locked = true; + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + mphy_ctrl |= E1000_MPHY_ENA_ACCESS; + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); + } + + /* Set the address that we want to read */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + + /* We mask address, because we want to use only current lane */ + mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK & + ~E1000_MPHY_ADDRESS_FNC_OVERRIDE) | + (address & E1000_MPHY_ADDRESS_MASK); + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); + + /* Read data from the address */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + *data = E1000_READ_REG(hw, E1000_MPHY_DATA); + + /* Disable access to mPHY if it was originally disabled */ + if (locked) + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, + E1000_MPHY_DIS_ACCESS); + + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg_mphy - Write mPHY control register + * @hw: pointer to the HW structure + * @address: address to write to + * @data: data to write to register at offset + * @line_override: used when we want to use different line than default one + * + * Writes data to mPHY control register. 
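+ *
+ * Note: @line_override sets E1000_MPHY_ADDRESS_FNC_OVERRIDE in the
+ * address/control register so the access targets a lane other than the
+ * default one; otherwise the override bit is cleared and only the masked
+ * @address selects the register.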
+ **/ +s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data, + bool line_override) +{ + u32 mphy_ctrl = 0; + bool locked = false; + bool ready; + + DEBUGFUNC("e1000_write_phy_reg_mphy"); + + /* Check if mPHY is ready to read/write operations */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + + /* Check if mPHY access is disabled and enable it if so */ + mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL); + if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) { + locked = true; + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + mphy_ctrl |= E1000_MPHY_ENA_ACCESS; + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); + } + + /* Set the address that we want to read */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + + /* We mask address, because we want to use only current lane */ + if (line_override) + mphy_ctrl |= E1000_MPHY_ADDRESS_FNC_OVERRIDE; + else + mphy_ctrl &= ~E1000_MPHY_ADDRESS_FNC_OVERRIDE; + mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK) | + (address & E1000_MPHY_ADDRESS_MASK); + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl); + + /* Read data from the address */ + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + E1000_WRITE_REG(hw, E1000_MPHY_DATA, data); + + /* Disable access to mPHY if it was originally disabled */ + if (locked) + ready = e1000_is_mphy_ready(hw); + if (!ready) + return -E1000_ERR_PHY; + E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, + E1000_MPHY_DIS_ACCESS); + + return E1000_SUCCESS; +} + +/** + * e1000_is_mphy_ready - Check if mPHY control register is not busy + * @hw: pointer to the HW structure + * + * Returns mPHY control register status. + **/ +bool e1000_is_mphy_ready(struct e1000_hw *hw) +{ + u16 retry_count = 0; + u32 mphy_ctrl = 0; + bool ready = false; + + while (retry_count < 2) { + mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL); + if (mphy_ctrl & E1000_MPHY_BUSY) { + usec_delay(20); + retry_count++; + continue; + } + ready = true; + break; + } + + if (!ready) + DEBUGOUT("ERROR READING mPHY control register, phy is busy.\n"); + + return ready; +} diff --git a/drivers/net/e1000/base/e1000_phy.h b/drivers/net/e1000/base/e1000_phy.h new file mode 100644 index 00000000..3e45a9ef --- /dev/null +++ b/drivers/net/e1000/base/e1000_phy.h @@ -0,0 +1,333 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _E1000_PHY_H_ +#define _E1000_PHY_H_ + +void e1000_init_phy_ops_generic(struct e1000_hw *hw); +s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data); +void e1000_null_phy_generic(struct e1000_hw *hw); +s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active); +s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_null_set_page(struct e1000_hw *hw, u16 data); +s32 e1000_read_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 e1000_write_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +s32 e1000_check_downshift_generic(struct e1000_hw *hw); +s32 e1000_check_polarity_m88(struct e1000_hw *hw); +s32 e1000_check_polarity_igp(struct e1000_hw *hw); +s32 e1000_check_polarity_ife(struct e1000_hw *hw); +s32 e1000_check_reset_block_generic(struct e1000_hw *hw); +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw); +s32 e1000_copper_link_autoneg(struct e1000_hw *hw); +s32 e1000_copper_link_setup_igp(struct e1000_hw *hw); +s32 e1000_copper_link_setup_m88(struct e1000_hw *hw); +s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); +s32 e1000_get_cable_length_m88(struct e1000_hw *hw); +s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw); +s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw); +s32 e1000_get_cfg_done_generic(struct e1000_hw *hw); +s32 e1000_get_phy_id(struct e1000_hw *hw); +s32 e1000_get_phy_info_igp(struct e1000_hw *hw); +s32 e1000_get_phy_info_m88(struct e1000_hw *hw); +s32 e1000_get_phy_info_ife(struct e1000_hw *hw); +s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw); +void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); +s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw); +s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw); +s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page); +s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active); +s32 e1000_setup_copper_link_generic(struct e1000_hw *hw); +s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); +s32 
e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +s32 e1000_phy_init_script_igp3(struct e1000_hw *hw); +enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id); +s32 e1000_determine_phy_address(struct e1000_hw *hw); +s32 e1000_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); +s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); +s32 e1000_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data); +void e1000_power_up_phy_copper(struct e1000_hw *hw); +void e1000_power_down_phy_copper(struct e1000_hw *hw); +s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); +s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data); +s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw); +s32 e1000_copper_link_setup_82577(struct e1000_hw *hw); +s32 e1000_check_polarity_82577(struct e1000_hw *hw); +s32 e1000_get_phy_info_82577(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw); +s32 e1000_get_cable_length_82577(struct e1000_hw *hw); +s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data); +s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data, + bool line_override); +bool e1000_is_mphy_ready(struct e1000_hw *hw); + +#define E1000_MAX_PHY_ADDR 8 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ +#define IGP_PAGE_SHIFT 5 +#define PHY_REG_MASK 0x1F + +/* GS40G - I210 PHY defines */ +#define GS40G_PAGE_SELECT 0x16 +#define GS40G_PAGE_SHIFT 16 +#define GS40G_OFFSET_MASK 0xFFFF +#define GS40G_PAGE_2 0x20000 +#define GS40G_MAC_REG2 0x15 +#define GS40G_MAC_LB 0x4140 +#define GS40G_MAC_SPEED_1G 0X0006 +#define GS40G_COPPER_SPEC 0x0010 + +/* BM/HV Specific Registers */ +#define BM_PORT_CTRL_PAGE 769 +#define BM_WUC_PAGE 800 +#define BM_WUC_ADDRESS_OPCODE 0x11 +#define BM_WUC_DATA_OPCODE 0x12 +#define BM_WUC_ENABLE_PAGE BM_PORT_CTRL_PAGE +#define BM_WUC_ENABLE_REG 17 +#define 
BM_WUC_ENABLE_BIT (1 << 2) +#define BM_WUC_HOST_WU_BIT (1 << 4) +#define BM_WUC_ME_WU_BIT (1 << 5) + +#define PHY_UPPER_SHIFT 21 +#define BM_PHY_REG(page, reg) \ + (((reg) & MAX_PHY_REG_ADDRESS) |\ + (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\ + (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT))) +#define BM_PHY_REG_PAGE(offset) \ + ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF)) +#define BM_PHY_REG_NUM(offset) \ + ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\ + (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\ + ~MAX_PHY_REG_ADDRESS))) + +#define HV_INTC_FC_PAGE_START 768 +#define I82578_ADDR_REG 29 +#define I82577_ADDR_REG 16 +#define I82577_CFG_REG 22 +#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) +#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */ +#define I82577_CTRL_REG 23 + +/* 82577 specific PHY registers */ +#define I82577_PHY_CTRL_2 18 +#define I82577_PHY_LBK_CTRL 19 +#define I82577_PHY_STATUS_2 26 +#define I82577_PHY_DIAG_STATUS 31 + +/* I82577 PHY Status 2 */ +#define I82577_PHY_STATUS2_REV_POLARITY 0x0400 +#define I82577_PHY_STATUS2_MDIX 0x0800 +#define I82577_PHY_STATUS2_SPEED_MASK 0x0300 +#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 + +/* I82577 PHY Control 2 */ +#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200 +#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 +#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600 + +/* I82577 PHY Diagnostics Status */ +#define I82577_DSTATUS_CABLE_LENGTH 0x03FC +#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2 + +/* 82580 PHY Power Management */ +#define E1000_82580_PHY_POWER_MGMT 0xE14 +#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */ +#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */ +#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */ +#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */ + +#define E1000_MPHY_DIS_ACCESS 0x80000000 /* disable_access bit */ +#define E1000_MPHY_ENA_ACCESS 0x40000000 /* enable_access bit */ +#define E1000_MPHY_BUSY 0x00010000 /* busy bit */ +#define E1000_MPHY_ADDRESS_FNC_OVERRIDE 0x20000000 /* fnc_override bit */ +#define E1000_MPHY_ADDRESS_MASK 0x0000FFFF /* address mask */ + +/* BM PHY Copper Specific Control 1 */ +#define BM_CS_CTRL1 16 + +/* BM PHY Copper Specific Status */ +#define BM_CS_STATUS 17 +#define BM_CS_STATUS_LINK_UP 0x0400 +#define BM_CS_STATUS_RESOLVED 0x0800 +#define BM_CS_STATUS_SPEED_MASK 0xC000 +#define BM_CS_STATUS_SPEED_1000 0x8000 + +/* 82577 Mobile Phy Status Register */ +#define HV_M_STATUS 26 +#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000 +#define HV_M_STATUS_SPEED_MASK 0x0300 +#define HV_M_STATUS_SPEED_1000 0x0200 +#define HV_M_STATUS_SPEED_100 0x0100 +#define HV_M_STATUS_LINK_UP 0x0040 + +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ + +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + +/* Enable flexible speed on link-up */ +#define IGP01E1000_GMII_FLEX_SPD 0x0010 +#define IGP01E1000_GMII_SPD 0x0020 /* Enable SPD */ + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ + +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 + +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 + +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define 
IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course=15:13, Fine=12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F +#define IGP02E1000_AGC_RANGE 15 + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 +#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KMRNCTRLSTA_REN 0x00200000 +#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */ +#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ +#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */ +#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ +#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */ +#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ +#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7 +#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 /* enable K1 */ +#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */ +#define E1000_KMRNCTRLSTA_K0S_CTRL 0x1E /* Kumeran K0s Control */ +#define E1000_KMRNCTRLSTA_K0S_CTRL_ENTRY_LTNCY_SHIFT 0 +#define E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT 4 +#define E1000_KMRNCTRLSTA_K0S_CTRL_ENTRY_LTNCY_MASK \ + (3 << E1000_KMRNCTRLSTA_K0S_CTRL_ENTRY_LTNCY_SHIFT) +#define E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_MASK \ + (7 << E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT) +#define E1000_KMRNCTRLSTA_OP_MODES 0x1F /* Kumeran Modes of Operation */ +#define E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC 0x0002 /* change LSC to CSC */ + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ + +/* IFE PHY Extended Status Control */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 + +/* IFE PHY Special Control */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 +#define IFE_PSC_FORCE_POLARITY 0x0020 + +/* IFE PHY Special Control and LED Control */ +#define IFE_PSCL_PROBE_MODE 0x0020 +#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +/* IFE PHY MDIX Control */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */ + +/* SFP modules ID memory locations */ +#define E1000_SFF_IDENTIFIER_OFFSET 0x00 +#define E1000_SFF_IDENTIFIER_SFF 0x02 +#define E1000_SFF_IDENTIFIER_SFP 0x03 + +#define E1000_SFF_ETH_FLAGS_OFFSET 0x06 +/* Flags for SFP modules compatible with ETH up to 1Gb */ +struct sfp_e1000_flags { + u8 e1000_base_sx:1; + u8 e1000_base_lx:1; + u8 e1000_base_cx:1; + u8 e1000_base_t:1; + u8 e100_base_lx:1; + u8 e100_base_fx:1; + u8 e10_base_bx10:1; + u8 e10_base_px:1; +}; + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +#define E1000_SFF_VENDOR_OUI_TYCO 0x00407600 +#define E1000_SFF_VENDOR_OUI_FTL 0x00906500 +#define E1000_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define E1000_SFF_VENDOR_OUI_INTEL 0x001B2100 + +#endif diff --git a/drivers/net/e1000/base/e1000_regs.h b/drivers/net/e1000/base/e1000_regs.h new file mode 100644 index 00000000..84531a99 --- /dev/null +++ b/drivers/net/e1000/base/e1000_regs.h @@ -0,0 +1,688 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights 
reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _E1000_REGS_H_ +#define _E1000_REGS_H_ + +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ +#define E1000_REGISTER_SET_SIZE 0x20000 /* CSR Size */ +#define E1000_EEPROM_INIT_CTRL_WORD_2 0x0F /* EEPROM Init Ctrl Word 2 */ +#define E1000_EEPROM_PCIE_CTRL_WORD_2 0x28 /* EEPROM PCIe Ctrl Word 2 */ +#define E1000_BARCTRL 0x5BBC /* BAR ctrl reg */ +#define E1000_BARCTRL_FLSIZE 0x0700 /* BAR ctrl Flsize */ +#define E1000_BARCTRL_CSRSIZE 0x2000 /* BAR ctrl CSR size */ +#define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */ +#define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */ +#define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */ +#define E1000_PPHY_CTRL 0x5b48 /* PCIe PHY Control */ +#define E1000_I350_BARCTRL 0x5BFC /* BAR ctrl reg */ +#define E1000_I350_DTXMXPKTSZ 0x355C /* Maximum sent packet size reg*/ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT) +#define E1000_FEXT 0x0002C /* Future Extended - RW */ +#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ +#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */ +#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ +#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ +#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ +#define E1000_PCIEANACFG 0x00F18 
/* PCIE Analog Config */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +#define E1000_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */ +#define E1000_SVCR 0x000F0 +#define E1000_SVT 0x000F4 +#define E1000_LPIC 0x000FC /* Low Power IDLE control */ +#define E1000_RCTL 0x00100 /* Rx Control - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */ +#define E1000_PBA_ECC 0x01100 /* PBA ECC Register */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ +#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define E1000_TCTL 0x00400 /* Tx Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */ +#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */ +#define E1000_TBT 0x00448 /* Tx Burst Timer - RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_LEDMUX 0x08130 /* LED MUX Control */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */ +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEMNGCTL_I210 0x01010 /* i210 MNG EEprom Mode Control */ +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ +#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLSWCTL 0x01030 /* FLASH control register */ +#define E1000_FLSWDATA 0x01034 /* FLASH data register */ +#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ +#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ +#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */ +#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */ +#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */ +#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data 
Output Enable */ +#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */ +#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ +#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ +#define E1000_I2C_CLK_STRETCH_DIS 0x00008000 /* I2C- Dis Clk Stretching */ +#define E1000_WDSTP 0x01040 /* Watchdog Setup - RW */ +#define E1000_SWDSTS 0x01044 /* SW Device Status - RW */ +#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ +#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ +#define E1000_VPDDIAG 0x01060 /* VPD Diagnostic - RO */ +#define E1000_ICR_V2 0x01500 /* Intr Cause - new location - RC */ +#define E1000_ICS_V2 0x01504 /* Intr Cause Set - new location - WO */ +#define E1000_IMS_V2 0x01508 /* Intr Mask Set/Read - new location - RW */ +#define E1000_IMC_V2 0x0150C /* Intr Mask Clear - new location - WO */ +#define E1000_IAM_V2 0x01510 /* Intr Ack Auto Mask - new location - RW */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ +#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ +#define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */ +#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ +/* Split and Replication Rx Control - RW */ +#define E1000_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */ +#define E1000_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */ +#define E1000_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */ +#define E1000_RDPURD 0x025D8 /* DMA Rx Descriptor uC Data Read - RW */ +#define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */ +#define E1000_PBDIAG 0x02458 /* Packet Buffer Diagnostic - RW */ +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define E1000_IRPBS 0x02404 /* Same as RXPBS, renamed for newer Si - RW */ +#define E1000_PBRWAC 0x024E8 /* Rx packet buffer wrap around counter - RO */ +#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */ +#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */ +#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */ +#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */ +#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ +#define E1000_I210_FLMNGCTL 0x12038 +#define E1000_I210_FLMNGDATA 0x1203C +#define E1000_I210_FLMNGCNT 0x12040 + +#define E1000_I210_FLSWCTL 0x12048 +#define E1000_I210_FLSWDATA 0x1204C +#define E1000_I210_FLSWCNT 0x12050 + +#define E1000_I210_FLA 0x1201C + +#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) +#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ + +/* QAV Tx mode control register */ +#define E1000_I210_TQAVCTRL 0x3570 + +/* QAV Tx mode control register bitfields masks */ +/* QAV enable */ +#define E1000_TQAVCTRL_MODE (1 << 0) +/* Fetching arbitration type */ +#define E1000_TQAVCTRL_FETCH_ARB (1 << 4) +/* Fetching timer enable */ +#define E1000_TQAVCTRL_FETCH_TIMER_ENABLE (1 << 5) +/* Launch arbitration type */ +#define E1000_TQAVCTRL_LAUNCH_ARB (1 << 8) +/* Launch timer enable */ +#define E1000_TQAVCTRL_LAUNCH_TIMER_ENABLE (1 << 9) +/* SP waits for SR 
enable */ +#define E1000_TQAVCTRL_SP_WAIT_SR (1 << 10) +/* Fetching timer correction */ +#define E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET 16 +#define E1000_TQAVCTRL_FETCH_TIMER_DELTA \ + (0xFFFF << E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET) + +/* High credit registers where _n can be 0 or 1. */ +#define E1000_I210_TQAVHC(_n) (0x300C + 0x40 * (_n)) + +/* Queues fetch arbitration priority control register */ +#define E1000_I210_TQAVARBCTRL 0x3574 +/* Queues priority masks where _n and _p can be 0-3. */ +#define E1000_TQAVARBCTRL_QUEUE_PRI(_n, _p) ((_p) << (2 * (_n))) +/* QAV Tx mode control registers where _n can be 0 or 1. */ +#define E1000_I210_TQAVCC(_n) (0x3004 + 0x40 * (_n)) + +/* QAV Tx mode control register bitfields masks */ +#define E1000_TQAVCC_IDLE_SLOPE 0xFFFF /* Idle slope */ +#define E1000_TQAVCC_KEEP_CREDITS (1 << 30) /* Keep credits opt enable */ +#define E1000_TQAVCC_QUEUE_MODE (1 << 31) /* SP vs. SR Tx mode */ + +/* Good transmitted packets counter registers */ +#define E1000_PQGPTC(_n) (0x010014 + (0x100 * (_n))) + +/* Queues packet buffer size masks where _n can be 0-3 and _s 0-63 [kB] */ +#define E1000_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * (_n))) + +#define E1000_MMDAC 13 /* MMD Access Control */ +#define E1000_MMDAAD 14 /* MMD Access Address/Data */ + +/* Convenience macros + * + * Note: "_n" is the queue number of the register to be written to. + * + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + */ +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ + (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ + (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ + (0x0C008 + ((_n) * 0x40))) +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ + (0x0C00C + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ + (0x0C010 + ((_n) * 0x40))) +#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \ + (0x0C014 + ((_n) * 0x40))) +#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ + (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ + (0x0C028 + ((_n) * 0x40))) +#define E1000_RQDPC(_n) ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \ + (0x0C030 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ + (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ + (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ + (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ + (0x0E010 + ((_n) * 0x40))) +#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ + (0x0E014 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ + (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ + (0x0E028 + ((_n) * 0x40))) +#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \ + (0x0E038 + ((_n) * 0x40))) +#define E1000_TDWBAH(_n) ((_n) < 4 ? 
(0x0383C + ((_n) * 0x100)) : \ + (0x0E03C + ((_n) * 0x40))) +#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100)) +#define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */ +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ +#define E1000_TXDMAC 0x03000 /* Tx DMA Control - RW */ +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) +#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8)) +#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8)) +#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) +#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) +#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) +#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) +#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) +#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) +#define E1000_PBSLAC 0x03100 /* Pkt Buffer Slave Access Control */ +#define E1000_PBSLAD(_n) (0x03110 + (0x4 * (_n))) /* Pkt Buffer DWORD */ +#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ +/* Same as TXPBS, renamed for newer Si - RW */ +#define E1000_ITPBS 0x03404 +#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ +#define E1000_TDPUMB 0x0357C /* DMA Tx Desc uC Mail Box - RW */ +#define E1000_TDPUAD 0x03580 /* DMA Tx Desc uC Addr Command - RW */ +#define E1000_TDPUWD 0x03584 /* DMA Tx Desc uC Data Write - RW */ +#define E1000_TDPURD 0x03588 /* DMA Tx Desc uC Data Read - RW */ +#define E1000_TDPUCTL 0x0358C /* DMA Tx Desc uC Control - RW */ +#define E1000_DTXCTL 0x03590 /* DMA Tx Control - RW */ +#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */ +#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */ +/* DMA Tx Max Total Allow Size Reqs - RW */ +#define E1000_DTXMXSZRQ 0x03540 +#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */ +#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */ +#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */ +#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ +#define 
E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_CRC_OFFSET 0x05F50 /* CRC Offset register */ + +#define E1000_VFGPRC 0x00F10 +#define E1000_VFGORC 0x00F18 +#define E1000_VFMPRC 0x00F3C +#define E1000_VFGPTC 0x00F14 
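The queue-indexed convenience macros defined above (E1000_RDBAL(_n), E1000_TDBAL(_n) and friends) split at queue 4: the first four queues sit in a register block with a 0x100 stride, later queues in a block with a 0x40 stride. The following is a minimal standalone sketch, not part of the driver: the macro bodies are copied from this header, while the small main() harness is purely illustrative.

/* Standalone sketch: print the CSR offsets the queue macros expand to. */
#include <stdio.h>

#define E1000_RDBAL(_n)	((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
			 (0x0C000 + ((_n) * 0x40)))
#define E1000_TDBAL(_n)	((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
			 (0x0E000 + ((_n) * 0x40)))

int main(void)
{
	int q;

	/* Queues 0-3 use the 0x100-stride block; queues 4 and up use the
	 * 0x40-stride block, still indexed by the full queue number.
	 */
	for (q = 0; q < 8; q++)
		printf("queue %d: RDBAL=0x%05X TDBAL=0x%05X\n",
		       q, E1000_RDBAL(q), E1000_TDBAL(q));

	return 0;
}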
+#define E1000_VFGOTC 0x00F34 +#define E1000_VFGOTLBC 0x00F50 +#define E1000_VFGPTLBC 0x00F44 +#define E1000_VFGORLBC 0x00F48 +#define E1000_VFGPRLBC 0x00F40 +/* Virtualization statistical counters */ +#define E1000_PFVFGPRC(_n) (0x010010 + (0x100 * (_n))) +#define E1000_PFVFGPTC(_n) (0x010014 + (0x100 * (_n))) +#define E1000_PFVFGORC(_n) (0x010018 + (0x100 * (_n))) +#define E1000_PFVFGOTC(_n) (0x010034 + (0x100 * (_n))) +#define E1000_PFVFMPRC(_n) (0x010038 + (0x100 * (_n))) +#define E1000_PFVFGPRLBC(_n) (0x010040 + (0x100 * (_n))) +#define E1000_PFVFGPTLBC(_n) (0x010044 + (0x100 * (_n))) +#define E1000_PFVFGORLBC(_n) (0x010048 + (0x100 * (_n))) +#define E1000_PFVFGOTLBC(_n) (0x010050 + (0x100 * (_n))) + +/* LinkSec */ +#define E1000_LSECTXUT 0x04300 /* Tx Untagged Pkt Cnt */ +#define E1000_LSECTXPKTE 0x04304 /* Encrypted Tx Pkts Cnt */ +#define E1000_LSECTXPKTP 0x04308 /* Protected Tx Pkt Cnt */ +#define E1000_LSECTXOCTE 0x0430C /* Encrypted Tx Octets Cnt */ +#define E1000_LSECTXOCTP 0x04310 /* Protected Tx Octets Cnt */ +#define E1000_LSECRXUT 0x04314 /* Untagged non-Strict Rx Pkt Cnt */ +#define E1000_LSECRXOCTD 0x0431C /* Rx Octets Decrypted Count */ +#define E1000_LSECRXOCTV 0x04320 /* Rx Octets Validated */ +#define E1000_LSECRXBAD 0x04324 /* Rx Bad Tag */ +#define E1000_LSECRXNOSCI 0x04328 /* Rx Packet No SCI Count */ +#define E1000_LSECRXUNSCI 0x0432C /* Rx Packet Unknown SCI Count */ +#define E1000_LSECRXUNCH 0x04330 /* Rx Unchecked Packets Count */ +#define E1000_LSECRXDELAY 0x04340 /* Rx Delayed Packet Count */ +#define E1000_LSECRXLATE 0x04350 /* Rx Late Packets Count */ +#define E1000_LSECRXOK(_n) (0x04360 + (0x04 * (_n))) /* Rx Pkt OK Cnt */ +#define E1000_LSECRXINV(_n) (0x04380 + (0x04 * (_n))) /* Rx Invalid Cnt */ +#define E1000_LSECRXNV(_n) (0x043A0 + (0x04 * (_n))) /* Rx Not Valid Cnt */ +#define E1000_LSECRXUNSA 0x043C0 /* Rx Unused SA Count */ +#define E1000_LSECRXNUSA 0x043D0 /* Rx Not Using SA Count */ +#define E1000_LSECTXCAP 0x0B000 /* Tx Capabilities Register - RO */ +#define E1000_LSECRXCAP 0x0B300 /* Rx Capabilities Register - RO */ +#define E1000_LSECTXCTRL 0x0B004 /* Tx Control - RW */ +#define E1000_LSECRXCTRL 0x0B304 /* Rx Control - RW */ +#define E1000_LSECTXSCL 0x0B008 /* Tx SCI Low - RW */ +#define E1000_LSECTXSCH 0x0B00C /* Tx SCI High - RW */ +#define E1000_LSECTXSA 0x0B010 /* Tx SA0 - RW */ +#define E1000_LSECTXPN0 0x0B018 /* Tx SA PN 0 - RW */ +#define E1000_LSECTXPN1 0x0B01C /* Tx SA PN 1 - RW */ +#define E1000_LSECRXSCL 0x0B3D0 /* Rx SCI Low - RW */ +#define E1000_LSECRXSCH 0x0B3E0 /* Rx SCI High - RW */ +/* LinkSec Tx 128-bit Key 0 - WO */ +#define E1000_LSECTXKEY0(_n) (0x0B020 + (0x04 * (_n))) +/* LinkSec Tx 128-bit Key 1 - WO */ +#define E1000_LSECTXKEY1(_n) (0x0B030 + (0x04 * (_n))) +#define E1000_LSECRXSA(_n) (0x0B310 + (0x04 * (_n))) /* Rx SAs - RW */ +#define E1000_LSECRXPN(_n) (0x0B330 + (0x04 * (_n))) /* Rx SAs - RW */ +/* LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit + * key - RW. 
+ */ +#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m))) + +#define E1000_SSVPC 0x041A0 /* Switch Security Violation Pkt Cnt */ +#define E1000_IPSCTRL 0xB430 /* IpSec Control Register */ +#define E1000_IPSRXCMD 0x0B408 /* IPSec Rx Command Register - RW */ +#define E1000_IPSRXIDX 0x0B400 /* IPSec Rx Index - RW */ +/* IPSec Rx IPv4/v6 Address - RW */ +#define E1000_IPSRXIPADDR(_n) (0x0B420 + (0x04 * (_n))) +/* IPSec Rx 128-bit Key - RW */ +#define E1000_IPSRXKEY(_n) (0x0B410 + (0x04 * (_n))) +#define E1000_IPSRXSALT 0x0B404 /* IPSec Rx Salt - RW */ +#define E1000_IPSRXSPI 0x0B40C /* IPSec Rx SPI - RW */ +/* IPSec Tx 128-bit Key - RW */ +#define E1000_IPSTXKEY(_n) (0x0B460 + (0x04 * (_n))) +#define E1000_IPSTXSALT 0x0B454 /* IPSec Tx Salt - RW */ +#define E1000_IPSTXIDX 0x0B450 /* IPSec Tx SA IDX - RW */ +#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ +#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ +#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ +#define E1000_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */ +#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ +#define E1000_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */ +#define E1000_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */ +#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ +#define E1000_HGPTC 0x04118 /* Host Good Packets Tx Count */ +#define E1000_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */ +#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define E1000_LENERRS 0x04138 /* Length Errors Count */ +#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ +#define E1000_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */ +#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ +#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ +#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ +#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Pg - RW */ +#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */ +#define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ +#define E1000_CIAA 0x05B88 /* Config Indirect Access Address - RW */ +#define E1000_CIAD 0x05B8C /* Config Indirect Access Data - RW */ +#define E1000_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */ +#define E1000_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ 
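E1000_RAL(_i)/E1000_RAH(_i), defined earlier in this header, place receive-address entries 0-15 inside E1000_RA (0x05400) and entries 16 and up inside E1000_RA2 (0x054E0). A minimal standalone sketch follows; the macro bodies are copied from this header and the main() harness is purely illustrative, not driver code.

/* Standalone sketch: show where receive-address entries land around index 16. */
#include <stdio.h>

#define E1000_RAL(_i)	(((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
			 (0x054E0 + ((_i - 16) * 8)))
#define E1000_RAH(_i)	(((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
			 (0x054E4 + ((_i - 16) * 8)))

int main(void)
{
	int i;

	/* Each entry is an 8-byte RAL/RAH pair; index 16 wraps into the
	 * second half of the array at 0x054E0.
	 */
	for (i = 14; i < 18; i++)
		printf("RA[%2d]: RAL=0x%05X RAH=0x%05X\n",
		       i, E1000_RAL(i), E1000_RAH(i));

	return 0;
}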
+#define E1000_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 1's to clear */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ +#define E1000_HIBBA 0x8F40 /* Host Interface Buffer Base Address */ +/* Flexible Host Filter Table */ +#define E1000_FHFT(_n) (0x09000 + ((_n) * 0x100)) +/* Ext Flexible Host Filter Table */ +#define E1000_FHFT_EXT(_n) (0x09A00 + ((_n) * 0x100)) + + +#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +/* Management Decision Filters */ +#define E1000_MDEF(_n) (0x05890 + (4 * (_n))) +#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ +#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ +#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ +#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +/* Driver-only SW semaphore (not used by BOOT agents) */ +#define E1000_SWSM2 0x05B58 +#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */ +#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ +#define E1000_UFUSE 0x05B78 /* UFUSE - RO */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ +#define E1000_FWSTS 0x08F0C /* FW Status */ + +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/ +#define E1000_IMIRVP 0x05AC0 /* Immediate INT Rx VLAN Priority -RW */ +#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Alloc Reg -RW */ +#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */ +#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ +/* VT Registers */ +#define E1000_SWPBS 0x03004 /* Switch Packet Buffer Size - RW */ +#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ +#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ +#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ +#define E1000_VFRE 0x00C8C /* VF Receive Enables */ +#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ +#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ +#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ +#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ +#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ +#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ +#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ +#define E1000_VMRCTL 0X05D80 /* Virtual Mirror Rule Control */ +#define E1000_VMRVLAN 0x05D90 /* Virtual Mirror Rule VLAN */ +#define E1000_VMRVM 0x05DA0 /* Virtual Mirror Rule VM */ +#define E1000_MDFB 0x03558 /* Malicious Driver free block */ +#define 
E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */ +#define E1000_TXSWC 0x05ACC /* Tx Switch Control */ +#define E1000_SCCRL 0x05DB0 /* Storm Control Control */ +#define E1000_BSCTRH 0x05DB8 /* Broadcast Storm Control Threshold */ +#define E1000_MSCTRH 0x05DBC /* Multicast Storm Control Threshold */ +/* These act per VF so an array friendly macro is used */ +#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) +#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) +#define E1000_VFVMBMEM(_n) (0x00800 + (_n)) +#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) +/* VLAN Virtual Machine Filter - RW */ +#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) +#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) +#define E1000_DVMOLR(_n) (0x0C038 + (0x40 * (_n))) /* DMA VM offload */ +#define E1000_VTCTRL(_n) (0x10000 + (0x100 * (_n))) /* VT Control */ +#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ +#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ +#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ +#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_TIMADJL 0x0B60C /* Time sync time adjustment offset Low - RW */ +#define E1000_TIMADJH 0x0B610 /* Time sync time adjustment offset High - RW */ +#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ +#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ +#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */ +#define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */ +#define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */ + +/* Filtering Registers */ +#define E1000_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */ +#define E1000_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */ +#define E1000_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */ +#define E1000_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ +#define E1000_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */ +#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ +#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ + +#define E1000_RTTDCS 0x3600 /* Reedtown Tx Desc plane control and status */ +#define E1000_RTTPCS 0x3474 /* Reedtown Tx Packet Plane control and status */ +#define E1000_RTRPCS 0x2474 /* Rx packet plane control and status */ +#define E1000_RTRUP2TC 0x05AC4 /* Rx User Priority to Traffic Class */ +#define E1000_RTTUP2TC 0x0418 /* Transmit User Priority to Traffic Class */ +/* Tx Desc plane TC Rate-scheduler config */ +#define E1000_RTTDTCRC(_n) (0x3610 + ((_n) * 4)) +/* Tx Packet plane TC Rate-Scheduler Config */ +#define E1000_RTTPTCRC(_n) (0x3480 + ((_n) * 4)) +/* Rx Packet plane TC Rate-Scheduler Config */ +#define E1000_RTRPTCRC(_n) (0x2480 + ((_n) * 4)) +/* Tx Desc Plane TC Rate-Scheduler Status */ +#define E1000_RTTDTCRS(_n) 
(0x3630 + ((_n) * 4)) +/* Tx Desc Plane TC Rate-Scheduler MMW */ +#define E1000_RTTDTCRM(_n) (0x3650 + ((_n) * 4)) +/* Tx Packet plane TC Rate-Scheduler Status */ +#define E1000_RTTPTCRS(_n) (0x34A0 + ((_n) * 4)) +/* Tx Packet plane TC Rate-scheduler MMW */ +#define E1000_RTTPTCRM(_n) (0x34C0 + ((_n) * 4)) +/* Rx Packet plane TC Rate-Scheduler Status */ +#define E1000_RTRPTCRS(_n) (0x24A0 + ((_n) * 4)) +/* Rx Packet plane TC Rate-Scheduler MMW */ +#define E1000_RTRPTCRM(_n) (0x24C0 + ((_n) * 4)) +/* Tx Desc plane VM Rate-Scheduler MMW*/ +#define E1000_RTTDVMRM(_n) (0x3670 + ((_n) * 4)) +/* Tx BCN Rate-Scheduler MMW */ +#define E1000_RTTBCNRM(_n) (0x3690 + ((_n) * 4)) +#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select */ +#define E1000_RTTDVMRC 0x3608 /* Tx Desc Plane VM Rate-Scheduler Config */ +#define E1000_RTTDVMRS 0x360C /* Tx Desc Plane VM Rate-Scheduler Status */ +#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config */ +#define E1000_RTTBCNRS 0x36B4 /* Tx BCN Rate-Scheduler Status */ +#define E1000_RTTBCNCR 0xB200 /* Tx BCN Control Register */ +#define E1000_RTTBCNTG 0x35A4 /* Tx BCN Tagging */ +#define E1000_RTTBCNCP 0xB208 /* Tx BCN Congestion point */ +#define E1000_RTRBCNCR 0xB20C /* Rx BCN Control Register */ +#define E1000_RTTBCNRD 0x36B8 /* Tx BCN Rate Drift */ +#define E1000_PFCTOP 0x1080 /* Priority Flow Control Type and Opcode */ +#define E1000_RTTBCNIDX 0xB204 /* Tx BCN Congestion Point */ +#define E1000_RTTBCNACH 0x0B214 /* Tx BCN Control High */ +#define E1000_RTTBCNACL 0x0B210 /* Tx BCN Control Low */ + +/* DMA Coalescing registers */ +#define E1000_DMACR 0x02508 /* Control Register */ +#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ +#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ +#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ +#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ +#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ +#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ + +/* PCIe Parity Status Register */ +#define E1000_PCIEERRSTS 0x05BA8 + +#define E1000_PROXYS 0x5F64 /* Proxying Status */ +#define E1000_PROXYFC 0x5F60 /* Proxying Filter Control */ +/* Thermal sensor configuration and status registers */ +#define E1000_THMJT 0x08100 /* Junction Temperature */ +#define E1000_THLOWTC 0x08104 /* Low Threshold Control */ +#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */ +#define E1000_THHIGHTC 0x0810C /* High Threshold Control */ +#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ + +/* Energy Efficient Ethernet "EEE" registers */ +#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define E1000_LTRC 0x01A0 /* Latency Tolerance Reporting Control */ +#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/ +#define E1000_EEE_SU 0x0E34 /* EEE Setup */ +#define E1000_TLPIC 0x4148 /* EEE Tx LPI Count - TLPIC */ +#define E1000_RLPIC 0x414C /* EEE Rx LPI Count - RLPIC */ + +/* OS2BMC Registers */ +#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ +#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */ +#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ +#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ + + + +#endif diff --git a/drivers/net/e1000/base/e1000_vf.c b/drivers/net/e1000/base/e1000_vf.c new file mode 100644 index 00000000..7845b48e --- /dev/null +++ b/drivers/net/e1000/base/e1000_vf.c @@ -0,0 +1,588 @@ +/******************************************************************************* + 
+Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + + +#include "e1000_api.h" + + +STATIC s32 e1000_init_phy_params_vf(struct e1000_hw *hw); +STATIC s32 e1000_init_nvm_params_vf(struct e1000_hw *hw); +STATIC void e1000_release_vf(struct e1000_hw *hw); +STATIC s32 e1000_acquire_vf(struct e1000_hw *hw); +STATIC s32 e1000_setup_link_vf(struct e1000_hw *hw); +STATIC s32 e1000_get_bus_info_pcie_vf(struct e1000_hw *hw); +STATIC s32 e1000_init_mac_params_vf(struct e1000_hw *hw); +STATIC s32 e1000_check_for_link_vf(struct e1000_hw *hw); +STATIC s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +STATIC s32 e1000_init_hw_vf(struct e1000_hw *hw); +STATIC s32 e1000_reset_hw_vf(struct e1000_hw *hw); +STATIC void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, u32); +STATIC int e1000_rar_set_vf(struct e1000_hw *, u8 *, u32); +STATIC s32 e1000_read_mac_addr_vf(struct e1000_hw *); + +/** + * e1000_init_phy_params_vf - Inits PHY params + * @hw: pointer to the HW structure + * + * Doesn't do much - there's no PHY available to the VF. + **/ +STATIC s32 e1000_init_phy_params_vf(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_phy_params_vf"); + hw->phy.type = e1000_phy_vf; + hw->phy.ops.acquire = e1000_acquire_vf; + hw->phy.ops.release = e1000_release_vf; + + return E1000_SUCCESS; +} + +/** + * e1000_init_nvm_params_vf - Inits NVM params + * @hw: pointer to the HW structure + * + * Doesn't do much - there's no NVM available to the VF. 
+ **/ +STATIC s32 e1000_init_nvm_params_vf(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_nvm_params_vf"); + hw->nvm.type = e1000_nvm_none; + hw->nvm.ops.acquire = e1000_acquire_vf; + hw->nvm.ops.release = e1000_release_vf; + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_vf - Inits MAC params + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_init_mac_params_vf(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_init_mac_params_vf"); + + /* Set media type */ + /* + * Virtual functions don't care what they're media type is as they + * have no direct access to the PHY, or the media. That is handled + * by the physical function driver. + */ + hw->phy.media_type = e1000_media_type_unknown; + + /* No ASF features for the VF driver */ + mac->asf_firmware_present = false; + /* ARC subsystem not supported */ + mac->arc_subsystem_valid = false; + /* Disable adaptive IFS mode so the generic funcs don't do anything */ + mac->adaptive_ifs = false; + /* VF's have no MTA Registers - PF feature only */ + mac->mta_reg_count = 128; + /* VF's have no access to RAR entries */ + mac->rar_entry_count = 1; + + /* Function pointers */ + /* link setup */ + mac->ops.setup_link = e1000_setup_link_vf; + /* bus type/speed/width */ + mac->ops.get_bus_info = e1000_get_bus_info_pcie_vf; + /* reset */ + mac->ops.reset_hw = e1000_reset_hw_vf; + /* hw initialization */ + mac->ops.init_hw = e1000_init_hw_vf; + /* check for link */ + mac->ops.check_for_link = e1000_check_for_link_vf; + /* link info */ + mac->ops.get_link_up_info = e1000_get_link_up_info_vf; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf; + /* set mac address */ + mac->ops.rar_set = e1000_rar_set_vf; + /* read mac address */ + mac->ops.read_mac_addr = e1000_read_mac_addr_vf; + + + return E1000_SUCCESS; +} + +/** + * e1000_init_function_pointers_vf - Inits function pointers + * @hw: pointer to the HW structure + **/ +void e1000_init_function_pointers_vf(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_vf"); + + hw->mac.ops.init_params = e1000_init_mac_params_vf; + hw->nvm.ops.init_params = e1000_init_nvm_params_vf; + hw->phy.ops.init_params = e1000_init_phy_params_vf; + hw->mbx.ops.init_params = e1000_init_mbx_params_vf; +} + +/** + * e1000_acquire_vf - Acquire rights to access PHY or NVM. + * @hw: pointer to the HW structure + * + * There is no PHY or NVM so we want all attempts to acquire these to fail. + * In addition, the MAC registers to access PHY/NVM don't exist so we don't + * even want any SW to attempt to use them. + **/ +STATIC s32 e1000_acquire_vf(struct e1000_hw E1000_UNUSEDARG *hw) +{ + UNREFERENCED_1PARAMETER(hw); + return -E1000_ERR_PHY; +} + +/** + * e1000_release_vf - Release PHY or NVM + * @hw: pointer to the HW structure + * + * There is no PHY or NVM so we want all attempts to acquire these to fail. + * In addition, the MAC registers to access PHY/NVM don't exist so we don't + * even want any SW to attempt to use them. + **/ +STATIC void e1000_release_vf(struct e1000_hw E1000_UNUSEDARG *hw) +{ + UNREFERENCED_1PARAMETER(hw); + return; +} + +/** + * e1000_setup_link_vf - Sets up link. + * @hw: pointer to the HW structure + * + * Virtual functions cannot change link. + **/ +STATIC s32 e1000_setup_link_vf(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_setup_link_vf"); + UNREFERENCED_1PARAMETER(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_get_bus_info_pcie_vf - Gets the bus info. 
+ * @hw: pointer to the HW structure + * + * Virtual functions are not really on their own bus. + **/ +STATIC s32 e1000_get_bus_info_pcie_vf(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + + DEBUGFUNC("e1000_get_bus_info_pcie_vf"); + + /* Do not set type PCI-E because we don't want disable master to run */ + bus->type = e1000_bus_type_reserved; + bus->speed = e1000_bus_speed_2500; + + return 0; +} + +/** + * e1000_get_link_up_info_vf - Gets link info. + * @hw: pointer to the HW structure + * @speed: pointer to 16 bit value to store link speed. + * @duplex: pointer to 16 bit value to store duplex. + * + * Since we cannot read the PHY and get accurate link info, we must rely upon + * the status register's data which is often stale and inaccurate. + **/ +STATIC s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 status; + + DEBUGFUNC("e1000_get_link_up_info_vf"); + + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + DEBUGOUT("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + DEBUGOUT("100 Mbs, "); + } else { + *speed = SPEED_10; + DEBUGOUT("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + DEBUGOUT("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + DEBUGOUT("Half Duplex\n"); + } + + return E1000_SUCCESS; +} + +/** + * e1000_reset_hw_vf - Resets the HW + * @hw: pointer to the HW structure + * + * VF's provide a function level reset. This is done using bit 26 of ctrl_reg. + * This is all the reset we can perform on a VF. + **/ +STATIC s32 e1000_reset_hw_vf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 timeout = E1000_VF_INIT_TIMEOUT; + s32 ret_val = -E1000_ERR_MAC_INIT; + u32 ctrl, msgbuf[3]; + u8 *addr = (u8 *)(&msgbuf[1]); + + DEBUGFUNC("e1000_reset_hw_vf"); + + DEBUGOUT("Issuing a function level reset to MAC\n"); + ctrl = E1000_READ_REG(hw, E1000_CTRL); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + + /* we cannot reset while the RSTI / RSTD bits are asserted */ + while (!mbx->ops.check_for_rst(hw, 0) && timeout) { + timeout--; + usec_delay(5); + } + + if (timeout) { + /* mailbox timeout can now become active */ + mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT; + + msgbuf[0] = E1000_VF_RESET; + mbx->ops.write_posted(hw, msgbuf, 1, 0); + + msec_delay(10); + + /* set our "perm_addr" based on info provided by PF */ + ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0); + if (!ret_val) { + if (msgbuf[0] == (E1000_VF_RESET | + E1000_VT_MSGTYPE_ACK)) + memcpy(hw->mac.perm_addr, addr, 6); + else + ret_val = -E1000_ERR_MAC_INIT; + } + } + + return ret_val; +} + +/** + * e1000_init_hw_vf - Inits the HW + * @hw: pointer to the HW structure + * + * Not much to do here except clear the PF Reset indication if there is one. 
+ **/ +STATIC s32 e1000_init_hw_vf(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_hw_vf"); + + /* attempt to set and restore our mac address */ + e1000_rar_set_vf(hw, hw->mac.addr, 0); + + return E1000_SUCCESS; +} + +/** + * e1000_rar_set_vf - set device MAC address + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index receive address array register + **/ +STATIC int e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, + u32 E1000_UNUSEDARG index) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 msgbuf[3]; + u8 *msg_addr = (u8 *)(&msgbuf[1]); + s32 ret_val; + + UNREFERENCED_1PARAMETER(index); + memset(msgbuf, 0, 12); + msgbuf[0] = E1000_VF_SET_MAC_ADDR; + memcpy(msg_addr, addr, 6); + ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0); + + if (!ret_val) + ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0); + + msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; + + /* if nacked the address was rejected, use "perm_addr" */ + if (!ret_val && + (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK))) + e1000_read_mac_addr_vf(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_hash_mc_addr_vf - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. + **/ +STATIC u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + DEBUGFUNC("e1000_hash_mc_addr_generic"); + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* + * The bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +} + +STATIC void e1000_write_msg_read_ack(struct e1000_hw *hw, + u32 *msg, u16 size) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 retmsg[E1000_VFMAILBOX_SIZE]; + s32 retval = mbx->ops.write_posted(hw, msg, size, 0); + + if (!retval) + mbx->ops.read_posted(hw, retmsg, E1000_VFMAILBOX_SIZE, 0); +} + +/** + * e1000_update_mc_addr_list_vf - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates the Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 msgbuf[E1000_VFMAILBOX_SIZE]; + u16 *hash_list = (u16 *)&msgbuf[1]; + u32 hash_value; + u32 i; + + DEBUGFUNC("e1000_update_mc_addr_list_vf"); + + /* Each entry in the list uses 1 16 bit word. We have 30 + * 16 bit words available in our HW msg buffer (minus 1 for the + * msg type). That's 30 hash values if we pack 'em right. If + * there are more than 30 MC addresses to add then punt the + * extras for now and then add code to handle more than 30 later. + * It would be unusual for a server to request that many multi-cast + * addresses except for in large enterprise network environments. 
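+ *
+ * Sketch of the resulting mailbox message, as built by the code below:
+ *
+ *	msgbuf[0]   = E1000_VF_SET_MULTICAST | (mc_addr_count << E1000_VT_MSGINFO_SHIFT)
+ *	msgbuf[1..] = up to 30 hash values, each masked to 12 bits and stored
+ *	              as a 16-bit word (two per 32-bit mailbox word)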
+ */ + + DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count); + + if (mc_addr_count > 30) { + msgbuf[0] |= E1000_VF_SET_MULTICAST_OVERFLOW; + mc_addr_count = 30; + } + + msgbuf[0] = E1000_VF_SET_MULTICAST; + msgbuf[0] |= mc_addr_count << E1000_VT_MSGINFO_SHIFT; + + for (i = 0; i < mc_addr_count; i++) { + hash_value = e1000_hash_mc_addr_vf(hw, mc_addr_list); + DEBUGOUT1("Hash value = 0x%03X\n", hash_value); + hash_list[i] = hash_value & 0x0FFF; + mc_addr_list += ETH_ADDR_LEN; + } + + e1000_write_msg_read_ack(hw, msgbuf, E1000_VFMAILBOX_SIZE); +} + +/** + * e1000_vfta_set_vf - Set/Unset vlan filter table address + * @hw: pointer to the HW structure + * @vid: determines the vfta register and bit to set/unset + * @set: if true then set bit, else clear bit + **/ +void e1000_vfta_set_vf(struct e1000_hw *hw, u16 vid, bool set) +{ + u32 msgbuf[2]; + + msgbuf[0] = E1000_VF_SET_VLAN; + msgbuf[1] = vid; + /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ + if (set) + msgbuf[0] |= E1000_VF_SET_VLAN_ADD; + + e1000_write_msg_read_ack(hw, msgbuf, 2); +} + +/** e1000_rlpml_set_vf - Set the maximum receive packet length + * @hw: pointer to the HW structure + * @max_size: value to assign to max frame size + **/ +void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size) +{ + u32 msgbuf[2]; + + msgbuf[0] = E1000_VF_SET_LPE; + msgbuf[1] = max_size; + + e1000_write_msg_read_ack(hw, msgbuf, 2); +} + +/** + * e1000_promisc_set_vf - Set flags for Unicast or Multicast promisc + * @hw: pointer to the HW structure + * @uni: boolean indicating unicast promisc status + * @multi: boolean indicating multicast promisc status + **/ +s32 e1000_promisc_set_vf(struct e1000_hw *hw, enum e1000_promisc_type type) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 msgbuf = E1000_VF_SET_PROMISC; + s32 ret_val; + + switch (type) { + case e1000_promisc_multicast: + msgbuf |= E1000_VF_SET_PROMISC_MULTICAST; + break; + case e1000_promisc_enabled: + msgbuf |= E1000_VF_SET_PROMISC_MULTICAST; + case e1000_promisc_unicast: + msgbuf |= E1000_VF_SET_PROMISC_UNICAST; + case e1000_promisc_disabled: + break; + default: + return -E1000_ERR_MAC_INIT; + } + + ret_val = mbx->ops.write_posted(hw, &msgbuf, 1, 0); + + if (!ret_val) + ret_val = mbx->ops.read_posted(hw, &msgbuf, 1, 0); + + if (!ret_val && !(msgbuf & E1000_VT_MSGTYPE_ACK)) + ret_val = -E1000_ERR_MAC_INIT; + + return ret_val; +} + +/** + * e1000_read_mac_addr_vf - Read device MAC address + * @hw: pointer to the HW structure + **/ +STATIC s32 e1000_read_mac_addr_vf(struct e1000_hw *hw) +{ + int i; + + for (i = 0; i < ETH_ADDR_LEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return E1000_SUCCESS; +} + +/** + * e1000_check_for_link_vf - Check for link for a virtual interface + * @hw: pointer to the HW structure + * + * Checks to see if the underlying PF is still talking to the VF and + * if it is then it reports the link state to the hardware, otherwise + * it reports link down and returns an error. + **/ +STATIC s32 e1000_check_for_link_vf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = E1000_SUCCESS; + u32 in_msg = 0; + + DEBUGFUNC("e1000_check_for_link_vf"); + + /* + * We only want to run this if there has been a rst asserted. 
+ * in this case that could mean a link change, device reset, + * or a virtual function reset + */ + + /* If we were hit with a reset or timeout drop the link */ + if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout) + mac->get_link_status = true; + + if (!mac->get_link_status) + goto out; + + /* if link status is down no point in checking to see if pf is up */ + if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) + goto out; + + /* if the read failed it could just be a mailbox collision, best wait + * until we are called again and don't report an error */ + if (mbx->ops.read(hw, &in_msg, 1, 0)) + goto out; + + /* if incoming message isn't clear to send we are waiting on response */ + if (!(in_msg & E1000_VT_MSGTYPE_CTS)) { + /* message is not CTS and is NACK we have lost CTS status */ + if (in_msg & E1000_VT_MSGTYPE_NACK) + ret_val = -E1000_ERR_MAC_INIT; + goto out; + } + + /* at this point we know the PF is talking to us, check and see if + * we are still accepting timeout or if we had a timeout failure. + * if we failed then we will need to reinit */ + if (!mbx->timeout) { + ret_val = -E1000_ERR_MAC_INIT; + goto out; + } + + /* if we passed all the tests above then the link is up and we no + * longer need to check for link */ + mac->get_link_status = false; + +out: + return ret_val; +} + diff --git a/drivers/net/e1000/base/e1000_vf.h b/drivers/net/e1000/base/e1000_vf.h new file mode 100644 index 00000000..d6216dec --- /dev/null +++ b/drivers/net/e1000/base/e1000_vf.h @@ -0,0 +1,295 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#ifndef _E1000_VF_H_ +#define _E1000_VF_H_ + +#include "e1000_osdep.h" +#include "e1000_regs.h" +#include "e1000_defines.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82576_VF 0x10CA +#define E1000_DEV_ID_I350_VF 0x1520 + +#define E1000_VF_INIT_TIMEOUT 200 /* Num of retries to clear RSTI */ + +/* Additional Descriptor Control definitions */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ + +/* SRRCTL bit definitions */ +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ + (0x0C00C + ((_n) * 0x40))) +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 + +#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 + +/* Interrupt Defines */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + ((_n) << 2)) +#define E1000_EICS 0x01520 /* Ext. Intr Cause Set -W0 */ +#define E1000_EIMS 0x01524 /* Ext. Intr Mask Set/Read -RW */ +#define E1000_EIMC 0x01528 /* Ext. Intr Mask Clear -WO */ +#define E1000_EIAC 0x0152C /* Ext. Intr Auto Clear -RW */ +#define E1000_EIAM 0x01530 /* Ext. 
Intr Ack Auto Clear Mask -RW */ +#define E1000_IVAR0 0x01700 /* Intr Vector Alloc (array) -RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes -RW */ +#define E1000_IVAR_VALID 0x80 + +/* Receive Descriptor - Advanced */ +union e1000_adv_rx_desc { + struct { + u64 pkt_addr; /* Packet buffer address */ + u64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + u32 data; + struct { + /* RSS type, Packet type */ + u16 pkt_info; + /* Split Header, header buffer len */ + u16 hdr_info; + } hs_rss; + } lo_dword; + union { + u32 rss; /* RSS Hash */ + struct { + u16 ip_id; /* IP id */ + u16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + u32 status_error; /* ext status/error */ + u16 length; /* Packet length */ + u16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 + +/* Transmit Descriptor - Advanced */ +union e1000_adv_tx_desc { + struct { + u64 buffer_addr; /* Address of descriptor's data buf */ + u32 cmd_type_len; + u32 olinfo_status; + } read; + struct { + u64 rsvd; /* Reserved */ + u32 nxtseq_seed; + u32 status; + } wb; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +/* Context descriptors */ +struct e1000_adv_tx_context_desc { + u32 vlan_macip_lens; + u32 seqnum_seed; + u32 type_tucmd_mlhl; + u32 mss_l4len_idx; +}; + +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +enum e1000_mac_type { + e1000_undefined = 0, + e1000_vfadapt, + e1000_vfadapt_i350, + e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ +}; + +struct e1000_vf_stats { + u64 base_gprc; + u64 base_gptc; + u64 base_gorc; + u64 base_gotc; + u64 base_mprc; + u64 base_gotlbc; + u64 base_gptlbc; + u64 base_gorlbc; + u64 base_gprlbc; + + u32 last_gprc; + u32 last_gptc; + u32 last_gorc; + u32 last_gotc; + u32 last_mprc; + u32 last_gotlbc; + u32 last_gptlbc; + u32 last_gorlbc; + u32 last_gprlbc; + + u64 gprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 mprc; + u64 gotlbc; + u64 gptlbc; + u64 gorlbc; + u64 gprlbc; +}; + +#include "e1000_mbx.h" + +struct e1000_mac_operations { + /* Function pointers for the MAC. 
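+	 * For the VF driver they are populated by e1000_init_mac_params_vf();
+	 * e.g. ops.rar_set resolves to e1000_rar_set_vf, which programs the
+	 * address through the PF mailbox, and ops.reset_hw resolves to
+	 * e1000_reset_hw_vf, which issues a function level reset and then
+	 * completes the handshake over the mailbox.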
*/ + s32 (*init_params)(struct e1000_hw *); + s32 (*check_for_link)(struct e1000_hw *); + void (*clear_vfta)(struct e1000_hw *); + s32 (*get_bus_info)(struct e1000_hw *); + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); + void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + s32 (*setup_link)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); + int (*rar_set)(struct e1000_hw *, u8*, u32); + s32 (*read_mac_addr)(struct e1000_hw *); +}; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + u8 addr[6]; + u8 perm_addr[6]; + + enum e1000_mac_type type; + + u16 mta_reg_count; + u16 rar_entry_count; + + bool get_link_status; +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw *hw); + s32 (*read)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write)(struct e1000_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct e1000_hw *, u16); + s32 (*check_for_ack)(struct e1000_hw *, u16); + s32 (*check_for_rst)(struct e1000_hw *, u16); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_vf { + u32 vf_number; + u32 v2p_mailbox; +}; + +struct e1000_hw { + void *back; + + u8 *hw_addr; + u8 *flash_address; + unsigned long io_base; + + struct e1000_mac_info mac; + struct e1000_mbx_info mbx; + + union { + struct e1000_dev_spec_vf vf; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +enum e1000_promisc_type { + e1000_promisc_disabled = 0, /* all promisc modes disabled */ + e1000_promisc_unicast = 1, /* unicast promiscuous enabled */ + e1000_promisc_multicast = 2, /* multicast promiscuous enabled */ + e1000_promisc_enabled = 3, /* both uni and multicast promisc */ + e1000_num_promisc_types +}; + +/* These functions must be implemented by drivers */ +s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +void e1000_vfta_set_vf(struct e1000_hw *, u16, bool); +void e1000_rlpml_set_vf(struct e1000_hw *, u16); +s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type); +#endif /* _E1000_VF_H_ */ diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h new file mode 100644 index 00000000..e8bf8dad --- /dev/null +++ b/drivers/net/e1000/e1000_ethdev.h @@ -0,0 +1,391 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _E1000_ETHDEV_H_ +#define _E1000_ETHDEV_H_ +#include <rte_time.h> + +/* need update link, bit flag */ +#define E1000_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0) +#define E1000_FLAG_MAILBOX (uint32_t)(1 << 1) + +/* + * Defines that were not part of e1000_hw.h as they are not used by the FreeBSD + * driver. + */ +#define E1000_ADVTXD_POPTS_TXSM 0x00000200 /* L4 Checksum offload request */ +#define E1000_ADVTXD_POPTS_IXSM 0x00000100 /* IP Checksum offload request */ +#define E1000_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* L4 Packet TYPE of Reserved */ +#define E1000_RXD_STAT_TMST 0x10000 /* Timestamped Packet indication */ +#define E1000_RXD_ERR_CKSUM_BIT 29 +#define E1000_RXD_ERR_CKSUM_MSK 3 +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Bit shift for l2_len */ +#define E1000_CTRL_EXT_EXTEND_VLAN (1<<26) /* EXTENDED VLAN */ +#define IGB_VFTA_SIZE 128 + +#define IGB_MAX_RX_QUEUE_NUM 8 +#define IGB_MAX_RX_QUEUE_NUM_82576 16 + +#define E1000_SYN_FILTER_ENABLE 0x00000001 /* syn filter enable field */ +#define E1000_SYN_FILTER_QUEUE 0x0000000E /* syn filter queue field */ +#define E1000_SYN_FILTER_QUEUE_SHIFT 1 /* syn filter queue field */ +#define E1000_RFCTL_SYNQFP 0x00080000 /* SYNQFP in RFCTL register */ + +#define E1000_ETQF_ETHERTYPE 0x0000FFFF +#define E1000_ETQF_QUEUE 0x00070000 +#define E1000_ETQF_QUEUE_SHIFT 16 +#define E1000_MAX_ETQF_FILTERS 8 + +#define E1000_IMIR_DSTPORT 0x0000FFFF +#define E1000_IMIR_PRIORITY 0xE0000000 +#define E1000_MAX_TTQF_FILTERS 8 +#define E1000_2TUPLE_MAX_PRI 7 + +#define E1000_MAX_FLEX_FILTERS 8 +#define E1000_MAX_FHFT 4 +#define E1000_MAX_FHFT_EXT 4 +#define E1000_FHFT_SIZE_IN_DWD 64 +#define E1000_MAX_FLEX_FILTER_PRI 7 +#define E1000_MAX_FLEX_FILTER_LEN 128 +#define E1000_MAX_FLEX_FILTER_DWDS \ + (E1000_MAX_FLEX_FILTER_LEN / sizeof(uint32_t)) +#define E1000_FLEX_FILTERS_MASK_SIZE \ + (E1000_MAX_FLEX_FILTER_DWDS / 4) +#define E1000_FHFT_QUEUEING_LEN 0x0000007F +#define E1000_FHFT_QUEUEING_QUEUE 0x00000700 +#define E1000_FHFT_QUEUEING_PRIO 0x00070000 +#define E1000_FHFT_QUEUEING_OFFSET 0xFC +#define E1000_FHFT_QUEUEING_QUEUE_SHIFT 8 +#define E1000_FHFT_QUEUEING_PRIO_SHIFT 16 +#define E1000_WUFC_FLEX_HQ 0x00004000 + +#define E1000_SPQF_SRCPORT 0x0000FFFF + +#define E1000_MAX_FTQF_FILTERS 8 +#define E1000_FTQF_PROTOCOL_MASK 0x000000FF +#define E1000_FTQF_5TUPLE_MASK_SHIFT 28 +#define E1000_FTQF_QUEUE_MASK 0x03ff0000 +#define E1000_FTQF_QUEUE_SHIFT 16 +#define E1000_FTQF_QUEUE_ENABLE 0x00000100 + +#define IGB_RSS_OFFLOAD_ALL ( \ + ETH_RSS_IPV4 | \ + ETH_RSS_NONFRAG_IPV4_TCP | \ + ETH_RSS_NONFRAG_IPV4_UDP | \ + ETH_RSS_IPV6 | \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + ETH_RSS_NONFRAG_IPV6_UDP | \ + ETH_RSS_IPV6_EX | \ + ETH_RSS_IPV6_TCP_EX | \ + ETH_RSS_IPV6_UDP_EX) + +/* + * Maximum 
number of Ring Descriptors. + * + * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring + * desscriptors should meet the following condition: + * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0 + */ +#define E1000_MIN_RING_DESC 32 +#define E1000_MAX_RING_DESC 4096 + +/* + * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be + * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. + * This will also optimize cache line size effect. + * H/W supports up to cache line size 128. + */ +#define E1000_ALIGN 128 + +#define IGB_RXD_ALIGN (E1000_ALIGN / sizeof(union e1000_adv_rx_desc)) +#define IGB_TXD_ALIGN (E1000_ALIGN / sizeof(union e1000_adv_tx_desc)) + +#define EM_RXD_ALIGN (E1000_ALIGN / sizeof(struct e1000_rx_desc)) +#define EM_TXD_ALIGN (E1000_ALIGN / sizeof(struct e1000_data_desc)) + +#define E1000_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET +#define E1000_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET + +/* structure for interrupt relative data */ +struct e1000_interrupt { + uint32_t flags; + uint32_t mask; +}; + +/* local vfta copy */ +struct e1000_vfta { + uint32_t vfta[IGB_VFTA_SIZE]; +}; + +/* + * VF data which used by PF host only + */ +#define E1000_MAX_VF_MC_ENTRIES 30 +struct e1000_vf_info { + uint8_t vf_mac_addresses[ETHER_ADDR_LEN]; + uint16_t vf_mc_hashes[E1000_MAX_VF_MC_ENTRIES]; + uint16_t num_vf_mc_hashes; + uint16_t default_vf_vlan_id; + uint16_t vlans_enabled; + uint16_t pf_qos; + uint16_t vlan_count; + uint16_t tx_rate; +}; + +TAILQ_HEAD(e1000_flex_filter_list, e1000_flex_filter); + +struct e1000_flex_filter_info { + uint16_t len; + uint32_t dwords[E1000_MAX_FLEX_FILTER_DWDS]; /* flex bytes in dword. */ + /* if mask bit is 1b, do not compare corresponding byte in dwords. */ + uint8_t mask[E1000_FLEX_FILTERS_MASK_SIZE]; + uint8_t priority; +}; + +/* Flex filter structure */ +struct e1000_flex_filter { + TAILQ_ENTRY(e1000_flex_filter) entries; + uint16_t index; /* index of flex filter */ + struct e1000_flex_filter_info filter_info; + uint16_t queue; /* rx queue assigned to */ +}; + +TAILQ_HEAD(e1000_5tuple_filter_list, e1000_5tuple_filter); +TAILQ_HEAD(e1000_2tuple_filter_list, e1000_2tuple_filter); + +struct e1000_5tuple_filter_info { + uint32_t dst_ip; + uint32_t src_ip; + uint16_t dst_port; + uint16_t src_port; + uint8_t proto; /* l4 protocol. */ + /* the packet matched above 5tuple and contain any set bit will hit this filter. */ + uint8_t tcp_flags; + uint8_t priority; /* seven levels (001b-111b), 111b is highest, + used when more than one filter matches. */ + uint8_t dst_ip_mask:1, /* if mask is 1b, do not compare dst ip. */ + src_ip_mask:1, /* if mask is 1b, do not compare src ip. */ + dst_port_mask:1, /* if mask is 1b, do not compare dst port. */ + src_port_mask:1, /* if mask is 1b, do not compare src port. */ + proto_mask:1; /* if mask is 1b, do not compare protocol. */ +}; + +struct e1000_2tuple_filter_info { + uint16_t dst_port; + uint8_t proto; /* l4 protocol. */ + /* the packet matched above 2tuple and contain any set bit will hit this filter. */ + uint8_t tcp_flags; + uint8_t priority; /* seven levels (001b-111b), 111b is highest, + used when more than one filter matches. */ + uint8_t dst_ip_mask:1, /* if mask is 1b, do not compare dst ip. */ + src_ip_mask:1, /* if mask is 1b, do not compare src ip. */ + dst_port_mask:1, /* if mask is 1b, do not compare dst port. */ + src_port_mask:1, /* if mask is 1b, do not compare src port. */ + proto_mask:1; /* if mask is 1b, do not compare protocol. 
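+			   Mask bits here follow the same convention as in
+			   e1000_5tuple_filter_info above: a bit set to 1b
+			   excludes that field from the comparison.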
*/ +}; + +/* 5tuple filter structure */ +struct e1000_5tuple_filter { + TAILQ_ENTRY(e1000_5tuple_filter) entries; + uint16_t index; /* the index of 5tuple filter */ + struct e1000_5tuple_filter_info filter_info; + uint16_t queue; /* rx queue assigned to */ +}; + +/* 2tuple filter structure */ +struct e1000_2tuple_filter { + TAILQ_ENTRY(e1000_2tuple_filter) entries; + uint16_t index; /* the index of 2tuple filter */ + struct e1000_2tuple_filter_info filter_info; + uint16_t queue; /* rx queue assigned to */ +}; + +/* + * Structure to store filters' info. + */ +struct e1000_filter_info { + uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */ + /* store used ethertype filters*/ + uint16_t ethertype_filters[E1000_MAX_ETQF_FILTERS]; + uint8_t flex_mask; /* Bit mask for every used flex filter */ + struct e1000_flex_filter_list flex_list; + /* Bit mask for every used 5tuple filter */ + uint8_t fivetuple_mask; + struct e1000_5tuple_filter_list fivetuple_list; + /* Bit mask for every used 2tuple filter */ + uint8_t twotuple_mask; + struct e1000_2tuple_filter_list twotuple_list; +}; + +/* + * Structure to store private data for each driver instance (for each port). + */ +struct e1000_adapter { + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_interrupt intr; + struct e1000_vfta shadow_vfta; + struct e1000_vf_info *vfdata; + struct e1000_filter_info filter; + bool stopped; + struct rte_timecounter systime_tc; + struct rte_timecounter rx_tstamp_tc; + struct rte_timecounter tx_tstamp_tc; +}; + +#define E1000_DEV_PRIVATE(adapter) \ + ((struct e1000_adapter *)adapter) + +#define E1000_DEV_PRIVATE_TO_HW(adapter) \ + (&((struct e1000_adapter *)adapter)->hw) + +#define E1000_DEV_PRIVATE_TO_STATS(adapter) \ + (&((struct e1000_adapter *)adapter)->stats) + +#define E1000_DEV_PRIVATE_TO_INTR(adapter) \ + (&((struct e1000_adapter *)adapter)->intr) + +#define E1000_DEV_PRIVATE_TO_VFTA(adapter) \ + (&((struct e1000_adapter *)adapter)->shadow_vfta) + +#define E1000_DEV_PRIVATE_TO_P_VFDATA(adapter) \ + (&((struct e1000_adapter *)adapter)->vfdata) + +#define E1000_DEV_PRIVATE_TO_FILTER_INFO(adapter) \ + (&((struct e1000_adapter *)adapter)->filter) + +/* + * RX/TX IGB function prototypes + */ +void eth_igb_tx_queue_release(void *txq); +void eth_igb_rx_queue_release(void *rxq); +void igb_dev_clear_queues(struct rte_eth_dev *dev); +void igb_dev_free_queues(struct rte_eth_dev *dev); + +int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool); + +uint32_t eth_igb_rx_queue_count(struct rte_eth_dev *dev, + uint16_t rx_queue_id); + +int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset); + +int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); + +int eth_igb_rx_init(struct rte_eth_dev *dev); + +void eth_igb_tx_init(struct rte_eth_dev *dev); + +uint16_t eth_igb_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +uint16_t eth_igb_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +uint16_t eth_igb_recv_scattered_pkts(void *rxq, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts); + +int eth_igb_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); + +int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); + +int eth_igbvf_rx_init(struct rte_eth_dev 
*dev); + +void eth_igbvf_tx_init(struct rte_eth_dev *dev); + +/* + * misc function prototypes + */ +void igb_pf_host_init(struct rte_eth_dev *eth_dev); + +void igb_pf_mbx_process(struct rte_eth_dev *eth_dev); + +int igb_pf_host_configure(struct rte_eth_dev *eth_dev); + +void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo); + +void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo); + +/* + * RX/TX EM function prototypes + */ +void eth_em_tx_queue_release(void *txq); +void eth_em_rx_queue_release(void *rxq); + +void em_dev_clear_queues(struct rte_eth_dev *dev); +void em_dev_free_queues(struct rte_eth_dev *dev); + +int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool); + +uint32_t eth_em_rx_queue_count(struct rte_eth_dev *dev, + uint16_t rx_queue_id); + +int eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset); + +int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); + +int eth_em_rx_init(struct rte_eth_dev *dev); + +void eth_em_tx_init(struct rte_eth_dev *dev); + +uint16_t eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +void em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo); + +void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo); + +void igb_pf_host_uninit(struct rte_eth_dev *dev); + +#endif /* _E1000_ETHDEV_H_ */ diff --git a/drivers/net/e1000/e1000_logs.h b/drivers/net/e1000/e1000_logs.h new file mode 100644 index 00000000..81e7bf52 --- /dev/null +++ b/drivers/net/e1000/e1000_logs.h @@ -0,0 +1,77 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _E1000_LOGS_H_ +#define _E1000_LOGS_H_ + +#define PMD_INIT_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args) + +#ifdef RTE_LIBRTE_E1000_DEBUG_INIT +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") +#else +#define PMD_INIT_FUNC_TRACE() do { } while (0) +#endif + +#ifdef RTE_LIBRTE_E1000_DEBUG_RX +#define PMD_RX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_E1000_DEBUG_TX +#define PMD_TX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_E1000_DEBUG_TX_FREE +#define PMD_TX_FREE_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_E1000_DEBUG_DRIVER +#define PMD_DRV_LOG_RAW(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args) +#else +#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0) +#endif + +#define PMD_DRV_LOG(level, fmt, args...) \ + PMD_DRV_LOG_RAW(level, fmt "\n", ## args) + +#endif /* _E1000_LOGS_H_ */ diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c new file mode 100644 index 00000000..653be092 --- /dev/null +++ b/drivers/net/e1000/em_ethdev.c @@ -0,0 +1,1780 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/queue.h> +#include <stdio.h> +#include <errno.h> +#include <stdint.h> +#include <stdarg.h> + +#include <rte_common.h> +#include <rte_interrupts.h> +#include <rte_byteorder.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_pci.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_memory.h> +#include <rte_memzone.h> +#include <rte_eal.h> +#include <rte_atomic.h> +#include <rte_malloc.h> +#include <rte_dev.h> + +#include "e1000_logs.h" +#include "base/e1000_api.h" +#include "e1000_ethdev.h" + +#define EM_EIAC 0x000DC + +#define PMD_ROUNDUP(x,y) (((x) + (y) - 1)/(y) * (y)) + + +static int eth_em_configure(struct rte_eth_dev *dev); +static int eth_em_start(struct rte_eth_dev *dev); +static void eth_em_stop(struct rte_eth_dev *dev); +static void eth_em_close(struct rte_eth_dev *dev); +static void eth_em_promiscuous_enable(struct rte_eth_dev *dev); +static void eth_em_promiscuous_disable(struct rte_eth_dev *dev); +static void eth_em_allmulticast_enable(struct rte_eth_dev *dev); +static void eth_em_allmulticast_disable(struct rte_eth_dev *dev); +static int eth_em_link_update(struct rte_eth_dev *dev, + int wait_to_complete); +static void eth_em_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *rte_stats); +static void eth_em_stats_reset(struct rte_eth_dev *dev); +static void eth_em_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static int eth_em_flow_ctrl_get(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +static int eth_em_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +static int eth_em_interrupt_setup(struct rte_eth_dev *dev); +static int eth_em_rxq_interrupt_setup(struct rte_eth_dev *dev); +static int eth_em_interrupt_get_status(struct rte_eth_dev *dev); +static int eth_em_interrupt_action(struct rte_eth_dev *dev); +static void eth_em_interrupt_handler(struct rte_intr_handle *handle, + void *param); + +static int em_hw_init(struct e1000_hw *hw); +static int em_hardware_init(struct e1000_hw *hw); +static void em_hw_control_acquire(struct e1000_hw *hw); +static void em_hw_control_release(struct e1000_hw *hw); +static void em_init_manageability(struct e1000_hw *hw); +static void em_release_manageability(struct e1000_hw *hw); + +static int eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); + +static int eth_em_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, int on); +static void eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask); +static void em_vlan_hw_filter_enable(struct rte_eth_dev *dev); +static void em_vlan_hw_filter_disable(struct rte_eth_dev *dev); +static void em_vlan_hw_strip_enable(struct rte_eth_dev *dev); +static void em_vlan_hw_strip_disable(struct rte_eth_dev *dev); + +/* +static void eth_em_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, int on); +*/ + +static int eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id); +static int eth_em_rx_queue_intr_disable(struct 
rte_eth_dev *dev, uint16_t queue_id); +static void em_lsc_intr_disable(struct e1000_hw *hw); +static void em_rxq_intr_enable(struct e1000_hw *hw); +static void em_rxq_intr_disable(struct e1000_hw *hw); + +static int eth_em_led_on(struct rte_eth_dev *dev); +static int eth_em_led_off(struct rte_eth_dev *dev); + +static int em_get_rx_buffer_size(struct e1000_hw *hw); +static void eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + uint32_t index, uint32_t pool); +static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index); + +static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev, + struct ether_addr *mc_addr_set, + uint32_t nb_mc_addr); + +#define EM_FC_PAUSE_TIME 0x0680 +#define EM_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */ +#define EM_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */ + +static enum e1000_fc_mode em_fc_setting = e1000_fc_full; + +/* + * The set of PCI devices this driver supports + */ +static const struct rte_pci_id pci_id_em_map[] = { + +#define RTE_PCI_DEV_ID_DECL_EM(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, +#include "rte_pci_dev_ids.h" + +{0}, +}; + +static const struct eth_dev_ops eth_em_ops = { + .dev_configure = eth_em_configure, + .dev_start = eth_em_start, + .dev_stop = eth_em_stop, + .dev_close = eth_em_close, + .promiscuous_enable = eth_em_promiscuous_enable, + .promiscuous_disable = eth_em_promiscuous_disable, + .allmulticast_enable = eth_em_allmulticast_enable, + .allmulticast_disable = eth_em_allmulticast_disable, + .link_update = eth_em_link_update, + .stats_get = eth_em_stats_get, + .stats_reset = eth_em_stats_reset, + .dev_infos_get = eth_em_infos_get, + .mtu_set = eth_em_mtu_set, + .vlan_filter_set = eth_em_vlan_filter_set, + .vlan_offload_set = eth_em_vlan_offload_set, + .rx_queue_setup = eth_em_rx_queue_setup, + .rx_queue_release = eth_em_rx_queue_release, + .rx_queue_count = eth_em_rx_queue_count, + .rx_descriptor_done = eth_em_rx_descriptor_done, + .tx_queue_setup = eth_em_tx_queue_setup, + .tx_queue_release = eth_em_tx_queue_release, + .rx_queue_intr_enable = eth_em_rx_queue_intr_enable, + .rx_queue_intr_disable = eth_em_rx_queue_intr_disable, + .dev_led_on = eth_em_led_on, + .dev_led_off = eth_em_led_off, + .flow_ctrl_get = eth_em_flow_ctrl_get, + .flow_ctrl_set = eth_em_flow_ctrl_set, + .mac_addr_add = eth_em_rar_set, + .mac_addr_remove = eth_em_rar_clear, + .set_mc_addr_list = eth_em_set_mc_addr_list, + .rxq_info_get = em_rxq_info_get, + .txq_info_get = em_txq_info_get, +}; + +/** + * Atomically reads the link status information from global + * structure rte_eth_dev. + * + * @param dev + * - Pointer to the structure rte_eth_dev to read from. + * - Pointer to the buffer to be saved with the link status. + * + * @return + * - On success, zero. + * - On failure, negative value. + */ +static inline int +rte_em_dev_atomic_read_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = link; + struct rte_eth_link *src = &(dev->data->dev_link); + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +/** + * Atomically writes the link status information into global + * structure rte_eth_dev. + * + * @param dev + * - Pointer to the structure rte_eth_dev to read from. + * - Pointer to the buffer to be saved with the link status. + * + * @return + * - On success, zero. + * - On failure, negative value. 
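+ *
+ * Both this helper and the read variant above copy struct rte_eth_link as
+ * a single 64-bit word with rte_atomic64_cmpset(), so the link status is
+ * never observed half-updated; -1 is returned only when another writer
+ * modified the word between the initial read of the destination and the
+ * compare-and-set.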
+ */ +static inline int +rte_em_dev_atomic_write_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = &(dev->data->dev_link); + struct rte_eth_link *src = link; + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +/** + * eth_em_dev_is_ich8 - Check for ICH8 device + * @hw: pointer to the HW structure + * + * return TRUE for ICH8, otherwise FALSE + **/ +static bool +eth_em_dev_is_ich8(struct e1000_hw *hw) +{ + DEBUGFUNC("eth_em_dev_is_ich8"); + + switch (hw->device_id) { + case E1000_DEV_ID_PCH_LPT_I217_LM: + case E1000_DEV_ID_PCH_LPT_I217_V: + case E1000_DEV_ID_PCH_LPTLP_I218_LM: + case E1000_DEV_ID_PCH_LPTLP_I218_V: + case E1000_DEV_ID_PCH_I218_V2: + case E1000_DEV_ID_PCH_I218_LM2: + case E1000_DEV_ID_PCH_I218_V3: + case E1000_DEV_ID_PCH_I218_LM3: + return 1; + default: + return 0; + } +} + +static int +eth_em_dev_init(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev; + struct e1000_adapter *adapter = + E1000_DEV_PRIVATE(eth_dev->data->dev_private); + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct e1000_vfta * shadow_vfta = + E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); + + pci_dev = eth_dev->pci_dev; + + eth_dev->dev_ops = ð_em_ops; + eth_dev->rx_pkt_burst = (eth_rx_burst_t)ð_em_recv_pkts; + eth_dev->tx_pkt_burst = (eth_tx_burst_t)ð_em_xmit_pkts; + + /* for secondary processes, we don't initialise any further as primary + * has already done this work. Only check we don't need a different + * RX function */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY){ + if (eth_dev->data->scattered_rx) + eth_dev->rx_pkt_burst = + (eth_rx_burst_t)ð_em_recv_scattered_pkts; + return 0; + } + + rte_eth_copy_pci_info(eth_dev, pci_dev); + + hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; + hw->device_id = pci_dev->id.device_id; + adapter->stopped = 0; + + /* For ICH8 support we'll need to map the flash memory BAR */ + if (eth_em_dev_is_ich8(hw)) + hw->flash_address = (void *)pci_dev->mem_resource[1].addr; + + if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS || + em_hw_init(hw) != 0) { + PMD_INIT_LOG(ERR, "port_id %d vendorID=0x%x deviceID=0x%x: " + "failed to init HW", + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id); + return -ENODEV; + } + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc("e1000", ETHER_ADDR_LEN * + hw->mac.rar_entry_count, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to " + "store MAC addresses", + ETHER_ADDR_LEN * hw->mac.rar_entry_count); + return -ENOMEM; + } + + /* Copy the permanent MAC address */ + ether_addr_copy((struct ether_addr *) hw->mac.addr, + eth_dev->data->mac_addrs); + + /* initialize the vfta */ + memset(shadow_vfta, 0, sizeof(*shadow_vfta)); + + PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x", + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id); + + rte_intr_callback_register(&(pci_dev->intr_handle), + eth_em_interrupt_handler, (void *)eth_dev); + + return 0; +} + +static int +eth_em_dev_uninit(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev; + struct e1000_adapter *adapter = + E1000_DEV_PRIVATE(eth_dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -EPERM; + + pci_dev = eth_dev->pci_dev; + + if (adapter->stopped == 0) + eth_em_close(eth_dev); + + 
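+	/* Tear-down proper: drop the ops and burst-function pointers, free
+	 * the MAC address table and detach the interrupt callback that
+	 * eth_em_dev_init() registered.
+	 */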
eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + + /* disable uio intr before callback unregister */ + rte_intr_disable(&(pci_dev->intr_handle)); + rte_intr_callback_unregister(&(pci_dev->intr_handle), + eth_em_interrupt_handler, (void *)eth_dev); + + return 0; +} + +static struct eth_driver rte_em_pmd = { + .pci_drv = { + .name = "rte_em_pmd", + .id_table = pci_id_em_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | + RTE_PCI_DRV_DETACHABLE, + }, + .eth_dev_init = eth_em_dev_init, + .eth_dev_uninit = eth_em_dev_uninit, + .dev_private_size = sizeof(struct e1000_adapter), +}; + +static int +rte_em_pmd_init(const char *name __rte_unused, const char *params __rte_unused) +{ + rte_eth_driver_register(&rte_em_pmd); + return 0; +} + +static int +em_hw_init(struct e1000_hw *hw) +{ + int diag; + + diag = hw->mac.ops.init_params(hw); + if (diag != 0) { + PMD_INIT_LOG(ERR, "MAC Initialization Error"); + return diag; + } + diag = hw->nvm.ops.init_params(hw); + if (diag != 0) { + PMD_INIT_LOG(ERR, "NVM Initialization Error"); + return diag; + } + diag = hw->phy.ops.init_params(hw); + if (diag != 0) { + PMD_INIT_LOG(ERR, "PHY Initialization Error"); + return diag; + } + (void) e1000_get_bus_info(hw); + + hw->mac.autoneg = 1; + hw->phy.autoneg_wait_to_complete = 0; + hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX; + + e1000_init_script_state_82541(hw, TRUE); + e1000_set_tbi_compatibility_82543(hw, TRUE); + + /* Copper options */ + if (hw->phy.media_type == e1000_media_type_copper) { + hw->phy.mdix = 0; /* AUTO_ALL_MODES */ + hw->phy.disable_polarity_correction = 0; + hw->phy.ms_type = e1000_ms_hw_default; + } + + /* + * Start from a known state, this is important in reading the nvm + * and mac from that. + */ + e1000_reset_hw(hw); + + /* Make sure we have a good EEPROM before we read from it */ + if (e1000_validate_nvm_checksum(hw) < 0) { + /* + * Some PCI-E parts fail the first check due to + * the link being in sleep state, call it again, + * if it fails a second time its a real issue. + */ + diag = e1000_validate_nvm_checksum(hw); + if (diag < 0) { + PMD_INIT_LOG(ERR, "EEPROM checksum invalid"); + goto error; + } + } + + /* Read the permanent MAC address out of the EEPROM */ + diag = e1000_read_mac_addr(hw); + if (diag != 0) { + PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address"); + goto error; + } + + /* Now initialize the hardware */ + diag = em_hardware_init(hw); + if (diag != 0) { + PMD_INIT_LOG(ERR, "Hardware initialization failed"); + goto error; + } + + hw->mac.get_link_status = 1; + + /* Indicate SOL/IDER usage */ + diag = e1000_check_reset_block(hw); + if (diag < 0) { + PMD_INIT_LOG(ERR, "PHY reset is blocked due to " + "SOL/IDER session"); + } + return 0; + +error: + em_hw_control_release(hw); + return diag; +} + +static int +eth_em_configure(struct rte_eth_dev *dev) +{ + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + intr->flags |= E1000_FLAG_NEED_LINK_UPDATE; + PMD_INIT_FUNC_TRACE(); + + return 0; +} + +static void +em_set_pba(struct e1000_hw *hw) +{ + uint32_t pba; + + /* + * Packet Buffer Allocation (PBA) + * Writing PBA sets the receive portion of the buffer + * the remainder is used for the transmit buffer. + * Devices before the 82547 had a Packet Buffer of 64K. + * After the 82547 the buffer was reduced to 40K. 
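+	 * As an illustration of the split coded below: 82571/82572 class
+	 * parts get E1000_PBA_32K (32K Rx / 16K Tx), the 82573 gets
+	 * E1000_PBA_12K (12K Rx / 20K Tx), and devices not listed fall back
+	 * to E1000_PBA_40K (40K Rx / 24K Tx).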
+ */ + switch (hw->mac.type) { + case e1000_82547: + case e1000_82547_rev_2: + /* 82547: Total Packet Buffer is 40K */ + pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */ + break; + case e1000_82571: + case e1000_82572: + case e1000_80003es2lan: + pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ + break; + case e1000_82573: /* 82573: Total Packet Buffer is 32K */ + pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */ + break; + case e1000_82574: + case e1000_82583: + pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */ + break; + case e1000_ich8lan: + pba = E1000_PBA_8K; + break; + case e1000_ich9lan: + case e1000_ich10lan: + pba = E1000_PBA_10K; + break; + case e1000_pchlan: + case e1000_pch2lan: + case e1000_pch_lpt: + pba = E1000_PBA_26K; + break; + default: + pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ + } + + E1000_WRITE_REG(hw, E1000_PBA, pba); +} + +static int +eth_em_start(struct rte_eth_dev *dev) +{ + struct e1000_adapter *adapter = + E1000_DEV_PRIVATE(dev->data->dev_private); + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + int ret, mask; + uint32_t intr_vector = 0; + uint32_t *speeds; + int num_speeds; + bool autoneg; + + PMD_INIT_FUNC_TRACE(); + + eth_em_stop(dev); + + e1000_power_up_phy(hw); + + /* Set default PBA value */ + em_set_pba(hw); + + /* Put the address into the Receive Address Array */ + e1000_rar_set(hw, hw->mac.addr, 0); + + /* + * With the 82571 adapter, RAR[0] may be overwritten + * when the other port is reset, we make a duplicate + * in RAR[14] for that eventuality, this assures + * the interface continues to function. + */ + if (hw->mac.type == e1000_82571) { + e1000_set_laa_state_82571(hw, TRUE); + e1000_rar_set(hw, hw->mac.addr, E1000_RAR_ENTRIES - 1); + } + + /* Initialize the hardware */ + if (em_hardware_init(hw)) { + PMD_INIT_LOG(ERR, "Unable to initialize the hardware"); + return -EIO; + } + + E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN); + + /* Configure for OS presence */ + em_init_manageability(hw); + + if (dev->data->dev_conf.intr_conf.rxq != 0) { + intr_vector = dev->data->nb_rx_queues; + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -1; + } + + if (rte_intr_dp_is_en(intr_handle)) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), 0); + if (intr_handle->intr_vec == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec\n", dev->data->nb_rx_queues); + return -ENOMEM; + } + + /* enable rx interrupt */ + em_rxq_intr_enable(hw); + } + + eth_em_tx_init(dev); + + ret = eth_em_rx_init(dev); + if (ret) { + PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); + em_dev_clear_queues(dev); + return ret; + } + + e1000_clear_hw_cntrs_base_generic(hw); + + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \ + ETH_VLAN_EXTEND_MASK; + eth_em_vlan_offload_set(dev, mask); + + /* Set Interrupt Throttling Rate to maximum allowed value. 
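+	   UINT16_MAX is simply the largest value that fits the 16-bit
+	   throttling interval, i.e. interrupts are throttled as much as the
+	   register allows.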
*/ + E1000_WRITE_REG(hw, E1000_ITR, UINT16_MAX); + + /* Setup link speed and duplex */ + speeds = &dev->data->dev_conf.link_speeds; + if (*speeds == ETH_LINK_SPEED_AUTONEG) { + hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX; + } else { + num_speeds = 0; + autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0; + + /* Reset */ + hw->phy.autoneg_advertised = 0; + + if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M | + ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M | + ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) { + num_speeds = -1; + goto error_invalid_config; + } + if (*speeds & ETH_LINK_SPEED_10M_HD) { + hw->phy.autoneg_advertised |= ADVERTISE_10_HALF; + num_speeds++; + } + if (*speeds & ETH_LINK_SPEED_10M) { + hw->phy.autoneg_advertised |= ADVERTISE_10_FULL; + num_speeds++; + } + if (*speeds & ETH_LINK_SPEED_100M_HD) { + hw->phy.autoneg_advertised |= ADVERTISE_100_HALF; + num_speeds++; + } + if (*speeds & ETH_LINK_SPEED_100M) { + hw->phy.autoneg_advertised |= ADVERTISE_100_FULL; + num_speeds++; + } + if (*speeds & ETH_LINK_SPEED_1G) { + hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL; + num_speeds++; + } + if (num_speeds == 0 || (!autoneg && (num_speeds > 1))) + goto error_invalid_config; + } + + e1000_setup_link(hw); + + if (rte_intr_allow_others(intr_handle)) { + /* check if lsc interrupt is enabled */ + if (dev->data->dev_conf.intr_conf.lsc != 0) { + ret = eth_em_interrupt_setup(dev); + if (ret) { + PMD_INIT_LOG(ERR, "Unable to setup interrupts"); + em_dev_clear_queues(dev); + return ret; + } + } + } else { + rte_intr_callback_unregister(intr_handle, + eth_em_interrupt_handler, + (void *)dev); + if (dev->data->dev_conf.intr_conf.lsc != 0) + PMD_INIT_LOG(INFO, "lsc won't enable because of" + " no intr multiplex\n"); + } + /* check if rxq interrupt is enabled */ + if (dev->data->dev_conf.intr_conf.rxq != 0) + eth_em_rxq_interrupt_setup(dev); + + rte_intr_enable(intr_handle); + + adapter->stopped = 0; + + PMD_INIT_LOG(DEBUG, "<<"); + + return 0; + +error_invalid_config: + PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u", + dev->data->dev_conf.link_speeds, dev->data->port_id); + em_dev_clear_queues(dev); + return -EINVAL; +} + +/********************************************************************* + * + * This routine disables all traffic on the adapter by issuing a + * global reset on the MAC. + * + **********************************************************************/ +static void +eth_em_stop(struct rte_eth_dev *dev) +{ + struct rte_eth_link link; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + + em_rxq_intr_disable(hw); + em_lsc_intr_disable(hw); + + e1000_reset_hw(hw); + if (hw->mac.type >= e1000_82544) + E1000_WRITE_REG(hw, E1000_WUC, 0); + + /* Power down the phy. 
Needed to make the link go down */ + e1000_power_down_phy(hw); + + em_dev_clear_queues(dev); + + /* clear the recorded link status */ + memset(&link, 0, sizeof(link)); + rte_em_dev_atomic_write_link_status(dev, &link); + + if (!rte_intr_allow_others(intr_handle)) + /* resume to the default handler */ + rte_intr_callback_register(intr_handle, + eth_em_interrupt_handler, + (void *)dev); + + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec != NULL) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } +} + +static void +eth_em_close(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_adapter *adapter = + E1000_DEV_PRIVATE(dev->data->dev_private); + + eth_em_stop(dev); + adapter->stopped = 1; + em_dev_free_queues(dev); + e1000_phy_hw_reset(hw); + em_release_manageability(hw); + em_hw_control_release(hw); +} + +static int +em_get_rx_buffer_size(struct e1000_hw *hw) +{ + uint32_t rx_buf_size; + + rx_buf_size = ((E1000_READ_REG(hw, E1000_PBA) & UINT16_MAX) << 10); + return rx_buf_size; +} + +/********************************************************************* + * + * Initialize the hardware + * + **********************************************************************/ +static int +em_hardware_init(struct e1000_hw *hw) +{ + uint32_t rx_buf_size; + int diag; + + /* Issue a global reset */ + e1000_reset_hw(hw); + + /* Let the firmware know the OS is in control */ + em_hw_control_acquire(hw); + + /* + * These parameters control the automatic generation (Tx) and + * response (Rx) to Ethernet PAUSE frames. + * - High water mark should allow for at least two standard size (1518) + * frames to be received after sending an XOFF. + * - Low water mark works best when it is very near the high water mark. + * This allows the receiver to restart by sending XON when it has + * drained a bit. Here we use an arbitrary value of 1500 which will + * restart after one full frame is pulled from the buffer. There + * could be several smaller frames in the buffer and if so they will + * not trigger the XON until their total number reduces the buffer + * by 1500. + * - The pause time is fairly large at 1000 x 512ns = 512 usec. 
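+	 *   As a worked example (assuming a 32 KB receive packet buffer, so
+	 *   em_get_rx_buffer_size() returns 32768): high_water = 32768 -
+	 *   PMD_ROUNDUP(1518 * 2, 1024) = 32768 - 3072 = 29696 bytes, and
+	 *   low_water = 29696 - 1500 = 28196 bytes.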
+ */ + rx_buf_size = em_get_rx_buffer_size(hw); + + hw->fc.high_water = rx_buf_size - PMD_ROUNDUP(ETHER_MAX_LEN * 2, 1024); + hw->fc.low_water = hw->fc.high_water - 1500; + + if (hw->mac.type == e1000_80003es2lan) + hw->fc.pause_time = UINT16_MAX; + else + hw->fc.pause_time = EM_FC_PAUSE_TIME; + + hw->fc.send_xon = 1; + + /* Set Flow control, use the tunable location if sane */ + if (em_fc_setting <= e1000_fc_full) + hw->fc.requested_mode = em_fc_setting; + else + hw->fc.requested_mode = e1000_fc_none; + + /* Workaround: no TX flow ctrl for PCH */ + if (hw->mac.type == e1000_pchlan) + hw->fc.requested_mode = e1000_fc_rx_pause; + + /* Override - settings for PCH2LAN, ya its magic :) */ + if (hw->mac.type == e1000_pch2lan) { + hw->fc.high_water = 0x5C20; + hw->fc.low_water = 0x5048; + hw->fc.pause_time = 0x0650; + hw->fc.refresh_time = 0x0400; + } else if (hw->mac.type == e1000_pch_lpt) { + hw->fc.requested_mode = e1000_fc_full; + } + + diag = e1000_init_hw(hw); + if (diag < 0) + return diag; + e1000_check_for_link(hw); + return 0; +} + +/* This function is based on em_update_stats_counters() in e1000/if_em.c */ +static void +eth_em_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_hw_stats *stats = + E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + int pause_frames; + + if(hw->phy.media_type == e1000_media_type_copper || + (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { + stats->symerrs += E1000_READ_REG(hw,E1000_SYMERRS); + stats->sec += E1000_READ_REG(hw, E1000_SEC); + } + + stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); + stats->mpc += E1000_READ_REG(hw, E1000_MPC); + stats->scc += E1000_READ_REG(hw, E1000_SCC); + stats->ecol += E1000_READ_REG(hw, E1000_ECOL); + + stats->mcc += E1000_READ_REG(hw, E1000_MCC); + stats->latecol += E1000_READ_REG(hw, E1000_LATECOL); + stats->colc += E1000_READ_REG(hw, E1000_COLC); + stats->dc += E1000_READ_REG(hw, E1000_DC); + stats->rlec += E1000_READ_REG(hw, E1000_RLEC); + stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC); + stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC); + + /* + * For watchdog management we need to know if we have been + * paused during the last interval, so capture that here. + */ + pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC); + stats->xoffrxc += pause_frames; + stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); + stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); + stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); + stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); + stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); + stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); + stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); + stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); + stats->gprc += E1000_READ_REG(hw, E1000_GPRC); + stats->bprc += E1000_READ_REG(hw, E1000_BPRC); + stats->mprc += E1000_READ_REG(hw, E1000_MPRC); + stats->gptc += E1000_READ_REG(hw, E1000_GPTC); + + /* + * For the 64-bit byte counters the low dword must be read first. + * Both registers clear on the read of the high dword. 
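+ * Reading the high dword first would therefore clear the pair before
+ * the low half had been captured, which is why GORCL/GOTCL are read
+ * below before OR-ing in (uint64_t)GORCH/GOTCH << 32.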
+ */ + + stats->gorc += E1000_READ_REG(hw, E1000_GORCL); + stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32); + stats->gotc += E1000_READ_REG(hw, E1000_GOTCL); + stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32); + + stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); + stats->ruc += E1000_READ_REG(hw, E1000_RUC); + stats->rfc += E1000_READ_REG(hw, E1000_RFC); + stats->roc += E1000_READ_REG(hw, E1000_ROC); + stats->rjc += E1000_READ_REG(hw, E1000_RJC); + + stats->tor += E1000_READ_REG(hw, E1000_TORH); + stats->tot += E1000_READ_REG(hw, E1000_TOTH); + + stats->tpr += E1000_READ_REG(hw, E1000_TPR); + stats->tpt += E1000_READ_REG(hw, E1000_TPT); + stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); + stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); + stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); + stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); + stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); + stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); + stats->mptc += E1000_READ_REG(hw, E1000_MPTC); + stats->bptc += E1000_READ_REG(hw, E1000_BPTC); + + /* Interrupt Counts */ + + if (hw->mac.type >= e1000_82571) { + stats->iac += E1000_READ_REG(hw, E1000_IAC); + stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); + stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); + stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); + stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); + stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); + stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); + stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); + stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); + } + + if (hw->mac.type >= e1000_82543) { + stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); + stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); + stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); + stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); + stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); + stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); + } + + if (rte_stats == NULL) + return; + + /* Rx Errors */ + rte_stats->imissed = stats->mpc; + rte_stats->ierrors = stats->crcerrs + + stats->rlec + stats->ruc + stats->roc + + stats->rxerrc + stats->algnerrc + stats->cexterr; + + /* Tx Errors */ + rte_stats->oerrors = stats->ecol + stats->latecol; + + rte_stats->ipackets = stats->gprc; + rte_stats->opackets = stats->gptc; + rte_stats->ibytes = stats->gorc; + rte_stats->obytes = stats->gotc; +} + +static void +eth_em_stats_reset(struct rte_eth_dev *dev) +{ + struct e1000_hw_stats *hw_stats = + E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + /* HW registers are cleared on read */ + eth_em_stats_get(dev, NULL); + + /* Reset software totals */ + memset(hw_stats, 0, sizeof(*hw_stats)); +} + +static int +eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + em_rxq_intr_enable(hw); + rte_intr_enable(&dev->pci_dev->intr_handle); + + return 0; +} + +static int +eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + em_rxq_intr_disable(hw); + + return 0; +} + +static uint32_t +em_get_max_pktlen(const struct e1000_hw *hw) +{ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + case e1000_ich9lan: + case e1000_ich10lan: + case e1000_pch2lan: + case e1000_pch_lpt: + case e1000_82574: + case e1000_80003es2lan: /* 9K Jumbo Frame size */ 
+ case e1000_82583: + return 0x2412; + case e1000_pchlan: + return 0x1000; + /* Adapters that do not support jumbo frames */ + case e1000_ich8lan: + return ETHER_MAX_LEN; + default: + return MAX_JUMBO_FRAME_SIZE; + } +} + +static void +eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ + dev_info->max_rx_pktlen = em_get_max_pktlen(hw); + dev_info->max_mac_addrs = hw->mac.rar_entry_count; + + /* + * Starting with 631xESB hw supports 2 TX/RX queues per port. + * Unfortunatelly, all these nics have just one TX context. + * So we have few choises for TX: + * - Use just one TX queue. + * - Allow cksum offload only for one TX queue. + * - Don't allow TX cksum offload at all. + * For now, option #1 was chosen. + * To use second RX queue we have to use extended RX descriptor + * (Multiple Receive Queues are mutually exclusive with UDP + * fragmentation and are not supported when a legacy receive + * descriptor format is used). + * Which means separate RX routinies - as legacy nics (82540, 82545) + * don't support extended RXD. + * To avoid it we support just one RX queue for now (no RSS). + */ + + dev_info->max_rx_queues = 1; + dev_info->max_tx_queues = 1; + + dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = E1000_MAX_RING_DESC, + .nb_min = E1000_MIN_RING_DESC, + .nb_align = EM_RXD_ALIGN, + }; + + dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = E1000_MAX_RING_DESC, + .nb_min = E1000_MIN_RING_DESC, + .nb_align = EM_TXD_ALIGN, + }; + + dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M | + ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M | + ETH_LINK_SPEED_1G; +} + +/* return 0 means link status changed, -1 means not changed */ +static int +eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_link link, old; + int link_check, count; + + link_check = 0; + hw->mac.get_link_status = 1; + + /* possible wait-to-complete in up to 9 seconds */ + for (count = 0; count < EM_LINK_UPDATE_CHECK_TIMEOUT; count ++) { + /* Read the real link status */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + /* Do the work to read phy */ + e1000_check_for_link(hw); + link_check = !hw->mac.get_link_status; + break; + + case e1000_media_type_fiber: + e1000_check_for_link(hw); + link_check = (E1000_READ_REG(hw, E1000_STATUS) & + E1000_STATUS_LU); + break; + + case e1000_media_type_internal_serdes: + e1000_check_for_link(hw); + link_check = hw->mac.serdes_has_link; + break; + + default: + break; + } + if (link_check || wait_to_complete == 0) + break; + rte_delay_ms(EM_LINK_UPDATE_CHECK_INTERVAL); + } + memset(&link, 0, sizeof(link)); + rte_em_dev_atomic_read_link_status(dev, &link); + old = link; + + /* Now we check if a transition has happened */ + if (link_check && (link.link_status == ETH_LINK_DOWN)) { + uint16_t duplex, speed; + hw->mac.ops.get_link_up_info(hw, &speed, &duplex); + link.link_duplex = (duplex == FULL_DUPLEX) ? 
+ ETH_LINK_FULL_DUPLEX : + ETH_LINK_HALF_DUPLEX; + link.link_speed = speed; + link.link_status = ETH_LINK_UP; + link.link_autoneg = !(dev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_FIXED); + } else if (!link_check && (link.link_status == ETH_LINK_UP)) { + link.link_speed = 0; + link.link_duplex = ETH_LINK_HALF_DUPLEX; + link.link_status = ETH_LINK_DOWN; + link.link_autoneg = ETH_LINK_SPEED_FIXED; + } + rte_em_dev_atomic_write_link_status(dev, &link); + + /* not changed */ + if (old.link_status == link.link_status) + return -1; + + /* changed */ + return 0; +} + +/* + * em_hw_control_acquire sets {CTRL_EXT|FWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means + * that the driver is loaded. For AMT version type f/w + * this means that the network i/f is open. + */ +static void +em_hw_control_acquire(struct e1000_hw *hw) +{ + uint32_t ctrl_ext, swsm; + + /* Let firmware know the driver has taken over */ + if (hw->mac.type == e1000_82573) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD); + + } else { + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, + ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); + } +} + +/* + * em_hw_control_release resets {CTRL_EXTT|FWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. For AMT versions of the + * f/w this means that the network i/f is closed. + */ +static void +em_hw_control_release(struct e1000_hw *hw) +{ + uint32_t ctrl_ext, swsm; + + /* Let firmware taken over control of h/w */ + if (hw->mac.type == e1000_82573) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD); + } else { + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); + } +} + +/* + * Bit of a misnomer, what this really means is + * to enable OS management of the system... aka + * to disable special hardware management features. + */ +static void +em_init_manageability(struct e1000_hw *hw) +{ + if (e1000_enable_mng_pass_thru(hw)) { + uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H); + uint32_t manc = E1000_READ_REG(hw, E1000_MANC); + + /* disable hardware interception of ARP */ + manc &= ~(E1000_MANC_ARP_EN); + + /* enable receiving management packets to the host */ + manc |= E1000_MANC_EN_MNG2HOST; + manc2h |= 1 << 5; /* Mng Port 623 */ + manc2h |= 1 << 6; /* Mng Port 664 */ + E1000_WRITE_REG(hw, E1000_MANC2H, manc2h); + E1000_WRITE_REG(hw, E1000_MANC, manc); + } +} + +/* + * Give control back to hardware management + * controller if there is one. 
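+ * This undoes em_init_manageability(): ARP interception is handed
+ * back to the firmware (E1000_MANC_ARP_EN is set again) and
+ * forwarding of management packets to the host
+ * (E1000_MANC_EN_MNG2HOST) is switched off.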
+ */ +static void +em_release_manageability(struct e1000_hw *hw) +{ + uint32_t manc; + + if (e1000_enable_mng_pass_thru(hw)) { + manc = E1000_READ_REG(hw, E1000_MANC); + + /* re-enable hardware interception of ARP */ + manc |= E1000_MANC_ARP_EN; + manc &= ~E1000_MANC_EN_MNG2HOST; + + E1000_WRITE_REG(hw, E1000_MANC, manc); + } +} + +static void +eth_em_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); +} + +static void +eth_em_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_SBP); + if (dev->data->all_multicast == 1) + rctl |= E1000_RCTL_MPE; + else + rctl &= (~E1000_RCTL_MPE); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); +} + +static void +eth_em_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl |= E1000_RCTL_MPE; + E1000_WRITE_REG(hw, E1000_RCTL, rctl); +} + +static void +eth_em_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + if (dev->data->promiscuous == 1) + return; /* must remain in all_multicast mode */ + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl &= (~E1000_RCTL_MPE); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); +} + +static int +eth_em_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vfta * shadow_vfta = + E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + uint32_t vfta; + uint32_t vid_idx; + uint32_t vid_bit; + + vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) & + E1000_VFTA_ENTRY_MASK); + vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK)); + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx); + if (on) + vfta |= vid_bit; + else + vfta &= ~vid_bit; + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta); + + /* update local VFTA copy */ + shadow_vfta->vfta[vid_idx] = vfta; + + return 0; +} + +static void +em_vlan_hw_filter_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + + /* Filter Table Disable */ + reg = E1000_READ_REG(hw, E1000_RCTL); + reg &= ~E1000_RCTL_CFIEN; + reg &= ~E1000_RCTL_VFE; + E1000_WRITE_REG(hw, E1000_RCTL, reg); +} + +static void +em_vlan_hw_filter_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vfta * shadow_vfta = + E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + uint32_t reg; + int i; + + /* Filter Table Enable, CFI not used for packet acceptance */ + reg = E1000_READ_REG(hw, E1000_RCTL); + reg &= ~E1000_RCTL_CFIEN; + reg |= E1000_RCTL_VFE; + E1000_WRITE_REG(hw, E1000_RCTL, reg); + + /* restore vfta from local copy */ + for (i = 0; i < IGB_VFTA_SIZE; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]); +} + +static void +em_vlan_hw_strip_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + + /* VLAN Mode Disable */ + reg = E1000_READ_REG(hw, E1000_CTRL); 
+ reg &= ~E1000_CTRL_VME; + E1000_WRITE_REG(hw, E1000_CTRL, reg); + +} + +static void +em_vlan_hw_strip_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + + /* VLAN Mode Enable */ + reg = E1000_READ_REG(hw, E1000_CTRL); + reg |= E1000_CTRL_VME; + E1000_WRITE_REG(hw, E1000_CTRL, reg); +} + +static void +eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + if(mask & ETH_VLAN_STRIP_MASK){ + if (dev->data->dev_conf.rxmode.hw_vlan_strip) + em_vlan_hw_strip_enable(dev); + else + em_vlan_hw_strip_disable(dev); + } + + if(mask & ETH_VLAN_FILTER_MASK){ + if (dev->data->dev_conf.rxmode.hw_vlan_filter) + em_vlan_hw_filter_enable(dev); + else + em_vlan_hw_filter_disable(dev); + } +} + +/* + * It enables the interrupt mask and then enable the interrupt. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +eth_em_interrupt_setup(struct rte_eth_dev *dev) +{ + uint32_t regval; + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* clear interrupt */ + E1000_READ_REG(hw, E1000_ICR); + regval = E1000_READ_REG(hw, E1000_IMS); + E1000_WRITE_REG(hw, E1000_IMS, regval | E1000_ICR_LSC); + return 0; +} + +/* + * It clears the interrupt causes and enables the interrupt. + * It will be called once only during nic initialized. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +eth_em_rxq_interrupt_setup(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + E1000_READ_REG(hw, E1000_ICR); + em_rxq_intr_enable(hw); + return 0; +} + +/* + * It enable receive packet interrupt. + * @param hw + * Pointer to struct e1000_hw + * + * @return + */ +static void +em_rxq_intr_enable(struct e1000_hw *hw) +{ + E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_RXT0); + E1000_WRITE_FLUSH(hw); +} + +/* + * It disabled lsc interrupt. + * @param hw + * Pointer to struct e1000_hw + * + * @return + */ +static void +em_lsc_intr_disable(struct e1000_hw *hw) +{ + E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_LSC); + E1000_WRITE_FLUSH(hw); +} + +/* + * It disabled receive packet interrupt. + * @param hw + * Pointer to struct e1000_hw + * + * @return + */ +static void +em_rxq_intr_disable(struct e1000_hw *hw) +{ + E1000_READ_REG(hw, E1000_ICR); + E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0); + E1000_WRITE_FLUSH(hw); +} + +/* + * It reads ICR and gets interrupt causes, check it and set a bit flag + * to update link status. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +eth_em_interrupt_get_status(struct rte_eth_dev *dev) +{ + uint32_t icr; + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + /* read-on-clear nic registers here */ + icr = E1000_READ_REG(hw, E1000_ICR); + if (icr & E1000_ICR_LSC) { + intr->flags |= E1000_FLAG_NEED_LINK_UPDATE; + } + + return 0; +} + +/* + * It executes link_update after knowing an interrupt is prsent. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. 
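+ * (Concretely, -1 is returned when the interrupt did not request a
+ * link update; otherwise 0 is returned and, if the link state did
+ * change, TCTL.EN/RCTL.EN are toggled below to match the new state.)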
+ */ +static int +eth_em_interrupt_action(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + uint32_t tctl, rctl; + struct rte_eth_link link; + int ret; + + if (!(intr->flags & E1000_FLAG_NEED_LINK_UPDATE)) + return -1; + + intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE; + rte_intr_enable(&(dev->pci_dev->intr_handle)); + + /* set get_link_status to check register later */ + hw->mac.get_link_status = 1; + ret = eth_em_link_update(dev, 0); + + /* check if link has changed */ + if (ret < 0) + return 0; + + memset(&link, 0, sizeof(link)); + rte_em_dev_atomic_read_link_status(dev, &link); + if (link.link_status) { + PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s", + dev->data->port_id, (unsigned)link.link_speed, + link.link_duplex == ETH_LINK_FULL_DUPLEX ? + "full-duplex" : "half-duplex"); + } else { + PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id); + } + PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d", + dev->pci_dev->addr.domain, dev->pci_dev->addr.bus, + dev->pci_dev->addr.devid, dev->pci_dev->addr.function); + + tctl = E1000_READ_REG(hw, E1000_TCTL); + rctl = E1000_READ_REG(hw, E1000_RCTL); + if (link.link_status) { + /* enable Tx/Rx */ + tctl |= E1000_TCTL_EN; + rctl |= E1000_RCTL_EN; + } else { + /* disable Tx/Rx */ + tctl &= ~E1000_TCTL_EN; + rctl &= ~E1000_RCTL_EN; + } + E1000_WRITE_REG(hw, E1000_TCTL, tctl); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + E1000_WRITE_FLUSH(hw); + + return 0; +} + +/** + * Interrupt handler which shall be registered at first. + * + * @param handle + * Pointer to interrupt handle. + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. + * + * @return + * void + */ +static void +eth_em_interrupt_handler(__rte_unused struct rte_intr_handle *handle, + void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + eth_em_interrupt_get_status(dev); + eth_em_interrupt_action(dev); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); +} + +static int +eth_em_led_on(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP; +} + +static int +eth_em_led_off(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP; +} + +static int +eth_em_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct e1000_hw *hw; + uint32_t ctrl; + int tx_pause; + int rx_pause; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + fc_conf->pause_time = hw->fc.pause_time; + fc_conf->high_water = hw->fc.high_water; + fc_conf->low_water = hw->fc.low_water; + fc_conf->send_xon = hw->fc.send_xon; + fc_conf->autoneg = hw->mac.autoneg; + + /* + * Return rx_pause and tx_pause status according to actual setting of + * the TFCE and RFCE bits in the CTRL register. 
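+ * The mapping applied below is: RFCE and TFCE -> RTE_FC_FULL,
+ * RFCE only -> RTE_FC_RX_PAUSE, TFCE only -> RTE_FC_TX_PAUSE,
+ * neither -> RTE_FC_NONE.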
+ */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + if (ctrl & E1000_CTRL_TFCE) + tx_pause = 1; + else + tx_pause = 0; + + if (ctrl & E1000_CTRL_RFCE) + rx_pause = 1; + else + rx_pause = 0; + + if (rx_pause && tx_pause) + fc_conf->mode = RTE_FC_FULL; + else if (rx_pause) + fc_conf->mode = RTE_FC_RX_PAUSE; + else if (tx_pause) + fc_conf->mode = RTE_FC_TX_PAUSE; + else + fc_conf->mode = RTE_FC_NONE; + + return 0; +} + +static int +eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct e1000_hw *hw; + int err; + enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = { + e1000_fc_none, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full + }; + uint32_t rx_buf_size; + uint32_t max_high_water; + uint32_t rctl; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (fc_conf->autoneg != hw->mac.autoneg) + return -ENOTSUP; + rx_buf_size = em_get_rx_buffer_size(hw); + PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); + + /* At least reserve one Ethernet frame for watermark */ + max_high_water = rx_buf_size - ETHER_MAX_LEN; + if ((fc_conf->high_water > max_high_water) || + (fc_conf->high_water < fc_conf->low_water)) { + PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value"); + PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water); + return -EINVAL; + } + + hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode]; + hw->fc.pause_time = fc_conf->pause_time; + hw->fc.high_water = fc_conf->high_water; + hw->fc.low_water = fc_conf->low_water; + hw->fc.send_xon = fc_conf->send_xon; + + err = e1000_setup_link_generic(hw); + if (err == E1000_SUCCESS) { + + /* check if we want to forward MAC frames - driver doesn't have native + * capability to do that, so we'll write the registers ourselves */ + + rctl = E1000_READ_REG(hw, E1000_RCTL); + + /* set or clear MFLCN.PMCF bit depending on configuration */ + if (fc_conf->mac_ctrl_frame_fwd != 0) + rctl |= E1000_RCTL_PMCF; + else + rctl &= ~E1000_RCTL_PMCF; + + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + E1000_WRITE_FLUSH(hw); + + return 0; + } + + PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err); + return -EIO; +} + +static void +eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + uint32_t index, __rte_unused uint32_t pool) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + e1000_rar_set(hw, mac_addr->addr_bytes, index); +} + +static void +eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index) +{ + uint8_t addr[ETHER_ADDR_LEN]; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + memset(addr, 0, sizeof(addr)); + + e1000_rar_set(hw, addr, index); +} + +static int +eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct rte_eth_dev_info dev_info; + struct e1000_hw *hw; + uint32_t frame_size; + uint32_t rctl; + + eth_em_infos_get(dev, &dev_info); + frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE; + + /* check that mtu is within the allowed range */ + if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) + return -EINVAL; + + /* refuse mtu that requires the support of scattered packets when this + * feature has not been enabled before. 
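+ * For illustration: frame_size = mtu + 14 (Ethernet header) + 4 (CRC)
+ * + 4 (VLAN tag), so a standard 1500-byte MTU yields 1522 bytes; that
+ * frame must also fit in a single mbuf data room
+ * (min_rx_buf_size - RTE_PKTMBUF_HEADROOM) unless scattered Rx was
+ * already enabled.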
*/ + if (!dev->data->scattered_rx && + frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) + return -EINVAL; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + rctl = E1000_READ_REG(hw, E1000_RCTL); + + /* switch to jumbo mode if needed */ + if (frame_size > ETHER_MAX_LEN) { + dev->data->dev_conf.rxmode.jumbo_frame = 1; + rctl |= E1000_RCTL_LPE; + } else { + dev->data->dev_conf.rxmode.jumbo_frame = 0; + rctl &= ~E1000_RCTL_LPE; + } + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + /* update max frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + return 0; +} + +static int +eth_em_set_mc_addr_list(struct rte_eth_dev *dev, + struct ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +{ + struct e1000_hw *hw; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr); + return 0; +} + +struct rte_driver em_pmd_drv = { + .type = PMD_PDEV, + .init = rte_em_pmd_init, +}; + +PMD_REGISTER_DRIVER(em_pmd_drv); diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c new file mode 100644 index 00000000..441ccad8 --- /dev/null +++ b/drivers/net/e1000/em_rxtx.c @@ -0,0 +1,1861 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <sys/queue.h> + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <errno.h> +#include <stdint.h> +#include <stdarg.h> +#include <inttypes.h> + +#include <rte_interrupts.h> +#include <rte_byteorder.h> +#include <rte_common.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_pci.h> +#include <rte_memory.h> +#include <rte_memcpy.h> +#include <rte_memzone.h> +#include <rte_launch.h> +#include <rte_eal.h> +#include <rte_per_lcore.h> +#include <rte_lcore.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_ring.h> +#include <rte_mempool.h> +#include <rte_malloc.h> +#include <rte_mbuf.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_prefetch.h> +#include <rte_ip.h> +#include <rte_udp.h> +#include <rte_tcp.h> +#include <rte_sctp.h> +#include <rte_string_fns.h> + +#include "e1000_logs.h" +#include "base/e1000_api.h" +#include "e1000_ethdev.h" +#include "base/e1000_osdep.h" + +#define E1000_TXD_VLAN_SHIFT 16 + +#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */ + +static inline struct rte_mbuf * +rte_rxmbuf_alloc(struct rte_mempool *mp) +{ + struct rte_mbuf *m; + + m = __rte_mbuf_raw_alloc(mp); + __rte_mbuf_sanity_check_raw(m, 0); + return m; +} + +/** + * Structure associated with each descriptor of the RX ring of a RX queue. + */ +struct em_rx_entry { + struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */ +}; + +/** + * Structure associated with each descriptor of the TX ring of a TX queue. + */ +struct em_tx_entry { + struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */ + uint16_t next_id; /**< Index of next descriptor in ring. */ + uint16_t last_id; /**< Index of last scattered descriptor. */ +}; + +/** + * Structure associated with each RX queue. + */ +struct em_rx_queue { + struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */ + volatile struct e1000_rx_desc *rx_ring; /**< RX ring virtual address. */ + uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */ + volatile uint32_t *rdt_reg_addr; /**< RDT register address. */ + volatile uint32_t *rdh_reg_addr; /**< RDH register address. */ + struct em_rx_entry *sw_ring; /**< address of RX software ring. */ + struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */ + struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */ + uint16_t nb_rx_desc; /**< number of RX descriptors. */ + uint16_t rx_tail; /**< current value of RDT register. */ + uint16_t nb_rx_hold; /**< number of held free RX desc. */ + uint16_t rx_free_thresh; /**< max free RX desc to hold. */ + uint16_t queue_id; /**< RX queue index. */ + uint8_t port_id; /**< Device port identifier. */ + uint8_t pthresh; /**< Prefetch threshold register. */ + uint8_t hthresh; /**< Host threshold register. */ + uint8_t wthresh; /**< Write-back threshold register. */ + uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */ +}; + +/** + * Hardware context number + */ +enum { + EM_CTX_0 = 0, /**< CTX0 */ + EM_CTX_NUM = 1, /**< CTX NUM */ +}; + +/** Offload features */ +union em_vlan_macip { + uint32_t data; + struct { + uint16_t l3_len:9; /**< L3 (IP) Header Length. */ + uint16_t l2_len:7; /**< L2 (MAC) Header Length. */ + uint16_t vlan_tci; + /**< VLAN Tag Control Identifier (CPU order). */ + } f; +}; + +/* + * Compare mask for vlan_macip_len.data, + * should be in sync with em_vlan_macip.f layout. + * */ +#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN length - 16-bits. 
*/ +#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC length - 7-bits. */ +#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP length - 9-bits. */ +/** MAC+IP length. */ +#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK) + +/** + * Structure to check if new context need be built + */ +struct em_ctx_info { + uint64_t flags; /**< ol_flags related to context build. */ + uint32_t cmp_mask; /**< compare mask */ + union em_vlan_macip hdrlen; /**< L2 and L3 header lenghts */ +}; + +/** + * Structure associated with each TX queue. + */ +struct em_tx_queue { + volatile struct e1000_data_desc *tx_ring; /**< TX ring address */ + uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */ + struct em_tx_entry *sw_ring; /**< virtual address of SW ring. */ + volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */ + uint16_t nb_tx_desc; /**< number of TX descriptors. */ + uint16_t tx_tail; /**< Current value of TDT register. */ + /**< Start freeing TX buffers if there are less free descriptors than + this value. */ + uint16_t tx_free_thresh; + /**< Number of TX descriptors to use before RS bit is set. */ + uint16_t tx_rs_thresh; + /** Number of TX descriptors used since RS bit was set. */ + uint16_t nb_tx_used; + /** Index to last TX descriptor to have been cleaned. */ + uint16_t last_desc_cleaned; + /** Total number of TX descriptors ready to be allocated. */ + uint16_t nb_tx_free; + uint16_t queue_id; /**< TX queue index. */ + uint8_t port_id; /**< Device port identifier. */ + uint8_t pthresh; /**< Prefetch threshold register. */ + uint8_t hthresh; /**< Host threshold register. */ + uint8_t wthresh; /**< Write-back threshold register. */ + struct em_ctx_info ctx_cache; + /**< Hardware context history.*/ +}; + +#if 1 +#define RTE_PMD_USE_PREFETCH +#endif + +#ifdef RTE_PMD_USE_PREFETCH +#define rte_em_prefetch(p) rte_prefetch0(p) +#else +#define rte_em_prefetch(p) do {} while(0) +#endif + +#ifdef RTE_PMD_PACKET_PREFETCH +#define rte_packet_prefetch(p) rte_prefetch1(p) +#else +#define rte_packet_prefetch(p) do {} while(0) +#endif + +#ifndef DEFAULT_TX_FREE_THRESH +#define DEFAULT_TX_FREE_THRESH 32 +#endif /* DEFAULT_TX_FREE_THRESH */ + +#ifndef DEFAULT_TX_RS_THRESH +#define DEFAULT_TX_RS_THRESH 32 +#endif /* DEFAULT_TX_RS_THRESH */ + + +/********************************************************************* + * + * TX function + * + **********************************************************************/ + +/* + * Populates TX context descriptor. + */ +static inline void +em_set_xmit_ctx(struct em_tx_queue* txq, + volatile struct e1000_context_desc *ctx_txd, + uint64_t flags, + union em_vlan_macip hdrlen) +{ + uint32_t cmp_mask, cmd_len; + uint16_t ipcse, l2len; + struct e1000_context_desc ctx; + + cmp_mask = 0; + cmd_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C; + + l2len = hdrlen.f.l2_len; + ipcse = (uint16_t)(l2len + hdrlen.f.l3_len); + + /* setup IPCS* fields */ + ctx.lower_setup.ip_fields.ipcss = (uint8_t)l2len; + ctx.lower_setup.ip_fields.ipcso = (uint8_t)(l2len + + offsetof(struct ipv4_hdr, hdr_checksum)); + + /* + * When doing checksum or TCP segmentation with IPv6 headers, + * IPCSE field should be set t0 0. 
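+ * For illustration, an untagged IPv4/TCP packet with l2_len = 14 and
+ * l3_len = 20 yields ipcss = 14, ipcso = 14 + 10 = 24 (IPv4 header
+ * checksum offset), ipcse = 14 + 20 - 1 = 33 (when IP checksum offload
+ * is requested), tucss = 34 and tucso = 34 + 16 = 50 (TCP checksum
+ * offset).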
+ */ + if (flags & PKT_TX_IP_CKSUM) { + ctx.lower_setup.ip_fields.ipcse = + (uint16_t)rte_cpu_to_le_16(ipcse - 1); + cmd_len |= E1000_TXD_CMD_IP; + cmp_mask |= TX_MACIP_LEN_CMP_MASK; + } else { + ctx.lower_setup.ip_fields.ipcse = 0; + } + + /* setup TUCS* fields */ + ctx.upper_setup.tcp_fields.tucss = (uint8_t)ipcse; + ctx.upper_setup.tcp_fields.tucse = 0; + + switch (flags & PKT_TX_L4_MASK) { + case PKT_TX_UDP_CKSUM: + ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse + + offsetof(struct udp_hdr, dgram_cksum)); + cmp_mask |= TX_MACIP_LEN_CMP_MASK; + break; + case PKT_TX_TCP_CKSUM: + ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse + + offsetof(struct tcp_hdr, cksum)); + cmd_len |= E1000_TXD_CMD_TCP; + cmp_mask |= TX_MACIP_LEN_CMP_MASK; + break; + default: + ctx.upper_setup.tcp_fields.tucso = 0; + } + + ctx.cmd_and_length = rte_cpu_to_le_32(cmd_len); + ctx.tcp_seg_setup.data = 0; + + *ctx_txd = ctx; + + txq->ctx_cache.flags = flags; + txq->ctx_cache.cmp_mask = cmp_mask; + txq->ctx_cache.hdrlen = hdrlen; +} + +/* + * Check which hardware context can be used. Use the existing match + * or create a new context descriptor. + */ +static inline uint32_t +what_ctx_update(struct em_tx_queue *txq, uint64_t flags, + union em_vlan_macip hdrlen) +{ + /* If match with the current context */ + if (likely (txq->ctx_cache.flags == flags && + ((txq->ctx_cache.hdrlen.data ^ hdrlen.data) & + txq->ctx_cache.cmp_mask) == 0)) + return EM_CTX_0; + + /* Mismatch */ + return EM_CTX_NUM; +} + +/* Reset transmit descriptors after they have been used */ +static inline int +em_xmit_cleanup(struct em_tx_queue *txq) +{ + struct em_tx_entry *sw_ring = txq->sw_ring; + volatile struct e1000_data_desc *txr = txq->tx_ring; + uint16_t last_desc_cleaned = txq->last_desc_cleaned; + uint16_t nb_tx_desc = txq->nb_tx_desc; + uint16_t desc_to_clean_to; + uint16_t nb_tx_to_clean; + + /* Determine the last descriptor needing to be cleaned */ + desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh); + if (desc_to_clean_to >= nb_tx_desc) + desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc); + + /* Check to make sure the last descriptor to clean is done */ + desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; + if (! (txr[desc_to_clean_to].upper.fields.status & E1000_TXD_STAT_DD)) + { + PMD_TX_FREE_LOG(DEBUG, + "TX descriptor %4u is not done" + "(port=%d queue=%d)", desc_to_clean_to, + txq->port_id, txq->queue_id); + /* Failed to clean any descriptors, better luck next time */ + return -(1); + } + + /* Figure out how many descriptors will be cleaned */ + if (last_desc_cleaned > desc_to_clean_to) + nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) + + desc_to_clean_to); + else + nb_tx_to_clean = (uint16_t)(desc_to_clean_to - + last_desc_cleaned); + + PMD_TX_FREE_LOG(DEBUG, + "Cleaning %4u TX descriptors: %4u to %4u " + "(port=%d queue=%d)", nb_tx_to_clean, + last_desc_cleaned, desc_to_clean_to, txq->port_id, + txq->queue_id); + + /* + * The last descriptor to clean is done, so that means all the + * descriptors from the last descriptor that was cleaned + * up to the last descriptor with the RS bit set + * are done. Only reset the threshold descriptor. 
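+ * Worked example (values assumed for illustration): with
+ * nb_tx_desc = 512, last_desc_cleaned = 500 and tx_rs_thresh = 32,
+ * desc_to_clean_to wraps to 532 - 512 = 20 (and stays 20 for
+ * single-descriptor packets, since last_id == 20), so
+ * nb_tx_to_clean = (512 - 500) + 20 = 32.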
+ */ + txr[desc_to_clean_to].upper.fields.status = 0; + + /* Update the txq to reflect the last descriptor that was cleaned */ + txq->last_desc_cleaned = desc_to_clean_to; + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean); + + /* No Error */ + return 0; +} + +static inline uint32_t +tx_desc_cksum_flags_to_upper(uint64_t ol_flags) +{ + static const uint32_t l4_olinfo[2] = {0, E1000_TXD_POPTS_TXSM << 8}; + static const uint32_t l3_olinfo[2] = {0, E1000_TXD_POPTS_IXSM << 8}; + uint32_t tmp; + + tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM]; + tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0]; + return tmp; +} + +uint16_t +eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct em_tx_queue *txq; + struct em_tx_entry *sw_ring; + struct em_tx_entry *txe, *txn; + volatile struct e1000_data_desc *txr; + volatile struct e1000_data_desc *txd; + struct rte_mbuf *tx_pkt; + struct rte_mbuf *m_seg; + uint64_t buf_dma_addr; + uint32_t popts_spec; + uint32_t cmd_type_len; + uint16_t slen; + uint64_t ol_flags; + uint16_t tx_id; + uint16_t tx_last; + uint16_t nb_tx; + uint16_t nb_used; + uint64_t tx_ol_req; + uint32_t ctx; + uint32_t new_ctx; + union em_vlan_macip hdrlen; + + txq = tx_queue; + sw_ring = txq->sw_ring; + txr = txq->tx_ring; + tx_id = txq->tx_tail; + txe = &sw_ring[tx_id]; + + /* Determine if the descriptor ring needs to be cleaned. */ + if (txq->nb_tx_free < txq->tx_free_thresh) + em_xmit_cleanup(txq); + + /* TX loop */ + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + new_ctx = 0; + tx_pkt = *tx_pkts++; + + RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf); + + /* + * Determine how many (if any) context descriptors + * are needed for offload functionality. + */ + ol_flags = tx_pkt->ol_flags; + + /* If hardware offload required */ + tx_ol_req = (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)); + if (tx_ol_req) { + hdrlen.f.vlan_tci = tx_pkt->vlan_tci; + hdrlen.f.l2_len = tx_pkt->l2_len; + hdrlen.f.l3_len = tx_pkt->l3_len; + /* If new context to be built or reuse the exist ctx. */ + ctx = what_ctx_update(txq, tx_ol_req, hdrlen); + + /* Only allocate context descriptor if required*/ + new_ctx = (ctx == EM_CTX_NUM); + } + + /* + * Keep track of how many descriptors are used this loop + * This will always be the number of segments + the number of + * Context descriptors required to transmit the packet + */ + nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx); + + /* + * The number of descriptors that must be allocated for a + * packet is the number of segments of that packet, plus 1 + * Context Descriptor for the hardware offload, if any. + * Determine the last TX descriptor to allocate in the TX ring + * for the packet, starting from the current position (tx_id) + * in the ring. + */ + tx_last = (uint16_t) (tx_id + nb_used - 1); + + /* Circular ring */ + if (tx_last >= txq->nb_tx_desc) + tx_last = (uint16_t) (tx_last - txq->nb_tx_desc); + + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u" + " tx_first=%u tx_last=%u", + (unsigned) txq->port_id, + (unsigned) txq->queue_id, + (unsigned) tx_pkt->pkt_len, + (unsigned) tx_id, + (unsigned) tx_last); + + /* + * Make sure there are enough TX descriptors available to + * transmit the entire packet. 
+ * nb_used better be less than or equal to txq->tx_rs_thresh + */ + while (unlikely (nb_used > txq->nb_tx_free)) { + PMD_TX_FREE_LOG(DEBUG, "Not enough free TX descriptors " + "nb_used=%4u nb_free=%4u " + "(port=%d queue=%d)", + nb_used, txq->nb_tx_free, + txq->port_id, txq->queue_id); + + if (em_xmit_cleanup(txq) != 0) { + /* Could not clean any descriptors */ + if (nb_tx == 0) + return 0; + goto end_of_tx; + } + } + + /* + * By now there are enough free TX descriptors to transmit + * the packet. + */ + + /* + * Set common flags of all TX Data Descriptors. + * + * The following bits must be set in all Data Descriptors: + * - E1000_TXD_DTYP_DATA + * - E1000_TXD_DTYP_DEXT + * + * The following bits must be set in the first Data Descriptor + * and are ignored in the other ones: + * - E1000_TXD_POPTS_IXSM + * - E1000_TXD_POPTS_TXSM + * + * The following bits must be set in the last Data Descriptor + * and are ignored in the other ones: + * - E1000_TXD_CMD_VLE + * - E1000_TXD_CMD_IFCS + * + * The following bits must only be set in the last Data + * Descriptor: + * - E1000_TXD_CMD_EOP + * + * The following bits can be set in any Data Descriptor, but + * are only set in the last Data Descriptor: + * - E1000_TXD_CMD_RS + */ + cmd_type_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | + E1000_TXD_CMD_IFCS; + popts_spec = 0; + + /* Set VLAN Tag offload fields. */ + if (ol_flags & PKT_TX_VLAN_PKT) { + cmd_type_len |= E1000_TXD_CMD_VLE; + popts_spec = tx_pkt->vlan_tci << E1000_TXD_VLAN_SHIFT; + } + + if (tx_ol_req) { + /* + * Setup the TX Context Descriptor if required + */ + if (new_ctx) { + volatile struct e1000_context_desc *ctx_txd; + + ctx_txd = (volatile struct e1000_context_desc *) + &txr[tx_id]; + + txn = &sw_ring[txe->next_id]; + RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf); + + if (txe->mbuf != NULL) { + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = NULL; + } + + em_set_xmit_ctx(txq, ctx_txd, tx_ol_req, + hdrlen); + + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + } + + /* + * Setup the TX Data Descriptor, + * This path will go through + * whatever new/reuse the context descriptor + */ + popts_spec |= tx_desc_cksum_flags_to_upper(ol_flags); + } + + m_seg = tx_pkt; + do { + txd = &txr[tx_id]; + txn = &sw_ring[txe->next_id]; + + if (txe->mbuf != NULL) + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = m_seg; + + /* + * Set up Transmit Data Descriptor. 
+ */ + slen = m_seg->data_len; + buf_dma_addr = rte_mbuf_data_dma_addr(m_seg); + + txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr); + txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen); + txd->upper.data = rte_cpu_to_le_32(popts_spec); + + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + m_seg = m_seg->next; + } while (m_seg != NULL); + + /* + * The last packet data descriptor needs End Of Packet (EOP) + */ + cmd_type_len |= E1000_TXD_CMD_EOP; + txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used); + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used); + + /* Set RS bit only on threshold packets' last descriptor */ + if (txq->nb_tx_used >= txq->tx_rs_thresh) { + PMD_TX_FREE_LOG(DEBUG, + "Setting RS bit on TXD id=%4u " + "(port=%d queue=%d)", + tx_last, txq->port_id, txq->queue_id); + + cmd_type_len |= E1000_TXD_CMD_RS; + + /* Update txq RS bit counters */ + txq->nb_tx_used = 0; + } + txd->lower.data |= rte_cpu_to_le_32(cmd_type_len); + } +end_of_tx: + rte_wmb(); + + /* + * Set the Transmit Descriptor Tail (TDT) + */ + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u", + (unsigned) txq->port_id, (unsigned) txq->queue_id, + (unsigned) tx_id, (unsigned) nb_tx); + E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id); + txq->tx_tail = tx_id; + + return nb_tx; +} + +/********************************************************************* + * + * RX functions + * + **********************************************************************/ + +static inline uint64_t +rx_desc_status_to_pkt_flags(uint32_t rx_status) +{ + uint64_t pkt_flags; + + /* Check if VLAN present */ + pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0); + + return pkt_flags; +} + +static inline uint64_t +rx_desc_error_to_pkt_flags(uint32_t rx_error) +{ + uint64_t pkt_flags = 0; + + if (rx_error & E1000_RXD_ERR_IPE) + pkt_flags |= PKT_RX_IP_CKSUM_BAD; + if (rx_error & E1000_RXD_ERR_TCPE) + pkt_flags |= PKT_RX_L4_CKSUM_BAD; + return pkt_flags; +} + +uint16_t +eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + volatile struct e1000_rx_desc *rx_ring; + volatile struct e1000_rx_desc *rxdp; + struct em_rx_queue *rxq; + struct em_rx_entry *sw_ring; + struct em_rx_entry *rxe; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + struct e1000_rx_desc rxd; + uint64_t dma_addr; + uint16_t pkt_len; + uint16_t rx_id; + uint16_t nb_rx; + uint16_t nb_hold; + uint8_t status; + + rxq = rx_queue; + + nb_rx = 0; + nb_hold = 0; + rx_id = rxq->rx_tail; + rx_ring = rxq->rx_ring; + sw_ring = rxq->sw_ring; + while (nb_rx < nb_pkts) { + /* + * The order of operations here is important as the DD status + * bit must not be read after any other descriptor fields. + * rx_ring and rxdp are pointing to volatile data so the order + * of accesses cannot be reordered by the compiler. If they were + * not volatile, they could be reordered which could lead to + * using invalid descriptor fields when read from rxd. + */ + rxdp = &rx_ring[rx_id]; + status = rxdp->status; + if (! (status & E1000_RXD_STAT_DD)) + break; + rxd = *rxdp; + + /* + * End of packet. + * + * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is + * likely to be invalid and to be dropped by the various + * validation checks performed by the network stack. + * + * Allocate a new mbuf to replenish the RX ring descriptor. + * If the allocation fails: + * - arrange for that RX descriptor to be the first one + * being parsed the next time the receive function is + * invoked [on the same queue]. 
+ * + * - Stop parsing the RX ring and return immediately. + * + * This policy do not drop the packet received in the RX + * descriptor for which the allocation of a new mbuf failed. + * Thus, it allows that packet to be later retrieved if + * mbuf have been freed in the mean time. + * As a side effect, holding RX descriptors instead of + * systematically giving them back to the NIC may lead to + * RX ring exhaustion situations. + * However, the NIC can gracefully prevent such situations + * to happen by sending specific "back-pressure" flow control + * frames to its peer(s). + */ + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u " + "status=0x%x pkt_len=%u", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) status, + (unsigned) rte_le_to_cpu_16(rxd.length)); + + nmb = rte_rxmbuf_alloc(rxq->mb_pool); + if (nmb == NULL) { + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " + "queue_id=%u", + (unsigned) rxq->port_id, + (unsigned) rxq->queue_id); + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; + break; + } + + nb_hold++; + rxe = &sw_ring[rx_id]; + rx_id++; + if (rx_id == rxq->nb_rx_desc) + rx_id = 0; + + /* Prefetch next mbuf while processing current one. */ + rte_em_prefetch(sw_ring[rx_id].mbuf); + + /* + * When next RX descriptor is on a cache-line boundary, + * prefetch the next 4 RX descriptors and the next 8 pointers + * to mbufs. + */ + if ((rx_id & 0x3) == 0) { + rte_em_prefetch(&rx_ring[rx_id]); + rte_em_prefetch(&sw_ring[rx_id]); + } + + /* Rearm RXD: attach new mbuf and reset status to zero. */ + + rxm = rxe->mbuf; + rxe->mbuf = nmb; + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb)); + rxdp->buffer_addr = dma_addr; + rxdp->status = 0; + + /* + * Initialize the returned mbuf. + * 1) setup generic mbuf fields: + * - number of segments, + * - next segment, + * - packet length, + * - RX port identifier. + * 2) integrate hardware offload data, if any: + * - RSS flag & hash, + * - IP checksum flag, + * - VLAN TCI, if any, + * - error flags. + */ + pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.length) - + rxq->crc_len); + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off); + rxm->nb_segs = 1; + rxm->next = NULL; + rxm->pkt_len = pkt_len; + rxm->data_len = pkt_len; + rxm->port = rxq->port_id; + + rxm->ol_flags = rx_desc_status_to_pkt_flags(status); + rxm->ol_flags = rxm->ol_flags | + rx_desc_error_to_pkt_flags(rxd.errors); + + /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */ + rxm->vlan_tci = rte_le_to_cpu_16(rxd.special); + + /* + * Store the mbuf address into the next entry of the array + * of returned packets. + */ + rx_pkts[nb_rx++] = rxm; + } + rxq->rx_tail = rx_id; + + /* + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the Receive Descriptor Tail (RDT) + * register. + * Update the RDT with the value of the last processed RX descriptor + * minus 1, to guarantee that the RDT register is never equal to the + * RDH register, which creates a "full" ring situtation from the + * hardware point of view... + */ + nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold); + if (nb_hold > rxq->rx_free_thresh) { + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u " + "nb_hold=%u nb_rx=%u", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) nb_hold, + (unsigned) nb_rx); + rx_id = (uint16_t) ((rx_id == 0) ? 
+ (rxq->nb_rx_desc - 1) : (rx_id - 1)); + E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + return nb_rx; +} + +uint16_t +eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct em_rx_queue *rxq; + volatile struct e1000_rx_desc *rx_ring; + volatile struct e1000_rx_desc *rxdp; + struct em_rx_entry *sw_ring; + struct em_rx_entry *rxe; + struct rte_mbuf *first_seg; + struct rte_mbuf *last_seg; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + struct e1000_rx_desc rxd; + uint64_t dma; /* Physical address of mbuf data buffer */ + uint16_t rx_id; + uint16_t nb_rx; + uint16_t nb_hold; + uint16_t data_len; + uint8_t status; + + rxq = rx_queue; + + nb_rx = 0; + nb_hold = 0; + rx_id = rxq->rx_tail; + rx_ring = rxq->rx_ring; + sw_ring = rxq->sw_ring; + + /* + * Retrieve RX context of current packet, if any. + */ + first_seg = rxq->pkt_first_seg; + last_seg = rxq->pkt_last_seg; + + while (nb_rx < nb_pkts) { + next_desc: + /* + * The order of operations here is important as the DD status + * bit must not be read after any other descriptor fields. + * rx_ring and rxdp are pointing to volatile data so the order + * of accesses cannot be reordered by the compiler. If they were + * not volatile, they could be reordered which could lead to + * using invalid descriptor fields when read from rxd. + */ + rxdp = &rx_ring[rx_id]; + status = rxdp->status; + if (! (status & E1000_RXD_STAT_DD)) + break; + rxd = *rxdp; + + /* + * Descriptor done. + * + * Allocate a new mbuf to replenish the RX ring descriptor. + * If the allocation fails: + * - arrange for that RX descriptor to be the first one + * being parsed the next time the receive function is + * invoked [on the same queue]. + * + * - Stop parsing the RX ring and return immediately. + * + * This policy does not drop the packet received in the RX + * descriptor for which the allocation of a new mbuf failed. + * Thus, it allows that packet to be later retrieved if + * mbuf have been freed in the mean time. + * As a side effect, holding RX descriptors instead of + * systematically giving them back to the NIC may lead to + * RX ring exhaustion situations. + * However, the NIC can gracefully prevent such situations + * to happen by sending specific "back-pressure" flow control + * frames to its peer(s). + */ + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u " + "status=0x%x data_len=%u", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) status, + (unsigned) rte_le_to_cpu_16(rxd.length)); + + nmb = rte_rxmbuf_alloc(rxq->mb_pool); + if (nmb == NULL) { + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " + "queue_id=%u", (unsigned) rxq->port_id, + (unsigned) rxq->queue_id); + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; + break; + } + + nb_hold++; + rxe = &sw_ring[rx_id]; + rx_id++; + if (rx_id == rxq->nb_rx_desc) + rx_id = 0; + + /* Prefetch next mbuf while processing current one. */ + rte_em_prefetch(sw_ring[rx_id].mbuf); + + /* + * When next RX descriptor is on a cache-line boundary, + * prefetch the next 4 RX descriptors and the next 8 pointers + * to mbufs. + */ + if ((rx_id & 0x3) == 0) { + rte_em_prefetch(&rx_ring[rx_id]); + rte_em_prefetch(&sw_ring[rx_id]); + } + + /* + * Update RX descriptor with the physical address of the new + * data buffer of the new allocated mbuf. 
+ */ + rxm = rxe->mbuf; + rxe->mbuf = nmb; + dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb)); + rxdp->buffer_addr = dma; + rxdp->status = 0; + + /* + * Set data length & data buffer address of mbuf. + */ + data_len = rte_le_to_cpu_16(rxd.length); + rxm->data_len = data_len; + rxm->data_off = RTE_PKTMBUF_HEADROOM; + + /* + * If this is the first buffer of the received packet, + * set the pointer to the first mbuf of the packet and + * initialize its context. + * Otherwise, update the total length and the number of segments + * of the current scattered packet, and update the pointer to + * the last mbuf of the current packet. + */ + if (first_seg == NULL) { + first_seg = rxm; + first_seg->pkt_len = data_len; + first_seg->nb_segs = 1; + } else { + first_seg->pkt_len += data_len; + first_seg->nb_segs++; + last_seg->next = rxm; + } + + /* + * If this is not the last buffer of the received packet, + * update the pointer to the last mbuf of the current scattered + * packet and continue to parse the RX ring. + */ + if (! (status & E1000_RXD_STAT_EOP)) { + last_seg = rxm; + goto next_desc; + } + + /* + * This is the last buffer of the received packet. + * If the CRC is not stripped by the hardware: + * - Subtract the CRC length from the total packet length. + * - If the last buffer only contains the whole CRC or a part + * of it, free the mbuf associated to the last buffer. + * If part of the CRC is also contained in the previous + * mbuf, subtract the length of that CRC part from the + * data length of the previous mbuf. + */ + rxm->next = NULL; + if (unlikely(rxq->crc_len > 0)) { + first_seg->pkt_len -= ETHER_CRC_LEN; + if (data_len <= ETHER_CRC_LEN) { + rte_pktmbuf_free_seg(rxm); + first_seg->nb_segs--; + last_seg->data_len = (uint16_t) + (last_seg->data_len - + (ETHER_CRC_LEN - data_len)); + last_seg->next = NULL; + } else + rxm->data_len = + (uint16_t) (data_len - ETHER_CRC_LEN); + } + + /* + * Initialize the first mbuf of the returned packet: + * - RX port identifier, + * - hardware offload data, if any: + * - IP checksum flag, + * - error flags. + */ + first_seg->port = rxq->port_id; + + first_seg->ol_flags = rx_desc_status_to_pkt_flags(status); + first_seg->ol_flags = first_seg->ol_flags | + rx_desc_error_to_pkt_flags(rxd.errors); + + /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */ + rxm->vlan_tci = rte_le_to_cpu_16(rxd.special); + + /* Prefetch data of first segment, if configured to do so. */ + rte_packet_prefetch((char *)first_seg->buf_addr + + first_seg->data_off); + + /* + * Store the mbuf address into the next entry of the array + * of returned packets. + */ + rx_pkts[nb_rx++] = first_seg; + + /* + * Setup receipt context for a new packet. + */ + first_seg = NULL; + } + + /* + * Record index of the next RX descriptor to probe. + */ + rxq->rx_tail = rx_id; + + /* + * Save receive context. + */ + rxq->pkt_first_seg = first_seg; + rxq->pkt_last_seg = last_seg; + + /* + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the Receive Descriptor Tail (RDT) + * register. + * Update the RDT with the value of the last processed RX descriptor + * minus 1, to guarantee that the RDT register is never equal to the + * RDH register, which creates a "full" ring situtation from the + * hardware point of view... 
+ */ + nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold); + if (nb_hold > rxq->rx_free_thresh) { + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u " + "nb_hold=%u nb_rx=%u", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) nb_hold, + (unsigned) nb_rx); + rx_id = (uint16_t) ((rx_id == 0) ? + (rxq->nb_rx_desc - 1) : (rx_id - 1)); + E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + return nb_rx; +} + +#define EM_MAX_BUF_SIZE 16384 +#define EM_RCTL_FLXBUF_STEP 1024 + +static void +em_tx_queue_release_mbufs(struct em_tx_queue *txq) +{ + unsigned i; + + if (txq->sw_ring != NULL) { + for (i = 0; i != txq->nb_tx_desc; i++) { + if (txq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + } + } +} + +static void +em_tx_queue_release(struct em_tx_queue *txq) +{ + if (txq != NULL) { + em_tx_queue_release_mbufs(txq); + rte_free(txq->sw_ring); + rte_free(txq); + } +} + +void +eth_em_tx_queue_release(void *txq) +{ + em_tx_queue_release(txq); +} + +/* (Re)set dynamic em_tx_queue fields to defaults */ +static void +em_reset_tx_queue(struct em_tx_queue *txq) +{ + uint16_t i, nb_desc, prev; + static const struct e1000_data_desc txd_init = { + .upper.fields = {.status = E1000_TXD_STAT_DD}, + }; + + nb_desc = txq->nb_tx_desc; + + /* Initialize ring entries */ + + prev = (uint16_t) (nb_desc - 1); + + for (i = 0; i < nb_desc; i++) { + txq->tx_ring[i] = txd_init; + txq->sw_ring[i].mbuf = NULL; + txq->sw_ring[i].last_id = i; + txq->sw_ring[prev].next_id = i; + prev = i; + } + + /* + * Always allow 1 descriptor to be un-allocated to avoid + * a H/W race condition + */ + txq->nb_tx_free = (uint16_t)(nb_desc - 1); + txq->last_desc_cleaned = (uint16_t)(nb_desc - 1); + txq->nb_tx_used = 0; + txq->tx_tail = 0; + + memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache)); +} + +int +eth_em_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + const struct rte_memzone *tz; + struct em_tx_queue *txq; + struct e1000_hw *hw; + uint32_t tsize; + uint16_t tx_rs_thresh, tx_free_thresh; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * Validate number of transmit descriptors. + * It must not exceed hardware maximum, and must be multiple + * of E1000_ALIGN. + */ + if (nb_desc % EM_TXD_ALIGN != 0 || + (nb_desc > E1000_MAX_RING_DESC) || + (nb_desc < E1000_MIN_RING_DESC)) { + return -(EINVAL); + } + + tx_free_thresh = tx_conf->tx_free_thresh; + if (tx_free_thresh == 0) + tx_free_thresh = (uint16_t)RTE_MIN(nb_desc / 4, + DEFAULT_TX_FREE_THRESH); + + tx_rs_thresh = tx_conf->tx_rs_thresh; + if (tx_rs_thresh == 0) + tx_rs_thresh = (uint16_t)RTE_MIN(tx_free_thresh, + DEFAULT_TX_RS_THRESH); + + if (tx_free_thresh >= (nb_desc - 3)) { + PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the " + "number of TX descriptors minus 3. " + "(tx_free_thresh=%u port=%d queue=%d)", + (unsigned int)tx_free_thresh, + (int)dev->data->port_id, (int)queue_idx); + return -(EINVAL); + } + if (tx_rs_thresh > tx_free_thresh) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to " + "tx_free_thresh. (tx_free_thresh=%u " + "tx_rs_thresh=%u port=%d queue=%d)", + (unsigned int)tx_free_thresh, + (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, + (int)queue_idx); + return -(EINVAL); + } + + /* + * If rs_bit_thresh is greater than 1, then TX WTHRESH should be + * set to 0. 
If WTHRESH is greater than zero, the RS bit is ignored + * by the NIC and all descriptors are written back after the NIC + * accumulates WTHRESH descriptors. + */ + if (tx_conf->tx_thresh.wthresh != 0 && tx_rs_thresh != 1) { + PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if " + "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u " + "port=%d queue=%d)", (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, (int)queue_idx); + return -(EINVAL); + } + + /* Free memory prior to re-allocation if needed... */ + if (dev->data->tx_queues[queue_idx] != NULL) { + em_tx_queue_release(dev->data->tx_queues[queue_idx]); + dev->data->tx_queues[queue_idx] = NULL; + } + + /* + * Allocate TX ring hardware descriptors. A memzone large enough to + * handle the maximum ring size is allocated in order to allow for + * resizing in later calls to the queue setup function. + */ + tsize = sizeof(txq->tx_ring[0]) * E1000_MAX_RING_DESC; + tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize, + RTE_CACHE_LINE_SIZE, socket_id); + if (tz == NULL) + return -ENOMEM; + + /* Allocate the tx queue data structure. */ + if ((txq = rte_zmalloc("ethdev TX queue", sizeof(*txq), + RTE_CACHE_LINE_SIZE)) == NULL) + return -ENOMEM; + + /* Allocate software ring */ + if ((txq->sw_ring = rte_zmalloc("txq->sw_ring", + sizeof(txq->sw_ring[0]) * nb_desc, + RTE_CACHE_LINE_SIZE)) == NULL) { + em_tx_queue_release(txq); + return -ENOMEM; + } + + txq->nb_tx_desc = nb_desc; + txq->tx_free_thresh = tx_free_thresh; + txq->tx_rs_thresh = tx_rs_thresh; + txq->pthresh = tx_conf->tx_thresh.pthresh; + txq->hthresh = tx_conf->tx_thresh.hthresh; + txq->wthresh = tx_conf->tx_thresh.wthresh; + txq->queue_id = queue_idx; + txq->port_id = dev->data->port_id; + + txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx)); + txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr); + txq->tx_ring = (struct e1000_data_desc *) tz->addr; + + PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64, + txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); + + em_reset_tx_queue(txq); + + dev->data->tx_queues[queue_idx] = txq; + return 0; +} + +static void +em_rx_queue_release_mbufs(struct em_rx_queue *rxq) +{ + unsigned i; + + if (rxq->sw_ring != NULL) { + for (i = 0; i != rxq->nb_rx_desc; i++) { + if (rxq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + rxq->sw_ring[i].mbuf = NULL; + } + } + } +} + +static void +em_rx_queue_release(struct em_rx_queue *rxq) +{ + if (rxq != NULL) { + em_rx_queue_release_mbufs(rxq); + rte_free(rxq->sw_ring); + rte_free(rxq); + } +} + +void +eth_em_rx_queue_release(void *rxq) +{ + em_rx_queue_release(rxq); +} + +/* Reset dynamic em_rx_queue fields back to defaults */ +static void +em_reset_rx_queue(struct em_rx_queue *rxq) +{ + rxq->rx_tail = 0; + rxq->nb_rx_hold = 0; + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; +} + +int +eth_em_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + const struct rte_memzone *rz; + struct em_rx_queue *rxq; + struct e1000_hw *hw; + uint32_t rsize; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * Validate number of receive descriptors. + * It must not exceed hardware maximum, and must be multiple + * of E1000_ALIGN. 
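+ * Requests that are not a multiple of EM_RXD_ALIGN, or that fall
+ * outside the [E1000_MIN_RING_DESC, E1000_MAX_RING_DESC] range,
+ * are rejected with -EINVAL rather than silently rounded.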
+ */ + if (nb_desc % EM_RXD_ALIGN != 0 || + (nb_desc > E1000_MAX_RING_DESC) || + (nb_desc < E1000_MIN_RING_DESC)) { + return -EINVAL; + } + + /* + * EM devices don't support drop_en functionality + */ + if (rx_conf->rx_drop_en) { + PMD_INIT_LOG(ERR, "drop_en functionality not supported by " + "device"); + return -EINVAL; + } + + /* Free memory prior to re-allocation if needed. */ + if (dev->data->rx_queues[queue_idx] != NULL) { + em_rx_queue_release(dev->data->rx_queues[queue_idx]); + dev->data->rx_queues[queue_idx] = NULL; + } + + /* Allocate RX ring for max possible mumber of hardware descriptors. */ + rsize = sizeof(rxq->rx_ring[0]) * E1000_MAX_RING_DESC; + rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize, + RTE_CACHE_LINE_SIZE, socket_id); + if (rz == NULL) + return -ENOMEM; + + /* Allocate the RX queue data structure. */ + if ((rxq = rte_zmalloc("ethdev RX queue", sizeof(*rxq), + RTE_CACHE_LINE_SIZE)) == NULL) + return -ENOMEM; + + /* Allocate software ring. */ + if ((rxq->sw_ring = rte_zmalloc("rxq->sw_ring", + sizeof (rxq->sw_ring[0]) * nb_desc, + RTE_CACHE_LINE_SIZE)) == NULL) { + em_rx_queue_release(rxq); + return -ENOMEM; + } + + rxq->mb_pool = mp; + rxq->nb_rx_desc = nb_desc; + rxq->pthresh = rx_conf->rx_thresh.pthresh; + rxq->hthresh = rx_conf->rx_thresh.hthresh; + rxq->wthresh = rx_conf->rx_thresh.wthresh; + rxq->rx_free_thresh = rx_conf->rx_free_thresh; + rxq->queue_id = queue_idx; + rxq->port_id = dev->data->port_id; + rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? + 0 : ETHER_CRC_LEN); + + rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx)); + rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx)); + rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr); + rxq->rx_ring = (struct e1000_rx_desc *) rz->addr; + + PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64, + rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr); + + dev->data->rx_queues[queue_idx] = rxq; + em_reset_rx_queue(rxq); + + return 0; +} + +uint32_t +eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ +#define EM_RXQ_SCAN_INTERVAL 4 + volatile struct e1000_rx_desc *rxdp; + struct em_rx_queue *rxq; + uint32_t desc = 0; + + if (rx_queue_id >= dev->data->nb_rx_queues) { + PMD_RX_LOG(DEBUG, "Invalid RX queue_id=%d", rx_queue_id); + return 0; + } + + rxq = dev->data->rx_queues[rx_queue_id]; + rxdp = &(rxq->rx_ring[rxq->rx_tail]); + + while ((desc < rxq->nb_rx_desc) && + (rxdp->status & E1000_RXD_STAT_DD)) { + desc += EM_RXQ_SCAN_INTERVAL; + rxdp += EM_RXQ_SCAN_INTERVAL; + if (rxq->rx_tail + desc >= rxq->nb_rx_desc) + rxdp = &(rxq->rx_ring[rxq->rx_tail + + desc - rxq->nb_rx_desc]); + } + + return desc; +} + +int +eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset) +{ + volatile struct e1000_rx_desc *rxdp; + struct em_rx_queue *rxq = rx_queue; + uint32_t desc; + + if (unlikely(offset >= rxq->nb_rx_desc)) + return 0; + desc = rxq->rx_tail + offset; + if (desc >= rxq->nb_rx_desc) + desc -= rxq->nb_rx_desc; + + rxdp = &rxq->rx_ring[desc]; + return !!(rxdp->status & E1000_RXD_STAT_DD); +} + +void +em_dev_clear_queues(struct rte_eth_dev *dev) +{ + uint16_t i; + struct em_tx_queue *txq; + struct em_rx_queue *rxq; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (txq != NULL) { + em_tx_queue_release_mbufs(txq); + em_reset_tx_queue(txq); + } + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + if (rxq != NULL) { + 
em_rx_queue_release_mbufs(rxq); + em_reset_rx_queue(rxq); + } + } +} + +void +em_dev_free_queues(struct rte_eth_dev *dev) +{ + uint16_t i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + eth_em_rx_queue_release(dev->data->rx_queues[i]); + dev->data->rx_queues[i] = NULL; + } + dev->data->nb_rx_queues = 0; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + eth_em_tx_queue_release(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = NULL; + } + dev->data->nb_tx_queues = 0; +} + +/* + * Takes as input/output parameter RX buffer size. + * Returns (BSIZE | BSEX | FLXBUF) fields of RCTL register. + */ +static uint32_t +em_rctl_bsize(__rte_unused enum e1000_mac_type hwtyp, uint32_t *bufsz) +{ + /* + * For BSIZE & BSEX all configurable sizes are: + * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX); + * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX); + * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX); + * 2048: rctl |= E1000_RCTL_SZ_2048; + * 1024: rctl |= E1000_RCTL_SZ_1024; + * 512: rctl |= E1000_RCTL_SZ_512; + * 256: rctl |= E1000_RCTL_SZ_256; + */ + static const struct { + uint32_t bufsz; + uint32_t rctl; + } bufsz_to_rctl[] = { + {16384, (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX)}, + {8192, (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX)}, + {4096, (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX)}, + {2048, E1000_RCTL_SZ_2048}, + {1024, E1000_RCTL_SZ_1024}, + {512, E1000_RCTL_SZ_512}, + {256, E1000_RCTL_SZ_256}, + }; + + int i; + uint32_t rctl_bsize; + + rctl_bsize = *bufsz; + + /* + * Starting from 82571 it is possible to specify RX buffer size + * by RCTL.FLXBUF. When this field is different from zero, the + * RX buffer size = RCTL.FLXBUF * 1K + * (e.g. t is possible to specify RX buffer size 1,2,...,15KB). + * It is working ok on real HW, but by some reason doesn't work + * on VMware emulated 82574L. + * So for now, always use BSIZE/BSEX to setup RX buffer size. + * If you don't plan to use it on VMware emulated 82574L and + * would like to specify RX buffer size in 1K granularity, + * uncomment the following lines: + * *************************************************************** + * if (hwtyp >= e1000_82571 && hwtyp <= e1000_82574 && + * rctl_bsize >= EM_RCTL_FLXBUF_STEP) { + * rctl_bsize /= EM_RCTL_FLXBUF_STEP; + * *bufsz = rctl_bsize; + * return (rctl_bsize << E1000_RCTL_FLXBUF_SHIFT & + * E1000_RCTL_FLXBUF_MASK); + * } + * *************************************************************** + */ + + for (i = 0; i != sizeof(bufsz_to_rctl) / sizeof(bufsz_to_rctl[0]); + i++) { + if (rctl_bsize >= bufsz_to_rctl[i].bufsz) { + *bufsz = bufsz_to_rctl[i].bufsz; + return bufsz_to_rctl[i].rctl; + } + } + + /* Should never happen. 
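+ * The lookup table above matches any buffer size of 256 bytes or
+ * more, so this point is only reached when the supplied buffer is
+ * smaller than the smallest configurable RCTL size.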
*/ + return -EINVAL; +} + +static int +em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq) +{ + struct em_rx_entry *rxe = rxq->sw_ring; + uint64_t dma_addr; + unsigned i; + static const struct e1000_rx_desc rxd_init = { + .buffer_addr = 0, + }; + + /* Initialize software ring entries */ + for (i = 0; i < rxq->nb_rx_desc; i++) { + volatile struct e1000_rx_desc *rxd; + struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool); + + if (mbuf == NULL) { + PMD_INIT_LOG(ERR, "RX mbuf alloc failed " + "queue_id=%hu", rxq->queue_id); + return -ENOMEM; + } + + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf)); + + /* Clear HW ring memory */ + rxq->rx_ring[i] = rxd_init; + + rxd = &rxq->rx_ring[i]; + rxd->buffer_addr = dma_addr; + rxe[i].mbuf = mbuf; + } + + return 0; +} + +/********************************************************************* + * + * Enable receive unit. + * + **********************************************************************/ +int +eth_em_rx_init(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + struct em_rx_queue *rxq; + uint32_t rctl; + uint32_t rfctl; + uint32_t rxcsum; + uint32_t rctl_bsize; + uint16_t i; + int ret; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * Make sure receives are disabled while setting + * up the descriptor ring. + */ + rctl = E1000_READ_REG(hw, E1000_RCTL); + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); + + rfctl = E1000_READ_REG(hw, E1000_RFCTL); + + /* Disable extended descriptor type. */ + rfctl &= ~E1000_RFCTL_EXTEN; + /* Disable accelerated acknowledge */ + if (hw->mac.type == e1000_82574) + rfctl |= E1000_RFCTL_ACK_DIS; + + E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); + + /* + * XXX TEMPORARY WORKAROUND: on some systems with 82573 + * long latencies are observed, like Lenovo X60. This + * change eliminates the problem, but since having positive + * values in RDTR is a known source of problems on other + * platforms another solution is being sought. + */ + if (hw->mac.type == e1000_82573) + E1000_WRITE_REG(hw, E1000_RDTR, 0x20); + + dev->rx_pkt_burst = (eth_rx_burst_t)eth_em_recv_pkts; + + /* Determine RX bufsize. */ + rctl_bsize = EM_MAX_BUF_SIZE; + for (i = 0; i < dev->data->nb_rx_queues; i++) { + uint32_t buf_size; + + rxq = dev->data->rx_queues[i]; + buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) - + RTE_PKTMBUF_HEADROOM; + rctl_bsize = RTE_MIN(rctl_bsize, buf_size); + } + + rctl |= em_rctl_bsize(hw->mac.type, &rctl_bsize); + + /* Configure and enable each RX queue. */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + uint64_t bus_addr; + uint32_t rxdctl; + + rxq = dev->data->rx_queues[i]; + + /* Allocate buffers for descriptor rings and setup queue */ + ret = em_alloc_rx_queue_mbufs(rxq); + if (ret) + return ret; + + /* + * Reset crc_len in case it was changed after queue setup by a + * call to configure + */ + rxq->crc_len = + (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ? 
+ 0 : ETHER_CRC_LEN); + + bus_addr = rxq->rx_ring_phys_addr; + E1000_WRITE_REG(hw, E1000_RDLEN(i), + rxq->nb_rx_desc * + sizeof(*rxq->rx_ring)); + E1000_WRITE_REG(hw, E1000_RDBAH(i), + (uint32_t)(bus_addr >> 32)); + E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr); + + E1000_WRITE_REG(hw, E1000_RDH(i), 0); + E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1); + + rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0)); + rxdctl &= 0xFE000000; + rxdctl |= rxq->pthresh & 0x3F; + rxdctl |= (rxq->hthresh & 0x3F) << 8; + rxdctl |= (rxq->wthresh & 0x3F) << 16; + rxdctl |= E1000_RXDCTL_GRAN; + E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); + + /* + * Due to EM devices not having any sort of hardware + * limit for packet length, jumbo frame of any size + * can be accepted, thus we have to enable scattered + * rx if jumbo frames are enabled (or if buffer size + * is too small to accommodate non-jumbo packets) + * to avoid splitting packets that don't fit into + * one buffer. + */ + if (dev->data->dev_conf.rxmode.jumbo_frame || + rctl_bsize < ETHER_MAX_LEN) { + if (!dev->data->scattered_rx) + PMD_INIT_LOG(DEBUG, "forcing scatter mode"); + dev->rx_pkt_burst = + (eth_rx_burst_t)eth_em_recv_scattered_pkts; + dev->data->scattered_rx = 1; + } + } + + if (dev->data->dev_conf.rxmode.enable_scatter) { + if (!dev->data->scattered_rx) + PMD_INIT_LOG(DEBUG, "forcing scatter mode"); + dev->rx_pkt_burst = eth_em_recv_scattered_pkts; + dev->data->scattered_rx = 1; + } + + /* + * Setup the Checksum Register. + * Receive Full-Packet Checksum Offload is mutually exclusive with RSS. + */ + rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); + + if (dev->data->dev_conf.rxmode.hw_ip_checksum) + rxcsum |= E1000_RXCSUM_IPOFL; + else + rxcsum &= ~E1000_RXCSUM_IPOFL; + E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); + + /* No MRQ or RSS support for now */ + + /* Set early receive threshold on appropriate hw */ + if ((hw->mac.type == e1000_ich9lan || + hw->mac.type == e1000_pch2lan || + hw->mac.type == e1000_ich10lan) && + dev->data->dev_conf.rxmode.jumbo_frame == 1) { + u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0)); + E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3); + E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13)); + } + + if (hw->mac.type == e1000_pch2lan) { + if (dev->data->dev_conf.rxmode.jumbo_frame == 1) + e1000_lv_jumbo_workaround_ich8lan(hw, TRUE); + else + e1000_lv_jumbo_workaround_ich8lan(hw, FALSE); + } + + /* Setup the Receive Control Register. */ + if (dev->data->dev_conf.rxmode.hw_strip_crc) + rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */ + else + rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */ + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | + E1000_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + + /* Make sure VLAN Filters are off. */ + rctl &= ~E1000_RCTL_VFE; + /* Don't store bad packets. */ + rctl &= ~E1000_RCTL_SBP; + /* Legacy descriptor type. */ + rctl &= ~E1000_RCTL_DTYP_MASK; + + /* + * Configure support of jumbo frames, if any. + */ + if (dev->data->dev_conf.rxmode.jumbo_frame == 1) + rctl |= E1000_RCTL_LPE; + else + rctl &= ~E1000_RCTL_LPE; + + /* Enable Receives. */ + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + return 0; +} + +/********************************************************************* + * + * Enable transmit unit. 
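+ * Programs the descriptor ring base, length, head and tail registers
+ * for each TX queue, the per-queue TXDCTL thresholds, and finally
+ * TCTL to start the transmitter.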
+ * + **********************************************************************/ +void +eth_em_tx_init(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + struct em_tx_queue *txq; + uint32_t tctl; + uint32_t txdctl; + uint16_t i; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Setup the Base and Length of the Tx Descriptor Rings. */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + uint64_t bus_addr; + + txq = dev->data->tx_queues[i]; + bus_addr = txq->tx_ring_phys_addr; + E1000_WRITE_REG(hw, E1000_TDLEN(i), + txq->nb_tx_desc * + sizeof(*txq->tx_ring)); + E1000_WRITE_REG(hw, E1000_TDBAH(i), + (uint32_t)(bus_addr >> 32)); + E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr); + + /* Setup the HW Tx Head and Tail descriptor pointers. */ + E1000_WRITE_REG(hw, E1000_TDT(i), 0); + E1000_WRITE_REG(hw, E1000_TDH(i), 0); + + /* Setup Transmit threshold registers. */ + txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i)); + /* + * bit 22 is reserved, on some models should always be 0, + * on others - always 1. + */ + txdctl &= E1000_TXDCTL_COUNT_DESC; + txdctl |= txq->pthresh & 0x3F; + txdctl |= (txq->hthresh & 0x3F) << 8; + txdctl |= (txq->wthresh & 0x3F) << 16; + txdctl |= E1000_TXDCTL_GRAN; + E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); + } + + /* Program the Transmit Control Register. */ + tctl = E1000_READ_REG(hw, E1000_TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); + + /* This write will effectively turn on the transmit unit. */ + E1000_WRITE_REG(hw, E1000_TCTL, tctl); +} + +void +em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct em_rx_queue *rxq; + + rxq = dev->data->rx_queues[queue_id]; + + qinfo->mp = rxq->mb_pool; + qinfo->scattered_rx = dev->data->scattered_rx; + qinfo->nb_desc = rxq->nb_rx_desc; + qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; +} + +void +em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct em_tx_queue *txq; + + txq = dev->data->tx_queues[queue_id]; + + qinfo->nb_desc = txq->nb_tx_desc; + + qinfo->conf.tx_thresh.pthresh = txq->pthresh; + qinfo->conf.tx_thresh.hthresh = txq->hthresh; + qinfo->conf.tx_thresh.wthresh = txq->wthresh; + qinfo->conf.tx_free_thresh = txq->tx_free_thresh; + qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh; +} diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c new file mode 100644 index 00000000..e0053fec --- /dev/null +++ b/drivers/net/e1000/igb_ethdev.c @@ -0,0 +1,4982 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/queue.h> +#include <stdio.h> +#include <errno.h> +#include <stdint.h> +#include <stdarg.h> + +#include <rte_common.h> +#include <rte_interrupts.h> +#include <rte_byteorder.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_pci.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_memory.h> +#include <rte_memzone.h> +#include <rte_eal.h> +#include <rte_atomic.h> +#include <rte_malloc.h> +#include <rte_dev.h> + +#include "e1000_logs.h" +#include "base/e1000_api.h" +#include "e1000_ethdev.h" +#include "igb_regs.h" + +/* + * Default values for port configuration + */ +#define IGB_DEFAULT_RX_FREE_THRESH 32 + +#define IGB_DEFAULT_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : 8) +#define IGB_DEFAULT_RX_HTHRESH 8 +#define IGB_DEFAULT_RX_WTHRESH ((hw->mac.type == e1000_82576) ? 1 : 4) + +#define IGB_DEFAULT_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8) +#define IGB_DEFAULT_TX_HTHRESH 1 +#define IGB_DEFAULT_TX_WTHRESH ((hw->mac.type == e1000_82576) ? 1 : 16) + +#define IGB_HKEY_MAX_INDEX 10 + +/* Bit shift and mask */ +#define IGB_4_BIT_WIDTH (CHAR_BIT / 2) +#define IGB_4_BIT_MASK RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t) +#define IGB_8_BIT_WIDTH CHAR_BIT +#define IGB_8_BIT_MASK UINT8_MAX + +/* Additional timesync values. 
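+ * (These constants are used by the igb_timesync_* routines declared
+ * below, e.g. when programming the 82576 increment value and period.)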
*/ +#define E1000_CYCLECOUNTER_MASK 0xffffffffffffffffULL +#define E1000_ETQF_FILTER_1588 3 +#define IGB_82576_TSYNC_SHIFT 16 +#define E1000_INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT) +#define E1000_INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) +#define E1000_TSAUXC_DISABLE_SYSTIME 0x80000000 + +static int eth_igb_configure(struct rte_eth_dev *dev); +static int eth_igb_start(struct rte_eth_dev *dev); +static void eth_igb_stop(struct rte_eth_dev *dev); +static void eth_igb_close(struct rte_eth_dev *dev); +static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev); +static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev); +static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev); +static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev); +static int eth_igb_link_update(struct rte_eth_dev *dev, + int wait_to_complete); +static void eth_igb_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *rte_stats); +static int eth_igb_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstats *xstats, unsigned n); +static void eth_igb_stats_reset(struct rte_eth_dev *dev); +static void eth_igb_xstats_reset(struct rte_eth_dev *dev); +static void eth_igb_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev); +static void eth_igbvf_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev); +static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev); +static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev); +static int eth_igb_interrupt_action(struct rte_eth_dev *dev); +static void eth_igb_interrupt_handler(struct rte_intr_handle *handle, + void *param); +static int igb_hardware_init(struct e1000_hw *hw); +static void igb_hw_control_acquire(struct e1000_hw *hw); +static void igb_hw_control_release(struct e1000_hw *hw); +static void igb_init_manageability(struct e1000_hw *hw); +static void igb_release_manageability(struct e1000_hw *hw); + +static int eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); + +static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, int on); +static int eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, + uint16_t tpid_id); +static void eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask); + +static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev); +static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev); +static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev); +static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev); +static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev); +static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev); + +static int eth_igb_led_on(struct rte_eth_dev *dev); +static int eth_igb_led_off(struct rte_eth_dev *dev); + +static void igb_intr_disable(struct e1000_hw *hw); +static int igb_get_rx_buffer_size(struct e1000_hw *hw); +static void eth_igb_rar_set(struct rte_eth_dev *dev, + struct ether_addr *mac_addr, + uint32_t index, uint32_t pool); +static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index); +static void eth_igb_default_mac_addr_set(struct rte_eth_dev *dev, + struct ether_addr *addr); + +static void 
igbvf_intr_disable(struct e1000_hw *hw); +static int igbvf_dev_configure(struct rte_eth_dev *dev); +static int igbvf_dev_start(struct rte_eth_dev *dev); +static void igbvf_dev_stop(struct rte_eth_dev *dev); +static void igbvf_dev_close(struct rte_eth_dev *dev); +static void igbvf_promiscuous_enable(struct rte_eth_dev *dev); +static void igbvf_promiscuous_disable(struct rte_eth_dev *dev); +static void igbvf_allmulticast_enable(struct rte_eth_dev *dev); +static void igbvf_allmulticast_disable(struct rte_eth_dev *dev); +static int eth_igbvf_link_update(struct e1000_hw *hw); +static void eth_igbvf_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *rte_stats); +static int eth_igbvf_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstats *xstats, unsigned n); +static void eth_igbvf_stats_reset(struct rte_eth_dev *dev); +static int igbvf_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, int on); +static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on); +static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on); +static void igbvf_default_mac_addr_set(struct rte_eth_dev *dev, + struct ether_addr *addr); +static int igbvf_get_reg_length(struct rte_eth_dev *dev); +static int igbvf_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs); + +static int eth_igb_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static int eth_igb_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); + +static int eth_igb_syn_filter_set(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter, + bool add); +static int eth_igb_syn_filter_get(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter); +static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +static int igb_add_2tuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter); +static int igb_remove_2tuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter); +static int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev, + struct rte_eth_flex_filter *filter, + bool add); +static int eth_igb_get_flex_filter(struct rte_eth_dev *dev, + struct rte_eth_flex_filter *filter); +static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter); +static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter); +static int igb_add_del_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *filter, + bool add); +static int igb_get_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *filter); +static int igb_ntuple_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +static int igb_add_del_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter, + bool add); +static int igb_ethertype_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +static int igb_get_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter); +static int eth_igb_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg); +static int eth_igb_get_reg_length(struct rte_eth_dev *dev); +static int eth_igb_get_regs(struct 
rte_eth_dev *dev, + struct rte_dev_reg_info *regs); +static int eth_igb_get_eeprom_length(struct rte_eth_dev *dev); +static int eth_igb_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom); +static int eth_igb_set_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom); +static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev, + struct ether_addr *mc_addr_set, + uint32_t nb_mc_addr); +static int igb_timesync_enable(struct rte_eth_dev *dev); +static int igb_timesync_disable(struct rte_eth_dev *dev); +static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp, + uint32_t flags); +static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp); +static int igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta); +static int igb_timesync_read_time(struct rte_eth_dev *dev, + struct timespec *timestamp); +static int igb_timesync_write_time(struct rte_eth_dev *dev, + const struct timespec *timestamp); +static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, + uint16_t queue_id); +static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, + uint16_t queue_id); +static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction, + uint8_t queue, uint8_t msix_vector); +static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector, + uint8_t index, uint8_t offset); +static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev); + +/* + * Define VF Stats MACRO for Non "cleared on read" register + */ +#define UPDATE_VF_STAT(reg, last, cur) \ +{ \ + u32 latest = E1000_READ_REG(hw, reg); \ + cur += (latest - last) & UINT_MAX; \ + last = latest; \ +} + +#define IGB_FC_PAUSE_TIME 0x0680 +#define IGB_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */ +#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */ + +#define IGBVF_PMD_NAME "rte_igbvf_pmd" /* PMD name */ + +static enum e1000_fc_mode igb_fc_setting = e1000_fc_full; + +/* + * The set of PCI devices this driver supports + */ +static const struct rte_pci_id pci_id_igb_map[] = { + +#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, +#include "rte_pci_dev_ids.h" + +{0}, +}; + +/* + * The set of PCI devices this driver supports (for 82576&I350 VF) + */ +static const struct rte_pci_id pci_id_igbvf_map[] = { + +#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, +#include "rte_pci_dev_ids.h" + +{0}, +}; + +static const struct rte_eth_desc_lim rx_desc_lim = { + .nb_max = E1000_MAX_RING_DESC, + .nb_min = E1000_MIN_RING_DESC, + .nb_align = IGB_RXD_ALIGN, +}; + +static const struct rte_eth_desc_lim tx_desc_lim = { + .nb_max = E1000_MAX_RING_DESC, + .nb_min = E1000_MIN_RING_DESC, + .nb_align = IGB_RXD_ALIGN, +}; + +static const struct eth_dev_ops eth_igb_ops = { + .dev_configure = eth_igb_configure, + .dev_start = eth_igb_start, + .dev_stop = eth_igb_stop, + .dev_close = eth_igb_close, + .promiscuous_enable = eth_igb_promiscuous_enable, + .promiscuous_disable = eth_igb_promiscuous_disable, + .allmulticast_enable = eth_igb_allmulticast_enable, + .allmulticast_disable = eth_igb_allmulticast_disable, + .link_update = eth_igb_link_update, + .stats_get = eth_igb_stats_get, + .xstats_get = eth_igb_xstats_get, + .stats_reset = eth_igb_stats_reset, + .xstats_reset = eth_igb_xstats_reset, + .dev_infos_get = eth_igb_infos_get, + .dev_supported_ptypes_get = eth_igb_supported_ptypes_get, + .mtu_set = eth_igb_mtu_set, + .vlan_filter_set = eth_igb_vlan_filter_set, + .vlan_tpid_set = 
eth_igb_vlan_tpid_set, + .vlan_offload_set = eth_igb_vlan_offload_set, + .rx_queue_setup = eth_igb_rx_queue_setup, + .rx_queue_intr_enable = eth_igb_rx_queue_intr_enable, + .rx_queue_intr_disable = eth_igb_rx_queue_intr_disable, + .rx_queue_release = eth_igb_rx_queue_release, + .rx_queue_count = eth_igb_rx_queue_count, + .rx_descriptor_done = eth_igb_rx_descriptor_done, + .tx_queue_setup = eth_igb_tx_queue_setup, + .tx_queue_release = eth_igb_tx_queue_release, + .dev_led_on = eth_igb_led_on, + .dev_led_off = eth_igb_led_off, + .flow_ctrl_get = eth_igb_flow_ctrl_get, + .flow_ctrl_set = eth_igb_flow_ctrl_set, + .mac_addr_add = eth_igb_rar_set, + .mac_addr_remove = eth_igb_rar_clear, + .mac_addr_set = eth_igb_default_mac_addr_set, + .reta_update = eth_igb_rss_reta_update, + .reta_query = eth_igb_rss_reta_query, + .rss_hash_update = eth_igb_rss_hash_update, + .rss_hash_conf_get = eth_igb_rss_hash_conf_get, + .filter_ctrl = eth_igb_filter_ctrl, + .set_mc_addr_list = eth_igb_set_mc_addr_list, + .rxq_info_get = igb_rxq_info_get, + .txq_info_get = igb_txq_info_get, + .timesync_enable = igb_timesync_enable, + .timesync_disable = igb_timesync_disable, + .timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp, + .timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp, + .get_reg_length = eth_igb_get_reg_length, + .get_reg = eth_igb_get_regs, + .get_eeprom_length = eth_igb_get_eeprom_length, + .get_eeprom = eth_igb_get_eeprom, + .set_eeprom = eth_igb_set_eeprom, + .timesync_adjust_time = igb_timesync_adjust_time, + .timesync_read_time = igb_timesync_read_time, + .timesync_write_time = igb_timesync_write_time, +}; + +/* + * dev_ops for virtual function, bare necessities for basic vf + * operation have been implemented + */ +static const struct eth_dev_ops igbvf_eth_dev_ops = { + .dev_configure = igbvf_dev_configure, + .dev_start = igbvf_dev_start, + .dev_stop = igbvf_dev_stop, + .dev_close = igbvf_dev_close, + .promiscuous_enable = igbvf_promiscuous_enable, + .promiscuous_disable = igbvf_promiscuous_disable, + .allmulticast_enable = igbvf_allmulticast_enable, + .allmulticast_disable = igbvf_allmulticast_disable, + .link_update = eth_igb_link_update, + .stats_get = eth_igbvf_stats_get, + .xstats_get = eth_igbvf_xstats_get, + .stats_reset = eth_igbvf_stats_reset, + .xstats_reset = eth_igbvf_stats_reset, + .vlan_filter_set = igbvf_vlan_filter_set, + .dev_infos_get = eth_igbvf_infos_get, + .dev_supported_ptypes_get = eth_igb_supported_ptypes_get, + .rx_queue_setup = eth_igb_rx_queue_setup, + .rx_queue_release = eth_igb_rx_queue_release, + .tx_queue_setup = eth_igb_tx_queue_setup, + .tx_queue_release = eth_igb_tx_queue_release, + .set_mc_addr_list = eth_igb_set_mc_addr_list, + .rxq_info_get = igb_rxq_info_get, + .txq_info_get = igb_txq_info_get, + .mac_addr_set = igbvf_default_mac_addr_set, + .get_reg_length = igbvf_get_reg_length, + .get_reg = igbvf_get_regs, +}; + +/* store statistics names and its offset in stats structure */ +struct rte_igb_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + unsigned offset; +}; + +static const struct rte_igb_xstats_name_off rte_igb_stats_strings[] = { + {"rx_crc_errors", offsetof(struct e1000_hw_stats, crcerrs)}, + {"rx_align_errors", offsetof(struct e1000_hw_stats, algnerrc)}, + {"rx_symbol_errors", offsetof(struct e1000_hw_stats, symerrs)}, + {"rx_missed_packets", offsetof(struct e1000_hw_stats, mpc)}, + {"tx_single_collision_packets", offsetof(struct e1000_hw_stats, scc)}, + {"tx_multiple_collision_packets", offsetof(struct e1000_hw_stats, mcc)}, + 
{"tx_excessive_collision_packets", offsetof(struct e1000_hw_stats, + ecol)}, + {"tx_late_collisions", offsetof(struct e1000_hw_stats, latecol)}, + {"tx_total_collisions", offsetof(struct e1000_hw_stats, colc)}, + {"tx_deferred_packets", offsetof(struct e1000_hw_stats, dc)}, + {"tx_no_carrier_sense_packets", offsetof(struct e1000_hw_stats, tncrs)}, + {"rx_carrier_ext_errors", offsetof(struct e1000_hw_stats, cexterr)}, + {"rx_length_errors", offsetof(struct e1000_hw_stats, rlec)}, + {"rx_xon_packets", offsetof(struct e1000_hw_stats, xonrxc)}, + {"tx_xon_packets", offsetof(struct e1000_hw_stats, xontxc)}, + {"rx_xoff_packets", offsetof(struct e1000_hw_stats, xoffrxc)}, + {"tx_xoff_packets", offsetof(struct e1000_hw_stats, xofftxc)}, + {"rx_flow_control_unsupported_packets", offsetof(struct e1000_hw_stats, + fcruc)}, + {"rx_size_64_packets", offsetof(struct e1000_hw_stats, prc64)}, + {"rx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, prc127)}, + {"rx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, prc255)}, + {"rx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, prc511)}, + {"rx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats, + prc1023)}, + {"rx_size_1024_to_max_packets", offsetof(struct e1000_hw_stats, + prc1522)}, + {"rx_broadcast_packets", offsetof(struct e1000_hw_stats, bprc)}, + {"rx_multicast_packets", offsetof(struct e1000_hw_stats, mprc)}, + {"rx_undersize_errors", offsetof(struct e1000_hw_stats, ruc)}, + {"rx_fragment_errors", offsetof(struct e1000_hw_stats, rfc)}, + {"rx_oversize_errors", offsetof(struct e1000_hw_stats, roc)}, + {"rx_jabber_errors", offsetof(struct e1000_hw_stats, rjc)}, + {"rx_management_packets", offsetof(struct e1000_hw_stats, mgprc)}, + {"rx_management_dropped", offsetof(struct e1000_hw_stats, mgpdc)}, + {"tx_management_packets", offsetof(struct e1000_hw_stats, mgptc)}, + {"rx_total_packets", offsetof(struct e1000_hw_stats, tpr)}, + {"tx_total_packets", offsetof(struct e1000_hw_stats, tpt)}, + {"rx_total_bytes", offsetof(struct e1000_hw_stats, tor)}, + {"tx_total_bytes", offsetof(struct e1000_hw_stats, tot)}, + {"tx_size_64_packets", offsetof(struct e1000_hw_stats, ptc64)}, + {"tx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, ptc127)}, + {"tx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, ptc255)}, + {"tx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, ptc511)}, + {"tx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats, + ptc1023)}, + {"tx_size_1023_to_max_packets", offsetof(struct e1000_hw_stats, + ptc1522)}, + {"tx_multicast_packets", offsetof(struct e1000_hw_stats, mptc)}, + {"tx_broadcast_packets", offsetof(struct e1000_hw_stats, bptc)}, + {"tx_tso_packets", offsetof(struct e1000_hw_stats, tsctc)}, + {"tx_tso_errors", offsetof(struct e1000_hw_stats, tsctfc)}, + {"rx_sent_to_host_packets", offsetof(struct e1000_hw_stats, rpthc)}, + {"tx_sent_by_host_packets", offsetof(struct e1000_hw_stats, hgptc)}, + {"rx_code_violation_packets", offsetof(struct e1000_hw_stats, scvpc)}, + + {"interrupt_assert_count", offsetof(struct e1000_hw_stats, iac)}, +}; + +#define IGB_NB_XSTATS (sizeof(rte_igb_stats_strings) / \ + sizeof(rte_igb_stats_strings[0])) + +static const struct rte_igb_xstats_name_off rte_igbvf_stats_strings[] = { + {"rx_multicast_packets", offsetof(struct e1000_vf_stats, mprc)}, + {"rx_good_loopback_packets", offsetof(struct e1000_vf_stats, gprlbc)}, + {"tx_good_loopback_packets", offsetof(struct e1000_vf_stats, gptlbc)}, + {"rx_good_loopback_bytes", offsetof(struct e1000_vf_stats, 
gorlbc)}, + {"tx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gotlbc)}, +}; + +#define IGBVF_NB_XSTATS (sizeof(rte_igbvf_stats_strings) / \ + sizeof(rte_igbvf_stats_strings[0])) + +/** + * Atomically reads the link status information from global + * structure rte_eth_dev. + * + * @param dev + * - Pointer to the structure rte_eth_dev to read from. + * - Pointer to the buffer to be saved with the link status. + * + * @return + * - On success, zero. + * - On failure, negative value. + */ +static inline int +rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = link; + struct rte_eth_link *src = &(dev->data->dev_link); + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +/** + * Atomically writes the link status information into global + * structure rte_eth_dev. + * + * @param dev + * - Pointer to the structure rte_eth_dev to read from. + * - Pointer to the buffer to be saved with the link status. + * + * @return + * - On success, zero. + * - On failure, negative value. + */ +static inline int +rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = &(dev->data->dev_link); + struct rte_eth_link *src = link; + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +static inline void +igb_intr_enable(struct rte_eth_dev *dev) +{ + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + E1000_WRITE_REG(hw, E1000_IMS, intr->mask); + E1000_WRITE_FLUSH(hw); +} + +static void +igb_intr_disable(struct e1000_hw *hw) +{ + E1000_WRITE_REG(hw, E1000_IMC, ~0); + E1000_WRITE_FLUSH(hw); +} + +static inline int32_t +igb_pf_reset_hw(struct e1000_hw *hw) +{ + uint32_t ctrl_ext; + int32_t status; + + status = e1000_reset_hw(hw); + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + ctrl_ext |= E1000_CTRL_EXT_PFRSTD; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + + return status; +} + +static void +igb_identify_hardware(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + hw->vendor_id = dev->pci_dev->id.vendor_id; + hw->device_id = dev->pci_dev->id.device_id; + hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id; + hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id; + + e1000_set_mac_type(hw); + + /* need to check if it is a vf device below */ +} + +static int +igb_reset_swfw_lock(struct e1000_hw *hw) +{ + int ret_val; + + /* + * Do mac ops initialization manually here, since we will need + * some function pointers set by this call. + */ + ret_val = e1000_init_mac_params(hw); + if (ret_val) + return ret_val; + + /* + * SMBI lock should not fail in this early stage. If this is the case, + * it is due to an improper exit of the application. + * So force the release of the faulty lock. + */ + if (e1000_get_hw_semaphore_generic(hw) < 0) { + PMD_DRV_LOG(DEBUG, "SMBI lock released"); + } + e1000_put_hw_semaphore_generic(hw); + + if (hw->mac.ops.acquire_swfw_sync != NULL) { + uint16_t mask; + + /* + * Phy lock should not fail in this early stage. If this is the case, + * it is due to an improper exit of the application. + * So force the release of the faulty lock. 
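+ * The mask below selects the PHY semaphore bit owned by this PCI
+ * function: E1000_SWFW_PHY0_SM shifted by hw->bus.func, with an
+ * additional shift applied for functions above E1000_FUNC_1.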
+ */ + mask = E1000_SWFW_PHY0_SM << hw->bus.func; + if (hw->bus.func > E1000_FUNC_1) + mask <<= 2; + if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) { + PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", + hw->bus.func); + } + hw->mac.ops.release_swfw_sync(hw, mask); + + /* + * This one is more tricky since it is common to all ports; but + * swfw_sync retries last long enough (1s) to be almost sure that if + * lock can not be taken it is due to an improper lock of the + * semaphore. + */ + mask = E1000_SWFW_EEP_SM; + if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) { + PMD_DRV_LOG(DEBUG, "SWFW common locks released"); + } + hw->mac.ops.release_swfw_sync(hw, mask); + } + + return E1000_SUCCESS; +} + +static int +eth_igb_dev_init(struct rte_eth_dev *eth_dev) +{ + int error = 0; + struct rte_pci_device *pci_dev; + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct e1000_vfta * shadow_vfta = + E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); + struct e1000_adapter *adapter = + E1000_DEV_PRIVATE(eth_dev->data->dev_private); + + uint32_t ctrl_ext; + + pci_dev = eth_dev->pci_dev; + + eth_dev->dev_ops = ð_igb_ops; + eth_dev->rx_pkt_burst = ð_igb_recv_pkts; + eth_dev->tx_pkt_burst = ð_igb_xmit_pkts; + + /* for secondary processes, we don't initialise any further as primary + * has already done this work. Only check we don't need a different + * RX function */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY){ + if (eth_dev->data->scattered_rx) + eth_dev->rx_pkt_burst = ð_igb_recv_scattered_pkts; + return 0; + } + + rte_eth_copy_pci_info(eth_dev, pci_dev); + + hw->hw_addr= (void *)pci_dev->mem_resource[0].addr; + + igb_identify_hardware(eth_dev); + if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) { + error = -EIO; + goto err_late; + } + + e1000_get_bus_info(hw); + + /* Reset any pending lock */ + if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) { + error = -EIO; + goto err_late; + } + + /* Finish initialization */ + if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) { + error = -EIO; + goto err_late; + } + + hw->mac.autoneg = 1; + hw->phy.autoneg_wait_to_complete = 0; + hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX; + + /* Copper options */ + if (hw->phy.media_type == e1000_media_type_copper) { + hw->phy.mdix = 0; /* AUTO_ALL_MODES */ + hw->phy.disable_polarity_correction = 0; + hw->phy.ms_type = e1000_ms_hw_default; + } + + /* + * Start from a known state, this is important in reading the nvm + * and mac from that. + */ + igb_pf_reset_hw(hw); + + /* Make sure we have a good EEPROM before we read from it */ + if (e1000_validate_nvm_checksum(hw) < 0) { + /* + * Some PCI-E parts fail the first check due to + * the link being in sleep state, call it again, + * if it fails a second time its a real issue. 
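+ * Hence the checksum is simply validated a second time below, and
+ * only a repeated failure is treated as a fatal EEPROM error.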
+ */ + if (e1000_validate_nvm_checksum(hw) < 0) { + PMD_INIT_LOG(ERR, "EEPROM checksum invalid"); + error = -EIO; + goto err_late; + } + } + + /* Read the permanent MAC address out of the EEPROM */ + if (e1000_read_mac_addr(hw) != 0) { + PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address"); + error = -EIO; + goto err_late; + } + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc("e1000", + ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to " + "store MAC addresses", + ETHER_ADDR_LEN * hw->mac.rar_entry_count); + error = -ENOMEM; + goto err_late; + } + + /* Copy the permanent MAC address */ + ether_addr_copy((struct ether_addr *)hw->mac.addr, ð_dev->data->mac_addrs[0]); + + /* initialize the vfta */ + memset(shadow_vfta, 0, sizeof(*shadow_vfta)); + + /* Now initialize the hardware */ + if (igb_hardware_init(hw) != 0) { + PMD_INIT_LOG(ERR, "Hardware initialization failed"); + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + error = -ENODEV; + goto err_late; + } + hw->mac.get_link_status = 1; + adapter->stopped = 0; + + /* Indicate SOL/IDER usage */ + if (e1000_check_reset_block(hw) < 0) { + PMD_INIT_LOG(ERR, "PHY reset is blocked due to" + "SOL/IDER session"); + } + + /* initialize PF if max_vfs not zero */ + igb_pf_host_init(eth_dev); + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + ctrl_ext |= E1000_CTRL_EXT_PFRSTD; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + + PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x", + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id); + + rte_intr_callback_register(&pci_dev->intr_handle, + eth_igb_interrupt_handler, + (void *)eth_dev); + + /* enable uio/vfio intr/eventfd mapping */ + rte_intr_enable(&pci_dev->intr_handle); + + /* enable support intr */ + igb_intr_enable(eth_dev); + + TAILQ_INIT(&filter_info->flex_list); + filter_info->flex_mask = 0; + TAILQ_INIT(&filter_info->twotuple_list); + filter_info->twotuple_mask = 0; + TAILQ_INIT(&filter_info->fivetuple_list); + filter_info->fivetuple_mask = 0; + + return 0; + +err_late: + igb_hw_control_release(hw); + + return error; +} + +static int +eth_igb_dev_uninit(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev; + struct e1000_hw *hw; + struct e1000_adapter *adapter = + E1000_DEV_PRIVATE(eth_dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -EPERM; + + hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + pci_dev = eth_dev->pci_dev; + + if (adapter->stopped == 0) + eth_igb_close(eth_dev); + + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + + /* Reset any pending lock */ + igb_reset_swfw_lock(hw); + + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + + /* uninitialize PF if max_vfs not zero */ + igb_pf_host_uninit(eth_dev); + + /* disable uio intr before callback unregister */ + rte_intr_disable(&(pci_dev->intr_handle)); + rte_intr_callback_unregister(&(pci_dev->intr_handle), + eth_igb_interrupt_handler, (void *)eth_dev); + + return 0; +} + +/* + * Virtual Function device init + */ +static int +eth_igbvf_dev_init(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev; + struct e1000_adapter *adapter = + E1000_DEV_PRIVATE(eth_dev->data->dev_private); + struct e1000_hw 
*hw = + E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + int diag; + struct ether_addr *perm_addr = (struct ether_addr *)hw->mac.perm_addr; + + PMD_INIT_FUNC_TRACE(); + + eth_dev->dev_ops = &igbvf_eth_dev_ops; + eth_dev->rx_pkt_burst = ð_igb_recv_pkts; + eth_dev->tx_pkt_burst = ð_igb_xmit_pkts; + + /* for secondary processes, we don't initialise any further as primary + * has already done this work. Only check we don't need a different + * RX function */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY){ + if (eth_dev->data->scattered_rx) + eth_dev->rx_pkt_burst = ð_igb_recv_scattered_pkts; + return 0; + } + + pci_dev = eth_dev->pci_dev; + + rte_eth_copy_pci_info(eth_dev, pci_dev); + + hw->device_id = pci_dev->id.device_id; + hw->vendor_id = pci_dev->id.vendor_id; + hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; + adapter->stopped = 0; + + /* Initialize the shared code (base driver) */ + diag = e1000_setup_init_funcs(hw, TRUE); + if (diag != 0) { + PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d", + diag); + return -EIO; + } + + /* init_mailbox_params */ + hw->mbx.ops.init_params(hw); + + /* Disable the interrupts for VF */ + igbvf_intr_disable(hw); + + diag = hw->mac.ops.reset_hw(hw); + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN * + hw->mac.rar_entry_count, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, + "Failed to allocate %d bytes needed to store MAC " + "addresses", + ETHER_ADDR_LEN * hw->mac.rar_entry_count); + return -ENOMEM; + } + + /* Generate a random MAC address, if none was assigned by PF. */ + if (is_zero_ether_addr(perm_addr)) { + eth_random_addr(perm_addr->addr_bytes); + diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0); + if (diag) { + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + return diag; + } + PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF"); + PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address " + "%02x:%02x:%02x:%02x:%02x:%02x", + perm_addr->addr_bytes[0], + perm_addr->addr_bytes[1], + perm_addr->addr_bytes[2], + perm_addr->addr_bytes[3], + perm_addr->addr_bytes[4], + perm_addr->addr_bytes[5]); + } + + /* Copy the permanent MAC address */ + ether_addr_copy((struct ether_addr *) hw->mac.perm_addr, + ð_dev->data->mac_addrs[0]); + + PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x " + "mac.type=%s", + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id, "igb_mac_82576_vf"); + + return 0; +} + +static int +eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev) +{ + struct e1000_adapter *adapter = + E1000_DEV_PRIVATE(eth_dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -EPERM; + + if (adapter->stopped == 0) + igbvf_dev_close(eth_dev); + + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + + return 0; +} + +static struct eth_driver rte_igb_pmd = { + .pci_drv = { + .name = "rte_igb_pmd", + .id_table = pci_id_igb_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | + RTE_PCI_DRV_DETACHABLE, + }, + .eth_dev_init = eth_igb_dev_init, + .eth_dev_uninit = eth_igb_dev_uninit, + .dev_private_size = sizeof(struct e1000_adapter), +}; + +/* + * virtual function driver struct + */ +static struct eth_driver rte_igbvf_pmd = { + .pci_drv = { + .name = "rte_igbvf_pmd", + .id_table = pci_id_igbvf_map, + .drv_flags = 
RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE, + }, + .eth_dev_init = eth_igbvf_dev_init, + .eth_dev_uninit = eth_igbvf_dev_uninit, + .dev_private_size = sizeof(struct e1000_adapter), +}; + +static int +rte_igb_pmd_init(const char *name __rte_unused, const char *params __rte_unused) +{ + rte_eth_driver_register(&rte_igb_pmd); + return 0; +} + +static void +igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + /* RCTL: enable VLAN filter since VMDq always use VLAN filter */ + uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl |= E1000_RCTL_VFE; + E1000_WRITE_REG(hw, E1000_RCTL, rctl); +} + +/* + * VF Driver initialization routine. + * Invoked one at EAL init time. + * Register itself as the [Virtual Poll Mode] Driver of PCI IGB devices. + */ +static int +rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused) +{ + PMD_INIT_FUNC_TRACE(); + + rte_eth_driver_register(&rte_igbvf_pmd); + return 0; +} + +static int +igb_check_mq_mode(struct rte_eth_dev *dev) +{ + enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode; + enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode; + uint16_t nb_rx_q = dev->data->nb_rx_queues; + uint16_t nb_tx_q = dev->data->nb_rx_queues; + + if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) || + tx_mq_mode == ETH_MQ_TX_DCB || + tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) { + PMD_INIT_LOG(ERR, "DCB mode is not supported."); + return -EINVAL; + } + if (RTE_ETH_DEV_SRIOV(dev).active != 0) { + /* Check multi-queue mode. + * To no break software we accept ETH_MQ_RX_NONE as this might + * be used to turn off VLAN filter. + */ + + if (rx_mq_mode == ETH_MQ_RX_NONE || + rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) { + dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY; + RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1; + } else { + /* Only support one queue on VFs. + * RSS together with SRIOV is not supported. + */ + PMD_INIT_LOG(ERR, "SRIOV is active," + " wrong mq_mode rx %d.", + rx_mq_mode); + return -EINVAL; + } + /* TX mode is not used here, so mode might be ignored.*/ + if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) { + /* SRIOV only works in VMDq enable mode */ + PMD_INIT_LOG(WARNING, "SRIOV is active," + " TX mode %d is not supported. " + " Driver will behave as %d mode.", + tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY); + } + + /* check valid queue number */ + if ((nb_rx_q > 1) || (nb_tx_q > 1)) { + PMD_INIT_LOG(ERR, "SRIOV is active," + " only support one queue on VFs."); + return -EINVAL; + } + } else { + /* To no break software that set invalid mode, only display + * warning if invalid mode is used. + */ + if (rx_mq_mode != ETH_MQ_RX_NONE && + rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY && + rx_mq_mode != ETH_MQ_RX_RSS) { + /* RSS together with VMDq not supported*/ + PMD_INIT_LOG(ERR, "RX mode %d is not supported.", + rx_mq_mode); + return -EINVAL; + } + + if (tx_mq_mode != ETH_MQ_TX_NONE && + tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) { + PMD_INIT_LOG(WARNING, "TX mode %d is not supported." 
+ " Due to txmode is meaningless in this" + " driver, just ignore.", + tx_mq_mode); + } + } + return 0; +} + +static int +eth_igb_configure(struct rte_eth_dev *dev) +{ + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + int ret; + + PMD_INIT_FUNC_TRACE(); + + /* multipe queue mode checking */ + ret = igb_check_mq_mode(dev); + if (ret != 0) { + PMD_DRV_LOG(ERR, "igb_check_mq_mode fails with %d.", + ret); + return ret; + } + + intr->flags |= E1000_FLAG_NEED_LINK_UPDATE; + PMD_INIT_FUNC_TRACE(); + + return 0; +} + +static int +eth_igb_start(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_adapter *adapter = + E1000_DEV_PRIVATE(dev->data->dev_private); + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + int ret, mask; + uint32_t intr_vector = 0; + uint32_t ctrl_ext; + uint32_t *speeds; + int num_speeds; + bool autoneg; + + PMD_INIT_FUNC_TRACE(); + + /* disable uio/vfio intr/eventfd mapping */ + rte_intr_disable(intr_handle); + + /* Power up the phy. Needed to make the link go Up */ + e1000_power_up_phy(hw); + + /* + * Packet Buffer Allocation (PBA) + * Writing PBA sets the receive portion of the buffer + * the remainder is used for the transmit buffer. + */ + if (hw->mac.type == e1000_82575) { + uint32_t pba; + + pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ + E1000_WRITE_REG(hw, E1000_PBA, pba); + } + + /* Put the address into the Receive Address Array */ + e1000_rar_set(hw, hw->mac.addr, 0); + + /* Initialize the hardware */ + if (igb_hardware_init(hw)) { + PMD_INIT_LOG(ERR, "Unable to initialize the hardware"); + return -EIO; + } + adapter->stopped = 0; + + E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN); + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + ctrl_ext |= E1000_CTRL_EXT_PFRSTD; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + + /* configure PF module if SRIOV enabled */ + igb_pf_host_configure(dev); + + /* check and configure queue intr-vector mapping */ + if ((rte_intr_cap_multiple(intr_handle) || + !RTE_ETH_DEV_SRIOV(dev).active) && + dev->data->dev_conf.intr_conf.rxq != 0) { + intr_vector = dev->data->nb_rx_queues; + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -1; + } + + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), 0); + if (intr_handle->intr_vec == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec\n", dev->data->nb_rx_queues); + return -ENOMEM; + } + } + + /* confiugre msix for rx interrupt */ + eth_igb_configure_msix_intr(dev); + + /* Configure for OS presence */ + igb_init_manageability(hw); + + eth_igb_tx_init(dev); + + /* This can fail when allocating mbufs for descriptor rings */ + ret = eth_igb_rx_init(dev); + if (ret) { + PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); + igb_dev_clear_queues(dev); + return ret; + } + + e1000_clear_hw_cntrs_base_generic(hw); + + /* + * VLAN Offload Settings + */ + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \ + ETH_VLAN_EXTEND_MASK; + eth_igb_vlan_offload_set(dev, mask); + + if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) { + /* Enable VLAN filter since VMDq always use VLAN filter */ + igb_vmdq_vlan_hw_filter_enable(dev); + } + + if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) || + (hw->mac.type == 
e1000_i350) || (hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) { + /* Configure EITR with the maximum possible value (0xFFFF) */ + E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF); + } + + /* Setup link speed and duplex */ + speeds = &dev->data->dev_conf.link_speeds; + if (*speeds == ETH_LINK_SPEED_AUTONEG) { + hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX; + } else { + num_speeds = 0; + autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0; + + /* Reset */ + hw->phy.autoneg_advertised = 0; + + if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M | + ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M | + ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) { + num_speeds = -1; + goto error_invalid_config; + } + if (*speeds & ETH_LINK_SPEED_10M_HD) { + hw->phy.autoneg_advertised |= ADVERTISE_10_HALF; + num_speeds++; + } + if (*speeds & ETH_LINK_SPEED_10M) { + hw->phy.autoneg_advertised |= ADVERTISE_10_FULL; + num_speeds++; + } + if (*speeds & ETH_LINK_SPEED_100M_HD) { + hw->phy.autoneg_advertised |= ADVERTISE_100_HALF; + num_speeds++; + } + if (*speeds & ETH_LINK_SPEED_100M) { + hw->phy.autoneg_advertised |= ADVERTISE_100_FULL; + num_speeds++; + } + if (*speeds & ETH_LINK_SPEED_1G) { + hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL; + num_speeds++; + } + if (num_speeds == 0 || (!autoneg && (num_speeds > 1))) + goto error_invalid_config; + } + + e1000_setup_link(hw); + + if (rte_intr_allow_others(intr_handle)) { + /* check if lsc interrupt is enabled */ + if (dev->data->dev_conf.intr_conf.lsc != 0) + eth_igb_lsc_interrupt_setup(dev); + } else { + rte_intr_callback_unregister(intr_handle, + eth_igb_interrupt_handler, + (void *)dev); + if (dev->data->dev_conf.intr_conf.lsc != 0) + PMD_INIT_LOG(INFO, "lsc won't enable because of" + " no intr multiplex\n"); + } + + /* check if rxq interrupt is enabled */ + if (dev->data->dev_conf.intr_conf.rxq != 0 && + rte_intr_dp_is_en(intr_handle)) + eth_igb_rxq_interrupt_setup(dev); + + /* enable uio/vfio intr/eventfd mapping */ + rte_intr_enable(intr_handle); + + /* resume enabled intr since hw reset */ + igb_intr_enable(dev); + + PMD_INIT_LOG(DEBUG, "<<"); + + return 0; + +error_invalid_config: + PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u", + dev->data->dev_conf.link_speeds, dev->data->port_id); + igb_dev_clear_queues(dev); + return -EINVAL; +} + +/********************************************************************* + * + * This routine disables all traffic on the adapter by issuing a + * global reset on the MAC. + * + **********************************************************************/ +static void +eth_igb_stop(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct rte_eth_link link; + struct e1000_flex_filter *p_flex; + struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next; + struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next; + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + + igb_intr_disable(hw); + + /* disable intr eventfd mapping */ + rte_intr_disable(intr_handle); + + igb_pf_reset_hw(hw); + E1000_WRITE_REG(hw, E1000_WUC, 0); + + /* Set bit for Go Link disconnect */ + if (hw->mac.type >= e1000_82580) { + uint32_t phpm_reg; + + phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + phpm_reg |= E1000_82580_PM_GO_LINKD; + E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg); + } + + /* Power down the phy. 
Needed to make the link go Down */ + if (hw->phy.media_type == e1000_media_type_copper) + e1000_power_down_phy(hw); + else + e1000_shutdown_fiber_serdes_link(hw); + + igb_dev_clear_queues(dev); + + /* clear the recorded link status */ + memset(&link, 0, sizeof(link)); + rte_igb_dev_atomic_write_link_status(dev, &link); + + /* Remove all flex filters of the device */ + while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) { + TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries); + rte_free(p_flex); + } + filter_info->flex_mask = 0; + + /* Remove all ntuple filters of the device */ + for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list); + p_5tuple != NULL; p_5tuple = p_5tuple_next) { + p_5tuple_next = TAILQ_NEXT(p_5tuple, entries); + TAILQ_REMOVE(&filter_info->fivetuple_list, + p_5tuple, entries); + rte_free(p_5tuple); + } + filter_info->fivetuple_mask = 0; + for (p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list); + p_2tuple != NULL; p_2tuple = p_2tuple_next) { + p_2tuple_next = TAILQ_NEXT(p_2tuple, entries); + TAILQ_REMOVE(&filter_info->twotuple_list, + p_2tuple, entries); + rte_free(p_2tuple); + } + filter_info->twotuple_mask = 0; + + if (!rte_intr_allow_others(intr_handle)) + /* resume to the default handler */ + rte_intr_callback_register(intr_handle, + eth_igb_interrupt_handler, + (void *)dev); + + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec != NULL) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } +} + +static void +eth_igb_close(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_adapter *adapter = + E1000_DEV_PRIVATE(dev->data->dev_private); + struct rte_eth_link link; + struct rte_pci_device *pci_dev; + + eth_igb_stop(dev); + adapter->stopped = 1; + + e1000_phy_hw_reset(hw); + igb_release_manageability(hw); + igb_hw_control_release(hw); + + /* Clear bit for Go Link disconnect */ + if (hw->mac.type >= e1000_82580) { + uint32_t phpm_reg; + + phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + phpm_reg &= ~E1000_82580_PM_GO_LINKD; + E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg); + } + + igb_dev_free_queues(dev); + + pci_dev = dev->pci_dev; + if (pci_dev->intr_handle.intr_vec) { + rte_free(pci_dev->intr_handle.intr_vec); + pci_dev->intr_handle.intr_vec = NULL; + } + + memset(&link, 0, sizeof(link)); + rte_igb_dev_atomic_write_link_status(dev, &link); +} + +static int +igb_get_rx_buffer_size(struct e1000_hw *hw) +{ + uint32_t rx_buf_size; + if (hw->mac.type == e1000_82576) { + rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10; + } else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) { + /* PBS needs to be translated according to a lookup table */ + rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf); + rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size); + rx_buf_size = (rx_buf_size << 10); + } else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) { + rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10; + } else { + rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10; + } + + return rx_buf_size; +} + +/********************************************************************* + * + * Initialize the hardware + * + **********************************************************************/ +static int +igb_hardware_init(struct e1000_hw *hw) +{ + uint32_t rx_buf_size; + int diag; + + /* Let the firmware know the OS is in control */ + 
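+	/*
+	 * Worked example of the flow-control watermarks programmed further
+	 * down in this function (illustration only, assuming a 64 KB Rx
+	 * packet buffer; the real size comes from igb_get_rx_buffer_size()):
+	 *   high_water = 65536 - 2 * 1518 = 62500 bytes
+	 *   low_water  = 62500 - 1500     = 61000 bytes
+	 * An XOFF pause frame is sent once the Rx buffer fills past
+	 * high_water, and XON is sent again when it drains below low_water.
+	 */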
igb_hw_control_acquire(hw); + + /* + * These parameters control the automatic generation (Tx) and + * response (Rx) to Ethernet PAUSE frames. + * - High water mark should allow for at least two standard size (1518) + * frames to be received after sending an XOFF. + * - Low water mark works best when it is very near the high water mark. + * This allows the receiver to restart by sending XON when it has + * drained a bit. Here we use an arbitrary value of 1500 which will + * restart after one full frame is pulled from the buffer. There + * could be several smaller frames in the buffer and if so they will + * not trigger the XON until their total number reduces the buffer + * by 1500. + * - The pause time is fairly large at 1000 x 512ns = 512 usec. + */ + rx_buf_size = igb_get_rx_buffer_size(hw); + + hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2); + hw->fc.low_water = hw->fc.high_water - 1500; + hw->fc.pause_time = IGB_FC_PAUSE_TIME; + hw->fc.send_xon = 1; + + /* Set Flow control, use the tunable location if sane */ + if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4)) + hw->fc.requested_mode = igb_fc_setting; + else + hw->fc.requested_mode = e1000_fc_none; + + /* Issue a global reset */ + igb_pf_reset_hw(hw); + E1000_WRITE_REG(hw, E1000_WUC, 0); + + diag = e1000_init_hw(hw); + if (diag < 0) + return diag; + + E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN); + e1000_get_phy_info(hw); + e1000_check_for_link(hw); + + return 0; +} + +/* This function is based on igb_update_stats_counters() in igb/if_igb.c */ +static void +igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats) +{ + int pause_frames; + + uint64_t old_gprc = stats->gprc; + uint64_t old_gptc = stats->gptc; + uint64_t old_tpr = stats->tpr; + uint64_t old_tpt = stats->tpt; + uint64_t old_rpthc = stats->rpthc; + uint64_t old_hgptc = stats->hgptc; + + if(hw->phy.media_type == e1000_media_type_copper || + (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { + stats->symerrs += + E1000_READ_REG(hw,E1000_SYMERRS); + stats->sec += E1000_READ_REG(hw, E1000_SEC); + } + + stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); + stats->mpc += E1000_READ_REG(hw, E1000_MPC); + stats->scc += E1000_READ_REG(hw, E1000_SCC); + stats->ecol += E1000_READ_REG(hw, E1000_ECOL); + + stats->mcc += E1000_READ_REG(hw, E1000_MCC); + stats->latecol += E1000_READ_REG(hw, E1000_LATECOL); + stats->colc += E1000_READ_REG(hw, E1000_COLC); + stats->dc += E1000_READ_REG(hw, E1000_DC); + stats->rlec += E1000_READ_REG(hw, E1000_RLEC); + stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC); + stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC); + /* + ** For watchdog management we need to know if we have been + ** paused during the last interval, so capture that here. 
+ */ + pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC); + stats->xoffrxc += pause_frames; + stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); + stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); + stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); + stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); + stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); + stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); + stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); + stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); + stats->gprc += E1000_READ_REG(hw, E1000_GPRC); + stats->bprc += E1000_READ_REG(hw, E1000_BPRC); + stats->mprc += E1000_READ_REG(hw, E1000_MPRC); + stats->gptc += E1000_READ_REG(hw, E1000_GPTC); + + /* For the 64-bit byte counters the low dword must be read first. */ + /* Both registers clear on the read of the high dword */ + + /* Workaround CRC bytes included in size, take away 4 bytes/packet */ + stats->gorc += E1000_READ_REG(hw, E1000_GORCL); + stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32); + stats->gorc -= (stats->gprc - old_gprc) * ETHER_CRC_LEN; + stats->gotc += E1000_READ_REG(hw, E1000_GOTCL); + stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32); + stats->gotc -= (stats->gptc - old_gptc) * ETHER_CRC_LEN; + + stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); + stats->ruc += E1000_READ_REG(hw, E1000_RUC); + stats->rfc += E1000_READ_REG(hw, E1000_RFC); + stats->roc += E1000_READ_REG(hw, E1000_ROC); + stats->rjc += E1000_READ_REG(hw, E1000_RJC); + + stats->tpr += E1000_READ_REG(hw, E1000_TPR); + stats->tpt += E1000_READ_REG(hw, E1000_TPT); + + stats->tor += E1000_READ_REG(hw, E1000_TORL); + stats->tor += ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32); + stats->tor -= (stats->tpr - old_tpr) * ETHER_CRC_LEN; + stats->tot += E1000_READ_REG(hw, E1000_TOTL); + stats->tot += ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32); + stats->tot -= (stats->tpt - old_tpt) * ETHER_CRC_LEN; + + stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); + stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); + stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); + stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); + stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); + stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); + stats->mptc += E1000_READ_REG(hw, E1000_MPTC); + stats->bptc += E1000_READ_REG(hw, E1000_BPTC); + + /* Interrupt Counts */ + + stats->iac += E1000_READ_REG(hw, E1000_IAC); + stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); + stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); + stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); + stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); + stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); + stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); + stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); + stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); + + /* Host to Card Statistics */ + + stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC); + stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC); + stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC); + stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC); + stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC); + stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC); + stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); + stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL); + stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32); + stats->hgorc -= (stats->rpthc - old_rpthc) * ETHER_CRC_LEN; + stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL); + stats->hgotc += 
((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32); + stats->hgotc -= (stats->hgptc - old_hgptc) * ETHER_CRC_LEN; + stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); + stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); + stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); + + stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); + stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); + stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); + stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); + stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); + stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); +} + +static void +eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_hw_stats *stats = + E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + igb_read_stats_registers(hw, stats); + + if (rte_stats == NULL) + return; + + /* Rx Errors */ + rte_stats->imissed = stats->mpc; + rte_stats->ierrors = stats->crcerrs + + stats->rlec + stats->ruc + stats->roc + + stats->rxerrc + stats->algnerrc + stats->cexterr; + + /* Tx Errors */ + rte_stats->oerrors = stats->ecol + stats->latecol; + + rte_stats->ipackets = stats->gprc; + rte_stats->opackets = stats->gptc; + rte_stats->ibytes = stats->gorc; + rte_stats->obytes = stats->gotc; +} + +static void +eth_igb_stats_reset(struct rte_eth_dev *dev) +{ + struct e1000_hw_stats *hw_stats = + E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + /* HW registers are cleared on read */ + eth_igb_stats_get(dev, NULL); + + /* Reset software totals */ + memset(hw_stats, 0, sizeof(*hw_stats)); +} + +static void +eth_igb_xstats_reset(struct rte_eth_dev *dev) +{ + struct e1000_hw_stats *stats = + E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + /* HW registers are cleared on read */ + eth_igb_xstats_get(dev, NULL, IGB_NB_XSTATS); + + /* Reset software totals */ + memset(stats, 0, sizeof(*stats)); +} + +static int +eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, + unsigned n) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_hw_stats *hw_stats = + E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + unsigned i; + + if (n < IGB_NB_XSTATS) + return IGB_NB_XSTATS; + + igb_read_stats_registers(hw, hw_stats); + + /* If this is a reset xstats is NULL, and we have cleared the + * registers by reading them. 
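+	 * eth_igb_xstats_reset() relies on this: it passes a NULL xstats
+	 * array with n == IGB_NB_XSTATS, so the read above latches and
+	 * clears the read-on-clear counters without copying anything out.
+	 * Callers that only want the required array size can pass
+	 * n < IGB_NB_XSTATS instead; the count is returned before the
+	 * registers are touched.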
+ */ + if (!xstats) + return 0; + + /* Extended stats */ + for (i = 0; i < IGB_NB_XSTATS; i++) { + snprintf(xstats[i].name, sizeof(xstats[i].name), + "%s", rte_igb_stats_strings[i].name); + xstats[i].value = *(uint64_t *)(((char *)hw_stats) + + rte_igb_stats_strings[i].offset); + } + + return IGB_NB_XSTATS; +} + +static void +igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats) +{ + /* Good Rx packets, include VF loopback */ + UPDATE_VF_STAT(E1000_VFGPRC, + hw_stats->last_gprc, hw_stats->gprc); + + /* Good Rx octets, include VF loopback */ + UPDATE_VF_STAT(E1000_VFGORC, + hw_stats->last_gorc, hw_stats->gorc); + + /* Good Tx packets, include VF loopback */ + UPDATE_VF_STAT(E1000_VFGPTC, + hw_stats->last_gptc, hw_stats->gptc); + + /* Good Tx octets, include VF loopback */ + UPDATE_VF_STAT(E1000_VFGOTC, + hw_stats->last_gotc, hw_stats->gotc); + + /* Rx Multicst packets */ + UPDATE_VF_STAT(E1000_VFMPRC, + hw_stats->last_mprc, hw_stats->mprc); + + /* Good Rx loopback packets */ + UPDATE_VF_STAT(E1000_VFGPRLBC, + hw_stats->last_gprlbc, hw_stats->gprlbc); + + /* Good Rx loopback octets */ + UPDATE_VF_STAT(E1000_VFGORLBC, + hw_stats->last_gorlbc, hw_stats->gorlbc); + + /* Good Tx loopback packets */ + UPDATE_VF_STAT(E1000_VFGPTLBC, + hw_stats->last_gptlbc, hw_stats->gptlbc); + + /* Good Tx loopback octets */ + UPDATE_VF_STAT(E1000_VFGOTLBC, + hw_stats->last_gotlbc, hw_stats->gotlbc); +} + +static int +eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, + unsigned n) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *) + E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + unsigned i; + + if (n < IGBVF_NB_XSTATS) + return IGBVF_NB_XSTATS; + + igbvf_read_stats_registers(hw, hw_stats); + + if (!xstats) + return 0; + + for (i = 0; i < IGBVF_NB_XSTATS; i++) { + snprintf(xstats[i].name, sizeof(xstats[i].name), "%s", + rte_igbvf_stats_strings[i].name); + xstats[i].value = *(uint64_t *)(((char *)hw_stats) + + rte_igbvf_stats_strings[i].offset); + } + + return IGBVF_NB_XSTATS; +} + +static void +eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *) + E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + igbvf_read_stats_registers(hw, hw_stats); + + if (rte_stats == NULL) + return; + + rte_stats->ipackets = hw_stats->gprc; + rte_stats->ibytes = hw_stats->gorc; + rte_stats->opackets = hw_stats->gptc; + rte_stats->obytes = hw_stats->gotc; + rte_stats->imcasts = hw_stats->mprc; + rte_stats->ilbpackets = hw_stats->gprlbc; + rte_stats->ilbbytes = hw_stats->gorlbc; + rte_stats->olbpackets = hw_stats->gptlbc; + rte_stats->olbbytes = hw_stats->gotlbc; +} + +static void +eth_igbvf_stats_reset(struct rte_eth_dev *dev) +{ + struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*) + E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + /* Sync HW register to the last stats */ + eth_igbvf_stats_get(dev, NULL); + + /* reset HW current stats*/ + memset(&hw_stats->gprc, 0, sizeof(*hw_stats) - + offsetof(struct e1000_vf_stats, gprc)); +} + +static void +eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. 
*/ + dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */ + dev_info->max_mac_addrs = hw->mac.rar_entry_count; + dev_info->rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + dev_info->tx_offload_capa = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO; + + switch (hw->mac.type) { + case e1000_82575: + dev_info->max_rx_queues = 4; + dev_info->max_tx_queues = 4; + dev_info->max_vmdq_pools = 0; + break; + + case e1000_82576: + dev_info->max_rx_queues = 16; + dev_info->max_tx_queues = 16; + dev_info->max_vmdq_pools = ETH_8_POOLS; + dev_info->vmdq_queue_num = 16; + break; + + case e1000_82580: + dev_info->max_rx_queues = 8; + dev_info->max_tx_queues = 8; + dev_info->max_vmdq_pools = ETH_8_POOLS; + dev_info->vmdq_queue_num = 8; + break; + + case e1000_i350: + dev_info->max_rx_queues = 8; + dev_info->max_tx_queues = 8; + dev_info->max_vmdq_pools = ETH_8_POOLS; + dev_info->vmdq_queue_num = 8; + break; + + case e1000_i354: + dev_info->max_rx_queues = 8; + dev_info->max_tx_queues = 8; + break; + + case e1000_i210: + dev_info->max_rx_queues = 4; + dev_info->max_tx_queues = 4; + dev_info->max_vmdq_pools = 0; + break; + + case e1000_i211: + dev_info->max_rx_queues = 2; + dev_info->max_tx_queues = 2; + dev_info->max_vmdq_pools = 0; + break; + + default: + /* Should not happen */ + break; + } + dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t); + dev_info->reta_size = ETH_RSS_RETA_SIZE_128; + dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = IGB_DEFAULT_RX_PTHRESH, + .hthresh = IGB_DEFAULT_RX_HTHRESH, + .wthresh = IGB_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = IGB_DEFAULT_TX_PTHRESH, + .hthresh = IGB_DEFAULT_TX_HTHRESH, + .wthresh = IGB_DEFAULT_TX_WTHRESH, + }, + .txq_flags = 0, + }; + + dev_info->rx_desc_lim = rx_desc_lim; + dev_info->tx_desc_lim = tx_desc_lim; + + dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M | + ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M | + ETH_LINK_SPEED_1G; +} + +static const uint32_t * +eth_igb_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + /* refers to igb_rxd_pkt_info_to_pkt_type() */ + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_TUNNEL_IP, + RTE_PTYPE_INNER_L3_IPV6, + RTE_PTYPE_INNER_L3_IPV6_EXT, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == eth_igb_recv_pkts || + dev->rx_pkt_burst == eth_igb_recv_scattered_pkts) + return ptypes; + return NULL; +} + +static void +eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ + dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. 
*/ + dev_info->max_mac_addrs = hw->mac.rar_entry_count; + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO; + switch (hw->mac.type) { + case e1000_vfadapt: + dev_info->max_rx_queues = 2; + dev_info->max_tx_queues = 2; + break; + case e1000_vfadapt_i350: + dev_info->max_rx_queues = 1; + dev_info->max_tx_queues = 1; + break; + default: + /* Should not happen */ + break; + } + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = IGB_DEFAULT_RX_PTHRESH, + .hthresh = IGB_DEFAULT_RX_HTHRESH, + .wthresh = IGB_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = IGB_DEFAULT_TX_PTHRESH, + .hthresh = IGB_DEFAULT_TX_HTHRESH, + .wthresh = IGB_DEFAULT_TX_WTHRESH, + }, + .txq_flags = 0, + }; + + dev_info->rx_desc_lim = rx_desc_lim; + dev_info->tx_desc_lim = tx_desc_lim; +} + +/* return 0 means link status changed, -1 means not changed */ +static int +eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_link link, old; + int link_check, count; + + link_check = 0; + hw->mac.get_link_status = 1; + + /* possible wait-to-complete in up to 9 seconds */ + for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count ++) { + /* Read the real link status */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + /* Do the work to read phy */ + e1000_check_for_link(hw); + link_check = !hw->mac.get_link_status; + break; + + case e1000_media_type_fiber: + e1000_check_for_link(hw); + link_check = (E1000_READ_REG(hw, E1000_STATUS) & + E1000_STATUS_LU); + break; + + case e1000_media_type_internal_serdes: + e1000_check_for_link(hw); + link_check = hw->mac.serdes_has_link; + break; + + /* VF device is type_unknown */ + case e1000_media_type_unknown: + eth_igbvf_link_update(hw); + link_check = !hw->mac.get_link_status; + break; + + default: + break; + } + if (link_check || wait_to_complete == 0) + break; + rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL); + } + memset(&link, 0, sizeof(link)); + rte_igb_dev_atomic_read_link_status(dev, &link); + old = link; + + /* Now we check if a transition has happened */ + if (link_check) { + uint16_t duplex, speed; + hw->mac.ops.get_link_up_info(hw, &speed, &duplex); + link.link_duplex = (duplex == FULL_DUPLEX) ? + ETH_LINK_FULL_DUPLEX : + ETH_LINK_HALF_DUPLEX; + link.link_speed = speed; + link.link_status = ETH_LINK_UP; + link.link_autoneg = !(dev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_FIXED); + } else if (!link_check) { + link.link_speed = 0; + link.link_duplex = ETH_LINK_HALF_DUPLEX; + link.link_status = ETH_LINK_DOWN; + link.link_autoneg = ETH_LINK_SPEED_FIXED; + } + rte_igb_dev_atomic_write_link_status(dev, &link); + + /* not changed */ + if (old.link_status == link.link_status) + return -1; + + /* changed */ + return 0; +} + +/* + * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means + * that the driver is loaded. 
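+ * The paired igb_hw_control_release() below clears DRV_LOAD again and
+ * is called from eth_igb_close() once the port has been shut down.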
+ */ +static void +igb_hw_control_acquire(struct e1000_hw *hw) +{ + uint32_t ctrl_ext; + + /* Let firmware know the driver has taken over */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); +} + +/* + * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. + */ +static void +igb_hw_control_release(struct e1000_hw *hw) +{ + uint32_t ctrl_ext; + + /* Let firmware taken over control of h/w */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); +} + +/* + * Bit of a misnomer, what this really means is + * to enable OS management of the system... aka + * to disable special hardware management features. + */ +static void +igb_init_manageability(struct e1000_hw *hw) +{ + if (e1000_enable_mng_pass_thru(hw)) { + uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H); + uint32_t manc = E1000_READ_REG(hw, E1000_MANC); + + /* disable hardware interception of ARP */ + manc &= ~(E1000_MANC_ARP_EN); + + /* enable receiving management packets to the host */ + manc |= E1000_MANC_EN_MNG2HOST; + manc2h |= 1 << 5; /* Mng Port 623 */ + manc2h |= 1 << 6; /* Mng Port 664 */ + E1000_WRITE_REG(hw, E1000_MANC2H, manc2h); + E1000_WRITE_REG(hw, E1000_MANC, manc); + } +} + +static void +igb_release_manageability(struct e1000_hw *hw) +{ + if (e1000_enable_mng_pass_thru(hw)) { + uint32_t manc = E1000_READ_REG(hw, E1000_MANC); + + manc |= E1000_MANC_ARP_EN; + manc &= ~E1000_MANC_EN_MNG2HOST; + + E1000_WRITE_REG(hw, E1000_MANC, manc); + } +} + +static void +eth_igb_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); +} + +static void +eth_igb_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl &= (~E1000_RCTL_UPE); + if (dev->data->all_multicast == 1) + rctl |= E1000_RCTL_MPE; + else + rctl &= (~E1000_RCTL_MPE); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); +} + +static void +eth_igb_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl |= E1000_RCTL_MPE; + E1000_WRITE_REG(hw, E1000_RCTL, rctl); +} + +static void +eth_igb_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + if (dev->data->promiscuous == 1) + return; /* must remain in all_multicast mode */ + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl &= (~E1000_RCTL_MPE); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); +} + +static int +eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vfta * shadow_vfta = + E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + uint32_t vfta; + uint32_t vid_idx; + uint32_t vid_bit; + + vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) & + E1000_VFTA_ENTRY_MASK); + vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK)); + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx); + if (on) + vfta |= 
vid_bit; + else + vfta &= ~vid_bit; + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta); + + /* update local VFTA copy */ + shadow_vfta->vfta[vid_idx] = vfta; + + return 0; +} + +static int +eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, + uint16_t tpid) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg = ETHER_TYPE_VLAN; + int ret = 0; + + switch (vlan_type) { + case ETH_VLAN_TYPE_INNER: + reg |= (tpid << 16); + E1000_WRITE_REG(hw, E1000_VET, reg); + break; + default: + ret = -EINVAL; + PMD_DRV_LOG(ERR, "Unsupported vlan type %d\n", vlan_type); + break; + } + + return ret; +} + +static void +igb_vlan_hw_filter_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + + /* Filter Table Disable */ + reg = E1000_READ_REG(hw, E1000_RCTL); + reg &= ~E1000_RCTL_CFIEN; + reg &= ~E1000_RCTL_VFE; + E1000_WRITE_REG(hw, E1000_RCTL, reg); +} + +static void +igb_vlan_hw_filter_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vfta * shadow_vfta = + E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + uint32_t reg; + int i; + + /* Filter Table Enable, CFI not used for packet acceptance */ + reg = E1000_READ_REG(hw, E1000_RCTL); + reg &= ~E1000_RCTL_CFIEN; + reg |= E1000_RCTL_VFE; + E1000_WRITE_REG(hw, E1000_RCTL, reg); + + /* restore VFTA table */ + for (i = 0; i < IGB_VFTA_SIZE; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]); +} + +static void +igb_vlan_hw_strip_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + + /* VLAN Mode Disable */ + reg = E1000_READ_REG(hw, E1000_CTRL); + reg &= ~E1000_CTRL_VME; + E1000_WRITE_REG(hw, E1000_CTRL, reg); +} + +static void +igb_vlan_hw_strip_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + + /* VLAN Mode Enable */ + reg = E1000_READ_REG(hw, E1000_CTRL); + reg |= E1000_CTRL_VME; + E1000_WRITE_REG(hw, E1000_CTRL, reg); +} + +static void +igb_vlan_hw_extend_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + + /* CTRL_EXT: Extended VLAN */ + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg &= ~E1000_CTRL_EXT_EXTEND_VLAN; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + + /* Update maximum packet length */ + if (dev->data->dev_conf.rxmode.jumbo_frame == 1) + E1000_WRITE_REG(hw, E1000_RLPML, + dev->data->dev_conf.rxmode.max_rx_pkt_len + + VLAN_TAG_SIZE); +} + +static void +igb_vlan_hw_extend_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + + /* CTRL_EXT: Extended VLAN */ + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg |= E1000_CTRL_EXT_EXTEND_VLAN; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + + /* Update maximum packet length */ + if (dev->data->dev_conf.rxmode.jumbo_frame == 1) + E1000_WRITE_REG(hw, E1000_RLPML, + dev->data->dev_conf.rxmode.max_rx_pkt_len + + 2 * VLAN_TAG_SIZE); +} + +static void +eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + if(mask & ETH_VLAN_STRIP_MASK){ + if (dev->data->dev_conf.rxmode.hw_vlan_strip) + igb_vlan_hw_strip_enable(dev); + else + igb_vlan_hw_strip_disable(dev); + } + + if(mask & ETH_VLAN_FILTER_MASK){ + if (dev->data->dev_conf.rxmode.hw_vlan_filter) + 
igb_vlan_hw_filter_enable(dev); + else + igb_vlan_hw_filter_disable(dev); + } + + if(mask & ETH_VLAN_EXTEND_MASK){ + if (dev->data->dev_conf.rxmode.hw_vlan_extend) + igb_vlan_hw_extend_enable(dev); + else + igb_vlan_hw_extend_disable(dev); + } +} + + +/** + * It enables the interrupt mask and then enable the interrupt. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev) +{ + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + intr->mask |= E1000_ICR_LSC; + + return 0; +} + +/* It clears the interrupt causes and enables the interrupt. + * It will be called once only during nic initialized. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev) +{ + uint32_t mask, regval; + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_dev_info dev_info; + + memset(&dev_info, 0, sizeof(dev_info)); + eth_igb_infos_get(dev, &dev_info); + + mask = 0xFFFFFFFF >> (32 - dev_info.max_rx_queues); + regval = E1000_READ_REG(hw, E1000_EIMS); + E1000_WRITE_REG(hw, E1000_EIMS, regval | mask); + + return 0; +} + +/* + * It reads ICR and gets interrupt causes, check it and set a bit flag + * to update link status. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +eth_igb_interrupt_get_status(struct rte_eth_dev *dev) +{ + uint32_t icr; + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + igb_intr_disable(hw); + + /* read-on-clear nic registers here */ + icr = E1000_READ_REG(hw, E1000_ICR); + + intr->flags = 0; + if (icr & E1000_ICR_LSC) { + intr->flags |= E1000_FLAG_NEED_LINK_UPDATE; + } + + if (icr & E1000_ICR_VMMB) + intr->flags |= E1000_FLAG_MAILBOX; + + return 0; +} + +/* + * It executes link_update after knowing an interrupt is prsent. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +eth_igb_interrupt_action(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + uint32_t tctl, rctl; + struct rte_eth_link link; + int ret; + + if (intr->flags & E1000_FLAG_MAILBOX) { + igb_pf_mbx_process(dev); + intr->flags &= ~E1000_FLAG_MAILBOX; + } + + igb_intr_enable(dev); + rte_intr_enable(&(dev->pci_dev->intr_handle)); + + if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) { + intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE; + + /* set get_link_status to check register later */ + hw->mac.get_link_status = 1; + ret = eth_igb_link_update(dev, 0); + + /* check if link has changed */ + if (ret < 0) + return 0; + + memset(&link, 0, sizeof(link)); + rte_igb_dev_atomic_read_link_status(dev, &link); + if (link.link_status) { + PMD_INIT_LOG(INFO, + " Port %d: Link Up - speed %u Mbps - %s", + dev->data->port_id, + (unsigned)link.link_speed, + link.link_duplex == ETH_LINK_FULL_DUPLEX ? 
+ "full-duplex" : "half-duplex"); + } else { + PMD_INIT_LOG(INFO, " Port %d: Link Down", + dev->data->port_id); + } + + PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d", + dev->pci_dev->addr.domain, + dev->pci_dev->addr.bus, + dev->pci_dev->addr.devid, + dev->pci_dev->addr.function); + tctl = E1000_READ_REG(hw, E1000_TCTL); + rctl = E1000_READ_REG(hw, E1000_RCTL); + if (link.link_status) { + /* enable Tx/Rx */ + tctl |= E1000_TCTL_EN; + rctl |= E1000_RCTL_EN; + } else { + /* disable Tx/Rx */ + tctl &= ~E1000_TCTL_EN; + rctl &= ~E1000_RCTL_EN; + } + E1000_WRITE_REG(hw, E1000_TCTL, tctl); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + E1000_WRITE_FLUSH(hw); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); + } + + return 0; +} + +/** + * Interrupt handler which shall be registered at first. + * + * @param handle + * Pointer to interrupt handle. + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. + * + * @return + * void + */ +static void +eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle, + void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + eth_igb_interrupt_get_status(dev); + eth_igb_interrupt_action(dev); +} + +static int +eth_igb_led_on(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP; +} + +static int +eth_igb_led_off(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP; +} + +static int +eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct e1000_hw *hw; + uint32_t ctrl; + int tx_pause; + int rx_pause; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + fc_conf->pause_time = hw->fc.pause_time; + fc_conf->high_water = hw->fc.high_water; + fc_conf->low_water = hw->fc.low_water; + fc_conf->send_xon = hw->fc.send_xon; + fc_conf->autoneg = hw->mac.autoneg; + + /* + * Return rx_pause and tx_pause status according to actual setting of + * the TFCE and RFCE bits in the CTRL register. 
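+	 *
+	 *   RFCE  TFCE   reported mode
+	 *    1     1     RTE_FC_FULL
+	 *    1     0     RTE_FC_RX_PAUSE
+	 *    0     1     RTE_FC_TX_PAUSE
+	 *    0     0     RTE_FC_NONE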
+ */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + if (ctrl & E1000_CTRL_TFCE) + tx_pause = 1; + else + tx_pause = 0; + + if (ctrl & E1000_CTRL_RFCE) + rx_pause = 1; + else + rx_pause = 0; + + if (rx_pause && tx_pause) + fc_conf->mode = RTE_FC_FULL; + else if (rx_pause) + fc_conf->mode = RTE_FC_RX_PAUSE; + else if (tx_pause) + fc_conf->mode = RTE_FC_TX_PAUSE; + else + fc_conf->mode = RTE_FC_NONE; + + return 0; +} + +static int +eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct e1000_hw *hw; + int err; + enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = { + e1000_fc_none, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full + }; + uint32_t rx_buf_size; + uint32_t max_high_water; + uint32_t rctl; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (fc_conf->autoneg != hw->mac.autoneg) + return -ENOTSUP; + rx_buf_size = igb_get_rx_buffer_size(hw); + PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); + + /* At least reserve one Ethernet frame for watermark */ + max_high_water = rx_buf_size - ETHER_MAX_LEN; + if ((fc_conf->high_water > max_high_water) || + (fc_conf->high_water < fc_conf->low_water)) { + PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value"); + PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water); + return -EINVAL; + } + + hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode]; + hw->fc.pause_time = fc_conf->pause_time; + hw->fc.high_water = fc_conf->high_water; + hw->fc.low_water = fc_conf->low_water; + hw->fc.send_xon = fc_conf->send_xon; + + err = e1000_setup_link_generic(hw); + if (err == E1000_SUCCESS) { + + /* check if we want to forward MAC frames - driver doesn't have native + * capability to do that, so we'll write the registers ourselves */ + + rctl = E1000_READ_REG(hw, E1000_RCTL); + + /* set or clear MFLCN.PMCF bit depending on configuration */ + if (fc_conf->mac_ctrl_frame_fwd != 0) + rctl |= E1000_RCTL_PMCF; + else + rctl &= ~E1000_RCTL_PMCF; + + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + E1000_WRITE_FLUSH(hw); + + return 0; + } + + PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err); + return -EIO; +} + +#define E1000_RAH_POOLSEL_SHIFT (18) +static void +eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + uint32_t index, __rte_unused uint32_t pool) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rah; + + e1000_rar_set(hw, mac_addr->addr_bytes, index); + rah = E1000_READ_REG(hw, E1000_RAH(index)); + rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool)); + E1000_WRITE_REG(hw, E1000_RAH(index), rah); +} + +static void +eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index) +{ + uint8_t addr[ETHER_ADDR_LEN]; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + memset(addr, 0, sizeof(addr)); + + e1000_rar_set(hw, addr, index); +} + +static void +eth_igb_default_mac_addr_set(struct rte_eth_dev *dev, + struct ether_addr *addr) +{ + eth_igb_rar_clear(dev, 0); + + eth_igb_rar_set(dev, (void *)addr, 0, 0); +} +/* + * Virtual Function operations + */ +static void +igbvf_intr_disable(struct e1000_hw *hw) +{ + PMD_INIT_FUNC_TRACE(); + + /* Clear interrupt mask to stop from interrupts being generated */ + E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF); + + E1000_WRITE_FLUSH(hw); +} + +static void +igbvf_stop_adapter(struct rte_eth_dev *dev) +{ + u32 reg_val; + u16 i; + struct rte_eth_dev_info dev_info; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + memset(&dev_info, 0, 
sizeof(dev_info)); + eth_igbvf_infos_get(dev, &dev_info); + + /* Clear interrupt mask to stop from interrupts being generated */ + igbvf_intr_disable(hw); + + /* Clear any pending interrupts, flush previous writes */ + E1000_READ_REG(hw, E1000_EICR); + + /* Disable the transmit unit. Each queue must be disabled. */ + for (i = 0; i < dev_info.max_tx_queues; i++) + E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH); + + /* Disable the receive unit by stopping each queue */ + for (i = 0; i < dev_info.max_rx_queues; i++) { + reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i)); + reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE; + E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val); + while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE) + ; + } + + /* flush all queues disables */ + E1000_WRITE_FLUSH(hw); + msec_delay(2); +} + +static int eth_igbvf_link_update(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + struct e1000_mac_info *mac = &hw->mac; + int ret_val = E1000_SUCCESS; + + PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf"); + + /* + * We only want to run this if there has been a rst asserted. + * in this case that could mean a link change, device reset, + * or a virtual function reset + */ + + /* If we were hit with a reset or timeout drop the link */ + if (!e1000_check_for_rst(hw, 0) || !mbx->timeout) + mac->get_link_status = TRUE; + + if (!mac->get_link_status) + goto out; + + /* if link status is down no point in checking to see if pf is up */ + if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) + goto out; + + /* if we passed all the tests above then the link is up and we no + * longer need to check for link */ + mac->get_link_status = FALSE; + +out: + return ret_val; +} + + +static int +igbvf_dev_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_conf* conf = &dev->data->dev_conf; + + PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", + dev->data->port_id); + + /* + * VF has no ability to enable/disable HW CRC + * Keep the persistent behavior the same as Host PF + */ +#ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC + if (!conf->rxmode.hw_strip_crc) { + PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); + conf->rxmode.hw_strip_crc = 1; + } +#else + if (conf->rxmode.hw_strip_crc) { + PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); + conf->rxmode.hw_strip_crc = 0; + } +#endif + + return 0; +} + +static int +igbvf_dev_start(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_adapter *adapter = + E1000_DEV_PRIVATE(dev->data->dev_private); + int ret; + + PMD_INIT_FUNC_TRACE(); + + hw->mac.ops.reset_hw(hw); + adapter->stopped = 0; + + /* Set all vfta */ + igbvf_set_vfta_all(dev,1); + + eth_igbvf_tx_init(dev); + + /* This can fail when allocating mbufs for descriptor rings */ + ret = eth_igbvf_rx_init(dev); + if (ret) { + PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); + igb_dev_clear_queues(dev); + return ret; + } + + return 0; +} + +static void +igbvf_dev_stop(struct rte_eth_dev *dev) +{ + PMD_INIT_FUNC_TRACE(); + + igbvf_stop_adapter(dev); + + /* + * Clear what we set, but we still keep shadow_vfta to + * restore after device starts + */ + igbvf_set_vfta_all(dev,0); + + igb_dev_clear_queues(dev); +} + +static void +igbvf_dev_close(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_adapter *adapter = + E1000_DEV_PRIVATE(dev->data->dev_private); + struct ether_addr addr; + + PMD_INIT_FUNC_TRACE(); + + 
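+	/*
+	 * Teardown order: reset the VF hardware, stop the port (which
+	 * clears the VLAN filter table entries and the queues), free the
+	 * queue memory, and finally program a zero MAC into the default
+	 * RAR so that traffic falls back to the PF (see the comment below).
+	 */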
e1000_reset_hw(hw); + + igbvf_dev_stop(dev); + adapter->stopped = 1; + igb_dev_free_queues(dev); + + /** + * reprogram the RAR with a zero mac address, + * to ensure that the VF traffic goes to the PF + * after stop, close and detach of the VF. + **/ + + memset(&addr, 0, sizeof(addr)); + igbvf_default_mac_addr_set(dev, &addr); +} + +static void +igbvf_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Set both unicast and multicast promisc */ + e1000_promisc_set_vf(hw, e1000_promisc_enabled); +} + +static void +igbvf_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* If in allmulticast mode leave multicast promisc */ + if (dev->data->all_multicast == 1) + e1000_promisc_set_vf(hw, e1000_promisc_multicast); + else + e1000_promisc_set_vf(hw, e1000_promisc_disabled); +} + +static void +igbvf_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* In promiscuous mode multicast promisc already set */ + if (dev->data->promiscuous == 0) + e1000_promisc_set_vf(hw, e1000_promisc_multicast); +} + +static void +igbvf_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* In promiscuous mode leave multicast promisc enabled */ + if (dev->data->promiscuous == 0) + e1000_promisc_set_vf(hw, e1000_promisc_disabled); +} + +static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + uint32_t msgbuf[2]; + s32 err; + + /* After set vlan, vlan strip will also be enabled in igb driver*/ + msgbuf[0] = E1000_VF_SET_VLAN; + msgbuf[1] = vid; + /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ + if (on) + msgbuf[0] |= E1000_VF_SET_VLAN_ADD; + + err = mbx->ops.write_posted(hw, msgbuf, 2, 0); + if (err) + goto mbx_err; + + err = mbx->ops.read_posted(hw, msgbuf, 2, 0); + if (err) + goto mbx_err; + + msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; + if (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK)) + err = -EINVAL; + +mbx_err: + return err; +} + +static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vfta * shadow_vfta = + E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + int i = 0, j = 0, vfta = 0, mask = 1; + + for (i = 0; i < IGB_VFTA_SIZE; i++){ + vfta = shadow_vfta->vfta[i]; + if(vfta){ + mask = 1; + for (j = 0; j < 32; j++){ + if(vfta & mask) + igbvf_set_vfta(hw, + (uint16_t)((i<<5)+j), on); + mask<<=1; + } + } + } + +} + +static int +igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vfta * shadow_vfta = + E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + uint32_t vid_idx = 0; + uint32_t vid_bit = 0; + int ret = 0; + + PMD_INIT_FUNC_TRACE(); + + /*vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf*/ + ret = igbvf_set_vfta(hw, vlan_id, !!on); + if(ret){ + PMD_INIT_LOG(ERR, "Unable to set VF vlan"); + return ret; + } + vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); + vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); + + /*Save what we set and retore it after device reset*/ + if (on) + shadow_vfta->vfta[vid_idx] |= vid_bit; + else + shadow_vfta->vfta[vid_idx] &= ~vid_bit; + + return 0; +} + +static void 
+igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* index is not used by rar_set() */ + hw->mac.ops.rar_set(hw, (void *)addr, 0); +} + + +static int +eth_igb_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + uint8_t i, j, mask; + uint32_t reta, r; + uint16_t idx, shift; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (reta_size != ETH_RSS_RETA_SIZE_128) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128); + return -EINVAL; + } + + for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & + IGB_4_BIT_MASK); + if (!mask) + continue; + if (mask == IGB_4_BIT_MASK) + r = 0; + else + r = E1000_READ_REG(hw, E1000_RETA(i >> 2)); + for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) { + if (mask & (0x1 << j)) + reta |= reta_conf[idx].reta[shift + j] << + (CHAR_BIT * j); + else + reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j)); + } + E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta); + } + + return 0; +} + +static int +eth_igb_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + uint8_t i, j, mask; + uint32_t reta; + uint16_t idx, shift; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (reta_size != ETH_RSS_RETA_SIZE_128) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128); + return -EINVAL; + } + + for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & + IGB_4_BIT_MASK); + if (!mask) + continue; + reta = E1000_READ_REG(hw, E1000_RETA(i >> 2)); + for (j = 0; j < IGB_4_BIT_WIDTH; j++) { + if (mask & (0x1 << j)) + reta_conf[idx].reta[shift + j] = + ((reta >> (CHAR_BIT * j)) & + IGB_8_BIT_MASK); + } + } + + return 0; +} + +#define MAC_TYPE_FILTER_SUP(type) do {\ + if ((type) != e1000_82580 && (type) != e1000_i350 &&\ + (type) != e1000_82576)\ + return -ENOTSUP;\ +} while (0) + +static int +eth_igb_syn_filter_set(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter, + bool add) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t synqf, rfctl; + + if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) + return -EINVAL; + + synqf = E1000_READ_REG(hw, E1000_SYNQF(0)); + + if (add) { + if (synqf & E1000_SYN_FILTER_ENABLE) + return -EINVAL; + + synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) & + E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE); + + rfctl = E1000_READ_REG(hw, E1000_RFCTL); + if (filter->hig_pri) + rfctl |= E1000_RFCTL_SYNQFP; + else + rfctl &= ~E1000_RFCTL_SYNQFP; + + E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); + } else { + if (!(synqf & E1000_SYN_FILTER_ENABLE)) + return -ENOENT; + synqf = 0; + } + + E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf); + E1000_WRITE_FLUSH(hw); + return 0; +} + +static int +eth_igb_syn_filter_get(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t synqf, rfctl; + + synqf = 
E1000_READ_REG(hw, E1000_SYNQF(0)); + if (synqf & E1000_SYN_FILTER_ENABLE) { + rfctl = E1000_READ_REG(hw, E1000_RFCTL); + filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0; + filter->queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >> + E1000_SYN_FILTER_QUEUE_SHIFT); + return 0; + } + + return -ENOENT; +} + +static int +eth_igb_syn_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + MAC_TYPE_FILTER_SUP(hw->mac.type); + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = eth_igb_syn_filter_set(dev, + (struct rte_eth_syn_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = eth_igb_syn_filter_set(dev, + (struct rte_eth_syn_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = eth_igb_syn_filter_get(dev, + (struct rte_eth_syn_filter *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op); + ret = -EINVAL; + break; + } + + return ret; +} + +#define MAC_TYPE_FILTER_SUP_EXT(type) do {\ + if ((type) != e1000_82580 && (type) != e1000_i350)\ + return -ENOSYS; \ +} while (0) + +/* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info*/ +static inline int +ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter, + struct e1000_2tuple_filter_info *filter_info) +{ + if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) + return -EINVAL; + if (filter->priority > E1000_2TUPLE_MAX_PRI) + return -EINVAL; /* filter index is out of range. */ + if (filter->tcp_flags > TCP_FLAG_ALL) + return -EINVAL; /* flags is invalid. */ + + switch (filter->dst_port_mask) { + case UINT16_MAX: + filter_info->dst_port_mask = 0; + filter_info->dst_port = filter->dst_port; + break; + case 0: + filter_info->dst_port_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_port mask."); + return -EINVAL; + } + + switch (filter->proto_mask) { + case UINT8_MAX: + filter_info->proto_mask = 0; + filter_info->proto = filter->proto; + break; + case 0: + filter_info->proto_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid protocol mask."); + return -EINVAL; + } + + filter_info->priority = (uint8_t)filter->priority; + if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) + filter_info->tcp_flags = filter->tcp_flags; + else + filter_info->tcp_flags = 0; + + return 0; +} + +static inline struct e1000_2tuple_filter * +igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list, + struct e1000_2tuple_filter_info *key) +{ + struct e1000_2tuple_filter *it; + + TAILQ_FOREACH(it, filter_list, entries) { + if (memcmp(key, &it->filter_info, + sizeof(struct e1000_2tuple_filter_info)) == 0) { + return it; + } + } + return NULL; +} + +/* + * igb_add_2tuple_filter - add a 2tuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * ntuple_filter: ponter to the filter that will be added. + * + * @return + * - On success, zero. + * - On failure, a negative value. 
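+ *
+ * Note: per ntuple_filter_to_2tuple(), a dst_port_mask of UINT16_MAX in
+ * the rte_eth_ntuple_filter means "match dst_port exactly" while 0 means
+ * "ignore the destination port"; proto_mask follows the same convention.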
+ */ +static int +igb_add_2tuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_2tuple_filter *filter; + uint32_t ttqf = E1000_TTQF_DISABLE_MASK; + uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP; + int i, ret; + + filter = rte_zmalloc("e1000_2tuple_filter", + sizeof(struct e1000_2tuple_filter), 0); + if (filter == NULL) + return -ENOMEM; + + ret = ntuple_filter_to_2tuple(ntuple_filter, + &filter->filter_info); + if (ret < 0) { + rte_free(filter); + return ret; + } + if (igb_2tuple_filter_lookup(&filter_info->twotuple_list, + &filter->filter_info) != NULL) { + PMD_DRV_LOG(ERR, "filter exists."); + rte_free(filter); + return -EEXIST; + } + filter->queue = ntuple_filter->queue; + + /* + * look for an unused 2tuple filter index, + * and insert the filter to list. + */ + for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) { + if (!(filter_info->twotuple_mask & (1 << i))) { + filter_info->twotuple_mask |= 1 << i; + filter->index = i; + TAILQ_INSERT_TAIL(&filter_info->twotuple_list, + filter, + entries); + break; + } + } + if (i >= E1000_MAX_TTQF_FILTERS) { + PMD_DRV_LOG(ERR, "2tuple filters are full."); + rte_free(filter); + return -ENOSYS; + } + + imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT); + if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */ + imir |= E1000_IMIR_PORT_BP; + else + imir &= ~E1000_IMIR_PORT_BP; + + imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT; + + ttqf |= E1000_TTQF_QUEUE_ENABLE; + ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT); + ttqf |= (uint32_t)(filter->filter_info.proto & E1000_TTQF_PROTOCOL_MASK); + if (filter->filter_info.proto_mask == 0) + ttqf &= ~E1000_TTQF_MASK_ENABLE; + + /* tcp flags bits setting. */ + if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) { + if (filter->filter_info.tcp_flags & TCP_URG_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_URG; + if (filter->filter_info.tcp_flags & TCP_ACK_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_ACK; + if (filter->filter_info.tcp_flags & TCP_PSH_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_PSH; + if (filter->filter_info.tcp_flags & TCP_RST_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_RST; + if (filter->filter_info.tcp_flags & TCP_SYN_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_SYN; + if (filter->filter_info.tcp_flags & TCP_FIN_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_FIN; + } else + imir_ext |= E1000_IMIREXT_CTRL_BP; + E1000_WRITE_REG(hw, E1000_IMIR(i), imir); + E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf); + E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext); + return 0; +} + +/* + * igb_remove_2tuple_filter - remove a 2tuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * ntuple_filter: ponter to the filter that will be removed. + * + * @return + * - On success, zero. + * - On failure, a negative value. 
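+ *
+ * Note: removal clears the slot from the in-use mask, disables the TTQF
+ * entry for that index and zeroes its IMIR/IMIREXT registers.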
+ */ +static int +igb_remove_2tuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_2tuple_filter_info filter_2tuple; + struct e1000_2tuple_filter *filter; + int ret; + + memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info)); + ret = ntuple_filter_to_2tuple(ntuple_filter, + &filter_2tuple); + if (ret < 0) + return ret; + + filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list, + &filter_2tuple); + if (filter == NULL) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + return -ENOENT; + } + + filter_info->twotuple_mask &= ~(1 << filter->index); + TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries); + rte_free(filter); + + E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK); + E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0); + E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0); + return 0; +} + +static inline struct e1000_flex_filter * +eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list, + struct e1000_flex_filter_info *key) +{ + struct e1000_flex_filter *it; + + TAILQ_FOREACH(it, filter_list, entries) { + if (memcmp(key, &it->filter_info, + sizeof(struct e1000_flex_filter_info)) == 0) + return it; + } + + return NULL; +} + +static int +eth_igb_add_del_flex_filter(struct rte_eth_dev *dev, + struct rte_eth_flex_filter *filter, + bool add) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_flex_filter *flex_filter, *it; + uint32_t wufc, queueing, mask; + uint32_t reg_off; + uint8_t shift, i, j = 0; + + flex_filter = rte_zmalloc("e1000_flex_filter", + sizeof(struct e1000_flex_filter), 0); + if (flex_filter == NULL) + return -ENOMEM; + + flex_filter->filter_info.len = filter->len; + flex_filter->filter_info.priority = filter->priority; + memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len); + for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) { + mask = 0; + /* reverse bits in flex filter's mask*/ + for (shift = 0; shift < CHAR_BIT; shift++) { + if (filter->mask[i] & (0x01 << shift)) + mask |= (0x80 >> shift); + } + flex_filter->filter_info.mask[i] = mask; + } + + wufc = E1000_READ_REG(hw, E1000_WUFC); + if (flex_filter->index < E1000_MAX_FHFT) + reg_off = E1000_FHFT(flex_filter->index); + else + reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT); + + if (add) { + if (eth_igb_flex_filter_lookup(&filter_info->flex_list, + &flex_filter->filter_info) != NULL) { + PMD_DRV_LOG(ERR, "filter exists."); + rte_free(flex_filter); + return -EEXIST; + } + flex_filter->queue = filter->queue; + /* + * look for an unused flex filter index + * and insert the filter into the list. 
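+ * (free slots are tracked as bits in filter_info->flex_mask, up to
+ * E1000_MAX_FLEX_FILTERS entries).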
+ */ + for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) { + if (!(filter_info->flex_mask & (1 << i))) { + filter_info->flex_mask |= 1 << i; + flex_filter->index = i; + TAILQ_INSERT_TAIL(&filter_info->flex_list, + flex_filter, + entries); + break; + } + } + if (i >= E1000_MAX_FLEX_FILTERS) { + PMD_DRV_LOG(ERR, "flex filters are full."); + rte_free(flex_filter); + return -ENOSYS; + } + + E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ | + (E1000_WUFC_FLX0 << flex_filter->index)); + queueing = filter->len | + (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) | + (filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT); + E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET, + queueing); + for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) { + E1000_WRITE_REG(hw, reg_off, + flex_filter->filter_info.dwords[j]); + reg_off += sizeof(uint32_t); + E1000_WRITE_REG(hw, reg_off, + flex_filter->filter_info.dwords[++j]); + reg_off += sizeof(uint32_t); + E1000_WRITE_REG(hw, reg_off, + (uint32_t)flex_filter->filter_info.mask[i]); + reg_off += sizeof(uint32_t) * 2; + ++j; + } + } else { + it = eth_igb_flex_filter_lookup(&filter_info->flex_list, + &flex_filter->filter_info); + if (it == NULL) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + rte_free(flex_filter); + return -ENOENT; + } + + for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++) + E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0); + E1000_WRITE_REG(hw, E1000_WUFC, wufc & + (~(E1000_WUFC_FLX0 << it->index))); + + filter_info->flex_mask &= ~(1 << it->index); + TAILQ_REMOVE(&filter_info->flex_list, it, entries); + rte_free(it); + rte_free(flex_filter); + } + + return 0; +} + +static int +eth_igb_get_flex_filter(struct rte_eth_dev *dev, + struct rte_eth_flex_filter *filter) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_flex_filter flex_filter, *it; + uint32_t wufc, queueing, wufc_en = 0; + + memset(&flex_filter, 0, sizeof(struct e1000_flex_filter)); + flex_filter.filter_info.len = filter->len; + flex_filter.filter_info.priority = filter->priority; + memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len); + memcpy(flex_filter.filter_info.mask, filter->mask, + RTE_ALIGN(filter->len, sizeof(char)) / sizeof(char)); + + it = eth_igb_flex_filter_lookup(&filter_info->flex_list, + &flex_filter.filter_info); + if (it == NULL) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + return -ENOENT; + } + + wufc = E1000_READ_REG(hw, E1000_WUFC); + wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index); + + if ((wufc & wufc_en) == wufc_en) { + uint32_t reg_off = 0; + if (it->index < E1000_MAX_FHFT) + reg_off = E1000_FHFT(it->index); + else + reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT); + + queueing = E1000_READ_REG(hw, + reg_off + E1000_FHFT_QUEUEING_OFFSET); + filter->len = queueing & E1000_FHFT_QUEUEING_LEN; + filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >> + E1000_FHFT_QUEUEING_PRIO_SHIFT; + filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >> + E1000_FHFT_QUEUEING_QUEUE_SHIFT; + return 0; + } + return -ENOENT; +} + +static int +eth_igb_flex_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_flex_filter *filter; + int ret = 0; + + MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); + + if (filter_op == RTE_ETH_FILTER_NOP) + return ret; + + if (arg == NULL) { + 
PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", + filter_op); + return -EINVAL; + } + + filter = (struct rte_eth_flex_filter *)arg; + if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN + || filter->len % sizeof(uint64_t) != 0) { + PMD_DRV_LOG(ERR, "filter's length is out of range"); + return -EINVAL; + } + if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) { + PMD_DRV_LOG(ERR, "filter's priority is out of range"); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = eth_igb_add_del_flex_filter(dev, filter, TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = eth_igb_add_del_flex_filter(dev, filter, FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = eth_igb_get_flex_filter(dev, filter); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); + ret = -EINVAL; + break; + } + + return ret; +} + +/* translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info*/ +static inline int +ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter, + struct e1000_5tuple_filter_info *filter_info) +{ + if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) + return -EINVAL; + if (filter->priority > E1000_2TUPLE_MAX_PRI) + return -EINVAL; /* filter index is out of range. */ + if (filter->tcp_flags > TCP_FLAG_ALL) + return -EINVAL; /* flags is invalid. */ + + switch (filter->dst_ip_mask) { + case UINT32_MAX: + filter_info->dst_ip_mask = 0; + filter_info->dst_ip = filter->dst_ip; + break; + case 0: + filter_info->dst_ip_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_ip mask."); + return -EINVAL; + } + + switch (filter->src_ip_mask) { + case UINT32_MAX: + filter_info->src_ip_mask = 0; + filter_info->src_ip = filter->src_ip; + break; + case 0: + filter_info->src_ip_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_ip mask."); + return -EINVAL; + } + + switch (filter->dst_port_mask) { + case UINT16_MAX: + filter_info->dst_port_mask = 0; + filter_info->dst_port = filter->dst_port; + break; + case 0: + filter_info->dst_port_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_port mask."); + return -EINVAL; + } + + switch (filter->src_port_mask) { + case UINT16_MAX: + filter_info->src_port_mask = 0; + filter_info->src_port = filter->src_port; + break; + case 0: + filter_info->src_port_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_port mask."); + return -EINVAL; + } + + switch (filter->proto_mask) { + case UINT8_MAX: + filter_info->proto_mask = 0; + filter_info->proto = filter->proto; + break; + case 0: + filter_info->proto_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid protocol mask."); + return -EINVAL; + } + + filter_info->priority = (uint8_t)filter->priority; + if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) + filter_info->tcp_flags = filter->tcp_flags; + else + filter_info->tcp_flags = 0; + + return 0; +} + +static inline struct e1000_5tuple_filter * +igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list, + struct e1000_5tuple_filter_info *key) +{ + struct e1000_5tuple_filter *it; + + TAILQ_FOREACH(it, filter_list, entries) { + if (memcmp(key, &it->filter_info, + sizeof(struct e1000_5tuple_filter_info)) == 0) { + return it; + } + } + return NULL; +} + +/* + * igb_add_5tuple_filter_82576 - add a 5tuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * ntuple_filter: ponter to the filter that will be added. + * + * @return + * - On success, zero. + * - On failure, a negative value. 
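+ *
+ * Note: 5-tuple filters exist on 82576 only; they match on source and
+ * destination IP, source and destination port, and protocol, and are
+ * programmed into the FTQF/DAQF/SAQF/SPQF/IMIR/IMIREXT registers at the
+ * first free index found below.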
+ */ +static int +igb_add_5tuple_filter_82576(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_5tuple_filter *filter; + uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK; + uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP; + uint8_t i; + int ret; + + filter = rte_zmalloc("e1000_5tuple_filter", + sizeof(struct e1000_5tuple_filter), 0); + if (filter == NULL) + return -ENOMEM; + + ret = ntuple_filter_to_5tuple_82576(ntuple_filter, + &filter->filter_info); + if (ret < 0) { + rte_free(filter); + return ret; + } + + if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list, + &filter->filter_info) != NULL) { + PMD_DRV_LOG(ERR, "filter exists."); + rte_free(filter); + return -EEXIST; + } + filter->queue = ntuple_filter->queue; + + /* + * look for an unused 5tuple filter index, + * and insert the filter to list. + */ + for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) { + if (!(filter_info->fivetuple_mask & (1 << i))) { + filter_info->fivetuple_mask |= 1 << i; + filter->index = i; + TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, + filter, + entries); + break; + } + } + if (i >= E1000_MAX_FTQF_FILTERS) { + PMD_DRV_LOG(ERR, "5tuple filters are full."); + rte_free(filter); + return -ENOSYS; + } + + ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK; + if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */ + ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP; + if (filter->filter_info.dst_ip_mask == 0) + ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP; + if (filter->filter_info.src_port_mask == 0) + ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; + if (filter->filter_info.proto_mask == 0) + ftqf &= ~E1000_FTQF_MASK_PROTO_BP; + ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) & + E1000_FTQF_QUEUE_MASK; + ftqf |= E1000_FTQF_QUEUE_ENABLE; + E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf); + E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip); + E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip); + + spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT; + E1000_WRITE_REG(hw, E1000_SPQF(i), spqf); + + imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT); + if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */ + imir |= E1000_IMIR_PORT_BP; + else + imir &= ~E1000_IMIR_PORT_BP; + imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT; + + /* tcp flags bits setting. */ + if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) { + if (filter->filter_info.tcp_flags & TCP_URG_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_URG; + if (filter->filter_info.tcp_flags & TCP_ACK_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_ACK; + if (filter->filter_info.tcp_flags & TCP_PSH_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_PSH; + if (filter->filter_info.tcp_flags & TCP_RST_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_RST; + if (filter->filter_info.tcp_flags & TCP_SYN_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_SYN; + if (filter->filter_info.tcp_flags & TCP_FIN_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_FIN; + } else + imir_ext |= E1000_IMIREXT_CTRL_BP; + E1000_WRITE_REG(hw, E1000_IMIR(i), imir); + E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext); + return 0; +} + +/* + * igb_remove_5tuple_filter_82576 - remove a 5tuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * ntuple_filter: ponter to the filter that will be removed. + * + * @return + * - On success, zero. 
+ * - On failure, a negative value. + */ +static int +igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_5tuple_filter_info filter_5tuple; + struct e1000_5tuple_filter *filter; + int ret; + + memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info)); + ret = ntuple_filter_to_5tuple_82576(ntuple_filter, + &filter_5tuple); + if (ret < 0) + return ret; + + filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list, + &filter_5tuple); + if (filter == NULL) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + return -ENOENT; + } + + filter_info->fivetuple_mask &= ~(1 << filter->index); + TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); + rte_free(filter); + + E1000_WRITE_REG(hw, E1000_FTQF(filter->index), + E1000_FTQF_VF_BP | E1000_FTQF_MASK); + E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0); + E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0); + E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0); + E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0); + E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0); + return 0; +} + +static int +eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + uint32_t rctl; + struct e1000_hw *hw; + struct rte_eth_dev_info dev_info; + uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN + + VLAN_TAG_SIZE); + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + +#ifdef RTE_LIBRTE_82571_SUPPORT + /* XXX: not bigger than max_rx_pktlen */ + if (hw->mac.type == e1000_82571) + return -ENOTSUP; +#endif + eth_igb_infos_get(dev, &dev_info); + + /* check that mtu is within the allowed range */ + if ((mtu < ETHER_MIN_MTU) || + (frame_size > dev_info.max_rx_pktlen)) + return -EINVAL; + + /* refuse mtu that requires the support of scattered packets when this + * feature has not been enabled before. */ + if (!dev->data->scattered_rx && + frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) + return -EINVAL; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + + /* switch to jumbo mode if needed */ + if (frame_size > ETHER_MAX_LEN) { + dev->data->dev_conf.rxmode.jumbo_frame = 1; + rctl |= E1000_RCTL_LPE; + } else { + dev->data->dev_conf.rxmode.jumbo_frame = 0; + rctl &= ~E1000_RCTL_LPE; + } + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + /* update max frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + + E1000_WRITE_REG(hw, E1000_RLPML, + dev->data->dev_conf.rxmode.max_rx_pkt_len); + + return 0; +} + +/* + * igb_add_del_ntuple_filter - add or delete a ntuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * ntuple_filter: Pointer to struct rte_eth_ntuple_filter + * add: if true, add filter, if false, remove filter + * + * @return + * - On success, zero. + * - On failure, a negative value. 
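+ *
+ * Note: 5-tuple filters are only supported on the 82576 MAC, and 2-tuple
+ * filters only on the 82580 and i350 MACs, as checked below.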
+ */ +static int +igb_add_del_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter, + bool add) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + switch (ntuple_filter->flags) { + case RTE_5TUPLE_FLAGS: + case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): + if (hw->mac.type != e1000_82576) + return -ENOTSUP; + if (add) + ret = igb_add_5tuple_filter_82576(dev, + ntuple_filter); + else + ret = igb_remove_5tuple_filter_82576(dev, + ntuple_filter); + break; + case RTE_2TUPLE_FLAGS: + case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): + if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350) + return -ENOTSUP; + if (add) + ret = igb_add_2tuple_filter(dev, ntuple_filter); + else + ret = igb_remove_2tuple_filter(dev, ntuple_filter); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +/* + * igb_get_ntuple_filter - get a ntuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * ntuple_filter: Pointer to struct rte_eth_ntuple_filter + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +igb_get_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_5tuple_filter_info filter_5tuple; + struct e1000_2tuple_filter_info filter_2tuple; + struct e1000_5tuple_filter *p_5tuple_filter; + struct e1000_2tuple_filter *p_2tuple_filter; + int ret; + + switch (ntuple_filter->flags) { + case RTE_5TUPLE_FLAGS: + case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): + if (hw->mac.type != e1000_82576) + return -ENOTSUP; + memset(&filter_5tuple, + 0, + sizeof(struct e1000_5tuple_filter_info)); + ret = ntuple_filter_to_5tuple_82576(ntuple_filter, + &filter_5tuple); + if (ret < 0) + return ret; + p_5tuple_filter = igb_5tuple_filter_lookup_82576( + &filter_info->fivetuple_list, + &filter_5tuple); + if (p_5tuple_filter == NULL) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + return -ENOENT; + } + ntuple_filter->queue = p_5tuple_filter->queue; + break; + case RTE_2TUPLE_FLAGS: + case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): + if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350) + return -ENOTSUP; + memset(&filter_2tuple, + 0, + sizeof(struct e1000_2tuple_filter_info)); + ret = ntuple_filter_to_2tuple(ntuple_filter, &filter_2tuple); + if (ret < 0) + return ret; + p_2tuple_filter = igb_2tuple_filter_lookup( + &filter_info->twotuple_list, + &filter_2tuple); + if (p_2tuple_filter == NULL) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + return -ENOENT; + } + ntuple_filter->queue = p_2tuple_filter->queue; + break; + default: + ret = -EINVAL; + break; + } + + return 0; +} + +/* + * igb_ntuple_filter_handle - Handle operations for ntuple filter. + * @dev: pointer to rte_eth_dev structure + * @filter_op:operation will be taken. 
+ * @arg: a pointer to specific structure corresponding to the filter_op + */ +static int +igb_ntuple_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + MAC_TYPE_FILTER_SUP(hw->mac.type); + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = igb_add_del_ntuple_filter(dev, + (struct rte_eth_ntuple_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = igb_add_del_ntuple_filter(dev, + (struct rte_eth_ntuple_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = igb_get_ntuple_filter(dev, + (struct rte_eth_ntuple_filter *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; +} + +static inline int +igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info, + uint16_t ethertype) +{ + int i; + + for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) { + if (filter_info->ethertype_filters[i] == ethertype && + (filter_info->ethertype_mask & (1 << i))) + return i; + } + return -1; +} + +static inline int +igb_ethertype_filter_insert(struct e1000_filter_info *filter_info, + uint16_t ethertype) +{ + int i; + + for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) { + if (!(filter_info->ethertype_mask & (1 << i))) { + filter_info->ethertype_mask |= 1 << i; + filter_info->ethertype_filters[i] = ethertype; + return i; + } + } + return -1; +} + +static inline int +igb_ethertype_filter_remove(struct e1000_filter_info *filter_info, + uint8_t idx) +{ + if (idx >= E1000_MAX_ETQF_FILTERS) + return -1; + filter_info->ethertype_mask &= ~(1 << idx); + filter_info->ethertype_filters[idx] = 0; + return idx; +} + + +static int +igb_add_del_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter, + bool add) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint32_t etqf = 0; + int ret; + + if (filter->ether_type == ETHER_TYPE_IPv4 || + filter->ether_type == ETHER_TYPE_IPv6) { + PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" + " ethertype filter.", filter->ether_type); + return -EINVAL; + } + + if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { + PMD_DRV_LOG(ERR, "mac compare is unsupported."); + return -EINVAL; + } + if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { + PMD_DRV_LOG(ERR, "drop option is unsupported."); + return -EINVAL; + } + + ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type); + if (ret >= 0 && add) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", + filter->ether_type); + return -EEXIST; + } + if (ret < 0 && !add) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", + filter->ether_type); + return -ENOENT; + } + + if (add) { + ret = igb_ethertype_filter_insert(filter_info, + filter->ether_type); + if (ret < 0) { + PMD_DRV_LOG(ERR, "ethertype filters are full."); + return -ENOSYS; + } + + etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE; + etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE); + etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT; + } else { + ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret); + if (ret < 0) + return -ENOSYS; + } + E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf); + 
E1000_WRITE_FLUSH(hw); + + return 0; +} + +static int +igb_get_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint32_t etqf; + int ret; + + ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type); + if (ret < 0) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", + filter->ether_type); + return -ENOENT; + } + + etqf = E1000_READ_REG(hw, E1000_ETQF(ret)); + if (etqf & E1000_ETQF_FILTER_ENABLE) { + filter->ether_type = etqf & E1000_ETQF_ETHERTYPE; + filter->flags = 0; + filter->queue = (etqf & E1000_ETQF_QUEUE) >> + E1000_ETQF_QUEUE_SHIFT; + return 0; + } + + return -ENOENT; +} + +/* + * igb_ethertype_filter_handle - Handle operations for ethertype filter. + * @dev: pointer to rte_eth_dev structure + * @filter_op:operation will be taken. + * @arg: a pointer to specific structure corresponding to the filter_op + */ +static int +igb_ethertype_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + MAC_TYPE_FILTER_SUP(hw->mac.type); + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = igb_add_del_ethertype_filter(dev, + (struct rte_eth_ethertype_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = igb_add_del_ethertype_filter(dev, + (struct rte_eth_ethertype_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = igb_get_ethertype_filter(dev, + (struct rte_eth_ethertype_filter *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; +} + +static int +eth_igb_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + int ret = -EINVAL; + + switch (filter_type) { + case RTE_ETH_FILTER_NTUPLE: + ret = igb_ntuple_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_ETHERTYPE: + ret = igb_ethertype_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_SYN: + ret = eth_igb_syn_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_FLEXIBLE: + ret = eth_igb_flex_filter_handle(dev, filter_op, arg); + break; + default: + PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", + filter_type); + break; + } + + return ret; +} + +static int +eth_igb_set_mc_addr_list(struct rte_eth_dev *dev, + struct ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +{ + struct e1000_hw *hw; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr); + return 0; +} + +static uint64_t +igb_read_systime_cyclecounter(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t systime_cycles; + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + /* + * Need to read System Time Residue Register to be able + * to read the other two registers. + */ + E1000_READ_REG(hw, E1000_SYSTIMR); + /* SYSTIMEL stores ns and SYSTIMEH stores seconds. 
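+ * The combined value computed below is therefore already expressed in
+ * nanoseconds.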
*/ + systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML); + systime_cycles += (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH) + * NSEC_PER_SEC; + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + /* + * Need to read System Time Residue Register to be able + * to read the other two registers. + */ + E1000_READ_REG(hw, E1000_SYSTIMR); + systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML); + /* Only the 8 LSB are valid. */ + systime_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_SYSTIMH) + & 0xff) << 32; + break; + default: + systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML); + systime_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH) + << 32; + break; + } + + return systime_cycles; +} + +static uint64_t +igb_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t rx_tstamp_cycles; + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + /* RXSTMPL stores ns and RXSTMPH stores seconds. */ + rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL); + rx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH) + * NSEC_PER_SEC; + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL); + /* Only the 8 LSB are valid. */ + rx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_RXSTMPH) + & 0xff) << 32; + break; + default: + rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL); + rx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH) + << 32; + break; + } + + return rx_tstamp_cycles; +} + +static uint64_t +igb_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t tx_tstamp_cycles; + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + /* RXSTMPL stores ns and RXSTMPH stores seconds. */ + tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL); + tx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH) + * NSEC_PER_SEC; + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL); + /* Only the 8 LSB are valid. */ + tx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_TXSTMPH) + & 0xff) << 32; + break; + default: + tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL); + tx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH) + << 32; + break; + } + + return tx_tstamp_cycles; +} + +static void +igb_start_timecounters(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_adapter *adapter = + (struct e1000_adapter *)dev->data->dev_private; + uint32_t incval = 1; + uint32_t shift = 0; + uint64_t mask = E1000_CYCLECOUNTER_MASK; + + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i354: + /* 32 LSB bits + 8 MSB bits = 40 bits */ + mask = (1ULL << 40) - 1; + /* fall-through */ + case e1000_i210: + case e1000_i211: + /* + * Start incrementing the register + * used to timestamp PTP packets. 
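+ * TIMINCA is written with an increment value of 1 here; the 82576 case
+ * below instead uses E1000_INCVALUE_82576 together with a cyclecounter
+ * shift.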
+ */ + E1000_WRITE_REG(hw, E1000_TIMINCA, incval); + break; + case e1000_82576: + incval = E1000_INCVALUE_82576; + shift = IGB_82576_TSYNC_SHIFT; + E1000_WRITE_REG(hw, E1000_TIMINCA, + E1000_INCPERIOD_82576 | incval); + break; + default: + /* Not supported */ + return; + } + + memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); + memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); + memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); + + adapter->systime_tc.cc_mask = mask; + adapter->systime_tc.cc_shift = shift; + adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; + + adapter->rx_tstamp_tc.cc_mask = mask; + adapter->rx_tstamp_tc.cc_shift = shift; + adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; + + adapter->tx_tstamp_tc.cc_mask = mask; + adapter->tx_tstamp_tc.cc_shift = shift; + adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; +} + +static int +igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) +{ + struct e1000_adapter *adapter = + (struct e1000_adapter *)dev->data->dev_private; + + adapter->systime_tc.nsec += delta; + adapter->rx_tstamp_tc.nsec += delta; + adapter->tx_tstamp_tc.nsec += delta; + + return 0; +} + +static int +igb_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) +{ + uint64_t ns; + struct e1000_adapter *adapter = + (struct e1000_adapter *)dev->data->dev_private; + + ns = rte_timespec_to_ns(ts); + + /* Set the timecounters to a new value. */ + adapter->systime_tc.nsec = ns; + adapter->rx_tstamp_tc.nsec = ns; + adapter->tx_tstamp_tc.nsec = ns; + + return 0; +} + +static int +igb_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) +{ + uint64_t ns, systime_cycles; + struct e1000_adapter *adapter = + (struct e1000_adapter *)dev->data->dev_private; + + systime_cycles = igb_read_systime_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); + *ts = rte_ns_to_timespec(ns); + + return 0; +} + +static int +igb_timesync_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t tsync_ctl; + uint32_t tsauxc; + + /* Stop the timesync system time. */ + E1000_WRITE_REG(hw, E1000_TIMINCA, 0x0); + /* Reset the timesync system time value. */ + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + E1000_WRITE_REG(hw, E1000_SYSTIMR, 0x0); + /* fall-through */ + case e1000_82576: + E1000_WRITE_REG(hw, E1000_SYSTIML, 0x0); + E1000_WRITE_REG(hw, E1000_SYSTIMH, 0x0); + break; + default: + /* Not supported. */ + return -ENOTSUP; + } + + /* Enable system time for it isn't on by default. */ + tsauxc = E1000_READ_REG(hw, E1000_TSAUXC); + tsauxc &= ~E1000_TSAUXC_DISABLE_SYSTIME; + E1000_WRITE_REG(hw, E1000_TSAUXC, tsauxc); + + igb_start_timecounters(dev); + + /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ + E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), + (ETHER_TYPE_1588 | + E1000_ETQF_FILTER_ENABLE | + E1000_ETQF_1588)); + + /* Enable timestamping of received PTP packets. */ + tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL); + tsync_ctl |= E1000_TSYNCRXCTL_ENABLED; + E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl); + + /* Enable Timestamping of transmitted PTP packets. 
*/ + tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL); + tsync_ctl |= E1000_TSYNCTXCTL_ENABLED; + E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl); + + return 0; +} + +static int +igb_timesync_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t tsync_ctl; + + /* Disable timestamping of transmitted PTP packets. */ + tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL); + tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED; + E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl); + + /* Disable timestamping of received PTP packets. */ + tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL); + tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED; + E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl); + + /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ + E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0); + + /* Stop incrementating the System Time registers. */ + E1000_WRITE_REG(hw, E1000_TIMINCA, 0); + + return 0; +} + +static int +igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp, + uint32_t flags __rte_unused) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_adapter *adapter = + (struct e1000_adapter *)dev->data->dev_private; + uint32_t tsync_rxctl; + uint64_t rx_tstamp_cycles; + uint64_t ns; + + tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL); + if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0) + return -EINVAL; + + rx_tstamp_cycles = igb_read_rx_tstamp_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); + *timestamp = rte_ns_to_timespec(ns); + + return 0; +} + +static int +igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_adapter *adapter = + (struct e1000_adapter *)dev->data->dev_private; + uint32_t tsync_txctl; + uint64_t tx_tstamp_cycles; + uint64_t ns; + + tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL); + if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0) + return -EINVAL; + + tx_tstamp_cycles = igb_read_tx_tstamp_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); + *timestamp = rte_ns_to_timespec(ns); + + return 0; +} + +static int +eth_igb_get_reg_length(struct rte_eth_dev *dev __rte_unused) +{ + int count = 0; + int g_ind = 0; + const struct reg_info *reg_group; + + while ((reg_group = igb_regs[g_ind++])) + count += igb_reg_group_count(reg_group); + + return count; +} + +static int +igbvf_get_reg_length(struct rte_eth_dev *dev __rte_unused) +{ + int count = 0; + int g_ind = 0; + const struct reg_info *reg_group; + + while ((reg_group = igbvf_regs[g_ind++])) + count += igb_reg_group_count(reg_group); + + return count; +} + +static int +eth_igb_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t *data = regs->data; + int g_ind = 0; + int count = 0; + const struct reg_info *reg_group; + + /* Support only full register dump */ + if ((regs->length == 0) || + (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) { + regs->version = hw->mac.type << 24 | hw->revision_id << 16 | + hw->device_id; + while ((reg_group = igb_regs[g_ind++])) + count += igb_read_regs_group(dev, &data[count], + reg_group); + return 0; + } + + return -ENOTSUP; +} + +static int +igbvf_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs) +{ + struct e1000_hw *hw = 
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t *data = regs->data; + int g_ind = 0; + int count = 0; + const struct reg_info *reg_group; + + /* Support only full register dump */ + if ((regs->length == 0) || + (regs->length == (uint32_t)igbvf_get_reg_length(dev))) { + regs->version = hw->mac.type << 24 | hw->revision_id << 16 | + hw->device_id; + while ((reg_group = igbvf_regs[g_ind++])) + count += igb_read_regs_group(dev, &data[count], + reg_group); + return 0; + } + + return -ENOTSUP; +} + +static int +eth_igb_get_eeprom_length(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Return unit is byte count */ + return hw->nvm.word_size * 2; +} + +static int +eth_igb_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *in_eeprom) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_nvm_info *nvm = &hw->nvm; + uint16_t *data = in_eeprom->data; + int first, length; + + first = in_eeprom->offset >> 1; + length = in_eeprom->length >> 1; + if ((first >= hw->nvm.word_size) || + ((first + length) >= hw->nvm.word_size)) + return -EINVAL; + + in_eeprom->magic = hw->vendor_id | + ((uint32_t)hw->device_id << 16); + + if ((nvm->ops.read) == NULL) + return -ENOTSUP; + + return nvm->ops.read(hw, first, length, data); +} + +static int +eth_igb_set_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *in_eeprom) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_nvm_info *nvm = &hw->nvm; + uint16_t *data = in_eeprom->data; + int first, length; + + first = in_eeprom->offset >> 1; + length = in_eeprom->length >> 1; + if ((first >= hw->nvm.word_size) || + ((first + length) >= hw->nvm.word_size)) + return -EINVAL; + + in_eeprom->magic = (uint32_t)hw->vendor_id | + ((uint32_t)hw->device_id << 16); + + if ((nvm->ops.write) == NULL) + return -ENOTSUP; + return nvm->ops.write(hw, first, length, data); +} + +static struct rte_driver pmd_igb_drv = { + .type = PMD_PDEV, + .init = rte_igb_pmd_init, +}; + +static struct rte_driver pmd_igbvf_drv = { + .type = PMD_PDEV, + .init = rte_igbvf_pmd_init, +}; + +static int +eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t mask = 1 << queue_id; + + E1000_WRITE_REG(hw, E1000_EIMC, mask); + E1000_WRITE_FLUSH(hw); + + return 0; +} + +static int +eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t mask = 1 << queue_id; + uint32_t regval; + + regval = E1000_READ_REG(hw, E1000_EIMS); + E1000_WRITE_REG(hw, E1000_EIMS, regval | mask); + E1000_WRITE_FLUSH(hw); + + rte_intr_enable(&dev->pci_dev->intr_handle); + + return 0; +} + +static void +eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector, + uint8_t index, uint8_t offset) +{ + uint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); + + /* clear bits */ + val &= ~((uint32_t)0xFF << offset); + + /* write vector and valid bit */ + val |= (msix_vector | E1000_IVAR_VALID) << offset; + + E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val); +} + +static void +eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction, + uint8_t queue, uint8_t msix_vector) +{ + uint32_t tmp = 0; + + if (hw->mac.type == e1000_82575) { + if (direction == 0) + tmp = E1000_EICR_RX_QUEUE0 << queue; + else if (direction == 1) + tmp = E1000_EICR_TX_QUEUE0 << 
queue; + E1000_WRITE_REG(hw, E1000_MSIXBM(msix_vector), tmp); + } else if (hw->mac.type == e1000_82576) { + if ((direction == 0) || (direction == 1)) + eth_igb_write_ivar(hw, msix_vector, queue & 0x7, + ((queue & 0x8) << 1) + + 8 * direction); + } else if ((hw->mac.type == e1000_82580) || + (hw->mac.type == e1000_i350) || + (hw->mac.type == e1000_i354) || + (hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) { + if ((direction == 0) || (direction == 1)) + eth_igb_write_ivar(hw, msix_vector, + queue >> 1, + ((queue & 0x1) << 4) + + 8 * direction); + } +} + +/* Sets up the hardware to generate MSI-X interrupts properly + * @hw + * board private structure + */ +static void +eth_igb_configure_msix_intr(struct rte_eth_dev *dev) +{ + int queue_id; + uint32_t tmpval, regval, intr_mask; + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t vec = E1000_MISC_VEC_ID; + uint32_t base = E1000_MISC_VEC_ID; + uint32_t misc_shift = 0; + + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + + /* won't configure msix register if no mapping is done + * between intr vector and event fd + */ + if (!rte_intr_dp_is_en(intr_handle)) + return; + + if (rte_intr_allow_others(intr_handle)) { + vec = base = E1000_RX_VEC_START; + misc_shift = 1; + } + + /* set interrupt vector for other causes */ + if (hw->mac.type == e1000_82575) { + tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT); + /* enable MSI-X PBA support */ + tmpval |= E1000_CTRL_EXT_PBA_CLR; + + /* Auto-Mask interrupts upon ICR read */ + tmpval |= E1000_CTRL_EXT_EIAME; + tmpval |= E1000_CTRL_EXT_IRCA; + + E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmpval); + + /* enable msix_other interrupt */ + E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), 0, E1000_EIMS_OTHER); + regval = E1000_READ_REG(hw, E1000_EIAC); + E1000_WRITE_REG(hw, E1000_EIAC, regval | E1000_EIMS_OTHER); + regval = E1000_READ_REG(hw, E1000_EIAM); + E1000_WRITE_REG(hw, E1000_EIMS, regval | E1000_EIMS_OTHER); + } else if ((hw->mac.type == e1000_82576) || + (hw->mac.type == e1000_82580) || + (hw->mac.type == e1000_i350) || + (hw->mac.type == e1000_i354) || + (hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) { + /* turn on MSI-X capability first */ + E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE | + E1000_GPIE_PBA | E1000_GPIE_EIAME | + E1000_GPIE_NSICR); + intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) << + misc_shift; + regval = E1000_READ_REG(hw, E1000_EIAC); + E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask); + + /* enable msix_other interrupt */ + regval = E1000_READ_REG(hw, E1000_EIMS); + E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask); + tmpval = (dev->data->nb_rx_queues | E1000_IVAR_VALID) << 8; + E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval); + } + + /* use EIAM to auto-mask when MSI-X interrupt + * is asserted, this saves a register write for every interrupt + */ + intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) << + misc_shift; + regval = E1000_READ_REG(hw, E1000_EIAM); + E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask); + + for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) { + eth_igb_assign_msix_vector(hw, 0, queue_id, vec); + intr_handle->intr_vec[queue_id] = vec; + if (vec < base + intr_handle->nb_efd - 1) + vec++; + } + + E1000_WRITE_FLUSH(hw); +} + +PMD_REGISTER_DRIVER(pmd_igb_drv); +PMD_REGISTER_DRIVER(pmd_igbvf_drv); diff --git a/drivers/net/e1000/igb_pf.c b/drivers/net/e1000/igb_pf.c new file mode 100644 index 00000000..5845bc22 --- /dev/null +++ b/drivers/net/e1000/igb_pf.c 
@@ -0,0 +1,534 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <stdio.h> +#include <errno.h> +#include <stdint.h> +#include <stdlib.h> +#include <unistd.h> +#include <stdarg.h> +#include <inttypes.h> + +#include <rte_interrupts.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_eal.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_memcpy.h> +#include <rte_malloc.h> +#include <rte_random.h> + +#include "base/e1000_defines.h" +#include "base/e1000_regs.h" +#include "base/e1000_hw.h" +#include "e1000_ethdev.h" + +static inline uint16_t +dev_num_vf(struct rte_eth_dev *eth_dev) +{ + return eth_dev->pci_dev->max_vfs; +} + +static inline +int igb_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num) +{ + unsigned char vf_mac_addr[ETHER_ADDR_LEN]; + struct e1000_vf_info *vfinfo = + *E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); + uint16_t vfn; + + for (vfn = 0; vfn < vf_num; vfn++) { + eth_random_addr(vf_mac_addr); + /* keep the random address as default */ + memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr, + ETHER_ADDR_LEN); + } + + return 0; +} + +static inline int +igb_mb_intr_setup(struct rte_eth_dev *dev) +{ + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + intr->mask |= E1000_ICR_VMMB; + + return 0; +} + +void igb_pf_host_init(struct rte_eth_dev *eth_dev) +{ + struct e1000_vf_info **vfinfo = + E1000_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private); + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + uint16_t vf_num; + uint8_t nb_queue; + + RTE_ETH_DEV_SRIOV(eth_dev).active = 0; + if (0 == (vf_num = dev_num_vf(eth_dev))) + return; + + if (hw->mac.type == e1000_i350) + nb_queue = 1; + else if(hw->mac.type == e1000_82576) + /* per datasheet, it should be 2, but 1 seems correct */ + nb_queue = 1; + else + return; + + *vfinfo = rte_zmalloc("vf_info", sizeof(struct e1000_vf_info) * vf_num, 0); + if 
(*vfinfo == NULL) + rte_panic("Cannot allocate memory for private VF data\n"); + + RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_8_POOLS; + RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue; + RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num; + RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue); + + igb_vf_perm_addr_gen(eth_dev, vf_num); + + /* set mb interrupt mask */ + igb_mb_intr_setup(eth_dev); + + return; +} + +void igb_pf_host_uninit(struct rte_eth_dev *dev) +{ + struct e1000_vf_info **vfinfo; + uint16_t vf_num; + + PMD_INIT_FUNC_TRACE(); + + vfinfo = E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); + + RTE_ETH_DEV_SRIOV(dev).active = 0; + RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 0; + RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx = 0; + RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = 0; + + vf_num = dev_num_vf(dev); + if (vf_num == 0) + return; + + rte_free(*vfinfo); + *vfinfo = NULL; +} + +#define E1000_RAH_POOLSEL_SHIFT (18) +int igb_pf_host_configure(struct rte_eth_dev *eth_dev) +{ + uint32_t vtctl; + uint16_t vf_num; + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + uint32_t vlanctrl; + int i; + uint32_t rah; + + if (0 == (vf_num = dev_num_vf(eth_dev))) + return -1; + + /* enable VMDq and set the default pool for PF */ + vtctl = E1000_READ_REG(hw, E1000_VT_CTL); + vtctl &= ~E1000_VT_CTL_DEFAULT_POOL_MASK; + vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx + << E1000_VT_CTL_DEFAULT_POOL_SHIFT; + vtctl |= E1000_VT_CTL_VM_REPL_EN; + E1000_WRITE_REG(hw, E1000_VT_CTL, vtctl); + + /* Enable pools reserved to PF only */ + E1000_WRITE_REG(hw, E1000_VFRE, (~0U) << vf_num); + E1000_WRITE_REG(hw, E1000_VFTE, (~0U) << vf_num); + + /* PFDMA Tx General Switch Control Enables VMDQ loopback */ + if (hw->mac.type == e1000_i350) + E1000_WRITE_REG(hw, E1000_TXSWC, E1000_DTXSWC_VMDQ_LOOPBACK_EN); + else + E1000_WRITE_REG(hw, E1000_DTXSWC, E1000_DTXSWC_VMDQ_LOOPBACK_EN); + + /* clear VMDq map to perment rar 0 */ + rah = E1000_READ_REG(hw, E1000_RAH(0)); + rah &= ~ (0xFF << E1000_RAH_POOLSEL_SHIFT); + E1000_WRITE_REG(hw, E1000_RAH(0), rah); + + /* clear VMDq map to scan rar 32 */ + rah = E1000_READ_REG(hw, E1000_RAH(hw->mac.rar_entry_count)); + rah &= ~ (0xFF << E1000_RAH_POOLSEL_SHIFT); + E1000_WRITE_REG(hw, E1000_RAH(hw->mac.rar_entry_count), rah); + + /* set VMDq map to default PF pool */ + rah = E1000_READ_REG(hw, E1000_RAH(0)); + rah |= (0x1 << (RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx + + E1000_RAH_POOLSEL_SHIFT)); + E1000_WRITE_REG(hw, E1000_RAH(0), rah); + + /* + * enable vlan filtering and allow all vlan tags through + */ + vlanctrl = E1000_READ_REG(hw, E1000_RCTL); + vlanctrl |= E1000_RCTL_VFE ; /* enable vlan filters */ + E1000_WRITE_REG(hw, E1000_RCTL, vlanctrl); + + /* VFTA - enable all vlan filters */ + for (i = 0; i < IGB_VFTA_SIZE; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, 0xFFFFFFFF); + } + + /* Enable/Disable MAC Anti-Spoofing */ + e1000_vmdq_set_anti_spoofing_pf(hw, FALSE, vf_num); + + return 0; +} + +static void +set_rx_mode(struct rte_eth_dev *dev) +{ + struct rte_eth_dev_data *dev_data = dev->data; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t fctrl, vmolr = E1000_VMOLR_BAM | E1000_VMOLR_AUPE; + uint16_t vfn = dev_num_vf(dev); + + /* Check for Promiscuous and All Multicast modes */ + fctrl = E1000_READ_REG(hw, E1000_RCTL); + + /* set all bits that we expect to always be set */ + fctrl &= ~E1000_RCTL_SBP; /* disable store-bad-packets */ + fctrl |= E1000_RCTL_BAM; + + /* clear the bits we are changing 
the status of */ + fctrl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); + + if (dev_data->promiscuous) { + fctrl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME); + } else { + if (dev_data->all_multicast) { + fctrl |= E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + } else { + vmolr |= E1000_VMOLR_ROMPE; + } + } + + if ((hw->mac.type == e1000_82576) || + (hw->mac.type == e1000_i350)) { + vmolr |= E1000_READ_REG(hw, E1000_VMOLR(vfn)) & + ~(E1000_VMOLR_MPME | E1000_VMOLR_ROMPE | + E1000_VMOLR_ROPE); + E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr); + } + + E1000_WRITE_REG(hw, E1000_RCTL, fctrl); +} + +static inline void +igb_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vf_info *vfinfo = + *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + uint32_t vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf)); + + vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | + E1000_VMOLR_BAM | E1000_VMOLR_AUPE); + E1000_WRITE_REG(hw, E1000_VMOLR(vf), vmolr); + + E1000_WRITE_REG(hw, E1000_VMVIR(vf), 0); + + /* reset multicast table array for vf */ + vfinfo[vf].num_vf_mc_hashes = 0; + + /* reset rx mode */ + set_rx_mode(dev); +} + +static inline void +igb_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + + /* enable transmit and receive for vf */ + reg = E1000_READ_REG(hw, E1000_VFTE); + reg |= (reg | (1 << vf)); + E1000_WRITE_REG(hw, E1000_VFTE, reg); + + reg = E1000_READ_REG(hw, E1000_VFRE); + reg |= (reg | (1 << vf)); + E1000_WRITE_REG(hw, E1000_VFRE, reg); + + igb_vf_reset_event(dev, vf); +} + +static int +igb_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vf_info *vfinfo = + *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses; + int rar_entry = hw->mac.rar_entry_count - (vf + 1); + uint8_t *new_mac = (uint8_t *)(&msgbuf[1]); + uint32_t rah; + + igb_vf_reset_msg(dev, vf); + + hw->mac.ops.rar_set(hw, vf_mac, rar_entry); + rah = E1000_READ_REG(hw, E1000_RAH(rar_entry)); + rah |= (0x1 << (vf + E1000_RAH_POOLSEL_SHIFT)); + E1000_WRITE_REG(hw, E1000_RAH(rar_entry), rah); + + /* reply to reset with ack and vf mac address */ + msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; + rte_memcpy(new_mac, vf_mac, ETHER_ADDR_LEN); + e1000_write_mbx(hw, msgbuf, 3, vf); + + return 0; +} + +static int +igb_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vf_info *vfinfo = + *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + int rar_entry = hw->mac.rar_entry_count - (vf + 1); + uint8_t *new_mac = (uint8_t *)(&msgbuf[1]); + + if (is_unicast_ether_addr((struct ether_addr *)new_mac)) { + if (!is_zero_ether_addr((struct ether_addr *)new_mac)) + rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, + sizeof(vfinfo[vf].vf_mac_addresses)); + hw->mac.ops.rar_set(hw, new_mac, rar_entry); + return 0; + } + return -1; +} + +static int +igb_vf_set_multicast(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf) +{ + int i; + uint32_t vector_bit; + uint32_t vector_reg; + uint32_t mta_reg; + int entries = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> + E1000_VT_MSGINFO_SHIFT; + uint16_t *hash_list = (uint16_t *)&msgbuf[1]; + struct 
e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vf_info *vfinfo = + *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + + /* only so many hash values supported */ + entries = RTE_MIN(entries, E1000_MAX_VF_MC_ENTRIES); + + /* + * salt away the number of multi cast addresses assigned + * to this VF for later use to restore when the PF multi cast + * list changes + */ + vfinfo->num_vf_mc_hashes = (uint16_t)entries; + + /* + * VFs are limited to using the MTA hash table for their multicast + * addresses + */ + for (i = 0; i < entries; i++) { + vfinfo->vf_mc_hashes[i] = hash_list[i]; + } + + for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { + vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; + vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; + mta_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, vector_reg); + mta_reg |= (1 << vector_bit); + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, vector_reg, mta_reg); + } + + return 0; +} + +static int +igb_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) +{ + int add, vid; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vf_info *vfinfo = + *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + uint32_t vid_idx, vid_bit, vfta; + + add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) + >> E1000_VT_MSGINFO_SHIFT; + vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); + + if (add) + vfinfo[vf].vlan_count++; + else if (vfinfo[vf].vlan_count) + vfinfo[vf].vlan_count--; + + vid_idx = (uint32_t)((vid >> E1000_VFTA_ENTRY_SHIFT) & + E1000_VFTA_ENTRY_MASK); + vid_bit = (uint32_t)(1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK)); + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx); + if (add) + vfta |= vid_bit; + else + vfta &= ~vid_bit; + + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta); + E1000_WRITE_FLUSH(hw); + + return 0; +} + +static int +igb_vf_set_rlpml(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t rlpml = msgbuf[1] & E1000_VMOLR_RLPML_MASK; + uint32_t max_frame = rlpml + ETHER_HDR_LEN + ETHER_CRC_LEN; + uint32_t vmolr; + + if ((max_frame < ETHER_MIN_LEN) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN)) + return -1; + + vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf)); + + vmolr &= ~E1000_VMOLR_RLPML_MASK; + vmolr |= rlpml; + + /* Enable Long Packet support */ + vmolr |= E1000_VMOLR_LPE; + + E1000_WRITE_REG(hw, E1000_VMOLR(vf), vmolr); + E1000_WRITE_FLUSH(hw); + + return 0; +} + +static int +igb_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf) +{ + uint16_t mbx_size = E1000_VFMAILBOX_SIZE; + uint32_t msgbuf[E1000_VFMAILBOX_SIZE]; + int32_t retval; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + retval = e1000_read_mbx(hw, msgbuf, mbx_size, vf); + if (retval) { + PMD_INIT_LOG(ERR, "Error mbx recv msg from VF %d", vf); + return retval; + } + + /* do nothing with the message already processed */ + if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) + return retval; + + /* flush the ack before we write any messages back */ + E1000_WRITE_FLUSH(hw); + + /* perform VF reset */ + if (msgbuf[0] == E1000_VF_RESET) { + return igb_vf_reset(dev, vf, msgbuf); + } + + /* check & process VF to PF mailbox message */ + switch ((msgbuf[0] & 0xFFFF)) { + case E1000_VF_SET_MAC_ADDR: + retval = igb_vf_set_mac_addr(dev, vf, msgbuf); + break; + case E1000_VF_SET_MULTICAST: + retval = igb_vf_set_multicast(dev, vf, msgbuf); + break; + case E1000_VF_SET_LPE: + retval = 
igb_vf_set_rlpml(dev, vf, msgbuf); + break; + case E1000_VF_SET_VLAN: + retval = igb_vf_set_vlan(dev, vf, msgbuf); + break; + default: + PMD_INIT_LOG(DEBUG, "Unhandled Msg %8.8x", + (unsigned) msgbuf[0]); + retval = E1000_ERR_MBX; + break; + } + + /* response the VF according to the message process result */ + if (retval) + msgbuf[0] |= E1000_VT_MSGTYPE_NACK; + else + msgbuf[0] |= E1000_VT_MSGTYPE_ACK; + + msgbuf[0] |= E1000_VT_MSGTYPE_CTS; + + e1000_write_mbx(hw, msgbuf, 1, vf); + + return retval; +} + +static inline void +igb_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf) +{ + uint32_t msg = E1000_VT_MSGTYPE_NACK; + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + e1000_write_mbx(hw, &msg, 1, vf); +} + +void igb_pf_mbx_process(struct rte_eth_dev *eth_dev) +{ + uint16_t vf; + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + + for (vf = 0; vf < dev_num_vf(eth_dev); vf++) { + /* check & process vf function level reset */ + if (!e1000_check_for_rst(hw, vf)) + igb_vf_reset_event(eth_dev, vf); + + /* check & process vf mailbox messages */ + if (!e1000_check_for_msg(hw, vf)) + igb_rcv_msg_from_vf(eth_dev, vf); + + /* check & process acks from vf */ + if (!e1000_check_for_ack(hw, vf)) + igb_rcv_ack_from_vf(eth_dev, vf); + } +} diff --git a/drivers/net/e1000/igb_regs.h b/drivers/net/e1000/igb_regs.h new file mode 100644 index 00000000..0b5e5e58 --- /dev/null +++ b/drivers/net/e1000/igb_regs.h @@ -0,0 +1,223 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef _IGB_REGS_H_ +#define _IGB_REGS_H_ + +#include "e1000_ethdev.h" + +struct reg_info { + uint32_t base_addr; + uint32_t count; + uint32_t stride; + const char *name; +}; + +static const struct reg_info igb_regs_general[] = { + {E1000_CTRL, 1, 1, "E1000_CTRL"}, + {E1000_STATUS, 1, 1, "E1000_STATUS"}, + {E1000_CTRL_EXT, 1, 1, "E1000_CTRL_EXT"}, + {E1000_MDIC, 1, 1, "E1000_MDIC"}, + {E1000_SCTL, 1, 1, "E1000_SCTL"}, + {E1000_CONNSW, 1, 1, "E1000_CONNSW"}, + {E1000_VET, 1, 1, "E1000_VET"}, + {E1000_LEDCTL, 1, 1, "E1000_LEDCTL"}, + {E1000_PBA, 1, 1, "E1000_PBA"}, + {E1000_PBS, 1, 1, "E1000_PBS"}, + {E1000_FRTIMER, 1, 1, "E1000_FRTIMER"}, + {E1000_TCPTIMER, 1, 1, "E1000_TCPTIMER"}, + {0, 0, 0, ""} +}; + +static const struct reg_info igb_regs_nvm[] = { + {E1000_EECD, 1, 1, "E1000_EECD"}, + {0, 0, 0, ""} +}; + +static const struct reg_info igb_regs_interrupt[] = { + {E1000_EICS, 1, 1, "E1000_EICS"}, + {E1000_EIMS, 1, 1, "E1000_EIMS"}, + {E1000_EIMC, 1, 1, "E1000_EIMC"}, + {E1000_EIAC, 1, 1, "E1000_EIAC"}, + {E1000_EIAM, 1, 1, "E1000_EIAM"}, + {E1000_ICS, 1, 1, "E1000_ICS"}, + {E1000_IMS, 1, 1, "E1000_IMS"}, + {E1000_IMC, 1, 1, "E1000_IMC"}, + {E1000_IAC, 1, 1, "E1000_IAC"}, + {E1000_IAM, 1, 1, "E1000_IAM"}, + {E1000_IMIRVP, 1, 1, "E1000_IMIRVP"}, + {E1000_EITR(0), 10, 4, "E1000_EITR"}, + {E1000_IMIR(0), 8, 4, "E1000_IMIR"}, + {E1000_IMIREXT(0), 8, 4, "E1000_IMIREXT"}, + {0, 0, 0, ""} +}; + +static const struct reg_info igb_regs_fctl[] = { + {E1000_FCAL, 1, 1, "E1000_FCAL"}, + {E1000_FCAH, 1, 1, "E1000_FCAH"}, + {E1000_FCTTV, 1, 1, "E1000_FCTTV"}, + {E1000_FCRTL, 1, 1, "E1000_FCRTL"}, + {E1000_FCRTH, 1, 1, "E1000_FCRTH"}, + {E1000_FCRTV, 1, 1, "E1000_FCRTV"}, + {0, 0, 0, ""} +}; + +static const struct reg_info igb_regs_rxdma[] = { + {E1000_RDBAL(0), 4, 0x100, "E1000_RDBAL"}, + {E1000_RDBAH(0), 4, 0x100, "E1000_RDBAH"}, + {E1000_RDLEN(0), 4, 0x100, "E1000_RDLEN"}, + {E1000_RDH(0), 4, 0x100, "E1000_RDH"}, + {E1000_RDT(0), 4, 0x100, "E1000_RDT"}, + {E1000_RXCTL(0), 4, 0x100, "E1000_RXCTL"}, + {E1000_SRRCTL(0), 4, 0x100, "E1000_SRRCTL"}, + {E1000_DCA_RXCTRL(0), 4, 0x100, "E1000_DCA_RXCTRL"}, + {0, 0, 0, ""} +}; + +static const struct reg_info igb_regs_rx[] = { + {E1000_RCTL, 1, 1, "E1000_RCTL"}, + {E1000_RXCSUM, 1, 1, "E1000_RXCSUM"}, + {E1000_RLPML, 1, 1, "E1000_RLPML"}, + {E1000_RFCTL, 1, 1, "E1000_RFCTL"}, + {E1000_MRQC, 1, 1, "E1000_MRQC"}, + {E1000_VT_CTL, 1, 1, "E1000_VT_CTL"}, + {E1000_RAL(0), 16, 8, "E1000_RAL"}, + {E1000_RAH(0), 16, 8, "E1000_RAH"}, + {0, 0, 0, ""} +}; + +static const struct reg_info igb_regs_tx[] = { + {E1000_TCTL, 1, 1, "E1000_TCTL"}, + {E1000_TCTL_EXT, 1, 1, "E1000_TCTL_EXT"}, + {E1000_TIPG, 1, 1, "E1000_TIPG"}, + {E1000_DTXCTL, 1, 1, "E1000_DTXCTL"}, + {E1000_TDBAL(0), 4, 0x100, "E1000_TDBAL"}, + {E1000_TDBAH(0), 4, 0x100, "E1000_TDBAH"}, + {E1000_TDLEN(0), 4, 0x100, "E1000_TDLEN"}, + {E1000_TDH(0), 4, 0x100, "E1000_TDLEN"}, + {E1000_TDT(0), 4, 0x100, "E1000_TDT"}, + {E1000_TXDCTL(0), 4, 0x100, "E1000_TXDCTL"}, + {E1000_TDWBAL(0), 4, 0x100, "E1000_TDWBAL"}, + {E1000_TDWBAH(0), 4, 0x100, "E1000_TDWBAH"}, + {E1000_DCA_TXCTRL(0), 4, 0x100, "E1000_DCA_TXCTRL"}, + {E1000_TDFH, 1, 1, "E1000_TDFH"}, + {E1000_TDFT, 1, 1, "E1000_TDFT"}, + {E1000_TDFHS, 1, 1, "E1000_TDFHS"}, + {E1000_TDFPC, 1, 1, "E1000_TDFPC"}, + {0, 0, 0, ""} +}; + +static const struct reg_info igb_regs_wakeup[] = { + {E1000_WUC, 1, 1, "E1000_WUC"}, + {E1000_WUFC, 1, 1, "E1000_WUFC"}, + {E1000_WUS, 1, 1, "E1000_WUS"}, + {E1000_IPAV, 1, 1, "E1000_IPAV"}, + {E1000_WUPL, 1, 1, "E1000_WUPL"}, + 
{E1000_IP4AT_REG(0), 4, 8, "E1000_IP4AT_REG"}, + {E1000_IP6AT_REG(0), 4, 4, "E1000_IP6AT_REG"}, + {E1000_WUPM_REG(0), 4, 4, "E1000_WUPM_REG"}, + {E1000_FFMT_REG(0), 4, 8, "E1000_FFMT_REG"}, + {E1000_FFVT_REG(0), 4, 8, "E1000_FFVT_REG"}, + {E1000_FFLT_REG(0), 4, 8, "E1000_FFLT_REG"}, + {0, 0, 0, ""} +}; + +static const struct reg_info igb_regs_mac[] = { + {E1000_PCS_CFG0, 1, 1, "E1000_PCS_CFG0"}, + {E1000_PCS_LCTL, 1, 1, "E1000_PCS_LCTL"}, + {E1000_PCS_LSTAT, 1, 1, "E1000_PCS_LSTAT"}, + {E1000_PCS_ANADV, 1, 1, "E1000_PCS_ANADV"}, + {E1000_PCS_LPAB, 1, 1, "E1000_PCS_LPAB"}, + {E1000_PCS_NPTX, 1, 1, "E1000_PCS_NPTX"}, + {E1000_PCS_LPABNP, 1, 1, "E1000_PCS_LPABNP"}, + {0, 0, 0, ""} +}; + +static const struct reg_info *igb_regs[] = { + igb_regs_general, + igb_regs_nvm, + igb_regs_interrupt, + igb_regs_fctl, + igb_regs_rxdma, + igb_regs_rx, + igb_regs_tx, + igb_regs_wakeup, + igb_regs_mac, + NULL}; + +/* FIXME: reading igb_regs_interrupt results side-effect which doesn't + * work with VFIO; re-install igb_regs_interrupt once issue is resolved. + */ +static const struct reg_info *igbvf_regs[] = { + igb_regs_general, + igb_regs_rxdma, + igb_regs_tx, + NULL}; + +static inline int +igb_read_regs(struct e1000_hw *hw, const struct reg_info *reg, + uint32_t *reg_buf) +{ + unsigned int i; + + for (i = 0; i < reg->count; i++) { + reg_buf[i] = E1000_READ_REG(hw, + reg->base_addr + i * reg->stride); + } + return reg->count; +}; + +static inline int +igb_reg_group_count(const struct reg_info *regs) +{ + int count = 0; + int i = 0; + + while (regs[i].count) + count += regs[i++].count; + return count; +}; + +static inline int +igb_read_regs_group(struct rte_eth_dev *dev, uint32_t *reg_buf, + const struct reg_info *regs) +{ + int count = 0; + int i = 0; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + while (regs[i].count) + count += igb_read_regs(hw, &regs[i++], &reg_buf[count]); + return count; +}; + +#endif /* _IGB_REGS_H_ */ diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c new file mode 100644 index 00000000..4a987e3c --- /dev/null +++ b/drivers/net/e1000/igb_rxtx.c @@ -0,0 +1,2526 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
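Stepping back to the register-dump helpers defined in igb_regs.h above: they are the building blocks for the driver's ethdev get-registers support, whose actual callbacks live in igb_ethdev.c and are not part of this hunk. A hedged sketch of how such a callback might walk the NULL-terminated igb_regs[] table (function names here are hypothetical):

/* Illustration only: walking igb_regs[] the way a get-registers callback
 * would; the example_* names are hypothetical. */
static int
example_igb_reg_length(void)
{
	int count = 0;
	int g;

	/* sum the register count of every group in the NULL-terminated table */
	for (g = 0; igb_regs[g] != NULL; g++)
		count += igb_reg_group_count(igb_regs[g]);
	return count;
}

static int
example_igb_read_all_regs(struct rte_eth_dev *dev, uint32_t *buf)
{
	int count = 0;
	int g;

	/* dump each group back to back into the caller-supplied buffer */
	for (g = 0; igb_regs[g] != NULL; g++)
		count += igb_read_regs_group(dev, &buf[count], igb_regs[g]);
	return count;
}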
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/queue.h> + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <errno.h> +#include <stdint.h> +#include <stdarg.h> +#include <inttypes.h> + +#include <rte_interrupts.h> +#include <rte_byteorder.h> +#include <rte_common.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_pci.h> +#include <rte_memory.h> +#include <rte_memcpy.h> +#include <rte_memzone.h> +#include <rte_launch.h> +#include <rte_eal.h> +#include <rte_per_lcore.h> +#include <rte_lcore.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_ring.h> +#include <rte_mempool.h> +#include <rte_malloc.h> +#include <rte_mbuf.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_prefetch.h> +#include <rte_udp.h> +#include <rte_tcp.h> +#include <rte_sctp.h> +#include <rte_string_fns.h> + +#include "e1000_logs.h" +#include "base/e1000_api.h" +#include "e1000_ethdev.h" + +/* Bit Mask to indicate what bits required for building TX context */ +#define IGB_TX_OFFLOAD_MASK ( \ + PKT_TX_VLAN_PKT | \ + PKT_TX_IP_CKSUM | \ + PKT_TX_L4_MASK | \ + PKT_TX_TCP_SEG) + +static inline struct rte_mbuf * +rte_rxmbuf_alloc(struct rte_mempool *mp) +{ + struct rte_mbuf *m; + + m = __rte_mbuf_raw_alloc(mp); + __rte_mbuf_sanity_check_raw(m, 0); + return m; +} + +/** + * Structure associated with each descriptor of the RX ring of a RX queue. + */ +struct igb_rx_entry { + struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */ +}; + +/** + * Structure associated with each descriptor of the TX ring of a TX queue. + */ +struct igb_tx_entry { + struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */ + uint16_t next_id; /**< Index of next descriptor in ring. */ + uint16_t last_id; /**< Index of last scattered descriptor. */ +}; + +/** + * Structure associated with each RX queue. + */ +struct igb_rx_queue { + struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */ + volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */ + uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */ + volatile uint32_t *rdt_reg_addr; /**< RDT register address. */ + volatile uint32_t *rdh_reg_addr; /**< RDH register address. */ + struct igb_rx_entry *sw_ring; /**< address of RX software ring. */ + struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */ + struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */ + uint16_t nb_rx_desc; /**< number of RX descriptors. */ + uint16_t rx_tail; /**< current value of RDT register. */ + uint16_t nb_rx_hold; /**< number of held free RX desc. */ + uint16_t rx_free_thresh; /**< max free RX desc to hold. */ + uint16_t queue_id; /**< RX queue index. */ + uint16_t reg_idx; /**< RX queue register index. */ + uint8_t port_id; /**< Device port identifier. */ + uint8_t pthresh; /**< Prefetch threshold register. */ + uint8_t hthresh; /**< Host threshold register. */ + uint8_t wthresh; /**< Write-back threshold register. 
*/ + uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */ + uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */ +}; + +/** + * Hardware context number + */ +enum igb_advctx_num { + IGB_CTX_0 = 0, /**< CTX0 */ + IGB_CTX_1 = 1, /**< CTX1 */ + IGB_CTX_NUM = 2, /**< CTX_NUM */ +}; + +/** Offload features */ +union igb_tx_offload { + uint64_t data; + struct { + uint64_t l3_len:9; /**< L3 (IP) Header Length. */ + uint64_t l2_len:7; /**< L2 (MAC) Header Length. */ + uint64_t vlan_tci:16; /**< VLAN Tag Control Identifier(CPU order). */ + uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */ + uint64_t tso_segsz:16; /**< TCP TSO segment size. */ + + /* uint64_t unused:8; */ + }; +}; + +/* + * Compare mask for igb_tx_offload.data, + * should be in sync with igb_tx_offload layout. + * */ +#define TX_MACIP_LEN_CMP_MASK 0x000000000000FFFFULL /**< L2L3 header mask. */ +#define TX_VLAN_CMP_MASK 0x00000000FFFF0000ULL /**< Vlan mask. */ +#define TX_TCP_LEN_CMP_MASK 0x000000FF00000000ULL /**< TCP header mask. */ +#define TX_TSO_MSS_CMP_MASK 0x00FFFF0000000000ULL /**< TSO segsz mask. */ +/** Mac + IP + TCP + Mss mask. */ +#define TX_TSO_CMP_MASK \ + (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK) + +/** + * Strucutre to check if new context need be built + */ +struct igb_advctx_info { + uint64_t flags; /**< ol_flags related to context build. */ + /** tx offload: vlan, tso, l2-l3-l4 lengths. */ + union igb_tx_offload tx_offload; + /** compare mask for tx offload. */ + union igb_tx_offload tx_offload_mask; +}; + +/** + * Structure associated with each TX queue. + */ +struct igb_tx_queue { + volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */ + uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */ + struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */ + volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */ + uint32_t txd_type; /**< Device-specific TXD type */ + uint16_t nb_tx_desc; /**< number of TX descriptors. */ + uint16_t tx_tail; /**< Current value of TDT register. */ + uint16_t tx_head; + /**< Index of first used TX descriptor. */ + uint16_t queue_id; /**< TX queue index. */ + uint16_t reg_idx; /**< TX queue register index. */ + uint8_t port_id; /**< Device port identifier. */ + uint8_t pthresh; /**< Prefetch threshold register. */ + uint8_t hthresh; /**< Host threshold register. */ + uint8_t wthresh; /**< Write-back threshold register. */ + uint32_t ctx_curr; + /**< Current used hardware descriptor. */ + uint32_t ctx_start; + /**< Start context position for transmit queue. */ + struct igb_advctx_info ctx_cache[IGB_CTX_NUM]; + /**< Hardware context history.*/ +}; + +#if 1 +#define RTE_PMD_USE_PREFETCH +#endif + +#ifdef RTE_PMD_USE_PREFETCH +#define rte_igb_prefetch(p) rte_prefetch0(p) +#else +#define rte_igb_prefetch(p) do {} while(0) +#endif + +#ifdef RTE_PMD_PACKET_PREFETCH +#define rte_packet_prefetch(p) rte_prefetch1(p) +#else +#define rte_packet_prefetch(p) do {} while(0) +#endif + +/* + * Macro for VMDq feature for 1 GbE NIC. + */ +#define E1000_VMOLR_SIZE (8) +#define IGB_TSO_MAX_HDRLEN (512) +#define IGB_TSO_MAX_MSS (9216) + +/********************************************************************* + * + * TX function + * + **********************************************************************/ + +/* + *There're some limitations in hardware for TCP segmentation offload. We + *should check whether the parameters are valid. 
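The burst TX path that follows consumes the standard mbuf offload fields matched by IGB_TX_OFFLOAD_MASK above. As a hedged illustration (not part of this patch; the literal header lengths assume a plain IPv4/TCP frame without options), a caller would prepare a TSO packet roughly like this:

/* Illustrative sketch only: mbuf offload fields a caller fills in before
 * handing an IPv4/TCP packet to the burst TX function below. */
static void
example_prepare_tso_mbuf(struct rte_mbuf *m)
{
	m->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;
	m->l2_len = 14;        /* Ethernet header */
	m->l3_len = 20;        /* IPv4 header, no options */
	m->l4_len = 20;        /* TCP header, no options */
	m->tso_segsz = 1448;   /* MSS, must not exceed IGB_TSO_MAX_MSS */
}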
+ */ +static inline uint64_t +check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para) +{ + if (!(ol_req & PKT_TX_TCP_SEG)) + return ol_req; + if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len + + ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) { + ol_req &= ~PKT_TX_TCP_SEG; + ol_req |= PKT_TX_TCP_CKSUM; + } + return ol_req; +} + +/* + * Advanced context descriptor are almost same between igb/ixgbe + * This is a separate function, looking for optimization opportunity here + * Rework required to go with the pre-defined values. + */ + +static inline void +igbe_set_xmit_ctx(struct igb_tx_queue* txq, + volatile struct e1000_adv_tx_context_desc *ctx_txd, + uint64_t ol_flags, union igb_tx_offload tx_offload) +{ + uint32_t type_tucmd_mlhl; + uint32_t mss_l4len_idx; + uint32_t ctx_idx, ctx_curr; + uint32_t vlan_macip_lens; + union igb_tx_offload tx_offload_mask; + + ctx_curr = txq->ctx_curr; + ctx_idx = ctx_curr + txq->ctx_start; + + tx_offload_mask.data = 0; + type_tucmd_mlhl = 0; + + /* Specify which HW CTX to upload. */ + mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT); + + if (ol_flags & PKT_TX_VLAN_PKT) + tx_offload_mask.data |= TX_VLAN_CMP_MASK; + + /* check if TCP segmentation required for this packet */ + if (ol_flags & PKT_TX_TCP_SEG) { + /* implies IP cksum in IPv4 */ + if (ol_flags & PKT_TX_IP_CKSUM) + type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 | + E1000_ADVTXD_TUCMD_L4T_TCP | + E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT; + else + type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 | + E1000_ADVTXD_TUCMD_L4T_TCP | + E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT; + + tx_offload_mask.data |= TX_TSO_CMP_MASK; + mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT; + mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT; + } else { /* no TSO, check if hardware checksum is needed */ + if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)) + tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK; + + if (ol_flags & PKT_TX_IP_CKSUM) + type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4; + + switch (ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_UDP_CKSUM: + type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP | + E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT; + mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT; + break; + case PKT_TX_TCP_CKSUM: + type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP | + E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT; + mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT; + break; + case PKT_TX_SCTP_CKSUM: + type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP | + E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT; + mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT; + break; + default: + type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV | + E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT; + break; + } + } + + txq->ctx_cache[ctx_curr].flags = ol_flags; + txq->ctx_cache[ctx_curr].tx_offload.data = + tx_offload_mask.data & tx_offload.data; + txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask; + + ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl); + vlan_macip_lens = (uint32_t)tx_offload.data; + ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens); + ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx); + ctx_txd->seqnum_seed = 0; +} + +/* + * Check which hardware context can be used. Use the existing match + * or create a new context descriptor. 
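A quick worked example of the fallback implemented by check_tso_para() above (a sketch for illustration only; the numeric values are arbitrary): when the combined header length exceeds IGB_TSO_MAX_HDRLEN, the request is silently downgraded from TSO to a plain TCP checksum offload.

/* Illustration only: how check_tso_para() downgrades oversized requests. */
static inline void
example_check_tso_para(void)
{
	union igb_tx_offload ol = { .data = 0 };
	uint64_t req;

	ol.l2_len = 14;
	ol.l3_len = 20;
	ol.l4_len = 20;
	ol.tso_segsz = 1448;
	req = check_tso_para(PKT_TX_TCP_SEG | PKT_TX_IP_CKSUM, ol);
	/* 54 header bytes and a 1448-byte MSS are within the limits,
	 * so PKT_TX_TCP_SEG survives in req. */

	ol.l3_len = 500;	/* 14 + 500 + 20 > IGB_TSO_MAX_HDRLEN (512) */
	req = check_tso_para(PKT_TX_TCP_SEG | PKT_TX_IP_CKSUM, ol);
	/* PKT_TX_TCP_SEG is now cleared and PKT_TX_TCP_CKSUM set instead. */
	(void)req;
}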
+ */ +static inline uint32_t +what_advctx_update(struct igb_tx_queue *txq, uint64_t flags, + union igb_tx_offload tx_offload) +{ + /* If match with the current context */ + if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) && + (txq->ctx_cache[txq->ctx_curr].tx_offload.data == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) { + return txq->ctx_curr; + } + + /* If match with the second context */ + txq->ctx_curr ^= 1; + if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) && + (txq->ctx_cache[txq->ctx_curr].tx_offload.data == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) { + return txq->ctx_curr; + } + + /* Mismatch, use the previous context */ + return IGB_CTX_NUM; +} + +static inline uint32_t +tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags) +{ + static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM}; + static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM}; + uint32_t tmp; + + tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM]; + tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0]; + tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0]; + return tmp; +} + +static inline uint32_t +tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags) +{ + uint32_t cmdtype; + static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE}; + static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE}; + cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0]; + cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0]; + return cmdtype; +} + +uint16_t +eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct igb_tx_queue *txq; + struct igb_tx_entry *sw_ring; + struct igb_tx_entry *txe, *txn; + volatile union e1000_adv_tx_desc *txr; + volatile union e1000_adv_tx_desc *txd; + struct rte_mbuf *tx_pkt; + struct rte_mbuf *m_seg; + uint64_t buf_dma_addr; + uint32_t olinfo_status; + uint32_t cmd_type_len; + uint32_t pkt_len; + uint16_t slen; + uint64_t ol_flags; + uint16_t tx_end; + uint16_t tx_id; + uint16_t tx_last; + uint16_t nb_tx; + uint64_t tx_ol_req; + uint32_t new_ctx = 0; + uint32_t ctx = 0; + union igb_tx_offload tx_offload = {0}; + + txq = tx_queue; + sw_ring = txq->sw_ring; + txr = txq->tx_ring; + tx_id = txq->tx_tail; + txe = &sw_ring[tx_id]; + + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + tx_pkt = *tx_pkts++; + pkt_len = tx_pkt->pkt_len; + + RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf); + + /* + * The number of descriptors that must be allocated for a + * packet is the number of segments of that packet, plus 1 + * Context Descriptor for the VLAN Tag Identifier, if any. + * Determine the last TX descriptor to allocate in the TX ring + * for the packet, starting from the current position (tx_id) + * in the ring. + */ + tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1); + + ol_flags = tx_pkt->ol_flags; + tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK; + + /* If a Context Descriptor need be built . 
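 * (Worked example of the accounting above: a three-segment packet that
 * also needs a VLAN/TSO context uses four descriptors in total, one
 * advanced context descriptor plus three data descriptors; tx_last is
 * advanced by new_ctx below only when no cached context already
 * matches.)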
*/ + if (tx_ol_req) { + tx_offload.l2_len = tx_pkt->l2_len; + tx_offload.l3_len = tx_pkt->l3_len; + tx_offload.l4_len = tx_pkt->l4_len; + tx_offload.vlan_tci = tx_pkt->vlan_tci; + tx_offload.tso_segsz = tx_pkt->tso_segsz; + tx_ol_req = check_tso_para(tx_ol_req, tx_offload); + + ctx = what_advctx_update(txq, tx_ol_req, tx_offload); + /* Only allocate context descriptor if required*/ + new_ctx = (ctx == IGB_CTX_NUM); + ctx = txq->ctx_curr + txq->ctx_start; + tx_last = (uint16_t) (tx_last + new_ctx); + } + if (tx_last >= txq->nb_tx_desc) + tx_last = (uint16_t) (tx_last - txq->nb_tx_desc); + + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u" + " tx_first=%u tx_last=%u", + (unsigned) txq->port_id, + (unsigned) txq->queue_id, + (unsigned) pkt_len, + (unsigned) tx_id, + (unsigned) tx_last); + + /* + * Check if there are enough free descriptors in the TX ring + * to transmit the next packet. + * This operation is based on the two following rules: + * + * 1- Only check that the last needed TX descriptor can be + * allocated (by construction, if that descriptor is free, + * all intermediate ones are also free). + * + * For this purpose, the index of the last TX descriptor + * used for a packet (the "last descriptor" of a packet) + * is recorded in the TX entries (the last one included) + * that are associated with all TX descriptors allocated + * for that packet. + * + * 2- Avoid to allocate the last free TX descriptor of the + * ring, in order to never set the TDT register with the + * same value stored in parallel by the NIC in the TDH + * register, which makes the TX engine of the NIC enter + * in a deadlock situation. + * + * By extension, avoid to allocate a free descriptor that + * belongs to the last set of free descriptors allocated + * to the same packet previously transmitted. + */ + + /* + * The "last descriptor" of the previously sent packet, if any, + * which used the last descriptor to allocate. + */ + tx_end = sw_ring[tx_last].last_id; + + /* + * The next descriptor following that "last descriptor" in the + * ring. + */ + tx_end = sw_ring[tx_end].next_id; + + /* + * The "last descriptor" associated with that next descriptor. + */ + tx_end = sw_ring[tx_end].last_id; + + /* + * Check that this descriptor is free. + */ + if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) { + if (nb_tx == 0) + return 0; + goto end_of_tx; + } + + /* + * Set common flags of all TX Data Descriptors. 
+ * + * The following bits must be set in all Data Descriptors: + * - E1000_ADVTXD_DTYP_DATA + * - E1000_ADVTXD_DCMD_DEXT + * + * The following bits must be set in the first Data Descriptor + * and are ignored in the other ones: + * - E1000_ADVTXD_DCMD_IFCS + * - E1000_ADVTXD_MAC_1588 + * - E1000_ADVTXD_DCMD_VLE + * + * The following bits must only be set in the last Data + * Descriptor: + * - E1000_TXD_CMD_EOP + * + * The following bits can be set in any Data Descriptor, but + * are only set in the last Data Descriptor: + * - E1000_TXD_CMD_RS + */ + cmd_type_len = txq->txd_type | + E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT; + if (tx_ol_req & PKT_TX_TCP_SEG) + pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len); + olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT); +#if defined(RTE_LIBRTE_IEEE1588) + if (ol_flags & PKT_TX_IEEE1588_TMST) + cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP; +#endif + if (tx_ol_req) { + /* Setup TX Advanced context descriptor if required */ + if (new_ctx) { + volatile struct e1000_adv_tx_context_desc * + ctx_txd; + + ctx_txd = (volatile struct + e1000_adv_tx_context_desc *) + &txr[tx_id]; + + txn = &sw_ring[txe->next_id]; + RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf); + + if (txe->mbuf != NULL) { + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = NULL; + } + + igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload); + + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + } + + /* Setup the TX Advanced Data Descriptor */ + cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req); + olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req); + olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT); + } + + m_seg = tx_pkt; + do { + txn = &sw_ring[txe->next_id]; + txd = &txr[tx_id]; + + if (txe->mbuf != NULL) + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = m_seg; + + /* + * Set up transmit descriptor. + */ + slen = (uint16_t) m_seg->data_len; + buf_dma_addr = rte_mbuf_data_dma_addr(m_seg); + txd->read.buffer_addr = + rte_cpu_to_le_64(buf_dma_addr); + txd->read.cmd_type_len = + rte_cpu_to_le_32(cmd_type_len | slen); + txd->read.olinfo_status = + rte_cpu_to_le_32(olinfo_status); + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + m_seg = m_seg->next; + } while (m_seg != NULL); + + /* + * The last packet data descriptor needs End Of Packet (EOP) + * and Report Status (RS). + */ + txd->read.cmd_type_len |= + rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS); + } + end_of_tx: + rte_wmb(); + + /* + * Set the Transmit Descriptor Tail (TDT). 
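 * A single MMIO write after the rte_wmb() above publishes all of the
 * descriptors filled in this burst, so a 32-packet burst costs one
 * tail-register update rather than one per packet.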
+ */ + E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id); + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u", + (unsigned) txq->port_id, (unsigned) txq->queue_id, + (unsigned) tx_id, (unsigned) nb_tx); + txq->tx_tail = tx_id; + + return nb_tx; +} + +/********************************************************************* + * + * RX functions + * + **********************************************************************/ +#define IGB_PACKET_TYPE_IPV4 0X01 +#define IGB_PACKET_TYPE_IPV4_TCP 0X11 +#define IGB_PACKET_TYPE_IPV4_UDP 0X21 +#define IGB_PACKET_TYPE_IPV4_SCTP 0X41 +#define IGB_PACKET_TYPE_IPV4_EXT 0X03 +#define IGB_PACKET_TYPE_IPV4_EXT_SCTP 0X43 +#define IGB_PACKET_TYPE_IPV6 0X04 +#define IGB_PACKET_TYPE_IPV6_TCP 0X14 +#define IGB_PACKET_TYPE_IPV6_UDP 0X24 +#define IGB_PACKET_TYPE_IPV6_EXT 0X0C +#define IGB_PACKET_TYPE_IPV6_EXT_TCP 0X1C +#define IGB_PACKET_TYPE_IPV6_EXT_UDP 0X2C +#define IGB_PACKET_TYPE_IPV4_IPV6 0X05 +#define IGB_PACKET_TYPE_IPV4_IPV6_TCP 0X15 +#define IGB_PACKET_TYPE_IPV4_IPV6_UDP 0X25 +#define IGB_PACKET_TYPE_IPV4_IPV6_EXT 0X0D +#define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D +#define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D +#define IGB_PACKET_TYPE_MAX 0X80 +#define IGB_PACKET_TYPE_MASK 0X7F +#define IGB_PACKET_TYPE_SHIFT 0X04 +static inline uint32_t +igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info) +{ + static const uint32_t + ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = { + [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4, + [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT, + [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6, + [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6, + [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT, + [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT, + [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, + [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP, + [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, + [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP, + [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, + [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP, + [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP, + [IGB_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, + [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP, + [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, + [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP, + [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP, + }; + if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF)) + return RTE_PTYPE_UNKNOWN; + + pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK; + + return 
ptype_table[pkt_info]; +} + +static inline uint64_t +rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs) +{ + uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH; + +#if defined(RTE_LIBRTE_IEEE1588) + static uint32_t ip_pkt_etqf_map[8] = { + 0, 0, 0, PKT_RX_IEEE1588_PTP, + 0, 0, 0, 0, + }; + + struct rte_eth_dev dev = rte_eth_devices[rxq->port_id]; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private); + + /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */ + if (hw->mac.type == e1000_i210) + pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07]; + else + pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07]; +#else + RTE_SET_USED(rxq); +#endif + + return pkt_flags; +} + +static inline uint64_t +rx_desc_status_to_pkt_flags(uint32_t rx_status) +{ + uint64_t pkt_flags; + + /* Check if VLAN present */ + pkt_flags = (rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0; + +#if defined(RTE_LIBRTE_IEEE1588) + if (rx_status & E1000_RXD_STAT_TMST) + pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST; +#endif + return pkt_flags; +} + +static inline uint64_t +rx_desc_error_to_pkt_flags(uint32_t rx_status) +{ + /* + * Bit 30: IPE, IPv4 checksum error + * Bit 29: L4I, L4I integrity error + */ + + static uint64_t error_to_pkt_flags_map[4] = { + 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD, + PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD + }; + return error_to_pkt_flags_map[(rx_status >> + E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK]; +} + +uint16_t +eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct igb_rx_queue *rxq; + volatile union e1000_adv_rx_desc *rx_ring; + volatile union e1000_adv_rx_desc *rxdp; + struct igb_rx_entry *sw_ring; + struct igb_rx_entry *rxe; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + union e1000_adv_rx_desc rxd; + uint64_t dma_addr; + uint32_t staterr; + uint32_t hlen_type_rss; + uint16_t pkt_len; + uint16_t rx_id; + uint16_t nb_rx; + uint16_t nb_hold; + uint64_t pkt_flags; + + nb_rx = 0; + nb_hold = 0; + rxq = rx_queue; + rx_id = rxq->rx_tail; + rx_ring = rxq->rx_ring; + sw_ring = rxq->sw_ring; + while (nb_rx < nb_pkts) { + /* + * The order of operations here is important as the DD status + * bit must not be read after any other descriptor fields. + * rx_ring and rxdp are pointing to volatile data so the order + * of accesses cannot be reordered by the compiler. If they were + * not volatile, they could be reordered which could lead to + * using invalid descriptor fields when read from rxd. + */ + rxdp = &rx_ring[rx_id]; + staterr = rxdp->wb.upper.status_error; + if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD))) + break; + rxd = *rxdp; + + /* + * End of packet. + * + * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is + * likely to be invalid and to be dropped by the various + * validation checks performed by the network stack. + * + * Allocate a new mbuf to replenish the RX ring descriptor. + * If the allocation fails: + * - arrange for that RX descriptor to be the first one + * being parsed the next time the receive function is + * invoked [on the same queue]. + * + * - Stop parsing the RX ring and return immediately. + * + * This policy do not drop the packet received in the RX + * descriptor for which the allocation of a new mbuf failed. + * Thus, it allows that packet to be later retrieved if + * mbuf have been freed in the mean time. 
+ * As a side effect, holding RX descriptors instead of + * systematically giving them back to the NIC may lead to + * RX ring exhaustion situations. + * However, the NIC can gracefully prevent such situations + * to happen by sending specific "back-pressure" flow control + * frames to its peer(s). + */ + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u " + "staterr=0x%x pkt_len=%u", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) staterr, + (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length)); + + nmb = rte_rxmbuf_alloc(rxq->mb_pool); + if (nmb == NULL) { + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " + "queue_id=%u", (unsigned) rxq->port_id, + (unsigned) rxq->queue_id); + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; + break; + } + + nb_hold++; + rxe = &sw_ring[rx_id]; + rx_id++; + if (rx_id == rxq->nb_rx_desc) + rx_id = 0; + + /* Prefetch next mbuf while processing current one. */ + rte_igb_prefetch(sw_ring[rx_id].mbuf); + + /* + * When next RX descriptor is on a cache-line boundary, + * prefetch the next 4 RX descriptors and the next 8 pointers + * to mbufs. + */ + if ((rx_id & 0x3) == 0) { + rte_igb_prefetch(&rx_ring[rx_id]); + rte_igb_prefetch(&sw_ring[rx_id]); + } + + rxm = rxe->mbuf; + rxe->mbuf = nmb; + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb)); + rxdp->read.hdr_addr = 0; + rxdp->read.pkt_addr = dma_addr; + + /* + * Initialize the returned mbuf. + * 1) setup generic mbuf fields: + * - number of segments, + * - next segment, + * - packet length, + * - RX port identifier. + * 2) integrate hardware offload data, if any: + * - RSS flag & hash, + * - IP checksum flag, + * - VLAN TCI, if any, + * - error flags. + */ + pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) - + rxq->crc_len); + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off); + rxm->nb_segs = 1; + rxm->next = NULL; + rxm->pkt_len = pkt_len; + rxm->data_len = pkt_len; + rxm->port = rxq->port_id; + + rxm->hash.rss = rxd.wb.lower.hi_dword.rss; + hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data); + /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */ + rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan); + + pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss); + pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr); + pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); + rxm->ol_flags = pkt_flags; + rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower. + lo_dword.hs_rss.pkt_info); + + /* + * Store the mbuf address into the next entry of the array + * of returned packets. + */ + rx_pkts[nb_rx++] = rxm; + } + rxq->rx_tail = rx_id; + + /* + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the Receive Descriptor Tail (RDT) + * register. + * Update the RDT with the value of the last processed RX descriptor + * minus 1, to guarantee that the RDT register is never equal to the + * RDH register, which creates a "full" ring situtation from the + * hardware point of view... + */ + nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold); + if (nb_hold > rxq->rx_free_thresh) { + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u " + "nb_hold=%u nb_rx=%u", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) nb_hold, + (unsigned) nb_rx); + rx_id = (uint16_t) ((rx_id == 0) ? 
+ (rxq->nb_rx_desc - 1) : (rx_id - 1)); + E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + return nb_rx; +} + +uint16_t +eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct igb_rx_queue *rxq; + volatile union e1000_adv_rx_desc *rx_ring; + volatile union e1000_adv_rx_desc *rxdp; + struct igb_rx_entry *sw_ring; + struct igb_rx_entry *rxe; + struct rte_mbuf *first_seg; + struct rte_mbuf *last_seg; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + union e1000_adv_rx_desc rxd; + uint64_t dma; /* Physical address of mbuf data buffer */ + uint32_t staterr; + uint32_t hlen_type_rss; + uint16_t rx_id; + uint16_t nb_rx; + uint16_t nb_hold; + uint16_t data_len; + uint64_t pkt_flags; + + nb_rx = 0; + nb_hold = 0; + rxq = rx_queue; + rx_id = rxq->rx_tail; + rx_ring = rxq->rx_ring; + sw_ring = rxq->sw_ring; + + /* + * Retrieve RX context of current packet, if any. + */ + first_seg = rxq->pkt_first_seg; + last_seg = rxq->pkt_last_seg; + + while (nb_rx < nb_pkts) { + next_desc: + /* + * The order of operations here is important as the DD status + * bit must not be read after any other descriptor fields. + * rx_ring and rxdp are pointing to volatile data so the order + * of accesses cannot be reordered by the compiler. If they were + * not volatile, they could be reordered which could lead to + * using invalid descriptor fields when read from rxd. + */ + rxdp = &rx_ring[rx_id]; + staterr = rxdp->wb.upper.status_error; + if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD))) + break; + rxd = *rxdp; + + /* + * Descriptor done. + * + * Allocate a new mbuf to replenish the RX ring descriptor. + * If the allocation fails: + * - arrange for that RX descriptor to be the first one + * being parsed the next time the receive function is + * invoked [on the same queue]. + * + * - Stop parsing the RX ring and return immediately. + * + * This policy does not drop the packet received in the RX + * descriptor for which the allocation of a new mbuf failed. + * Thus, it allows that packet to be later retrieved if + * mbuf have been freed in the mean time. + * As a side effect, holding RX descriptors instead of + * systematically giving them back to the NIC may lead to + * RX ring exhaustion situations. + * However, the NIC can gracefully prevent such situations + * to happen by sending specific "back-pressure" flow control + * frames to its peer(s). + */ + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u " + "staterr=0x%x data_len=%u", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) staterr, + (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length)); + + nmb = rte_rxmbuf_alloc(rxq->mb_pool); + if (nmb == NULL) { + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " + "queue_id=%u", (unsigned) rxq->port_id, + (unsigned) rxq->queue_id); + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; + break; + } + + nb_hold++; + rxe = &sw_ring[rx_id]; + rx_id++; + if (rx_id == rxq->nb_rx_desc) + rx_id = 0; + + /* Prefetch next mbuf while processing current one. */ + rte_igb_prefetch(sw_ring[rx_id].mbuf); + + /* + * When next RX descriptor is on a cache-line boundary, + * prefetch the next 4 RX descriptors and the next 8 pointers + * to mbufs. + */ + if ((rx_id & 0x3) == 0) { + rte_igb_prefetch(&rx_ring[rx_id]); + rte_igb_prefetch(&sw_ring[rx_id]); + } + + /* + * Update RX descriptor with the physical address of the new + * data buffer of the new allocated mbuf. 
+ */ + rxm = rxe->mbuf; + rxe->mbuf = nmb; + dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb)); + rxdp->read.pkt_addr = dma; + rxdp->read.hdr_addr = 0; + + /* + * Set data length & data buffer address of mbuf. + */ + data_len = rte_le_to_cpu_16(rxd.wb.upper.length); + rxm->data_len = data_len; + rxm->data_off = RTE_PKTMBUF_HEADROOM; + + /* + * If this is the first buffer of the received packet, + * set the pointer to the first mbuf of the packet and + * initialize its context. + * Otherwise, update the total length and the number of segments + * of the current scattered packet, and update the pointer to + * the last mbuf of the current packet. + */ + if (first_seg == NULL) { + first_seg = rxm; + first_seg->pkt_len = data_len; + first_seg->nb_segs = 1; + } else { + first_seg->pkt_len += data_len; + first_seg->nb_segs++; + last_seg->next = rxm; + } + + /* + * If this is not the last buffer of the received packet, + * update the pointer to the last mbuf of the current scattered + * packet and continue to parse the RX ring. + */ + if (! (staterr & E1000_RXD_STAT_EOP)) { + last_seg = rxm; + goto next_desc; + } + + /* + * This is the last buffer of the received packet. + * If the CRC is not stripped by the hardware: + * - Subtract the CRC length from the total packet length. + * - If the last buffer only contains the whole CRC or a part + * of it, free the mbuf associated to the last buffer. + * If part of the CRC is also contained in the previous + * mbuf, subtract the length of that CRC part from the + * data length of the previous mbuf. + */ + rxm->next = NULL; + if (unlikely(rxq->crc_len > 0)) { + first_seg->pkt_len -= ETHER_CRC_LEN; + if (data_len <= ETHER_CRC_LEN) { + rte_pktmbuf_free_seg(rxm); + first_seg->nb_segs--; + last_seg->data_len = (uint16_t) + (last_seg->data_len - + (ETHER_CRC_LEN - data_len)); + last_seg->next = NULL; + } else + rxm->data_len = + (uint16_t) (data_len - ETHER_CRC_LEN); + } + + /* + * Initialize the first mbuf of the returned packet: + * - RX port identifier, + * - hardware offload data, if any: + * - RSS flag & hash, + * - IP checksum flag, + * - VLAN TCI, if any, + * - error flags. + */ + first_seg->port = rxq->port_id; + first_seg->hash.rss = rxd.wb.lower.hi_dword.rss; + + /* + * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is + * set in the pkt_flags field. + */ + first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan); + hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data); + pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss); + pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr); + pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); + first_seg->ol_flags = pkt_flags; + first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb. + lower.lo_dword.hs_rss.pkt_info); + + /* Prefetch data of first segment, if configured to do so. */ + rte_packet_prefetch((char *)first_seg->buf_addr + + first_seg->data_off); + + /* + * Store the mbuf address into the next entry of the array + * of returned packets. + */ + rx_pkts[nb_rx++] = first_seg; + + /* + * Setup receipt context for a new packet. + */ + first_seg = NULL; + } + + /* + * Record index of the next RX descriptor to probe. + */ + rxq->rx_tail = rx_id; + + /* + * Save receive context. + */ + rxq->pkt_first_seg = first_seg; + rxq->pkt_last_seg = last_seg; + + /* + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the Receive Descriptor Tail (RDT) + * register. 
+ * Update the RDT with the value of the last processed RX descriptor + * minus 1, to guarantee that the RDT register is never equal to the + * RDH register, which creates a "full" ring situtation from the + * hardware point of view... + */ + nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold); + if (nb_hold > rxq->rx_free_thresh) { + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u " + "nb_hold=%u nb_rx=%u", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) nb_hold, + (unsigned) nb_rx); + rx_id = (uint16_t) ((rx_id == 0) ? + (rxq->nb_rx_desc - 1) : (rx_id - 1)); + E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + return nb_rx; +} + +/* + * Maximum number of Ring Descriptors. + * + * Since RDLEN/TDLEN should be multiple of 128bytes, the number of ring + * desscriptors should meet the following condition: + * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0 + */ + +static void +igb_tx_queue_release_mbufs(struct igb_tx_queue *txq) +{ + unsigned i; + + if (txq->sw_ring != NULL) { + for (i = 0; i < txq->nb_tx_desc; i++) { + if (txq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + } + } +} + +static void +igb_tx_queue_release(struct igb_tx_queue *txq) +{ + if (txq != NULL) { + igb_tx_queue_release_mbufs(txq); + rte_free(txq->sw_ring); + rte_free(txq); + } +} + +void +eth_igb_tx_queue_release(void *txq) +{ + igb_tx_queue_release(txq); +} + +static void +igb_reset_tx_queue_stat(struct igb_tx_queue *txq) +{ + txq->tx_head = 0; + txq->tx_tail = 0; + txq->ctx_curr = 0; + memset((void*)&txq->ctx_cache, 0, + IGB_CTX_NUM * sizeof(struct igb_advctx_info)); +} + +static void +igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev) +{ + static const union e1000_adv_tx_desc zeroed_desc = {{0}}; + struct igb_tx_entry *txe = txq->sw_ring; + uint16_t i, prev; + struct e1000_hw *hw; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + /* Zero out HW ring memory */ + for (i = 0; i < txq->nb_tx_desc; i++) { + txq->tx_ring[i] = zeroed_desc; + } + + /* Initialize ring entries */ + prev = (uint16_t)(txq->nb_tx_desc - 1); + for (i = 0; i < txq->nb_tx_desc; i++) { + volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]); + + txd->wb.status = E1000_TXD_STAT_DD; + txe[i].mbuf = NULL; + txe[i].last_id = i; + txe[prev].next_id = i; + prev = i; + } + + txq->txd_type = E1000_ADVTXD_DTYP_DATA; + /* 82575 specific, each tx queue will use 2 hw contexts */ + if (hw->mac.type == e1000_82575) + txq->ctx_start = txq->queue_id * IGB_CTX_NUM; + + igb_reset_tx_queue_stat(txq); +} + +int +eth_igb_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + const struct rte_memzone *tz; + struct igb_tx_queue *txq; + struct e1000_hw *hw; + uint32_t size; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * Validate number of transmit descriptors. + * It must not exceed hardware maximum, and must be multiple + * of E1000_ALIGN. + */ + if (nb_desc % IGB_TXD_ALIGN != 0 || + (nb_desc > E1000_MAX_RING_DESC) || + (nb_desc < E1000_MIN_RING_DESC)) { + return -EINVAL; + } + + /* + * The tx_free_thresh and tx_rs_thresh values are not used in the 1G + * driver. 
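For reference, applications reach this setup path through the ethdev API. The following is a hedged, application-side sketch (queue index, descriptor count and threshold values are illustrative only):

/* Illustration only: an application-side call that ends up in
 * eth_igb_tx_queue_setup(); all numeric values are arbitrary. */
static int
example_setup_tx_queue(uint8_t port_id, unsigned int socket_id)
{
	struct rte_eth_txconf txconf = {
		.tx_thresh = {
			.pthresh = 8,
			.hthresh = 1,
			.wthresh = 16,	/* 4, 8 or 16 is suggested below for 1G */
		},
		/* tx_free_thresh and tx_rs_thresh are ignored by this PMD */
	};

	/* 512 descriptors: a multiple of IGB_TXD_ALIGN inside the HW limits */
	return rte_eth_tx_queue_setup(port_id, 0, 512, socket_id, &txconf);
}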
+ */ + if (tx_conf->tx_free_thresh != 0) + PMD_INIT_LOG(INFO, "The tx_free_thresh parameter is not " + "used for the 1G driver."); + if (tx_conf->tx_rs_thresh != 0) + PMD_INIT_LOG(INFO, "The tx_rs_thresh parameter is not " + "used for the 1G driver."); + if (tx_conf->tx_thresh.wthresh == 0 && hw->mac.type != e1000_82576) + PMD_INIT_LOG(INFO, "To improve 1G driver performance, " + "consider setting the TX WTHRESH value to 4, 8, " + "or 16."); + + /* Free memory prior to re-allocation if needed */ + if (dev->data->tx_queues[queue_idx] != NULL) { + igb_tx_queue_release(dev->data->tx_queues[queue_idx]); + dev->data->tx_queues[queue_idx] = NULL; + } + + /* First allocate the tx queue data structure */ + txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue), + RTE_CACHE_LINE_SIZE); + if (txq == NULL) + return -ENOMEM; + + /* + * Allocate TX ring hardware descriptors. A memzone large enough to + * handle the maximum ring size is allocated in order to allow for + * resizing in later calls to the queue setup function. + */ + size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC; + tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size, + E1000_ALIGN, socket_id); + if (tz == NULL) { + igb_tx_queue_release(txq); + return -ENOMEM; + } + + txq->nb_tx_desc = nb_desc; + txq->pthresh = tx_conf->tx_thresh.pthresh; + txq->hthresh = tx_conf->tx_thresh.hthresh; + txq->wthresh = tx_conf->tx_thresh.wthresh; + if (txq->wthresh > 0 && hw->mac.type == e1000_82576) + txq->wthresh = 1; + txq->queue_id = queue_idx; + txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ? + queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx); + txq->port_id = dev->data->port_id; + + txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx)); + txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr); + + txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr; + /* Allocate software ring */ + txq->sw_ring = rte_zmalloc("txq->sw_ring", + sizeof(struct igb_tx_entry) * nb_desc, + RTE_CACHE_LINE_SIZE); + if (txq->sw_ring == NULL) { + igb_tx_queue_release(txq); + return -ENOMEM; + } + PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64, + txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); + + igb_reset_tx_queue(txq, dev); + dev->tx_pkt_burst = eth_igb_xmit_pkts; + dev->data->tx_queues[queue_idx] = txq; + + return 0; +} + +static void +igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq) +{ + unsigned i; + + if (rxq->sw_ring != NULL) { + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (rxq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + rxq->sw_ring[i].mbuf = NULL; + } + } + } +} + +static void +igb_rx_queue_release(struct igb_rx_queue *rxq) +{ + if (rxq != NULL) { + igb_rx_queue_release_mbufs(rxq); + rte_free(rxq->sw_ring); + rte_free(rxq); + } +} + +void +eth_igb_rx_queue_release(void *rxq) +{ + igb_rx_queue_release(rxq); +} + +static void +igb_reset_rx_queue(struct igb_rx_queue *rxq) +{ + static const union e1000_adv_rx_desc zeroed_desc = {{0}}; + unsigned i; + + /* Zero out HW ring memory */ + for (i = 0; i < rxq->nb_rx_desc; i++) { + rxq->rx_ring[i] = zeroed_desc; + } + + rxq->rx_tail = 0; + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; +} + +int +eth_igb_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + const struct rte_memzone *rz; + struct igb_rx_queue *rxq; + struct e1000_hw *hw; + unsigned int 
size; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * Validate number of receive descriptors. + * It must not exceed hardware maximum, and must be multiple + * of E1000_ALIGN. + */ + if (nb_desc % IGB_RXD_ALIGN != 0 || + (nb_desc > E1000_MAX_RING_DESC) || + (nb_desc < E1000_MIN_RING_DESC)) { + return -EINVAL; + } + + /* Free memory prior to re-allocation if needed */ + if (dev->data->rx_queues[queue_idx] != NULL) { + igb_rx_queue_release(dev->data->rx_queues[queue_idx]); + dev->data->rx_queues[queue_idx] = NULL; + } + + /* First allocate the RX queue data structure. */ + rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue), + RTE_CACHE_LINE_SIZE); + if (rxq == NULL) + return -ENOMEM; + rxq->mb_pool = mp; + rxq->nb_rx_desc = nb_desc; + rxq->pthresh = rx_conf->rx_thresh.pthresh; + rxq->hthresh = rx_conf->rx_thresh.hthresh; + rxq->wthresh = rx_conf->rx_thresh.wthresh; + if (rxq->wthresh > 0 && + (hw->mac.type == e1000_82576 || hw->mac.type == e1000_vfadapt_i350)) + rxq->wthresh = 1; + rxq->drop_en = rx_conf->rx_drop_en; + rxq->rx_free_thresh = rx_conf->rx_free_thresh; + rxq->queue_id = queue_idx; + rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ? + queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx); + rxq->port_id = dev->data->port_id; + rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 : + ETHER_CRC_LEN); + + /* + * Allocate RX ring hardware descriptors. A memzone large enough to + * handle the maximum ring size is allocated in order to allow for + * resizing in later calls to the queue setup function. + */ + size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC; + rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, + E1000_ALIGN, socket_id); + if (rz == NULL) { + igb_rx_queue_release(rxq); + return -ENOMEM; + } + rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx)); + rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx)); + rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr); + rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr; + + /* Allocate software ring. 
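Likewise on the receive side, this function is reached through rte_eth_rx_queue_setup(). A minimal hedged sketch of the application-side call, including the mempool that ends up in rxq->mb_pool (pool and ring sizes are illustrative only):

/* Illustration only: application-side RX queue setup; the mempool created
 * here is the one this function stores in rxq->mb_pool. */
static int
example_setup_rx_queue(uint8_t port_id, unsigned int socket_id)
{
	struct rte_mempool *mb_pool;

	mb_pool = rte_pktmbuf_pool_create("rx_pool", 4096, 256, 0,
			RTE_MBUF_DEFAULT_BUF_SIZE, socket_id);
	if (mb_pool == NULL)
		return -1;

	/* 512 descriptors: a multiple of IGB_RXD_ALIGN inside the HW limits;
	 * a NULL rx_conf selects the driver defaults. */
	return rte_eth_rx_queue_setup(port_id, 0, 512, socket_id,
			NULL, mb_pool);
}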
*/ + rxq->sw_ring = rte_zmalloc("rxq->sw_ring", + sizeof(struct igb_rx_entry) * nb_desc, + RTE_CACHE_LINE_SIZE); + if (rxq->sw_ring == NULL) { + igb_rx_queue_release(rxq); + return -ENOMEM; + } + PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64, + rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr); + + dev->data->rx_queues[queue_idx] = rxq; + igb_reset_rx_queue(rxq); + + return 0; +} + +uint32_t +eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ +#define IGB_RXQ_SCAN_INTERVAL 4 + volatile union e1000_adv_rx_desc *rxdp; + struct igb_rx_queue *rxq; + uint32_t desc = 0; + + if (rx_queue_id >= dev->data->nb_rx_queues) { + PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id); + return 0; + } + + rxq = dev->data->rx_queues[rx_queue_id]; + rxdp = &(rxq->rx_ring[rxq->rx_tail]); + + while ((desc < rxq->nb_rx_desc) && + (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) { + desc += IGB_RXQ_SCAN_INTERVAL; + rxdp += IGB_RXQ_SCAN_INTERVAL; + if (rxq->rx_tail + desc >= rxq->nb_rx_desc) + rxdp = &(rxq->rx_ring[rxq->rx_tail + + desc - rxq->nb_rx_desc]); + } + + return 0; +} + +int +eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset) +{ + volatile union e1000_adv_rx_desc *rxdp; + struct igb_rx_queue *rxq = rx_queue; + uint32_t desc; + + if (unlikely(offset >= rxq->nb_rx_desc)) + return 0; + desc = rxq->rx_tail + offset; + if (desc >= rxq->nb_rx_desc) + desc -= rxq->nb_rx_desc; + + rxdp = &rxq->rx_ring[desc]; + return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD); +} + +void +igb_dev_clear_queues(struct rte_eth_dev *dev) +{ + uint16_t i; + struct igb_tx_queue *txq; + struct igb_rx_queue *rxq; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (txq != NULL) { + igb_tx_queue_release_mbufs(txq); + igb_reset_tx_queue(txq, dev); + } + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + if (rxq != NULL) { + igb_rx_queue_release_mbufs(rxq); + igb_reset_rx_queue(rxq); + } + } +} + +void +igb_dev_free_queues(struct rte_eth_dev *dev) +{ + uint16_t i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + eth_igb_rx_queue_release(dev->data->rx_queues[i]); + dev->data->rx_queues[i] = NULL; + } + dev->data->nb_rx_queues = 0; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + eth_igb_tx_queue_release(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = NULL; + } + dev->data->nb_tx_queues = 0; +} + +/** + * Receive Side Scaling (RSS). + * See section 7.1.1.7 in the following document: + * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009 + * + * Principles: + * The source and destination IP addresses of the IP header and the source and + * destination ports of TCP/UDP headers, if any, of received packets are hashed + * against a configurable random key to compute a 32-bit RSS hash result. + * The seven (7) LSBs of the 32-bit hash result are used as an index into a + * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit + * RSS output index which is used as the RX queue index where to store the + * received packets. + * The following output is supplied in the RX write-back descriptor: + * - 32-bit result of the Microsoft RSS hash function, + * - 4-bit RSS type field. + */ + +/* + * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet. + * Used as the default key. 
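+ * igb_hw_rss_hash_set() loads this 40-byte key into the ten RSSRK
+ * registers, four bytes per register.
+ *
+ * Worked example (illustrative): a packet whose 32-bit RSS hash is
+ * 0x1234ABCD selects RETA entry 0xABCD & 0x7F = 0x4D (77); with four RX
+ * queues and the round-robin table programmed by igb_rss_configure(),
+ * entry 77 holds queue 77 % 4 = 1, so the packet is steered to queue 1.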
+ */ +static uint8_t rss_intel_key[40] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, + 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, + 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA, +}; + +static void +igb_rss_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + uint32_t mrqc; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + mrqc = E1000_READ_REG(hw, E1000_MRQC); + mrqc &= ~E1000_MRQC_ENABLE_MASK; + E1000_WRITE_REG(hw, E1000_MRQC, mrqc); +} + +static void +igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf) +{ + uint8_t *hash_key; + uint32_t rss_key; + uint32_t mrqc; + uint64_t rss_hf; + uint16_t i; + + hash_key = rss_conf->rss_key; + if (hash_key != NULL) { + /* Fill in RSS hash key */ + for (i = 0; i < 10; i++) { + rss_key = hash_key[(i * 4)]; + rss_key |= hash_key[(i * 4) + 1] << 8; + rss_key |= hash_key[(i * 4) + 2] << 16; + rss_key |= hash_key[(i * 4) + 3] << 24; + E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key); + } + } + + /* Set configured hashing protocols in MRQC register */ + rss_hf = rss_conf->rss_hf; + mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */ + if (rss_hf & ETH_RSS_IPV4) + mrqc |= E1000_MRQC_RSS_FIELD_IPV4; + if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP; + if (rss_hf & ETH_RSS_IPV6) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6; + if (rss_hf & ETH_RSS_IPV6_EX) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX; + if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP; + if (rss_hf & ETH_RSS_IPV6_TCP_EX) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX; + if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; + if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; + if (rss_hf & ETH_RSS_IPV6_UDP_EX) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX; + E1000_WRITE_REG(hw, E1000_MRQC, mrqc); +} + +int +eth_igb_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct e1000_hw *hw; + uint32_t mrqc; + uint64_t rss_hf; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * Before changing anything, first check that the update RSS operation + * does not attempt to disable RSS, if RSS was enabled at + * initialization time, or does not attempt to enable RSS, if RSS was + * disabled at initialization time. 
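+ *
+ * Example (illustrative): a port configured with mq_mode ETH_MQ_RX_NONE
+ * has RSS disabled in MRQC, so a later rte_eth_dev_rss_hash_update()
+ * call with a non-zero rss_hf fails with -EINVAL; likewise an
+ * RSS-enabled port cannot be switched off here by passing rss_hf == 0.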
+ */ + rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL; + mrqc = E1000_READ_REG(hw, E1000_MRQC); + if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */ + if (rss_hf != 0) /* Enable RSS */ + return -(EINVAL); + return 0; /* Nothing to do */ + } + /* RSS enabled */ + if (rss_hf == 0) /* Disable RSS */ + return -(EINVAL); + igb_hw_rss_hash_set(hw, rss_conf); + return 0; +} + +int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct e1000_hw *hw; + uint8_t *hash_key; + uint32_t rss_key; + uint32_t mrqc; + uint64_t rss_hf; + uint16_t i; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + hash_key = rss_conf->rss_key; + if (hash_key != NULL) { + /* Return RSS hash key */ + for (i = 0; i < 10; i++) { + rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i); + hash_key[(i * 4)] = rss_key & 0x000000FF; + hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF; + hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF; + hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF; + } + } + + /* Get RSS functions configured in MRQC register */ + mrqc = E1000_READ_REG(hw, E1000_MRQC); + if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */ + rss_conf->rss_hf = 0; + return 0; + } + rss_hf = 0; + if (mrqc & E1000_MRQC_RSS_FIELD_IPV4) + rss_hf |= ETH_RSS_IPV4; + if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP) + rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; + if (mrqc & E1000_MRQC_RSS_FIELD_IPV6) + rss_hf |= ETH_RSS_IPV6; + if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX) + rss_hf |= ETH_RSS_IPV6_EX; + if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP) + rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; + if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX) + rss_hf |= ETH_RSS_IPV6_TCP_EX; + if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP) + rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; + if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP) + rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; + if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX) + rss_hf |= ETH_RSS_IPV6_UDP_EX; + rss_conf->rss_hf = rss_hf; + return 0; +} + +static void +igb_rss_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_rss_conf rss_conf; + struct e1000_hw *hw; + uint32_t shift; + uint16_t i; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Fill in redirection table. */ + shift = (hw->mac.type == e1000_82575) ? 6 : 0; + for (i = 0; i < 128; i++) { + union e1000_reta { + uint32_t dword; + uint8_t bytes[4]; + } reta; + uint8_t q_idx; + + q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ? + i % dev->data->nb_rx_queues : 0); + reta.bytes[i & 3] = (uint8_t) (q_idx << shift); + if ((i & 3) == 3) + E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword); + } + + /* + * Configure the RSS key and the RSS protocols used to compute + * the RSS hash of input packets. + */ + rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf; + if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) { + igb_rss_disable(dev); + return; + } + if (rss_conf.rss_key == NULL) + rss_conf.rss_key = rss_intel_key; /* Default hash key */ + igb_hw_rss_hash_set(hw, &rss_conf); +} + +/* + * Check if the mac type support VMDq or not. + * Return 1 if it supports, otherwise, return 0. 
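+ * igb_vmdq_rx_hw_configure() below treats a zero return as fatal and
+ * aborts VMDq RX configuration.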
+ */ +static int +igb_is_vmdq_supported(const struct rte_eth_dev *dev) +{ + const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + switch (hw->mac.type) { + case e1000_82576: + case e1000_82580: + case e1000_i350: + return 1; + case e1000_82540: + case e1000_82541: + case e1000_82542: + case e1000_82543: + case e1000_82544: + case e1000_82545: + case e1000_82546: + case e1000_82547: + case e1000_82571: + case e1000_82572: + case e1000_82573: + case e1000_82574: + case e1000_82583: + case e1000_i210: + case e1000_i211: + default: + PMD_INIT_LOG(ERR, "Cannot support VMDq feature"); + return 0; + } +} + +static int +igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_vmdq_rx_conf *cfg; + struct e1000_hw *hw; + uint32_t mrqc, vt_ctl, vmolr, rctl; + int i; + + PMD_INIT_FUNC_TRACE(); + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf; + + /* Check if mac type can support VMDq, return value of 0 means NOT support */ + if (igb_is_vmdq_supported(dev) == 0) + return -1; + + igb_rss_disable(dev); + + /* RCTL: eanble VLAN filter */ + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl |= E1000_RCTL_VFE; + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + /* MRQC: enable vmdq */ + mrqc = E1000_READ_REG(hw, E1000_MRQC); + mrqc |= E1000_MRQC_ENABLE_VMDQ; + E1000_WRITE_REG(hw, E1000_MRQC, mrqc); + + /* VTCTL: pool selection according to VLAN tag */ + vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL); + if (cfg->enable_default_pool) + vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT); + vt_ctl |= E1000_VT_CTL_IGNORE_MAC; + E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl); + + for (i = 0; i < E1000_VMOLR_SIZE; i++) { + vmolr = E1000_READ_REG(hw, E1000_VMOLR(i)); + vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE | + E1000_VMOLR_ROPE | E1000_VMOLR_BAM | + E1000_VMOLR_MPME); + + if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG) + vmolr |= E1000_VMOLR_AUPE; + if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC) + vmolr |= E1000_VMOLR_ROMPE; + if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC) + vmolr |= E1000_VMOLR_ROPE; + if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST) + vmolr |= E1000_VMOLR_BAM; + if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST) + vmolr |= E1000_VMOLR_MPME; + + E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr); + } + + /* + * VMOLR: set STRVLAN as 1 if IGMAC in VTCTL is set as 1 + * Both 82576 and 82580 support it + */ + if (hw->mac.type != e1000_i350) { + for (i = 0; i < E1000_VMOLR_SIZE; i++) { + vmolr = E1000_READ_REG(hw, E1000_VMOLR(i)); + vmolr |= E1000_VMOLR_STRVLAN; + E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr); + } + } + + /* VFTA - enable all vlan filters */ + for (i = 0; i < IGB_VFTA_SIZE; i++) + E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX); + + /* VFRE: 8 pools enabling for rx, both 82576 and i350 support it */ + if (hw->mac.type != e1000_82580) + E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK); + + /* + * RAH/RAL - allow pools to read specific mac addresses + * In this case, all pools should be able to read from mac addr 0 + */ + E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX)); + E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX); + + /* VLVF: set up filters for vlan tags as configured */ + for (i = 0; i < cfg->nb_pool_maps; i++) { + /* set vlan id in VF register and set the valid bit */ + E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \ + (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \ + ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \ + E1000_VLVF_POOLSEL_MASK))); + } + + 
E1000_WRITE_FLUSH(hw); + + return 0; +} + + +/********************************************************************* + * + * Enable receive unit. + * + **********************************************************************/ + +static int +igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq) +{ + struct igb_rx_entry *rxe = rxq->sw_ring; + uint64_t dma_addr; + unsigned i; + + /* Initialize software ring entries. */ + for (i = 0; i < rxq->nb_rx_desc; i++) { + volatile union e1000_adv_rx_desc *rxd; + struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool); + + if (mbuf == NULL) { + PMD_INIT_LOG(ERR, "RX mbuf alloc failed " + "queue_id=%hu", rxq->queue_id); + return -ENOMEM; + } + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf)); + rxd = &rxq->rx_ring[i]; + rxd->read.hdr_addr = 0; + rxd->read.pkt_addr = dma_addr; + rxe[i].mbuf = mbuf; + } + + return 0; +} + +#define E1000_MRQC_DEF_Q_SHIFT (3) +static int +igb_dev_mq_rx_configure(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t mrqc; + + if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) { + /* + * SRIOV active scheme + * FIXME if support RSS together with VMDq & SRIOV + */ + mrqc = E1000_MRQC_ENABLE_VMDQ; + /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */ + mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT; + E1000_WRITE_REG(hw, E1000_MRQC, mrqc); + } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) { + /* + * SRIOV inactive scheme + */ + switch (dev->data->dev_conf.rxmode.mq_mode) { + case ETH_MQ_RX_RSS: + igb_rss_configure(dev); + break; + case ETH_MQ_RX_VMDQ_ONLY: + /*Configure general VMDQ only RX parameters*/ + igb_vmdq_rx_hw_configure(dev); + break; + case ETH_MQ_RX_NONE: + /* if mq_mode is none, disable rss mode.*/ + default: + igb_rss_disable(dev); + break; + } + } + + return 0; +} + +int +eth_igb_rx_init(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + struct igb_rx_queue *rxq; + uint32_t rctl; + uint32_t rxcsum; + uint32_t srrctl; + uint16_t buf_size; + uint16_t rctl_bsize; + uint16_t i; + int ret; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + srrctl = 0; + + /* + * Make sure receives are disabled while setting + * up the descriptor ring. + */ + rctl = E1000_READ_REG(hw, E1000_RCTL); + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); + + /* + * Configure support of jumbo frames, if any. + */ + if (dev->data->dev_conf.rxmode.jumbo_frame == 1) { + rctl |= E1000_RCTL_LPE; + + /* + * Set maximum packet length by default, and might be updated + * together with enabling/disabling dual VLAN. + */ + E1000_WRITE_REG(hw, E1000_RLPML, + dev->data->dev_conf.rxmode.max_rx_pkt_len + + VLAN_TAG_SIZE); + } else + rctl &= ~E1000_RCTL_LPE; + + /* Configure and enable each RX queue. */ + rctl_bsize = 0; + dev->rx_pkt_burst = eth_igb_recv_pkts; + for (i = 0; i < dev->data->nb_rx_queues; i++) { + uint64_t bus_addr; + uint32_t rxdctl; + + rxq = dev->data->rx_queues[i]; + + /* Allocate buffers for descriptor rings and set up queue */ + ret = igb_alloc_rx_queue_mbufs(rxq); + if (ret) + return ret; + + /* + * Reset crc_len in case it was changed after queue setup by a + * call to configure + */ + rxq->crc_len = + (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ? 
+ 0 : ETHER_CRC_LEN); + + bus_addr = rxq->rx_ring_phys_addr; + E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx), + rxq->nb_rx_desc * + sizeof(union e1000_adv_rx_desc)); + E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx), + (uint32_t)(bus_addr >> 32)); + E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr); + + srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; + + /* + * Configure RX buffer size. + */ + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - + RTE_PKTMBUF_HEADROOM); + if (buf_size >= 1024) { + /* + * Configure the BSIZEPACKET field of the SRRCTL + * register of the queue. + * Value is in 1 KB resolution, from 1 KB to 127 KB. + * If this field is equal to 0b, then RCTL.BSIZE + * determines the RX packet buffer size. + */ + srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) & + E1000_SRRCTL_BSIZEPKT_MASK); + buf_size = (uint16_t) ((srrctl & + E1000_SRRCTL_BSIZEPKT_MASK) << + E1000_SRRCTL_BSIZEPKT_SHIFT); + + /* It adds dual VLAN length for supporting dual VLAN */ + if ((dev->data->dev_conf.rxmode.max_rx_pkt_len + + 2 * VLAN_TAG_SIZE) > buf_size){ + if (!dev->data->scattered_rx) + PMD_INIT_LOG(DEBUG, + "forcing scatter mode"); + dev->rx_pkt_burst = eth_igb_recv_scattered_pkts; + dev->data->scattered_rx = 1; + } + } else { + /* + * Use BSIZE field of the device RCTL register. + */ + if ((rctl_bsize == 0) || (rctl_bsize > buf_size)) + rctl_bsize = buf_size; + if (!dev->data->scattered_rx) + PMD_INIT_LOG(DEBUG, "forcing scatter mode"); + dev->rx_pkt_burst = eth_igb_recv_scattered_pkts; + dev->data->scattered_rx = 1; + } + + /* Set if packets are dropped when no descriptors available */ + if (rxq->drop_en) + srrctl |= E1000_SRRCTL_DROP_EN; + + E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl); + + /* Enable this RX queue. */ + rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx)); + rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; + rxdctl &= 0xFFF00000; + rxdctl |= (rxq->pthresh & 0x1F); + rxdctl |= ((rxq->hthresh & 0x1F) << 8); + rxdctl |= ((rxq->wthresh & 0x1F) << 16); + E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl); + } + + if (dev->data->dev_conf.rxmode.enable_scatter) { + if (!dev->data->scattered_rx) + PMD_INIT_LOG(DEBUG, "forcing scatter mode"); + dev->rx_pkt_burst = eth_igb_recv_scattered_pkts; + dev->data->scattered_rx = 1; + } + + /* + * Setup BSIZE field of RCTL register, if needed. + * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL + * register, since the code above configures the SRRCTL register of + * the RX queue in such a case. + * All configurable sizes are: + * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX); + * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX); + * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX); + * 2048: rctl |= E1000_RCTL_SZ_2048; + * 1024: rctl |= E1000_RCTL_SZ_1024; + * 512: rctl |= E1000_RCTL_SZ_512; + * 256: rctl |= E1000_RCTL_SZ_256; + */ + if (rctl_bsize > 0) { + if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */ + rctl |= E1000_RCTL_SZ_512; + else /* 256 <= buf_size < 512 - use 256 */ + rctl |= E1000_RCTL_SZ_256; + } + + /* + * Configure RSS if device configured with multiple RX queues. + */ + igb_dev_mq_rx_configure(dev); + + /* Update the rctl since igb_dev_mq_rx_configure may change its value */ + rctl |= E1000_READ_REG(hw, E1000_RCTL); + + /* + * Setup the Checksum Register. + * Receive Full-Packet Checksum Offload is mutually exclusive with RSS. 
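+ * Setting the PCSD bit below selects RSS hash reporting in the RX
+ * write-back descriptor in place of the full-packet checksum, which is
+ * why the two features cannot be enabled together.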
+ */ + rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); + rxcsum |= E1000_RXCSUM_PCSD; + + /* Enable both L3/L4 rx checksum offload */ + if (dev->data->dev_conf.rxmode.hw_ip_checksum) + rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL); + else + rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL); + E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); + + /* Setup the Receive Control Register. */ + if (dev->data->dev_conf.rxmode.hw_strip_crc) { + rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */ + + /* set STRCRC bit in all queues */ + if (hw->mac.type == e1000_i350 || + hw->mac.type == e1000_i210 || + hw->mac.type == e1000_i211 || + hw->mac.type == e1000_i354) { + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + uint32_t dvmolr = E1000_READ_REG(hw, + E1000_DVMOLR(rxq->reg_idx)); + dvmolr |= E1000_DVMOLR_STRCRC; + E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr); + } + } + } else { + rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */ + + /* clear STRCRC bit in all queues */ + if (hw->mac.type == e1000_i350 || + hw->mac.type == e1000_i210 || + hw->mac.type == e1000_i211 || + hw->mac.type == e1000_i354) { + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + uint32_t dvmolr = E1000_READ_REG(hw, + E1000_DVMOLR(rxq->reg_idx)); + dvmolr &= ~E1000_DVMOLR_STRCRC; + E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr); + } + } + } + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | + E1000_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + + /* Make sure VLAN Filters are off. */ + if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY) + rctl &= ~E1000_RCTL_VFE; + /* Don't store bad packets. */ + rctl &= ~E1000_RCTL_SBP; + + /* Enable Receives. */ + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers. + * This needs to be done after enable. + */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0); + E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1); + } + + return 0; +} + +/********************************************************************* + * + * Enable transmit unit. + * + **********************************************************************/ +void +eth_igb_tx_init(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + struct igb_tx_queue *txq; + uint32_t tctl; + uint32_t txdctl; + uint16_t i; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Setup the Base and Length of the Tx Descriptor Rings. */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + uint64_t bus_addr; + txq = dev->data->tx_queues[i]; + bus_addr = txq->tx_ring_phys_addr; + + E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx), + txq->nb_tx_desc * + sizeof(union e1000_adv_tx_desc)); + E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx), + (uint32_t)(bus_addr >> 32)); + E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr); + + /* Setup the HW Tx Head and Tail descriptor pointers. */ + E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0); + E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0); + + /* Setup Transmit threshold registers. 
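+ * The code below ORs pthresh into bits 4:0, hthresh into bits 12:8 and
+ * wthresh into bits 20:16 of TXDCTL, then sets the queue-enable bit.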
*/ + txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx)); + txdctl |= txq->pthresh & 0x1F; + txdctl |= ((txq->hthresh & 0x1F) << 8); + txdctl |= ((txq->wthresh & 0x1F) << 16); + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; + E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl); + } + + /* Program the Transmit Control Register. */ + tctl = E1000_READ_REG(hw, E1000_TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); + + e1000_config_collision_dist(hw); + + /* This write will effectively turn on the transmit unit. */ + E1000_WRITE_REG(hw, E1000_TCTL, tctl); +} + +/********************************************************************* + * + * Enable VF receive unit. + * + **********************************************************************/ +int +eth_igbvf_rx_init(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + struct igb_rx_queue *rxq; + uint32_t srrctl; + uint16_t buf_size; + uint16_t rctl_bsize; + uint16_t i; + int ret; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* setup MTU */ + e1000_rlpml_set_vf(hw, + (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len + + VLAN_TAG_SIZE)); + + /* Configure and enable each RX queue. */ + rctl_bsize = 0; + dev->rx_pkt_burst = eth_igb_recv_pkts; + for (i = 0; i < dev->data->nb_rx_queues; i++) { + uint64_t bus_addr; + uint32_t rxdctl; + + rxq = dev->data->rx_queues[i]; + + /* Allocate buffers for descriptor rings and set up queue */ + ret = igb_alloc_rx_queue_mbufs(rxq); + if (ret) + return ret; + + bus_addr = rxq->rx_ring_phys_addr; + E1000_WRITE_REG(hw, E1000_RDLEN(i), + rxq->nb_rx_desc * + sizeof(union e1000_adv_rx_desc)); + E1000_WRITE_REG(hw, E1000_RDBAH(i), + (uint32_t)(bus_addr >> 32)); + E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr); + + srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; + + /* + * Configure RX buffer size. + */ + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - + RTE_PKTMBUF_HEADROOM); + if (buf_size >= 1024) { + /* + * Configure the BSIZEPACKET field of the SRRCTL + * register of the queue. + * Value is in 1 KB resolution, from 1 KB to 127 KB. + * If this field is equal to 0b, then RCTL.BSIZE + * determines the RX packet buffer size. + */ + srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) & + E1000_SRRCTL_BSIZEPKT_MASK); + buf_size = (uint16_t) ((srrctl & + E1000_SRRCTL_BSIZEPKT_MASK) << + E1000_SRRCTL_BSIZEPKT_SHIFT); + + /* It adds dual VLAN length for supporting dual VLAN */ + if ((dev->data->dev_conf.rxmode.max_rx_pkt_len + + 2 * VLAN_TAG_SIZE) > buf_size){ + if (!dev->data->scattered_rx) + PMD_INIT_LOG(DEBUG, + "forcing scatter mode"); + dev->rx_pkt_burst = eth_igb_recv_scattered_pkts; + dev->data->scattered_rx = 1; + } + } else { + /* + * Use BSIZE field of the device RCTL register. + */ + if ((rctl_bsize == 0) || (rctl_bsize > buf_size)) + rctl_bsize = buf_size; + if (!dev->data->scattered_rx) + PMD_INIT_LOG(DEBUG, "forcing scatter mode"); + dev->rx_pkt_burst = eth_igb_recv_scattered_pkts; + dev->data->scattered_rx = 1; + } + + /* Set if packets are dropped when no descriptors available */ + if (rxq->drop_en) + srrctl |= E1000_SRRCTL_DROP_EN; + + E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl); + + /* Enable this RX queue. 
*/ + rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i)); + rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; + rxdctl &= 0xFFF00000; + rxdctl |= (rxq->pthresh & 0x1F); + rxdctl |= ((rxq->hthresh & 0x1F) << 8); + if (hw->mac.type == e1000_vfadapt) { + /* + * Workaround of 82576 VF Erratum + * force set WTHRESH to 1 + * to avoid Write-Back not triggered sometimes + */ + rxdctl |= 0x10000; + PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !"); + } + else + rxdctl |= ((rxq->wthresh & 0x1F) << 16); + E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); + } + + if (dev->data->dev_conf.rxmode.enable_scatter) { + if (!dev->data->scattered_rx) + PMD_INIT_LOG(DEBUG, "forcing scatter mode"); + dev->rx_pkt_burst = eth_igb_recv_scattered_pkts; + dev->data->scattered_rx = 1; + } + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers. + * This needs to be done after enable. + */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + E1000_WRITE_REG(hw, E1000_RDH(i), 0); + E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1); + } + + return 0; +} + +/********************************************************************* + * + * Enable VF transmit unit. + * + **********************************************************************/ +void +eth_igbvf_tx_init(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + struct igb_tx_queue *txq; + uint32_t txdctl; + uint16_t i; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Setup the Base and Length of the Tx Descriptor Rings. */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + uint64_t bus_addr; + + txq = dev->data->tx_queues[i]; + bus_addr = txq->tx_ring_phys_addr; + E1000_WRITE_REG(hw, E1000_TDLEN(i), + txq->nb_tx_desc * + sizeof(union e1000_adv_tx_desc)); + E1000_WRITE_REG(hw, E1000_TDBAH(i), + (uint32_t)(bus_addr >> 32)); + E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr); + + /* Setup the HW Tx Head and Tail descriptor pointers. */ + E1000_WRITE_REG(hw, E1000_TDT(i), 0); + E1000_WRITE_REG(hw, E1000_TDH(i), 0); + + /* Setup Transmit threshold registers. 
*/ + txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i)); + txdctl |= txq->pthresh & 0x1F; + txdctl |= ((txq->hthresh & 0x1F) << 8); + if (hw->mac.type == e1000_82576) { + /* + * Workaround of 82576 VF Erratum + * force set WTHRESH to 1 + * to avoid Write-Back not triggered sometimes + */ + txdctl |= 0x10000; + PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !"); + } + else + txdctl |= ((txq->wthresh & 0x1F) << 16); + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; + E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); + } + +} + +void +igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct igb_rx_queue *rxq; + + rxq = dev->data->rx_queues[queue_id]; + + qinfo->mp = rxq->mb_pool; + qinfo->scattered_rx = dev->data->scattered_rx; + qinfo->nb_desc = rxq->nb_rx_desc; + + qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; + qinfo->conf.rx_drop_en = rxq->drop_en; +} + +void +igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct igb_tx_queue *txq; + + txq = dev->data->tx_queues[queue_id]; + + qinfo->nb_desc = txq->nb_tx_desc; + + qinfo->conf.tx_thresh.pthresh = txq->pthresh; + qinfo->conf.tx_thresh.hthresh = txq->hthresh; + qinfo->conf.tx_thresh.wthresh = txq->wthresh; +} diff --git a/drivers/net/e1000/rte_pmd_e1000_version.map b/drivers/net/e1000/rte_pmd_e1000_version.map new file mode 100644 index 00000000..ef353984 --- /dev/null +++ b/drivers/net/e1000/rte_pmd_e1000_version.map @@ -0,0 +1,4 @@ +DPDK_2.0 { + + local: *; +}; diff --git a/drivers/net/ena/Makefile b/drivers/net/ena/Makefile new file mode 100644 index 00000000..ac2b55dc --- /dev/null +++ b/drivers/net/ena/Makefile @@ -0,0 +1,61 @@ +# +# BSD LICENSE +# +# Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of copyright holder nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_ena.a +CFLAGS += $(WERROR_FLAGS) -O2 +INCLUDES :=-I$(SRCDIR) -I$(SRCDIR)/base/ena_defs -I$(SRCDIR)/base + +EXPORT_MAP := rte_pmd_ena_version.map +LIBABIVER := 1 + +VPATH += $(SRCDIR)/base +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_com.c +SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_eth_com.c + +# this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += lib/librte_eal lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += lib/librte_mempool lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += lib/librte_net lib/librte_malloc + +CFLAGS += $(INCLUDES) + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/ena/base/ena_com.c b/drivers/net/ena/base/ena_com.c new file mode 100644 index 00000000..a21a9513 --- /dev/null +++ b/drivers/net/ena/base/ena_com.c @@ -0,0 +1,2809 @@ +/*- +* BSD LICENSE +* +* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in +* the documentation and/or other materials provided with the +* distribution. +* * Neither the name of copyright holder nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include "ena_com.h" + +/*****************************************************************************/ +/*****************************************************************************/ + +/* Timeout in micro-sec */ +#define ADMIN_CMD_TIMEOUT_US (1000000) + +#define ENA_ASYNC_QUEUE_DEPTH 4 +#define ENA_ADMIN_QUEUE_DEPTH 32 + +#define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF) +#define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16) + +#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \ + ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \ + | (ENA_COMMON_SPEC_VERSION_MINOR)) + +#define ENA_CTRL_MAJOR 0 +#define ENA_CTRL_MINOR 0 +#define ENA_CTRL_SUB_MINOR 1 + +#define MIN_ENA_CTRL_VER \ + (((ENA_CTRL_MAJOR) << \ + (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \ + ((ENA_CTRL_MINOR) << \ + (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \ + (ENA_CTRL_SUB_MINOR)) + +#define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x))) +#define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32)) + +#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF + +static int ena_alloc_cnt; + +/*****************************************************************************/ +/*****************************************************************************/ +/*****************************************************************************/ + +enum ena_cmd_status { + ENA_CMD_SUBMITTED, + ENA_CMD_COMPLETED, + /* Abort - canceled by the driver */ + ENA_CMD_ABORTED, +}; + +struct ena_comp_ctx { + ena_wait_event_t wait_event; + struct ena_admin_acq_entry *user_cqe; + u32 comp_size; + enum ena_cmd_status status; + /* status from the device */ + u8 comp_status; + u8 cmd_opcode; + bool occupied; +}; + +static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev, + struct ena_common_mem_addr *ena_addr, + dma_addr_t addr) +{ + if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) { + ena_trc_err("dma address has more bits that the device supports\n"); + return ENA_COM_INVAL; + } + + ena_addr->mem_addr_low = (u32)addr; + ena_addr->mem_addr_high = + ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 32)) >> 32); + + return 0; +} + +static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue) +{ + ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, + ADMIN_SQ_SIZE(queue->q_depth), + queue->sq.entries, + queue->sq.dma_addr, + queue->sq.mem_handle); + + if (!queue->sq.entries) { + ena_trc_err("memory allocation failed"); + return ENA_COM_NO_MEM; + } + + queue->sq.head = 0; + queue->sq.tail = 0; + queue->sq.phase = 1; + + queue->sq.db_addr = NULL; + + return 0; +} + +static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue) +{ + ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, + ADMIN_CQ_SIZE(queue->q_depth), + queue->cq.entries, + queue->cq.dma_addr, + queue->cq.mem_handle); + + if (!queue->cq.entries) { + ena_trc_err("memory allocation failed"); + return ENA_COM_NO_MEM; + } + + queue->cq.head = 0; + queue->cq.phase = 1; + + return 0; +} + +static int ena_com_admin_init_aenq(struct ena_com_dev *dev, + struct ena_aenq_handlers *aenq_handlers) +{ + u32 addr_low, addr_high, aenq_caps; + + dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; + ENA_MEM_ALLOC_COHERENT(dev->dmadev, + ADMIN_AENQ_SIZE(dev->aenq.q_depth), + dev->aenq.entries, + dev->aenq.dma_addr, + dev->aenq.mem_handle); + + if (!dev->aenq.entries) { + ena_trc_err("memory allocation failed"); + return ENA_COM_NO_MEM; + } + + dev->aenq.head = dev->aenq.q_depth; + dev->aenq.phase = 1; + + addr_low = 
ENA_DMA_ADDR_TO_UINT32_LOW(dev->aenq.dma_addr); + addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(dev->aenq.dma_addr); + + ENA_REG_WRITE32(addr_low, (unsigned char *)dev->reg_bar + + ENA_REGS_AENQ_BASE_LO_OFF); + ENA_REG_WRITE32(addr_high, (unsigned char *)dev->reg_bar + + ENA_REGS_AENQ_BASE_HI_OFF); + + aenq_caps = 0; + aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK; + aenq_caps |= (sizeof(struct ena_admin_aenq_entry) << + ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) & + ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK; + + ENA_REG_WRITE32(aenq_caps, (unsigned char *)dev->reg_bar + + ENA_REGS_AENQ_CAPS_OFF); + + if (unlikely(!aenq_handlers)) + ena_trc_err("aenq handlers pointer is NULL\n"); + + dev->aenq.aenq_handlers = aenq_handlers; + + return 0; +} + +static inline void comp_ctxt_release(struct ena_com_admin_queue *queue, + struct ena_comp_ctx *comp_ctx) +{ + comp_ctx->occupied = false; + ATOMIC32_DEC(&queue->outstanding_cmds); +} + +static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue, + u16 command_id, bool capture) +{ + ENA_ASSERT(command_id < queue->q_depth, + "command id is larger than the queue size. cmd_id: %u queue size %d\n", + command_id, queue->q_depth); + + ENA_ASSERT(!(queue->comp_ctx[command_id].occupied && capture), + "Completion context is occupied"); + + if (capture) { + ATOMIC32_INC(&queue->outstanding_cmds); + queue->comp_ctx[command_id].occupied = true; + } + + return &queue->comp_ctx[command_id]; +} + +static struct ena_comp_ctx * +__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, + struct ena_admin_aq_entry *cmd, + size_t cmd_size_in_bytes, + struct ena_admin_acq_entry *comp, + size_t comp_size_in_bytes) +{ + struct ena_comp_ctx *comp_ctx; + u16 tail_masked, cmd_id; + u16 queue_size_mask; + u16 cnt; + + queue_size_mask = admin_queue->q_depth - 1; + + tail_masked = admin_queue->sq.tail & queue_size_mask; + + /* In case of queue FULL */ + cnt = admin_queue->sq.tail - admin_queue->sq.head; + if (cnt >= admin_queue->q_depth) { + ena_trc_dbg("admin queue is FULL (tail %d head %d depth: %d)\n", + admin_queue->sq.tail, + admin_queue->sq.head, + admin_queue->q_depth); + admin_queue->stats.out_of_space++; + return ERR_PTR(ENA_COM_NO_SPACE); + } + + cmd_id = admin_queue->curr_cmd_id; + + cmd->aq_common_descriptor.flags |= admin_queue->sq.phase & + ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK; + + cmd->aq_common_descriptor.command_id |= cmd_id & + ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK; + + comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true); + + comp_ctx->status = ENA_CMD_SUBMITTED; + comp_ctx->comp_size = (u32)comp_size_in_bytes; + comp_ctx->user_cqe = comp; + comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode; + + ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event); + + memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes); + + admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) & + queue_size_mask; + + admin_queue->sq.tail++; + admin_queue->stats.submitted_cmd++; + + if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0)) + admin_queue->sq.phase = !admin_queue->sq.phase; + + ENA_REG_WRITE32(admin_queue->sq.tail, admin_queue->sq.db_addr); + + return comp_ctx; +} + +static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue) +{ + size_t size = queue->q_depth * sizeof(struct ena_comp_ctx); + struct ena_comp_ctx *comp_ctx; + u16 i; + + queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size); + if (unlikely(!queue->comp_ctx)) { + ena_trc_err("memory allocation failed"); + return ENA_COM_NO_MEM; + } + + 
for (i = 0; i < queue->q_depth; i++) { + comp_ctx = get_comp_ctxt(queue, i, false); + ENA_WAIT_EVENT_INIT(comp_ctx->wait_event); + } + + return 0; +} + +static struct ena_comp_ctx * +ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, + struct ena_admin_aq_entry *cmd, + size_t cmd_size_in_bytes, + struct ena_admin_acq_entry *comp, + size_t comp_size_in_bytes) +{ + unsigned long flags = 0; + struct ena_comp_ctx *comp_ctx; + + ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); + if (unlikely(!admin_queue->running_state)) { + ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); + return ERR_PTR(ENA_COM_NO_DEVICE); + } + comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd, + cmd_size_in_bytes, + comp, + comp_size_in_bytes); + ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); + + return comp_ctx; +} + +static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, + struct ena_com_io_sq *io_sq) +{ + size_t size; + + memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr)); + + io_sq->desc_entry_size = + (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? + sizeof(struct ena_eth_io_tx_desc) : + sizeof(struct ena_eth_io_rx_desc); + + size = io_sq->desc_entry_size * io_sq->q_depth; + + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) + ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, + size, + io_sq->desc_addr.virt_addr, + io_sq->desc_addr.phys_addr, + io_sq->desc_addr.mem_handle); + else + io_sq->desc_addr.virt_addr = + ENA_MEM_ALLOC(ena_dev->dmadev, size); + + if (!io_sq->desc_addr.virt_addr) { + ena_trc_err("memory allocation failed"); + return ENA_COM_NO_MEM; + } + + io_sq->tail = 0; + io_sq->next_to_comp = 0; + io_sq->phase = 1; + + return 0; +} + +static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, + struct ena_com_io_cq *io_cq) +{ + size_t size; + + memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr)); + + /* Use the basic completion descriptor for Rx */ + io_cq->cdesc_entry_size_in_bytes = + (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 
+ sizeof(struct ena_eth_io_tx_cdesc) : + sizeof(struct ena_eth_io_rx_cdesc_base); + + size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; + + ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, + size, + io_cq->cdesc_addr.virt_addr, + io_cq->cdesc_addr.phys_addr, + io_cq->cdesc_addr.mem_handle); + + if (!io_cq->cdesc_addr.virt_addr) { + ena_trc_err("memory allocation failed"); + return ENA_COM_NO_MEM; + } + + io_cq->phase = 1; + io_cq->head = 0; + + return 0; +} + +static void +ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue, + struct ena_admin_acq_entry *cqe) +{ + struct ena_comp_ctx *comp_ctx; + u16 cmd_id; + + cmd_id = cqe->acq_common_descriptor.command & + ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK; + + comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false); + + comp_ctx->status = ENA_CMD_COMPLETED; + comp_ctx->comp_status = cqe->acq_common_descriptor.status; + + if (comp_ctx->user_cqe) + memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size); + + if (!admin_queue->polling) + ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event); +} + +static void +ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue) +{ + struct ena_admin_acq_entry *cqe = NULL; + u16 comp_num = 0; + u16 head_masked; + u8 phase; + + head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1); + phase = admin_queue->cq.phase; + + cqe = &admin_queue->cq.entries[head_masked]; + + /* Go over all the completions */ + while ((cqe->acq_common_descriptor.flags & + ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { + /* Do not read the rest of the completion entry before the + * phase bit was validated + */ + rmb(); + ena_com_handle_single_admin_completion(admin_queue, cqe); + + head_masked++; + comp_num++; + if (unlikely(head_masked == admin_queue->q_depth)) { + head_masked = 0; + phase = !phase; + } + + cqe = &admin_queue->cq.entries[head_masked]; + } + + admin_queue->cq.head += comp_num; + admin_queue->cq.phase = phase; + admin_queue->sq.head += comp_num; + admin_queue->stats.completed_cmd += comp_num; +} + +static int ena_com_comp_status_to_errno(u8 comp_status) +{ + if (unlikely(comp_status != 0)) + ena_trc_err("admin command failed[%u]\n", comp_status); + + if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR)) + return ENA_COM_INVAL; + + switch (comp_status) { + case ENA_ADMIN_SUCCESS: + return 0; + case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE: + return ENA_COM_NO_MEM; + case ENA_ADMIN_UNSUPPORTED_OPCODE: + return ENA_COM_PERMISSION; + case ENA_ADMIN_BAD_OPCODE: + case ENA_ADMIN_MALFORMED_REQUEST: + case ENA_ADMIN_ILLEGAL_PARAMETER: + case ENA_ADMIN_UNKNOWN_ERROR: + return ENA_COM_INVAL; + } + + return 0; +} + +static int +ena_com_wait_and_process_admin_cq_polling( + struct ena_comp_ctx *comp_ctx, + struct ena_com_admin_queue *admin_queue) +{ + unsigned long flags = 0; + u64 start_time; + int ret; + + start_time = ENA_GET_SYSTEM_USECS(); + + while (comp_ctx->status == ENA_CMD_SUBMITTED) { + if ((ENA_GET_SYSTEM_USECS() - start_time) > + ADMIN_CMD_TIMEOUT_US) { + ena_trc_err("Wait for completion (polling) timeout\n"); + /* ENA didn't have any completion */ + ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); + admin_queue->stats.no_completion++; + admin_queue->running_state = false; + ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); + + ret = ENA_COM_TIMER_EXPIRED; + goto err; + } + + ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); + ena_com_handle_admin_completion(admin_queue); + ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); + } + + if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) { + 
ena_trc_err("Command was aborted\n"); + ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); + admin_queue->stats.aborted_cmd++; + ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); + ret = ENA_COM_NO_DEVICE; + goto err; + } + + ENA_ASSERT(comp_ctx->status == ENA_CMD_COMPLETED, + "Invalid comp status %d\n", comp_ctx->status); + + ret = ena_com_comp_status_to_errno(comp_ctx->comp_status); +err: + comp_ctxt_release(admin_queue, comp_ctx); + return ret; +} + +static int +ena_com_wait_and_process_admin_cq_interrupts( + struct ena_comp_ctx *comp_ctx, + struct ena_com_admin_queue *admin_queue) +{ + unsigned long flags = 0; + int ret = 0; + + ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event, + ADMIN_CMD_TIMEOUT_US); + + /* In case the command wasn't completed find out the root cause. + * There might be 2 kinds of errors + * 1) No completion (timeout reached) + * 2) There is completion but the device didn't get any msi-x interrupt. + */ + if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) { + ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); + ena_com_handle_admin_completion(admin_queue); + admin_queue->stats.no_completion++; + ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); + + if (comp_ctx->status == ENA_CMD_COMPLETED) + ena_trc_err("The ena device have completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n", + comp_ctx->cmd_opcode); + else + ena_trc_err("The ena device doesn't send any completion for the admin cmd %d status %d\n", + comp_ctx->cmd_opcode, comp_ctx->status); + + admin_queue->running_state = false; + ret = ENA_COM_TIMER_EXPIRED; + goto err; + } + + ret = ena_com_comp_status_to_errno(comp_ctx->comp_status); +err: + comp_ctxt_release(admin_queue, comp_ctx); + return ret; +} + +/* This method read the hardware device register through posting writes + * and waiting for response + * On timeout the function will return ENA_MMIO_READ_TIMEOUT + */ +static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) +{ + struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; + volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp = + mmio_read->read_resp; + u32 mmio_read_reg, ret; + unsigned long flags = 0; + int i; + + ENA_MIGHT_SLEEP(); + + /* If readless is disabled, perform regular read */ + if (!mmio_read->readless_supported) + return ENA_REG_READ32((unsigned char *)ena_dev->reg_bar + + offset); + + ENA_SPINLOCK_LOCK(mmio_read->lock, flags); + mmio_read->seq_num++; + + read_resp->req_id = mmio_read->seq_num + 0xDEAD; + mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) & + ENA_REGS_MMIO_REG_READ_REG_OFF_MASK; + mmio_read_reg |= mmio_read->seq_num & + ENA_REGS_MMIO_REG_READ_REQ_ID_MASK; + + /* make sure read_resp->req_id get updated before the hw can write + * there + */ + wmb(); + + ENA_REG_WRITE32(mmio_read_reg, (unsigned char *)ena_dev->reg_bar + + ENA_REGS_MMIO_REG_READ_OFF); + + for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) { + if (read_resp->req_id == mmio_read->seq_num) + break; + + ENA_UDELAY(1); + } + + if (unlikely(i == ENA_REG_READ_TIMEOUT)) { + ena_trc_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n", + mmio_read->seq_num, + offset, + read_resp->req_id, + read_resp->reg_off); + ret = ENA_MMIO_READ_TIMEOUT; + goto err; + } + + ENA_ASSERT(read_resp->reg_off == offset, + "Invalid MMIO read return value"); + + ret = read_resp->reg_val; +err: + ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags); + + return ret; +} + +/* There are two types to wait for completion. 
+ * Polling mode - wait until the completion is available. + * Async mode - wait on wait queue until the completion is ready + * (or the timeout expired). + * It is expected that the IRQ called ena_com_handle_admin_completion + * to mark the completions. + */ +static int +ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx, + struct ena_com_admin_queue *admin_queue) +{ + if (admin_queue->polling) + return ena_com_wait_and_process_admin_cq_polling(comp_ctx, + admin_queue); + + return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx, + admin_queue); +} + +static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev, + struct ena_com_io_sq *io_sq) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_admin_aq_destroy_sq_cmd destroy_cmd; + struct ena_admin_acq_destroy_sq_resp_desc destroy_resp; + u8 direction; + int ret; + + memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd)); + + if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) + direction = ENA_ADMIN_SQ_DIRECTION_TX; + else + direction = ENA_ADMIN_SQ_DIRECTION_RX; + + destroy_cmd.sq.sq_identity |= (direction << + ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) & + ENA_ADMIN_SQ_SQ_DIRECTION_MASK; + + destroy_cmd.sq.sq_idx = io_sq->idx; + destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ; + + ret = ena_com_execute_admin_command( + admin_queue, + (struct ena_admin_aq_entry *)&destroy_cmd, + sizeof(destroy_cmd), + (struct ena_admin_acq_entry *)&destroy_resp, + sizeof(destroy_resp)); + + if (unlikely(ret && (ret != ENA_COM_NO_DEVICE))) + ena_trc_err("failed to destroy io sq error: %d\n", ret); + + return ret; +} + +static void ena_com_io_queue_free(struct ena_com_dev *ena_dev, + struct ena_com_io_sq *io_sq, + struct ena_com_io_cq *io_cq) +{ + size_t size; + + if (io_cq->cdesc_addr.virt_addr) { + size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; + + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, + size, + io_cq->cdesc_addr.virt_addr, + io_cq->cdesc_addr.phys_addr, + io_cq->cdesc_addr.mem_handle); + + io_cq->cdesc_addr.virt_addr = NULL; + } + + if (io_sq->desc_addr.virt_addr) { + size = io_sq->desc_entry_size * io_sq->q_depth; + + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, + size, + io_sq->desc_addr.virt_addr, + io_sq->desc_addr.phys_addr, + io_sq->desc_addr.mem_handle); + else + ENA_MEM_FREE(ena_dev->dmadev, + io_sq->desc_addr.virt_addr); + + io_sq->desc_addr.virt_addr = NULL; + } +} + +static int wait_for_reset_state(struct ena_com_dev *ena_dev, + u32 timeout, u16 exp_state) +{ + u32 val, i; + + for (i = 0; i < timeout; i++) { + val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); + + if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) { + ena_trc_err("Reg read timeout occurred\n"); + return ENA_COM_TIMER_EXPIRED; + } + + if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) == + exp_state) + return 0; + + /* The resolution of the timeout is 100ms */ + ENA_MSLEEP(100); + } + + return ENA_COM_TIMER_EXPIRED; +} + +static bool +ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev, + enum ena_admin_aq_feature_id feature_id) +{ + u32 feature_mask = 1 << feature_id; + + /* Device attributes is always supported */ + if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) && + !(ena_dev->supported_features & feature_mask)) + return false; + + return true; +} + +static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, + struct ena_admin_get_feat_resp *get_resp, + enum ena_admin_aq_feature_id feature_id, + dma_addr_t 
control_buf_dma_addr, + u32 control_buff_size) +{ + struct ena_com_admin_queue *admin_queue; + struct ena_admin_get_feat_cmd get_cmd; + int ret; + + if (!ena_dev) { + ena_trc_err("%s : ena_dev is NULL\n", __func__); + return ENA_COM_NO_DEVICE; + } + + if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) { + ena_trc_info("Feature %d isn't supported\n", feature_id); + return ENA_COM_PERMISSION; + } + + memset(&get_cmd, 0x0, sizeof(get_cmd)); + admin_queue = &ena_dev->admin_queue; + + get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE; + + if (control_buff_size) + get_cmd.aq_common_descriptor.flags = + ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; + else + get_cmd.aq_common_descriptor.flags = 0; + + ret = ena_com_mem_addr_set(ena_dev, + &get_cmd.control_buffer.address, + control_buf_dma_addr); + if (unlikely(ret)) { + ena_trc_err("memory address set failed\n"); + return ret; + } + + get_cmd.control_buffer.length = control_buff_size; + + get_cmd.feat_common.feature_id = feature_id; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *) + &get_cmd, + sizeof(get_cmd), + (struct ena_admin_acq_entry *) + get_resp, + sizeof(*get_resp)); + + if (unlikely(ret)) + ena_trc_err("Failed to submit get_feature command %d error: %d\n", + feature_id, ret); + + return ret; +} + +static int ena_com_get_feature(struct ena_com_dev *ena_dev, + struct ena_admin_get_feat_resp *get_resp, + enum ena_admin_aq_feature_id feature_id) +{ + return ena_com_get_feature_ex(ena_dev, + get_resp, + feature_id, + 0, + 0); +} + +static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + + ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, + sizeof(*rss->hash_key), + rss->hash_key, + rss->hash_key_dma_addr, + rss->hash_key_mem_handle); + + if (unlikely(!rss->hash_key)) + return ENA_COM_NO_MEM; + + return 0; +} + +static int ena_com_hash_key_destroy(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + + if (rss->hash_key) + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, + sizeof(*rss->hash_key), + rss->hash_key, + rss->hash_key_dma_addr, + rss->hash_key_mem_handle); + rss->hash_key = NULL; + return 0; +} + +static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + + ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, + sizeof(*rss->hash_ctrl), + rss->hash_ctrl, + rss->hash_ctrl_dma_addr, + rss->hash_ctrl_mem_handle); + + return 0; +} + +static int ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + + if (rss->hash_ctrl) + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, + sizeof(*rss->hash_ctrl), + rss->hash_ctrl, + rss->hash_ctrl_dma_addr, + rss->hash_ctrl_mem_handle); + rss->hash_ctrl = NULL; + + return 0; +} + +static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev, + u16 log_size) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_get_feat_resp get_resp; + size_t tbl_size; + int ret; + + ret = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG); + if (unlikely(ret)) + return ret; + + if ((get_resp.u.ind_table.min_size > log_size) || + (get_resp.u.ind_table.max_size < log_size)) { + ena_trc_err("indirect table size doesn't fit. 
requested size: %d while min is:%d and max %d\n", + 1 << log_size, + 1 << get_resp.u.ind_table.min_size, + 1 << get_resp.u.ind_table.max_size); + return ENA_COM_INVAL; + } + + tbl_size = (1 << log_size) * + sizeof(struct ena_admin_rss_ind_table_entry); + + ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, + tbl_size, + rss->rss_ind_tbl, + rss->rss_ind_tbl_dma_addr, + rss->rss_ind_tbl_mem_handle); + if (unlikely(!rss->rss_ind_tbl)) + goto mem_err1; + + tbl_size = (1 << log_size) * sizeof(u16); + rss->host_rss_ind_tbl = + ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size); + if (unlikely(!rss->host_rss_ind_tbl)) + goto mem_err2; + + rss->tbl_log_size = log_size; + + return 0; + +mem_err2: + tbl_size = (1 << log_size) * + sizeof(struct ena_admin_rss_ind_table_entry); + + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, + tbl_size, + rss->rss_ind_tbl, + rss->rss_ind_tbl_dma_addr, + rss->rss_ind_tbl_mem_handle); + rss->rss_ind_tbl = NULL; +mem_err1: + rss->tbl_log_size = 0; + return ENA_COM_NO_MEM; +} + +static int ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + size_t tbl_size = (1 << rss->tbl_log_size) * + sizeof(struct ena_admin_rss_ind_table_entry); + + if (rss->rss_ind_tbl) + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, + tbl_size, + rss->rss_ind_tbl, + rss->rss_ind_tbl_dma_addr, + rss->rss_ind_tbl_mem_handle); + rss->rss_ind_tbl = NULL; + + if (rss->host_rss_ind_tbl) + ENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl); + rss->host_rss_ind_tbl = NULL; + + return 0; +} + +static int ena_com_create_io_sq(struct ena_com_dev *ena_dev, + struct ena_com_io_sq *io_sq, u16 cq_idx) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_admin_aq_create_sq_cmd create_cmd; + struct ena_admin_acq_create_sq_resp_desc cmd_completion; + u8 direction; + int ret; + + memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_sq_cmd)); + + create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ; + + if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) + direction = ENA_ADMIN_SQ_DIRECTION_TX; + else + direction = ENA_ADMIN_SQ_DIRECTION_RX; + + create_cmd.sq_identity |= (direction << + ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) & + ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK; + + create_cmd.sq_caps_2 |= io_sq->mem_queue_type & + ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK; + + create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC << + ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) & + ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK; + + create_cmd.sq_caps_3 |= + ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK; + + create_cmd.cq_idx = cq_idx; + create_cmd.sq_depth = io_sq->q_depth; + + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { + ret = ena_com_mem_addr_set(ena_dev, + &create_cmd.sq_ba, + io_sq->desc_addr.phys_addr); + if (unlikely(ret)) { + ena_trc_err("memory address set failed\n"); + return ret; + } + } + + ret = ena_com_execute_admin_command( + admin_queue, + (struct ena_admin_aq_entry *)&create_cmd, + sizeof(create_cmd), + (struct ena_admin_acq_entry *)&cmd_completion, + sizeof(cmd_completion)); + if (unlikely(ret)) { + ena_trc_err("Failed to create IO SQ. 
error: %d\n", ret); + return ret; + } + + io_sq->idx = cmd_completion.sq_idx; + + io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + + (uintptr_t)cmd_completion.sq_doorbell_offset); + + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { + io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar + + cmd_completion.llq_headers_offset); + + io_sq->desc_addr.pbuf_dev_addr = + (u8 __iomem *)((uintptr_t)ena_dev->mem_bar + + cmd_completion.llq_descriptors_offset); + } + + ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth); + + return ret; +} + +static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_com_io_sq *io_sq; + u16 qid; + int i; + + for (i = 0; i < 1 << rss->tbl_log_size; i++) { + qid = rss->host_rss_ind_tbl[i]; + if (qid >= ENA_TOTAL_NUM_QUEUES) + return ENA_COM_INVAL; + + io_sq = &ena_dev->io_sq_queues[qid]; + + if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX) + return ENA_COM_INVAL; + + rss->rss_ind_tbl[i].cq_idx = io_sq->idx; + } + + return 0; +} + +static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev) +{ + u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { -1 }; + struct ena_rss *rss = &ena_dev->rss; + u16 idx, i; + + for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++) + dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i; + + for (i = 0; i < 1 << rss->tbl_log_size; i++) { + idx = rss->rss_ind_tbl[i].cq_idx; + if (idx > ENA_TOTAL_NUM_QUEUES) + return ENA_COM_INVAL; + + if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES) + return ENA_COM_INVAL; + + rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx]; + } + + return 0; +} + +static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev) +{ + size_t size; + + size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS; + + ena_dev->intr_moder_tbl = ENA_MEM_ALLOC(ena_dev->dmadev, size); + if (!ena_dev->intr_moder_tbl) + return ENA_COM_NO_MEM; + + ena_com_config_default_interrupt_moderation_table(ena_dev); + + return 0; +} + +static void +ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev, + unsigned int intr_delay_resolution) +{ + struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; + unsigned int i; + + if (!intr_delay_resolution) { + ena_trc_err("Illegal intr_delay_resolution provided. 
Going to use default 1 usec resolution\n"); + intr_delay_resolution = 1; + } + ena_dev->intr_delay_resolution = intr_delay_resolution; + + /* update Rx */ + for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++) + intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution; + + /* update Tx */ + ena_dev->intr_moder_tx_interval /= intr_delay_resolution; +} + +/*****************************************************************************/ +/******************************* API ******************************/ +/*****************************************************************************/ + +int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, + struct ena_admin_aq_entry *cmd, + size_t cmd_size, + struct ena_admin_acq_entry *comp, + size_t comp_size) +{ + struct ena_comp_ctx *comp_ctx; + int ret = 0; + + comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size, + comp, comp_size); + if (unlikely(IS_ERR(comp_ctx))) { + ena_trc_err("Failed to submit command [%ld]\n", + PTR_ERR(comp_ctx)); + return PTR_ERR(comp_ctx); + } + + ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue); + if (unlikely(ret)) { + if (admin_queue->running_state) + ena_trc_err("Failed to process command. ret = %d\n", + ret); + else + ena_trc_dbg("Failed to process command. ret = %d\n", + ret); + } + return ret; +} + +int ena_com_create_io_cq(struct ena_com_dev *ena_dev, + struct ena_com_io_cq *io_cq) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_admin_aq_create_cq_cmd create_cmd; + struct ena_admin_acq_create_cq_resp_desc cmd_completion; + int ret; + + memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_cq_cmd)); + + create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ; + + create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) & + ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK; + create_cmd.cq_caps_1 |= + ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK; + + create_cmd.msix_vector = io_cq->msix_vector; + create_cmd.cq_depth = io_cq->q_depth; + + ret = ena_com_mem_addr_set(ena_dev, + &create_cmd.cq_ba, + io_cq->cdesc_addr.phys_addr); + if (unlikely(ret)) { + ena_trc_err("memory address set failed\n"); + return ret; + } + + ret = ena_com_execute_admin_command( + admin_queue, + (struct ena_admin_aq_entry *)&create_cmd, + sizeof(create_cmd), + (struct ena_admin_acq_entry *)&cmd_completion, + sizeof(cmd_completion)); + if (unlikely(ret)) { + ena_trc_err("Failed to create IO CQ. 
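/*
 * Illustrative sketch, not part of the original patch: nearly every admin
 * operation in this file follows the same shape - zero a set/get command,
 * fill the common descriptor (opcode + feature id) and the feature payload,
 * then hand command and response to ena_com_execute_admin_command(), which
 * submits to the admin SQ and waits (polling or wait-event) for completion.
 * Compare ena_com_set_dev_mtu() below; compacted it looks like:
 */
static int example_set_feature(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;

	memset(&cmd, 0x0, sizeof(cmd));
	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;	/* any supported feature */
	cmd.u.mtu.mtu = 1500;

	return ena_com_execute_admin_command(admin_queue,
					     (struct ena_admin_aq_entry *)&cmd,
					     sizeof(cmd),
					     (struct ena_admin_acq_entry *)&resp,
					     sizeof(resp));
}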
error: %d\n", ret); + return ret; + } + + io_cq->idx = cmd_completion.cq_idx; + io_cq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + + cmd_completion.cq_doorbell_offset); + + if (io_cq->q_depth != cmd_completion.cq_actual_depth) { + ena_trc_err("completion actual queue size (%d) is differ from requested size (%d)\n", + cmd_completion.cq_actual_depth, io_cq->q_depth); + ena_com_destroy_io_cq(ena_dev, io_cq); + return ENA_COM_NO_SPACE; + } + + io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + + cmd_completion.cq_interrupt_unmask_register); + + if (cmd_completion.cq_head_db_offset) + io_cq->cq_head_db_reg = + (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + + cmd_completion.cq_head_db_offset); + + ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth); + + return ret; +} + +int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid, + struct ena_com_io_sq **io_sq, + struct ena_com_io_cq **io_cq) +{ + if (qid >= ENA_TOTAL_NUM_QUEUES) { + ena_trc_err("Invalid queue number %d but the max is %d\n", + qid, ENA_TOTAL_NUM_QUEUES); + return ENA_COM_INVAL; + } + + *io_sq = &ena_dev->io_sq_queues[qid]; + *io_cq = &ena_dev->io_cq_queues[qid]; + + return 0; +} + +void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_comp_ctx *comp_ctx; + u16 i; + + if (!admin_queue->comp_ctx) + return; + + for (i = 0; i < admin_queue->q_depth; i++) { + comp_ctx = get_comp_ctxt(admin_queue, i, false); + comp_ctx->status = ENA_CMD_ABORTED; + + ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event); + } +} + +void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + unsigned long flags = 0; + + ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); + while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) { + ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); + ENA_MSLEEP(20); + ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); + } + ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); +} + +int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, + struct ena_com_io_cq *io_cq) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_admin_aq_destroy_cq_cmd destroy_cmd; + struct ena_admin_acq_destroy_cq_resp_desc destroy_resp; + int ret; + + memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd)); + + destroy_cmd.cq_idx = io_cq->idx; + destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ; + + ret = ena_com_execute_admin_command( + admin_queue, + (struct ena_admin_aq_entry *)&destroy_cmd, + sizeof(destroy_cmd), + (struct ena_admin_acq_entry *)&destroy_resp, + sizeof(destroy_resp)); + + if (unlikely(ret && (ret != ENA_COM_NO_DEVICE))) + ena_trc_err("Failed to destroy IO CQ. 
error: %d\n", ret); + + return ret; +} + +bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev) +{ + return ena_dev->admin_queue.running_state; +} + +void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + unsigned long flags = 0; + + ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); + ena_dev->admin_queue.running_state = state; + ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); +} + +void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev) +{ + u16 depth = ena_dev->aenq.q_depth; + + ENA_ASSERT(ena_dev->aenq.head == depth, "Invliad AENQ state\n"); + + /* Init head_db to mark that all entries in the queue + * are initially available + */ + ENA_REG_WRITE32(depth, (unsigned char *)ena_dev->reg_bar + + ENA_REGS_AENQ_HEAD_DB_OFF); +} + +int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) +{ + struct ena_com_admin_queue *admin_queue; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + struct ena_admin_get_feat_resp get_resp; + int ret = 0; + + if (unlikely(!ena_dev)) { + ena_trc_err("%s : ena_dev is NULL\n", __func__); + return ENA_COM_NO_DEVICE; + } + + ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG); + if (ret) { + ena_trc_info("Can't get aenq configuration\n"); + return ret; + } + + if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) { + ena_trc_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n", + get_resp.u.aenq.supported_groups, + groups_flag); + return ENA_COM_PERMISSION; + } + + memset(&cmd, 0x0, sizeof(cmd)); + admin_queue = &ena_dev->admin_queue; + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.aq_common_descriptor.flags = 0; + cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG; + cmd.u.aenq.enabled_groups = groups_flag; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + + if (unlikely(ret)) + ena_trc_err("Failed to config AENQ ret: %d\n", ret); + + return ret; +} + +int ena_com_get_dma_width(struct ena_com_dev *ena_dev) +{ + u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); + int width; + + if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) { + ena_trc_err("Reg read timeout occurred\n"); + return ENA_COM_TIMER_EXPIRED; + } + + width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >> + ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT; + + ena_trc_dbg("ENA dma width: %d\n", width); + + if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) { + ena_trc_err("DMA width illegal value: %d\n", width); + return ENA_COM_INVAL; + } + + ena_dev->dma_addr_bits = width; + + return width; +} + +int ena_com_validate_version(struct ena_com_dev *ena_dev) +{ + u32 ver; + u32 ctrl_ver; + u32 ctrl_ver_masked; + + /* Make sure the ENA version and the controller version are at least + * as the driver expects + */ + ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF); + ctrl_ver = ena_com_reg_bar_read32(ena_dev, + ENA_REGS_CONTROLLER_VERSION_OFF); + + if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || + (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) { + ena_trc_err("Reg read timeout occurred\n"); + return ENA_COM_TIMER_EXPIRED; + } + + ena_trc_info("ena device version: %d.%d\n", + (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> + ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, + ver & ENA_REGS_VERSION_MINOR_VERSION_MASK); + + if (ver < MIN_ENA_VER) { + ena_trc_err("ENA version is lower 
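/*
 * Illustrative sketch, not part of the original patch: ena_com_set_aenq_config()
 * above rejects group bits the device does not advertise, so callers normally
 * intersect the groups they want with the supported_groups mask returned by
 * the AENQ_CONFIG feature (also cached by ena_com_get_dev_attr_feat()):
 */
static int example_enable_aenq(struct ena_com_dev *ena_dev,
			       struct ena_com_dev_get_features_ctx *feat)
{
	/* Enable every AENQ group the device reports as supported. */
	u32 wanted_groups = feat->aenq.supported_groups;

	return ena_com_set_aenq_config(ena_dev, wanted_groups);
}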
than the minimal version the driver supports\n"); + return -1; + } + + ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n", + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) + >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) + >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT, + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK), + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >> + ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT); + + ctrl_ver_masked = + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) | + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) | + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK); + + /* Validate the ctrl version without the implementation ID */ + if (ctrl_ver_masked < MIN_ENA_CTRL_VER) { + ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n"); + return -1; + } + + return 0; +} + +void ena_com_admin_destroy(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + + if (!admin_queue) + return; + + if (admin_queue->comp_ctx) + ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx); + admin_queue->comp_ctx = NULL; + + if (admin_queue->sq.entries) + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, + ADMIN_SQ_SIZE(admin_queue->q_depth), + admin_queue->sq.entries, + admin_queue->sq.dma_addr, + admin_queue->sq.mem_handle); + admin_queue->sq.entries = NULL; + + if (admin_queue->cq.entries) + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, + ADMIN_CQ_SIZE(admin_queue->q_depth), + admin_queue->cq.entries, + admin_queue->cq.dma_addr, + admin_queue->cq.mem_handle); + admin_queue->cq.entries = NULL; + + if (ena_dev->aenq.entries) + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, + ADMIN_AENQ_SIZE(ena_dev->aenq.q_depth), + ena_dev->aenq.entries, + ena_dev->aenq.dma_addr, + ena_dev->aenq.mem_handle); + ena_dev->aenq.entries = NULL; +} + +void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling) +{ + ena_dev->admin_queue.polling = polling; +} + +int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) +{ + struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; + + ENA_SPINLOCK_INIT(mmio_read->lock); + ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, + sizeof(*mmio_read->read_resp), + mmio_read->read_resp, + mmio_read->read_resp_dma_addr, + mmio_read->read_resp_mem_handle); + if (unlikely(!mmio_read->read_resp)) + return ENA_COM_NO_MEM; + + ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); + + mmio_read->read_resp->req_id = 0x0; + mmio_read->seq_num = 0x0; + mmio_read->readless_supported = true; + + return 0; +} + +void +ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported) +{ + struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; + + mmio_read->readless_supported = readless_supported; +} + +void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev) +{ + struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; + + ENA_REG_WRITE32(0x0, (unsigned char *)ena_dev->reg_bar + + ENA_REGS_MMIO_RESP_LO_OFF); + ENA_REG_WRITE32(0x0, (unsigned char *)ena_dev->reg_bar + + ENA_REGS_MMIO_RESP_HI_OFF); + + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, + sizeof(*mmio_read->read_resp), + mmio_read->read_resp, + mmio_read->read_resp_dma_addr, + mmio_read->read_resp_mem_handle); + + mmio_read->read_resp = NULL; +} + +void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev) +{ + struct ena_com_mmio_read 
*mmio_read = &ena_dev->mmio_read; + u32 addr_low, addr_high; + + addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr); + addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr); + + ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar + + ENA_REGS_MMIO_RESP_LO_OFF); + ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar + + ENA_REGS_MMIO_RESP_HI_OFF); +} + +int ena_com_admin_init(struct ena_com_dev *ena_dev, + struct ena_aenq_handlers *aenq_handlers, + bool init_spinlock) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high; + int ret; + + dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); + + if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) { + ena_trc_err("Reg read timeout occurred\n"); + return ENA_COM_TIMER_EXPIRED; + } + + if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) { + ena_trc_err("Device isn't ready, abort com init\n"); + return -1; + } + + admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH; + + admin_queue->q_dmadev = ena_dev->dmadev; + admin_queue->polling = false; + admin_queue->curr_cmd_id = 0; + + ATOMIC32_SET(&admin_queue->outstanding_cmds, 0); + + if (init_spinlock) + ENA_SPINLOCK_INIT(admin_queue->q_lock); + + ret = ena_com_init_comp_ctxt(admin_queue); + if (ret) + goto error; + + ret = ena_com_admin_init_sq(admin_queue); + if (ret) + goto error; + + ret = ena_com_admin_init_cq(admin_queue); + if (ret) + goto error; + + admin_queue->sq.db_addr = (u32 __iomem *) + ((unsigned char *)ena_dev->reg_bar + ENA_REGS_AQ_DB_OFF); + + addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr); + addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr); + + ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar + + ENA_REGS_AQ_BASE_LO_OFF); + ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar + + ENA_REGS_AQ_BASE_HI_OFF); + + addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr); + addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr); + + ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar + + ENA_REGS_ACQ_BASE_LO_OFF); + ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar + + ENA_REGS_ACQ_BASE_HI_OFF); + + aq_caps = 0; + aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK; + aq_caps |= (sizeof(struct ena_admin_aq_entry) << + ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) & + ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK; + + acq_caps = 0; + acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK; + acq_caps |= (sizeof(struct ena_admin_acq_entry) << + ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) & + ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK; + + ENA_REG_WRITE32(aq_caps, (unsigned char *)ena_dev->reg_bar + + ENA_REGS_AQ_CAPS_OFF); + ENA_REG_WRITE32(acq_caps, (unsigned char *)ena_dev->reg_bar + + ENA_REGS_ACQ_CAPS_OFF); + ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers); + if (ret) + goto error; + + admin_queue->running_state = true; + + return 0; +error: + ena_com_admin_destroy(ena_dev); + + return ret; +} + +int ena_com_create_io_queue(struct ena_com_dev *ena_dev, + u16 qid, + enum queue_direction direction, + enum ena_admin_placement_policy_type mem_queue_type, + u32 msix_vector, + u16 queue_size) +{ + struct ena_com_io_sq *io_sq; + struct ena_com_io_cq *io_cq; + int ret = 0; + + if (qid >= ENA_TOTAL_NUM_QUEUES) { + ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n", + qid, ENA_TOTAL_NUM_QUEUES); + return ENA_COM_INVAL; + } + + io_sq = &ena_dev->io_sq_queues[qid]; + io_cq = 
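/*
 * Illustrative sketch, not part of the original patch: a minimal bring-up
 * order for this layer, as implied by the readiness checks above and the
 * ena_com.h documentation - the MMIO register read helper must exist before
 * any register read, and the admin queue must be up before features are
 * queried:
 */
static int example_device_init(struct ena_com_dev *ena_dev,
			       struct ena_aenq_handlers *handlers,
			       struct ena_com_dev_get_features_ctx *feat)
{
	int rc;

	rc = ena_com_mmio_reg_read_request_init(ena_dev);
	if (rc)
		return rc;

	rc = ena_com_validate_version(ena_dev);
	if (rc)
		goto err_mmio;

	rc = ena_com_get_dma_width(ena_dev);
	if (rc < 0)
		goto err_mmio;

	rc = ena_com_admin_init(ena_dev, handlers, true /* init spinlock */);
	if (rc)
		goto err_mmio;

	rc = ena_com_get_dev_attr_feat(ena_dev, feat);
	if (rc)
		goto err_admin;

	return 0;

err_admin:
	ena_com_admin_destroy(ena_dev);
err_mmio:
	ena_com_mmio_reg_read_request_destroy(ena_dev);
	return rc;
}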
&ena_dev->io_cq_queues[qid]; + + memset(io_sq, 0x0, sizeof(struct ena_com_io_sq)); + memset(io_cq, 0x0, sizeof(struct ena_com_io_cq)); + + /* Init CQ */ + io_cq->q_depth = queue_size; + io_cq->direction = direction; + io_cq->qid = qid; + + io_cq->msix_vector = msix_vector; + + io_sq->q_depth = queue_size; + io_sq->direction = direction; + io_sq->qid = qid; + + io_sq->mem_queue_type = mem_queue_type; + + if (direction == ENA_COM_IO_QUEUE_DIRECTION_TX) + /* header length is limited to 8 bits */ + io_sq->tx_max_header_size = + ENA_MIN16(ena_dev->tx_max_header_size, SZ_256); + + ret = ena_com_init_io_sq(ena_dev, io_sq); + if (ret) + goto error; + ret = ena_com_init_io_cq(ena_dev, io_cq); + if (ret) + goto error; + + ret = ena_com_create_io_cq(ena_dev, io_cq); + if (ret) + goto error; + + ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx); + if (ret) + goto destroy_io_cq; + + return 0; + +destroy_io_cq: + ena_com_destroy_io_cq(ena_dev, io_cq); +error: + ena_com_io_queue_free(ena_dev, io_sq, io_cq); + return ret; +} + +void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid) +{ + struct ena_com_io_sq *io_sq; + struct ena_com_io_cq *io_cq; + + if (qid >= ENA_TOTAL_NUM_QUEUES) { + ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n", + qid, ENA_TOTAL_NUM_QUEUES); + return; + } + + io_sq = &ena_dev->io_sq_queues[qid]; + io_cq = &ena_dev->io_cq_queues[qid]; + + ena_com_destroy_io_sq(ena_dev, io_sq); + ena_com_destroy_io_cq(ena_dev, io_cq); + + ena_com_io_queue_free(ena_dev, io_sq, io_cq); +} + +int ena_com_get_link_params(struct ena_com_dev *ena_dev, + struct ena_admin_get_feat_resp *resp) +{ + return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG); +} + +int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, + struct ena_com_dev_get_features_ctx *get_feat_ctx) +{ + struct ena_admin_get_feat_resp get_resp; + int rc; + + rc = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_DEVICE_ATTRIBUTES); + if (rc) + return rc; + + memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr, + sizeof(get_resp.u.dev_attr)); + ena_dev->supported_features = get_resp.u.dev_attr.supported_features; + + rc = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_MAX_QUEUES_NUM); + if (rc) + return rc; + + memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue, + sizeof(get_resp.u.max_queue)); + ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size; + + rc = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_AENQ_CONFIG); + if (rc) + return rc; + + memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq, + sizeof(get_resp.u.aenq)); + + rc = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_STATELESS_OFFLOAD_CONFIG); + if (rc) + return rc; + + memcpy(&get_feat_ctx->offload, &get_resp.u.offload, + sizeof(get_resp.u.offload)); + + return 0; +} + +void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev) +{ + ena_com_handle_admin_completion(&ena_dev->admin_queue); +} + +/* ena_handle_specific_aenq_event: + * return the handler that is relevant to the specific event group + */ +static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev, + u16 group) +{ + struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers; + + if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group]) + return aenq_handlers->handlers[group]; + + return aenq_handlers->unimplemented_handler; +} + +/* ena_aenq_intr_handler: + * handles the aenq incoming events. 
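/*
 * Illustrative sketch, not part of the original patch: typical use of the IO
 * queue API above - create the SQ/CQ pair for a caller qid, fetch the
 * handlers the datapath will post to and poll on, and tear the pair down with
 * ena_com_destroy_io_queue() on error or shutdown:
 */
static int example_setup_rx_queue(struct ena_com_dev *ena_dev, u16 qid,
				  u32 msix_vector, u16 queue_size)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int rc;

	rc = ena_com_create_io_queue(ena_dev, qid,
				     ENA_COM_IO_QUEUE_DIRECTION_RX,
				     ENA_ADMIN_PLACEMENT_POLICY_HOST,
				     msix_vector, queue_size);
	if (rc)
		return rc;

	rc = ena_com_get_io_handlers(ena_dev, qid, &io_sq, &io_cq);
	if (rc)
		ena_com_destroy_io_queue(ena_dev, qid);

	return rc;
}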
+ * pop events from the queue and apply the specific handler + */ +void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) +{ + struct ena_admin_aenq_entry *aenq_e; + struct ena_admin_aenq_common_desc *aenq_common; + struct ena_com_aenq *aenq = &dev->aenq; + ena_aenq_handler handler_cb; + u16 masked_head, processed = 0; + u8 phase; + + masked_head = aenq->head & (aenq->q_depth - 1); + phase = aenq->phase; + aenq_e = &aenq->entries[masked_head]; /* Get first entry */ + aenq_common = &aenq_e->aenq_common_desc; + + /* Go over all the events */ + while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == + phase) { + ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n", + aenq_common->group, + aenq_common->syndrom, + (unsigned long long)aenq_common->timestamp_low + + ((u64)aenq_common->timestamp_high << 32)); + + /* Handle specific event*/ + handler_cb = ena_com_get_specific_aenq_cb(dev, + aenq_common->group); + handler_cb(data, aenq_e); /* call the actual event handler*/ + + /* Get next event entry */ + masked_head++; + processed++; + + if (unlikely(masked_head == aenq->q_depth)) { + masked_head = 0; + phase = !phase; + } + aenq_e = &aenq->entries[masked_head]; + aenq_common = &aenq_e->aenq_common_desc; + } + + aenq->head += processed; + aenq->phase = phase; + + /* Don't update aenq doorbell if there weren't any processed events */ + if (!processed) + return; + + /* write the aenq doorbell after all AENQ descriptors were read */ + mb(); + ENA_REG_WRITE32((u32)aenq->head, (unsigned char *)dev->reg_bar + + ENA_REGS_AENQ_HEAD_DB_OFF); +} + +/* Sets the function Idx and Queue Idx to be used for + * get full statistics feature + */ +int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev, + u32 func_queue) +{ + /* Function & Queue is acquired from user in the following format : + * Bottom Half word: funct + * Top Half Word: queue + */ + ena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue); + ena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue); + + return 0; +} + +int ena_com_dev_reset(struct ena_com_dev *ena_dev) +{ + u32 stat, timeout, cap, reset_val; + int rc; + + stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); + cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); + + if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || + (cap == ENA_MMIO_READ_TIMEOUT))) { + ena_trc_err("Reg read32 timeout occurred\n"); + return ENA_COM_TIMER_EXPIRED; + } + + if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) { + ena_trc_err("Device isn't ready, can't reset device\n"); + return ENA_COM_INVAL; + } + + timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >> + ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT; + if (timeout == 0) { + ena_trc_err("Invalid timeout value\n"); + return ENA_COM_INVAL; + } + + /* start reset */ + reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK; + ENA_REG_WRITE32(reset_val, (unsigned char *)ena_dev->reg_bar + + ENA_REGS_DEV_CTL_OFF); + + /* Write again the MMIO read request address */ + ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); + + rc = wait_for_reset_state(ena_dev, timeout, + ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK); + if (rc != 0) { + ena_trc_err("Reset indication didn't turn on\n"); + return rc; + } + + /* reset done */ + ENA_REG_WRITE32(0, (unsigned char *)ena_dev->reg_bar + + ENA_REGS_DEV_CTL_OFF); + rc = wait_for_reset_state(ena_dev, timeout, 0); + if (rc != 0) { + ena_trc_err("Reset indication didn't turn off\n"); + return rc; + } + + return 0; +} + +static int ena_get_dev_stats(struct ena_com_dev *ena_dev, + 
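/*
 * Illustrative sketch, not part of the original patch: the dispatch loop in
 * ena_com_aenq_intr_handler() indexes aenq_handlers->handlers[] by the event
 * group and falls back to unimplemented_handler. A driver fills the table,
 * passes it to ena_com_admin_init(), and calls ena_com_aenq_intr_handler()
 * from its AENQ interrupt routine. ENA_ADMIN_LINK_CHANGE is assumed here to
 * be the link-change group index from ena_admin_defs.h (not shown in this
 * diff):
 */
static void example_unimplemented_aenq(void *data,
				       struct ena_admin_aenq_entry *aenq_e)
{
	/* Events from groups the driver did not register end up here. */
}

static void example_link_change(void *data,
				struct ena_admin_aenq_entry *aenq_e)
{
	/* Update the port's link state from the event payload. */
}

static struct ena_aenq_handlers example_aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = example_link_change,
	},
	.unimplemented_handler = example_unimplemented_aenq,
};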
struct ena_admin_aq_get_stats_cmd *get_cmd, + struct ena_admin_acq_get_stats_resp *get_resp, + enum ena_admin_get_stats_type type) +{ + struct ena_com_admin_queue *admin_queue; + int ret = 0; + + if (!ena_dev) { + ena_trc_err("%s : ena_dev is NULL\n", __func__); + return ENA_COM_NO_DEVICE; + } + + admin_queue = &ena_dev->admin_queue; + + get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS; + get_cmd->aq_common_descriptor.flags = 0; + get_cmd->type = type; + + ret = ena_com_execute_admin_command( + admin_queue, + (struct ena_admin_aq_entry *)get_cmd, + sizeof(*get_cmd), + (struct ena_admin_acq_entry *)get_resp, + sizeof(*get_resp)); + + if (unlikely(ret)) + ena_trc_err("Failed to get stats. error: %d\n", ret); + + return ret; +} + +int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev, + struct ena_admin_basic_stats *stats) +{ + int ret = 0; + struct ena_admin_aq_get_stats_cmd get_cmd; + struct ena_admin_acq_get_stats_resp get_resp; + + memset(&get_cmd, 0x0, sizeof(get_cmd)); + ret = ena_get_dev_stats(ena_dev, &get_cmd, &get_resp, + ENA_ADMIN_GET_STATS_TYPE_BASIC); + if (likely(ret == 0)) + memcpy(stats, &get_resp.basic_stats, + sizeof(get_resp.basic_stats)); + + return ret; +} + +int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff, + u32 len) +{ + int ret = 0; + struct ena_admin_aq_get_stats_cmd get_cmd; + struct ena_admin_acq_get_stats_resp get_resp; + ena_mem_handle_t mem_handle = 0; + void *virt_addr; + dma_addr_t phys_addr; + + ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len, + virt_addr, phys_addr, mem_handle); + if (!virt_addr) { + ret = ENA_COM_NO_MEM; + goto done; + } + memset(&get_cmd, 0x0, sizeof(get_cmd)); + ret = ena_com_mem_addr_set(ena_dev, + &get_cmd.u.control_buffer.address, + phys_addr); + if (unlikely(ret)) { + ena_trc_err("memory address set failed\n"); + return ret; + } + get_cmd.u.control_buffer.length = len; + + get_cmd.device_id = ena_dev->stats_func; + get_cmd.queue_idx = ena_dev->stats_queue; + + ret = ena_get_dev_stats(ena_dev, &get_cmd, &get_resp, + ENA_ADMIN_GET_STATS_TYPE_EXTENDED); + if (ret < 0) + goto free_ext_stats_mem; + + ret = snprintf(buff, len, "%s", (char *)virt_addr); + +free_ext_stats_mem: + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr, + mem_handle); +done: + return ret; +} + +int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu) +{ + struct ena_com_admin_queue *admin_queue; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + int ret = 0; + + if (unlikely(!ena_dev)) { + ena_trc_err("%s : ena_dev is NULL\n", __func__); + return ENA_COM_NO_DEVICE; + } + + if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) { + ena_trc_info("Feature %d isn't supported\n", ENA_ADMIN_MTU); + return ENA_COM_PERMISSION; + } + + memset(&cmd, 0x0, sizeof(cmd)); + admin_queue = &ena_dev->admin_queue; + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.aq_common_descriptor.flags = 0; + cmd.feat_common.feature_id = ENA_ADMIN_MTU; + cmd.u.mtu.mtu = mtu; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + + if (unlikely(ret)) { + ena_trc_err("Failed to set mtu %d. 
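/*
 * Illustrative sketch, not part of the original patch:
 * ena_com_get_dev_extended_stats() reports stats for the function/queue
 * previously selected with ena_com_extended_stats_set_func_queue(). Per the
 * comment on that helper, the u32 packs the function index in the low half
 * word and the queue index in the high half word; the exact
 * ENA_EXTENDED_STAT_GET_* macros are defined in headers not shown in this
 * diff, so the packing below is an assumption:
 */
static int example_dump_queue_stats(struct ena_com_dev *ena_dev,
				    u16 func, u16 queue, char *buf, u32 len)
{
	u32 func_queue = ((u32)queue << 16) | func;	/* assumed packing */
	int rc;

	rc = ena_com_extended_stats_set_func_queue(ena_dev, func_queue);
	if (rc)
		return rc;

	return ena_com_get_dev_extended_stats(ena_dev, buf, len);
}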
error: %d\n", mtu, ret); + return ENA_COM_INVAL; + } + return 0; +} + +int ena_com_get_offload_settings(struct ena_com_dev *ena_dev, + struct ena_admin_feature_offload_desc *offload) +{ + int ret; + struct ena_admin_get_feat_resp resp; + + ret = ena_com_get_feature(ena_dev, &resp, + ENA_ADMIN_STATELESS_OFFLOAD_CONFIG); + if (unlikely(ret)) { + ena_trc_err("Failed to get offload capabilities %d\n", ret); + return ENA_COM_INVAL; + } + + memcpy(offload, &resp.u.offload, sizeof(resp.u.offload)); + + return 0; +} + +int ena_com_set_hash_function(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + struct ena_admin_get_feat_resp get_resp; + int ret; + + if (!ena_com_check_supported_feature_id(ena_dev, + ENA_ADMIN_RSS_HASH_FUNCTION)) { + ena_trc_info("Feature %d isn't supported\n", + ENA_ADMIN_RSS_HASH_FUNCTION); + return ENA_COM_PERMISSION; + } + + /* Validate hash function is supported */ + ret = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_RSS_HASH_FUNCTION); + if (unlikely(ret)) + return ret; + + if (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) { + ena_trc_err("Func hash %d isn't supported by device, abort\n", + rss->hash_func); + return ENA_COM_PERMISSION; + } + + memset(&cmd, 0x0, sizeof(cmd)); + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.aq_common_descriptor.flags = + ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; + cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION; + cmd.u.flow_hash_func.init_val = rss->hash_init_val; + cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func; + + ret = ena_com_mem_addr_set(ena_dev, + &cmd.control_buffer.address, + rss->hash_key_dma_addr); + if (unlikely(ret)) { + ena_trc_err("memory address set failed\n"); + return ret; + } + + cmd.control_buffer.length = sizeof(*rss->hash_key); + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + if (unlikely(ret)) { + ena_trc_err("Failed to set hash function %d. 
error: %d\n", + rss->hash_func, ret); + return ENA_COM_INVAL; + } + + return 0; +} + +int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, + enum ena_admin_hash_functions func, + const u8 *key, u16 key_len, u32 init_val) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_get_feat_resp get_resp; + struct ena_admin_feature_rss_flow_hash_control *hash_key = + rss->hash_key; + int rc; + + /* Make sure size is a mult of DWs */ + if (unlikely(key_len & 0x3)) + return ENA_COM_INVAL; + + rc = ena_com_get_feature_ex(ena_dev, &get_resp, + ENA_ADMIN_RSS_HASH_FUNCTION, + rss->hash_key_dma_addr, + sizeof(*rss->hash_key)); + if (unlikely(rc)) + return rc; + + if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) { + ena_trc_err("Flow hash function %d isn't supported\n", func); + return ENA_COM_PERMISSION; + } + + switch (func) { + case ENA_ADMIN_TOEPLITZ: + if (key_len > sizeof(hash_key->key)) { + ena_trc_err("key len (%hu) is bigger than the max supported (%zu)\n", + key_len, sizeof(hash_key->key)); + return ENA_COM_INVAL; + } + + memcpy(hash_key->key, key, key_len); + rss->hash_init_val = init_val; + hash_key->keys_num = key_len >> 2; + break; + case ENA_ADMIN_CRC32: + rss->hash_init_val = init_val; + break; + default: + ena_trc_err("Invalid hash function (%d)\n", func); + return ENA_COM_INVAL; + } + + rc = ena_com_set_hash_function(ena_dev); + + /* Restore the old function */ + if (unlikely(rc)) + ena_com_get_hash_function(ena_dev, NULL, NULL); + + return rc; +} + +int ena_com_get_hash_function(struct ena_com_dev *ena_dev, + enum ena_admin_hash_functions *func, + u8 *key) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_get_feat_resp get_resp; + struct ena_admin_feature_rss_flow_hash_control *hash_key = + rss->hash_key; + int rc; + + rc = ena_com_get_feature_ex(ena_dev, &get_resp, + ENA_ADMIN_RSS_HASH_FUNCTION, + rss->hash_key_dma_addr, + sizeof(*rss->hash_key)); + if (unlikely(rc)) + return rc; + + rss->hash_func = (enum ena_admin_hash_functions)get_resp.u.flow_hash_func.selected_func; + if (func) + *func = rss->hash_func; + + if (key) + memcpy(key, hash_key->key, hash_key->keys_num << 2); + + return 0; +} + +int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev, + enum ena_admin_flow_hash_proto proto, + u16 *fields) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_get_feat_resp get_resp; + int rc; + + rc = ena_com_get_feature_ex(ena_dev, &get_resp, + ENA_ADMIN_RSS_HASH_INPUT, + rss->hash_ctrl_dma_addr, + sizeof(*rss->hash_ctrl)); + if (unlikely(rc)) + return rc; + + if (fields) + *fields = rss->hash_ctrl->selected_fields[proto].fields; + + return 0; +} + +int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + int ret; + + if (!ena_com_check_supported_feature_id(ena_dev, + ENA_ADMIN_RSS_HASH_INPUT)) { + ena_trc_info("Feature %d isn't supported\n", + ENA_ADMIN_RSS_HASH_INPUT); + return ENA_COM_PERMISSION; + } + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.aq_common_descriptor.flags = + ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; + cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT; + cmd.u.flow_hash_input.enabled_input_sort = + ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK | + ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK; + + ret = 
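/*
 * Illustrative sketch, not part of the original patch: programming a Toeplitz
 * key through ena_com_fill_hash_function() below. The key length must be a
 * multiple of 4 bytes (the DW check at the top of that function); the 40-byte
 * buffer here is only an illustrative size, and an all-zero key stands in for
 * a driver- or user-supplied one:
 */
static int example_set_toeplitz_key(struct ena_com_dev *ena_dev)
{
	static const u8 key[40] = { 0 };	/* replace with the real RSS key */

	return ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
					  key, sizeof(key),
					  0 /* init_val */);
}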
ena_com_mem_addr_set(ena_dev, + &cmd.control_buffer.address, + rss->hash_ctrl_dma_addr); + if (unlikely(ret)) { + ena_trc_err("memory address set failed\n"); + return ret; + } + cmd.control_buffer.length = sizeof(*hash_ctrl); + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + if (unlikely(ret)) { + ena_trc_err("Failed to set hash input. error: %d\n", ret); + ret = ENA_COM_INVAL; + } + + return 0; +} + +int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_feature_rss_hash_control *hash_ctrl = + rss->hash_ctrl; + u16 available_fields = 0; + int rc, i; + + /* Get the supported hash input */ + rc = ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL); + if (unlikely(rc)) + return rc; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | + ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | + ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | + ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | + ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields = + ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA; + + for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) { + available_fields = hash_ctrl->selected_fields[i].fields & + hash_ctrl->supported_fields[i].fields; + if (available_fields != hash_ctrl->selected_fields[i].fields) { + ena_trc_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n", + i, hash_ctrl->supported_fields[i].fields, + hash_ctrl->selected_fields[i].fields); + return ENA_COM_PERMISSION; + } + } + + rc = ena_com_set_hash_ctrl(ena_dev); + + /* In case of failure, restore the old hash ctrl */ + if (unlikely(rc)) + ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL); + + return rc; +} + +int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, + enum ena_admin_flow_hash_proto proto, + u16 hash_fields) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; + u16 supported_fields; + int rc; + + if (proto > ENA_ADMIN_RSS_PROTO_NUM) { + ena_trc_err("Invalid proto num (%u)\n", proto); + return ENA_COM_INVAL; + } + + /* Get the ctrl table */ + rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL); + if (unlikely(rc)) + return rc; + + /* Make sure all the fields are supported */ + supported_fields = hash_ctrl->supported_fields[proto].fields; + if ((hash_fields & supported_fields) != hash_fields) { + ena_trc_err("proto %d doesn't support the required fields %x. 
supports only: %x\n", + proto, hash_fields, supported_fields); + } + + hash_ctrl->selected_fields[proto].fields = hash_fields; + + rc = ena_com_set_hash_ctrl(ena_dev); + + /* In case of failure, restore the old hash ctrl */ + if (unlikely(rc)) + ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL); + + return 0; +} + +int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev, + u16 entry_idx, u16 entry_value) +{ + struct ena_rss *rss = &ena_dev->rss; + + if (unlikely(entry_idx >= (1 << rss->tbl_log_size))) + return ENA_COM_INVAL; + + if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES))) + return ENA_COM_INVAL; + + rss->host_rss_ind_tbl[entry_idx] = entry_value; + + return 0; +} + +int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + int ret = 0; + + if (!ena_com_check_supported_feature_id( + ena_dev, + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) { + ena_trc_info("Feature %d isn't supported\n", + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG); + return ENA_COM_PERMISSION; + } + + ret = ena_com_ind_tbl_convert_to_device(ena_dev); + if (ret) { + ena_trc_err("Failed to convert host indirection table to device table\n"); + return ret; + } + + memset(&cmd, 0x0, sizeof(cmd)); + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.aq_common_descriptor.flags = + ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; + cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG; + cmd.u.ind_table.size = rss->tbl_log_size; + cmd.u.ind_table.inline_index = 0xFFFFFFFF; + + ret = ena_com_mem_addr_set(ena_dev, + &cmd.control_buffer.address, + rss->rss_ind_tbl_dma_addr); + if (unlikely(ret)) { + ena_trc_err("memory address set failed\n"); + return ret; + } + + cmd.control_buffer.length = (1 << rss->tbl_log_size) * + sizeof(struct ena_admin_rss_ind_table_entry); + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + + if (unlikely(ret)) { + ena_trc_err("Failed to set indirect table. 
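/*
 * Illustrative sketch, not part of the original patch: the host-side
 * indirection table holds caller queue ids (validated against
 * ENA_TOTAL_NUM_QUEUES in ena_com_indirect_table_fill_entry()); they are
 * translated to device CQ indexes by ena_com_ind_tbl_convert_to_device()
 * inside ena_com_indirect_table_set(). Assuming the driver created its Rx
 * queues at qids 0..nb_rx_queues-1, a round-robin spread looks like:
 */
static int example_fill_rss_redirection(struct ena_com_dev *ena_dev,
					u16 log_size, u16 nb_rx_queues)
{
	u16 i;
	int rc;

	for (i = 0; i < (1 << log_size); i++) {
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       i % nb_rx_queues);
		if (rc)
			return rc;
	}

	return ena_com_indirect_table_set(ena_dev);
}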
error: %d\n", ret); + return ENA_COM_INVAL; + } + + return 0; +} + +int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_get_feat_resp get_resp; + u32 tbl_size; + int i, rc; + + tbl_size = (1 << rss->tbl_log_size) * + sizeof(struct ena_admin_rss_ind_table_entry); + + rc = ena_com_get_feature_ex(ena_dev, &get_resp, + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, + rss->rss_ind_tbl_dma_addr, + tbl_size); + if (unlikely(rc)) + return rc; + + if (!ind_tbl) + return 0; + + rc = ena_com_ind_tbl_convert_from_device(ena_dev); + if (unlikely(rc)) + return rc; + + for (i = 0; i < (1 << rss->tbl_log_size); i++) + ind_tbl[i] = rss->host_rss_ind_tbl[i]; + + return 0; +} + +int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size) +{ + int rc; + + memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); + + rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size); + if (unlikely(rc)) + goto err_indr_tbl; + + rc = ena_com_hash_key_allocate(ena_dev); + if (unlikely(rc)) + goto err_hash_key; + + rc = ena_com_hash_ctrl_init(ena_dev); + if (unlikely(rc)) + goto err_hash_ctrl; + + return 0; + +err_hash_ctrl: + ena_com_hash_key_destroy(ena_dev); +err_hash_key: + ena_com_indirect_table_destroy(ena_dev); +err_indr_tbl: + + return rc; +} + +int ena_com_rss_destroy(struct ena_com_dev *ena_dev) +{ + ena_com_indirect_table_destroy(ena_dev); + ena_com_hash_key_destroy(ena_dev); + ena_com_hash_ctrl_destroy(ena_dev); + + memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); + + return 0; +} + +int ena_com_allocate_host_attribute(struct ena_com_dev *ena_dev, + u32 debug_area_size) +{ + struct ena_host_attribute *host_attr = &ena_dev->host_attr; + int rc; + + ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, + SZ_4K, + host_attr->host_info, + host_attr->host_info_dma_addr, + host_attr->host_info_dma_handle); + if (unlikely(!host_attr->host_info)) + return ENA_COM_NO_MEM; + + if (debug_area_size) { + ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, + debug_area_size, + host_attr->debug_area_virt_addr, + host_attr->debug_area_dma_addr, + host_attr->debug_area_dma_handle); + if (unlikely(!host_attr->debug_area_virt_addr)) { + rc = ENA_COM_NO_MEM; + goto err; + } + } + + host_attr->debug_area_size = debug_area_size; + + return 0; +err: + + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, + SZ_4K, + host_attr->host_info, + host_attr->host_info_dma_addr, + host_attr->host_info_dma_handle); + host_attr->host_info = NULL; + return rc; +} + +void ena_com_delete_host_attribute(struct ena_com_dev *ena_dev) +{ + struct ena_host_attribute *host_attr = &ena_dev->host_attr; + + if (host_attr->host_info) { + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, + SZ_4K, + host_attr->host_info, + host_attr->host_info_dma_addr, + host_attr->host_info_dma_handle); + host_attr->host_info = NULL; + } + + if (host_attr->debug_area_virt_addr) { + ENA_MEM_FREE_COHERENT(ena_dev->dmadev, + host_attr->debug_area_size, + host_attr->debug_area_virt_addr, + host_attr->debug_area_dma_addr, + host_attr->debug_area_dma_handle); + host_attr->debug_area_virt_addr = NULL; + } +} + +int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) +{ + struct ena_host_attribute *host_attr = &ena_dev->host_attr; + struct ena_com_admin_queue *admin_queue; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + + int ret = 0; + + if (unlikely(!ena_dev)) { + ena_trc_err("%s : ena_dev is NULL\n", __func__); + return ENA_COM_NO_DEVICE; + } + + if (!ena_com_check_supported_feature_id(ena_dev, + 
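/*
 * Illustrative sketch, not part of the original patch: host info / debug area
 * flow - allocate the DMA buffers, fill ena_dev->host_attr.host_info with
 * driver and OS details (field layout lives in ena_admin_defs.h, not shown
 * here), then push both addresses to the device:
 */
static int example_config_host_info(struct ena_com_dev *ena_dev,
				    u32 debug_area_size)
{
	int rc;

	rc = ena_com_allocate_host_attribute(ena_dev, debug_area_size);
	if (rc)
		return rc;

	/* ... populate ena_dev->host_attr.host_info here ... */

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc)
		ena_com_delete_host_attribute(ena_dev);

	return rc;
}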
ENA_ADMIN_HOST_ATTR_CONFIG)) { + ena_trc_warn("Set host attribute isn't supported\n"); + return ENA_COM_PERMISSION; + } + + memset(&cmd, 0x0, sizeof(cmd)); + admin_queue = &ena_dev->admin_queue; + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG; + + ret = ena_com_mem_addr_set(ena_dev, + &cmd.u.host_attr.debug_ba, + host_attr->debug_area_dma_addr); + if (unlikely(ret)) { + ena_trc_err("memory address set failed\n"); + return ret; + } + + ret = ena_com_mem_addr_set(ena_dev, + &cmd.u.host_attr.os_info_ba, + host_attr->host_info_dma_addr); + if (unlikely(ret)) { + ena_trc_err("memory address set failed\n"); + return ret; + } + + cmd.u.host_attr.debug_area_size = host_attr->debug_area_size; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + + if (unlikely(ret)) + ena_trc_err("Failed to set host attributes: %d\n", ret); + + return ret; +} + +/* Interrupt moderation */ +bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev) +{ + return ena_com_check_supported_feature_id( + ena_dev, + ENA_ADMIN_INTERRUPT_MODERATION); +} + +int +ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, + u32 tx_coalesce_usecs) +{ + if (!ena_dev->intr_delay_resolution) { + ena_trc_err("Illegal interrupt delay granularity value\n"); + return ENA_COM_FAULT; + } + + ena_dev->intr_moder_tx_interval = tx_coalesce_usecs / + ena_dev->intr_delay_resolution; + + return 0; +} + +int +ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, + u32 rx_coalesce_usecs) +{ + if (!ena_dev->intr_delay_resolution) { + ena_trc_err("Illegal interrupt delay granularity value\n"); + return ENA_COM_FAULT; + } + + /* We use LOWEST entry of moderation table for storing + * nonadaptive interrupt coalescing values + */ + ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval = + rx_coalesce_usecs / ena_dev->intr_delay_resolution; + + return 0; +} + +void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev) +{ + if (ena_dev->intr_moder_tbl) + ENA_MEM_FREE(ena_dev->dmadev, ena_dev->intr_moder_tbl); + ena_dev->intr_moder_tbl = NULL; +} + +int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev) +{ + struct ena_admin_get_feat_resp get_resp; + u32 delay_resolution; + int rc; + + rc = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_INTERRUPT_MODERATION); + + if (rc) { + if (rc == ENA_COM_PERMISSION) { + ena_trc_info("Feature %d isn't supported\n", + ENA_ADMIN_INTERRUPT_MODERATION); + rc = 0; + } else { + ena_trc_err("Failed to get interrupt moderation admin cmd. 
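/*
 * Illustrative sketch, not part of the original patch: non-adaptive (static)
 * coalescing setup. Intervals are supplied in microseconds and scaled
 * internally by intr_delay_resolution, which
 * ena_com_init_interrupt_moderation() obtains from the device; the Rx value
 * is stored in the LOWEST entry of the moderation table:
 */
static int example_static_coalescing(struct ena_com_dev *ena_dev,
				     u32 tx_usecs, u32 rx_usecs)
{
	int rc;

	rc = ena_com_init_interrupt_moderation(ena_dev);
	if (rc)
		return rc;

	rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev,
							       tx_usecs);
	if (rc)
		return rc;

	return ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
								 rx_usecs);
}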
rc: %d\n", + rc); + } + + /* no moderation supported, disable adaptive support */ + ena_com_disable_adaptive_moderation(ena_dev); + return rc; + } + + rc = ena_com_init_interrupt_moderation_table(ena_dev); + if (rc) + goto err; + + /* if moderation is supported by device we set adaptive moderation */ + delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution; + ena_com_update_intr_delay_resolution(ena_dev, delay_resolution); + ena_com_enable_adaptive_moderation(ena_dev); + + return 0; +err: + ena_com_destroy_interrupt_moderation(ena_dev); + return rc; +} + +void +ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev) +{ + struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; + + if (!intr_moder_tbl) + return; + + intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval = + ENA_INTR_LOWEST_USECS; + intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval = + ENA_INTR_LOWEST_PKTS; + intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval = + ENA_INTR_LOWEST_BYTES; + + intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval = + ENA_INTR_LOW_USECS; + intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval = + ENA_INTR_LOW_PKTS; + intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval = + ENA_INTR_LOW_BYTES; + + intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval = + ENA_INTR_MID_USECS; + intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval = + ENA_INTR_MID_PKTS; + intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval = + ENA_INTR_MID_BYTES; + + intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval = + ENA_INTR_HIGH_USECS; + intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval = + ENA_INTR_HIGH_PKTS; + intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval = + ENA_INTR_HIGH_BYTES; + + intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval = + ENA_INTR_HIGHEST_USECS; + intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval = + ENA_INTR_HIGHEST_PKTS; + intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval = + ENA_INTR_HIGHEST_BYTES; +} + +unsigned int +ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev) +{ + return ena_dev->intr_moder_tx_interval; +} + +unsigned int +ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev) +{ + struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; + + if (intr_moder_tbl) + return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval; + + return 0; +} + +void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev, + enum ena_intr_moder_level level, + struct ena_intr_moder_entry *entry) +{ + struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; + + if (level >= ENA_INTR_MAX_NUM_OF_LEVELS) + return; + + intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval; + if (ena_dev->intr_delay_resolution) + intr_moder_tbl[level].intr_moder_interval /= + ena_dev->intr_delay_resolution; + intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval; + intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval; +} + +void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev, + enum ena_intr_moder_level level, + struct ena_intr_moder_entry *entry) +{ + struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; + + if (level >= ENA_INTR_MAX_NUM_OF_LEVELS) + return; + + entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval; + if (ena_dev->intr_delay_resolution) + entry->intr_moder_interval *= ena_dev->intr_delay_resolution; + entry->pkts_per_interval = + 
intr_moder_tbl[level].pkts_per_interval; + entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval; +} diff --git a/drivers/net/ena/base/ena_com.h b/drivers/net/ena/base/ena_com.h new file mode 100644 index 00000000..19e53ffb --- /dev/null +++ b/drivers/net/ena/base/ena_com.h @@ -0,0 +1,1052 @@ +/*- +* BSD LICENSE +* +* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in +* the documentation and/or other materials provided with the +* distribution. +* * Neither the name of copyright holder nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef ENA_COM +#define ENA_COM + +#include "ena_plat.h" +#include "ena_common_defs.h" +#include "ena_admin_defs.h" +#include "ena_eth_io_defs.h" +#include "ena_regs_defs.h" +#if defined(__linux__) && !defined(__KERNEL__) +#include <rte_lcore.h> +#include <rte_spinlock.h> +#define __iomem +#endif + +#define ENA_MAX_NUM_IO_QUEUES 128U +/* We need to queues for each IO (on for Tx and one for Rx) */ +#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES)) + +#define ENA_MAX_HANDLERS 256 + +#define ENA_MAX_PHYS_ADDR_SIZE_BITS 48 + +/* Unit in usec */ +#define ENA_REG_READ_TIMEOUT 200000 + +#define ADMIN_SQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aq_entry)) +#define ADMIN_CQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_acq_entry)) +#define ADMIN_AENQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aenq_entry)) + +/*****************************************************************************/ +/*****************************************************************************/ +/* ENA adaptive interrupt moderation settings */ + +#define ENA_INTR_LOWEST_USECS (0) +#define ENA_INTR_LOWEST_PKTS (3) +#define ENA_INTR_LOWEST_BYTES (2 * 1524) + +#define ENA_INTR_LOW_USECS (32) +#define ENA_INTR_LOW_PKTS (12) +#define ENA_INTR_LOW_BYTES (16 * 1024) + +#define ENA_INTR_MID_USECS (80) +#define ENA_INTR_MID_PKTS (48) +#define ENA_INTR_MID_BYTES (64 * 1024) + +#define ENA_INTR_HIGH_USECS (128) +#define ENA_INTR_HIGH_PKTS (96) +#define ENA_INTR_HIGH_BYTES (128 * 1024) + +#define ENA_INTR_HIGHEST_USECS (192) +#define ENA_INTR_HIGHEST_PKTS (128) +#define ENA_INTR_HIGHEST_BYTES (192 * 1024) + +#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 196 +#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 4 +#define ENA_INTR_DELAY_OLD_VALUE_WEIGHT 6 +#define ENA_INTR_DELAY_NEW_VALUE_WEIGHT 4 + +enum ena_intr_moder_level { + ENA_INTR_MODER_LOWEST = 0, + ENA_INTR_MODER_LOW, + ENA_INTR_MODER_MID, + ENA_INTR_MODER_HIGH, + ENA_INTR_MODER_HIGHEST, + ENA_INTR_MAX_NUM_OF_LEVELS, +}; + +struct ena_intr_moder_entry { + unsigned int intr_moder_interval; + unsigned int pkts_per_interval; + unsigned int bytes_per_interval; +}; + +enum queue_direction { + ENA_COM_IO_QUEUE_DIRECTION_TX, + ENA_COM_IO_QUEUE_DIRECTION_RX +}; + +struct ena_com_buf { + dma_addr_t paddr; /**< Buffer physical address */ + u16 len; /**< Buffer length in bytes */ +}; + +struct ena_com_rx_buf_info { + u16 len; + u16 req_id; +}; + +struct ena_com_io_desc_addr { + void __iomem *pbuf_dev_addr; /* LLQ address */ + void *virt_addr; + dma_addr_t phys_addr; + ena_mem_handle_t mem_handle; +}; + +struct ena_com_tx_meta { + u16 mss; + u16 l3_hdr_len; + u16 l3_hdr_offset; + u16 l3_outer_hdr_len; /* In words */ + u16 l3_outer_hdr_offset; + u16 l4_hdr_len; /* In words */ +}; + +struct ena_com_io_cq { + struct ena_com_io_desc_addr cdesc_addr; + + u32 __iomem *db_addr; + + /* Interrupt unmask register */ + u32 __iomem *unmask_reg; + + /* The completion queue head doorbell register */ + uint32_t __iomem *cq_head_db_reg; + + /* The value to write to the above register to unmask + * the interrupt of this queue + */ + u32 msix_vector; + + enum queue_direction direction; + + /* holds the number of cdesc of the current packet */ + u16 cur_rx_pkt_cdesc_count; + /* save the firt cdesc idx of the current packet */ + u16 cur_rx_pkt_cdesc_start_idx; + + u16 q_depth; + /* Caller qid */ + u16 qid; + + /* Device queue index */ + u16 idx; + u16 head; + u16 last_head_update; + u8 phase; + u8 cdesc_entry_size_in_bytes; + +} ____cacheline_aligned; + +struct ena_com_io_sq { + struct 
ena_com_io_desc_addr desc_addr; + + u32 __iomem *db_addr; + u8 __iomem *header_addr; + + enum queue_direction direction; + enum ena_admin_placement_policy_type mem_queue_type; + + u32 msix_vector; + struct ena_com_tx_meta cached_tx_meta; + + u16 q_depth; + u16 qid; + + u16 idx; + u16 tail; + u16 next_to_comp; + u16 tx_max_header_size; + u8 phase; + u8 desc_entry_size; + u8 dma_addr_bits; +} ____cacheline_aligned; + +struct ena_com_admin_cq { + struct ena_admin_acq_entry *entries; + ena_mem_handle_t mem_handle; + dma_addr_t dma_addr; + + u16 head; + u8 phase; +}; + +struct ena_com_admin_sq { + struct ena_admin_aq_entry *entries; + ena_mem_handle_t mem_handle; + dma_addr_t dma_addr; + + u32 __iomem *db_addr; + + u16 head; + u16 tail; + u8 phase; + +}; + +struct ena_com_stats_admin { + u32 aborted_cmd; + u32 submitted_cmd; + u32 completed_cmd; + u32 out_of_space; + u32 no_completion; +}; + +struct ena_com_admin_queue { + void *q_dmadev; + ena_spinlock_t q_lock; /* spinlock for the admin queue */ + struct ena_comp_ctx *comp_ctx; + u16 q_depth; + struct ena_com_admin_cq cq; + struct ena_com_admin_sq sq; + + /* Indicate if the admin queue should poll for completion */ + bool polling; + + u16 curr_cmd_id; + + /* Indicate that the ena was initialized and can + * process new admin commands + */ + bool running_state; + + /* Count the number of outstanding admin commands */ + ena_atomic32_t outstanding_cmds; + + struct ena_com_stats_admin stats; +}; + +struct ena_aenq_handlers; + +struct ena_com_aenq { + u16 head; + u8 phase; + struct ena_admin_aenq_entry *entries; + dma_addr_t dma_addr; + ena_mem_handle_t mem_handle; + u16 q_depth; + struct ena_aenq_handlers *aenq_handlers; +}; + +struct ena_com_mmio_read { + struct ena_admin_ena_mmio_req_read_less_resp *read_resp; + dma_addr_t read_resp_dma_addr; + ena_mem_handle_t read_resp_mem_handle; + u16 seq_num; + bool readless_supported; + /* spin lock to ensure a single outstanding read */ + ena_spinlock_t lock; +}; + +struct ena_rss { + /* Indirect table */ + u16 *host_rss_ind_tbl; + struct ena_admin_rss_ind_table_entry *rss_ind_tbl; + dma_addr_t rss_ind_tbl_dma_addr; + ena_mem_handle_t rss_ind_tbl_mem_handle; + u16 tbl_log_size; + + /* Hash key */ + enum ena_admin_hash_functions hash_func; + struct ena_admin_feature_rss_flow_hash_control *hash_key; + dma_addr_t hash_key_dma_addr; + ena_mem_handle_t hash_key_mem_handle; + u32 hash_init_val; + + /* Flow Control */ + struct ena_admin_feature_rss_hash_control *hash_ctrl; + dma_addr_t hash_ctrl_dma_addr; + ena_mem_handle_t hash_ctrl_mem_handle; + +}; + +struct ena_host_attribute { + /* Debug area */ + u8 *debug_area_virt_addr; + dma_addr_t debug_area_dma_addr; + ena_mem_handle_t debug_area_dma_handle; + u32 debug_area_size; + + /* Host information */ + struct ena_admin_host_info *host_info; + dma_addr_t host_info_dma_addr; + ena_mem_handle_t host_info_dma_handle; +}; + +/* Each ena_dev is a PCI function. 
*/ +struct ena_com_dev { + struct ena_com_admin_queue admin_queue; + struct ena_com_aenq aenq; + struct ena_com_io_cq io_cq_queues[ENA_TOTAL_NUM_QUEUES]; + struct ena_com_io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES]; + void __iomem *reg_bar; + void __iomem *mem_bar; + void *dmadev; + + enum ena_admin_placement_policy_type tx_mem_queue_type; + + u16 stats_func; /* Selected function for extended statistic dump */ + u16 stats_queue; /* Selected queue for extended statistic dump */ + + u16 tx_max_header_size; + + struct ena_com_mmio_read mmio_read; + + struct ena_rss rss; + u32 supported_features; + u32 dma_addr_bits; + + struct ena_host_attribute host_attr; + bool adaptive_coalescing; + u16 intr_delay_resolution; + u32 intr_moder_tx_interval; + struct ena_intr_moder_entry *intr_moder_tbl; +}; + +struct ena_com_dev_get_features_ctx { + struct ena_admin_queue_feature_desc max_queues; + struct ena_admin_device_attr_feature_desc dev_attr; + struct ena_admin_feature_aenq_desc aenq; + struct ena_admin_feature_offload_desc offload; +}; + +typedef void (*ena_aenq_handler)(void *data, + struct ena_admin_aenq_entry *aenq_e); + +/* Holds aenq handlers. Indexed by AENQ event group */ +struct ena_aenq_handlers { + ena_aenq_handler handlers[ENA_MAX_HANDLERS]; + ena_aenq_handler unimplemented_handler; +}; + +/*****************************************************************************/ +/*****************************************************************************/ +#if defined(__cplusplus) +extern "C" { +#endif + +/* ena_com_mmio_reg_read_request_init - Init the mmio reg read mechanism + * @ena_dev: ENA communication layer struct + * + * Initialize the register read mechanism. + * + * @note: This method must be the first stage in the initialization sequence. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev); + +/* ena_com_set_mmio_read_mode - Enable/disable the mmio reg read mechanism + * @ena_dev: ENA communication layer struct + * @realess_supported: readless mode (enable/disable) + */ +void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, + bool readless_supported); + +/* ena_com_mmio_reg_read_request_write_dev_addr - Write the mmio reg read return + * value physical address. + * @ena_dev: ENA communication layer struct + */ +void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev); + +/* ena_com_mmio_reg_read_request_destroy - Destroy the mmio reg read mechanism + * @ena_dev: ENA communication layer struct + */ +void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev); + +/* ena_com_admin_init - Init the admin and the async queues + * @ena_dev: ENA communication layer struct + * @aenq_handlers: Those handlers to be called upon event. + * @init_spinlock: Indicate if this method should init the admin spinlock or + * the spinlock was init before (for example, in a case of FLR). + * + * Initialize the admin submission and completion queues. + * Initialize the asynchronous events notification queues. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_admin_init(struct ena_com_dev *ena_dev, + struct ena_aenq_handlers *aenq_handlers, + bool init_spinlock); + +/* ena_com_admin_destroy - Destroy the admin and the async events queues. + * @ena_dev: ENA communication layer struct + * + * @note: Before calling this method, the caller must validate that the device + * won't send any additional admin completions/aenq. + * To achieve that, a FLR is recommended. 
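+ *
+ * A possible teardown sketch (illustrative only, based on the notes in
+ * this header; the exact order is up to the caller):
+ *
+ *   ena_com_set_admin_running_state(ena_dev, false);
+ *   ena_com_abort_admin_commands(ena_dev);
+ *   ena_com_wait_for_abort_completion(ena_dev);
+ *   ena_com_admin_destroy(ena_dev);
+ *   ena_com_mmio_reg_read_request_destroy(ena_dev);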
+ */ +void ena_com_admin_destroy(struct ena_com_dev *ena_dev); + +/* ena_com_dev_reset - Perform device FLR to the device. + * @ena_dev: ENA communication layer struct + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_dev_reset(struct ena_com_dev *ena_dev); + +/* ena_com_create_io_queue - Create io queue. + * @ena_dev: ENA communication layer struct + * @qid - the caller virtual queue id. + * @direction - the queue direction (Rx/Tx) + * @mem_queue_type - Indicate if this queue is LLQ or regular queue + * (relevant only for Tx queue) + * @msix_vector - MSI-X vector + * @queue_size - queue size + * + * Create the submission and the completion queues for queue id - qid. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_create_io_queue(struct ena_com_dev *ena_dev, u16 qid, + enum queue_direction direction, + enum ena_admin_placement_policy_type mem_queue_type, + u32 msix_vector, + u16 queue_size); + +/* ena_com_admin_destroy - Destroy IO queue with the queue id - qid. + * @ena_dev: ENA communication layer struct + */ +void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid); + +/* ena_com_get_io_handlers - Return the io queue handlers + * @ena_dev: ENA communication layer struct + * @qid - the caller virtual queue id. + * @io_sq - IO submission queue handler + * @io_cq - IO completion queue handler. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid, + struct ena_com_io_sq **io_sq, + struct ena_com_io_cq **io_cq); + +/* ena_com_admin_aenq_enable - ENAble asynchronous event notifications + * @ena_dev: ENA communication layer struct + * + * After this method, aenq event can be received via AENQ. + */ +void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev); + +/* ena_com_set_admin_running_state - Set the state of the admin queue + * @ena_dev: ENA communication layer struct + * + * Change the state of the admin queue (enable/disable) + */ +void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state); + +/* ena_com_get_admin_running_state - Get the admin queue state + * @ena_dev: ENA communication layer struct + * + * Retrieve the state of the admin queue (enable/disable) + * + * @return - current polling mode (enable/disable) + */ +bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev); + +/* ena_com_set_admin_polling_mode - Set the admin completion queue polling mode + * @ena_dev: ENA communication layer struct + * @polling: ENAble/Disable polling mode + * + * Set the admin completion mode. + */ +void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling); + +/* ena_com_set_admin_polling_mode - Get the admin completion queue polling mode + * @ena_dev: ENA communication layer struct + * + * Get the admin completion mode. + * If polling mode is on, ena_com_execute_admin_command will perform a + * polling on the admin completion queue for the commands completion, + * otherwise it will wait on wait event. + * + * @return state + */ +bool ena_com_get_ena_admin_polling_mode(struct ena_com_dev *ena_dev); + +/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler + * @ena_dev: ENA communication layer struct + * + * This method go over the admin completion queue and wake up all the pending + * threads that wait on the commands wait event. + * + * @note: Should be called after MSI-X interrupt. 
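+ *
+ * For example (illustrative, not a required pattern), a driver's admin
+ * interrupt routine may call this handler and then
+ * ena_com_aenq_intr_handler() so that command completions and asynchronous
+ * events are both serviced from the same vector.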
+ */ +void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev); + +/* ena_com_aenq_intr_handler - AENQ interrupt handler + * @ena_dev: ENA communication layer struct + * + * This method go over the async event notification queue and call the proper + * aenq handler. + */ +void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data); + +/* ena_com_abort_admin_commands - Abort all the outstanding admin commands. + * @ena_dev: ENA communication layer struct + * + * This method aborts all the outstanding admin commands. + * The called should then call ena_com_wait_for_abort_completion to make sure + * all the commands were completed. + */ +void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev); + +/* ena_com_wait_for_abort_completion - Wait for admin commands abort. + * @ena_dev: ENA communication layer struct + * + * This method wait until all the outstanding admin commands will be completed. + */ +void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev); + +/* ena_com_validate_version - Validate the device parameters + * @ena_dev: ENA communication layer struct + * + * This method validate the device parameters are the same as the saved + * parameters in ena_dev. + * This method is useful after device reset, to validate the device mac address + * and the device offloads are the same as before the reset. + * + * @return - 0 on success negative value otherwise. + */ +int ena_com_validate_version(struct ena_com_dev *ena_dev); + +/* ena_com_get_link_params - Retrieve physical link parameters. + * @ena_dev: ENA communication layer struct + * @resp: Link parameters + * + * Retrieve the physical link parameters, + * like speed, auto-negotiation and full duplex support. + * + * @return - 0 on Success negative value otherwise. + */ +int ena_com_get_link_params(struct ena_com_dev *ena_dev, + struct ena_admin_get_feat_resp *resp); + +/* ena_com_get_dma_width - Retrieve physical dma address width the device + * supports. + * @ena_dev: ENA communication layer struct + * + * Retrieve the maximum physical address bits the device can handle. + * + * @return: > 0 on Success and negative value otherwise. + */ +int ena_com_get_dma_width(struct ena_com_dev *ena_dev); + +/* ena_com_set_aenq_config - Set aenq groups configurations + * @ena_dev: ENA communication layer struct + * @groups flag: bit fields flags of enum ena_admin_aenq_group. + * + * Configure which aenq event group the driver would like to receive. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag); + +/* ena_com_get_dev_attr_feat - Get device features + * @ena_dev: ENA communication layer struct + * @get_feat_ctx: returned context that contain the get features. + * + * @return: 0 on Success and negative value otherwise. + */ +int +ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, + struct ena_com_dev_get_features_ctx *get_feat_ctx); + +/* ena_com_get_dev_basic_stats - Get device basic statistics + * @ena_dev: ENA communication layer struct + * @stats: stats return value + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev, + struct ena_admin_basic_stats *stats); + +/* ena_com_set_dev_mtu - Configure the device mtu. + * @ena_dev: ENA communication layer struct + * @mtu: mtu value + * + * @return: 0 on Success and negative value otherwise. 
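+ *
+ * E.g. ena_com_set_dev_mtu(ena_dev, 9000); the requested value presumably
+ * must not exceed the max_mtu reported in
+ * struct ena_admin_device_attr_feature_desc.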
+ */ +int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu); + +/* ena_com_get_offload_settings - Retrieve the device offloads capabilities + * @ena_dev: ENA communication layer struct + * @offlad: offload return value + * + * @return: 0 on Success and negative value otherwise. + */ +int +ena_com_get_offload_settings(struct ena_com_dev *ena_dev, + struct ena_admin_feature_offload_desc *offload); + +/* ena_com_rss_init - Init RSS + * @ena_dev: ENA communication layer struct + * @log_size: indirection log size + * + * Allocate RSS/RFS resources. + * The caller then can configure rss using ena_com_set_hash_function, + * ena_com_set_hash_ctrl and ena_com_indirect_table_set. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size); + +/* ena_com_rss_destroy - Destroy rss + * @ena_dev: ENA communication layer struct + * + * Free all the RSS/RFS resources. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_rss_destroy(struct ena_com_dev *ena_dev); + +/* ena_com_fill_hash_function - Fill RSS hash function + * @ena_dev: ENA communication layer struct + * @func: The hash function (Toeplitz or crc) + * @key: Hash key (for toeplitz hash) + * @key_len: key length (max length 10 DW) + * @init_val: initial value for the hash function + * + * Fill the ena_dev resources with the desire hash function, hash key, key_len + * and key initial value (if needed by the hash function). + * To flush the key into the device the caller should call + * ena_com_set_hash_function. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, + enum ena_admin_hash_functions func, + const u8 *key, u16 key_len, u32 init_val); + +/* ena_com_set_hash_function - Flush the hash function and it dependencies to + * the device. + * @ena_dev: ENA communication layer struct + * + * Flush the hash function and it dependencies (key, key length and + * initial value) if needed. + * + * @note: Prior to this method the caller should call ena_com_fill_hash_function + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_set_hash_function(struct ena_com_dev *ena_dev); + +/* ena_com_get_hash_function - Retrieve the hash function and the hash key + * from the device. + * @ena_dev: ENA communication layer struct + * @func: hash function + * @key: hash key + * + * Retrieve the hash function and the hash key from the device. + * + * @note: If the caller called ena_com_fill_hash_function but didn't flash + * it to the device, the new configuration will be lost. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_get_hash_function(struct ena_com_dev *ena_dev, + enum ena_admin_hash_functions *func, + u8 *key); + +/* ena_com_fill_hash_ctrl - Fill RSS hash control + * @ena_dev: ENA communication layer struct. + * @proto: The protocol to configure. + * @hash_fields: bit mask of ena_admin_flow_hash_fields + * + * Fill the ena_dev resources with the desire hash control (the ethernet + * fields that take part of the hash) for a specific protocol. + * To flush the hash control to the device, the caller should call + * ena_com_set_hash_ctrl. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, + enum ena_admin_flow_hash_proto proto, + u16 hash_fields); + +/* ena_com_set_hash_ctrl - Flush the hash control resources to the device. 
+ * @ena_dev: ENA communication layer struct + * + * Flush the hash control (the ethernet fields that take part of the hash) + * + * @note: Prior to this method the caller should call ena_com_fill_hash_ctrl. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev); + +/* ena_com_get_hash_ctrl - Retrieve the hash control from the device. + * @ena_dev: ENA communication layer struct + * @proto: The protocol to retrieve. + * @fields: bit mask of ena_admin_flow_hash_fields. + * + * Retrieve the hash control from the device. + * + * @note, If the caller called ena_com_fill_hash_ctrl but didn't flash + * it to the device, the new configuration will be lost. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev, + enum ena_admin_flow_hash_proto proto, + u16 *fields); + +/* ena_com_set_default_hash_ctrl - Set the hash control to a default + * configuration. + * @ena_dev: ENA communication layer struct + * + * Fill the ena_dev resources with the default hash control configuration. + * To flush the hash control to the device, the caller should call + * ena_com_set_hash_ctrl. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev); + +/* ena_com_indirect_table_fill_entry - Fill a single entry in the RSS + * indirection table + * @ena_dev: ENA communication layer struct. + * @entry_idx - indirection table entry. + * @entry_value - redirection value + * + * Fill a single entry of the RSS indirection table in the ena_dev resources. + * To flush the indirection table to the device, the called should call + * ena_com_indirect_table_set. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev, + u16 entry_idx, u16 entry_value); + +/* ena_com_indirect_table_set - Flush the indirection table to the device. + * @ena_dev: ENA communication layer struct + * + * Flush the indirection hash control to the device. + * Prior to this method the caller should call ena_com_indirect_table_fill_entry + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_indirect_table_set(struct ena_com_dev *ena_dev); + +/* ena_com_indirect_table_get - Retrieve the indirection table from the device. + * @ena_dev: ENA communication layer struct + * @ind_tbl: indirection table + * + * Retrieve the RSS indirection table from the device. + * + * @note: If the caller called ena_com_indirect_table_fill_entry but didn't + * flash it to the device, the new configuration will be lost. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl); + +/* ena_com_allocate_host_attribute - Allocate host attributes resources. + * @ena_dev: ENA communication layer struct + * @debug_area_size: Debug aread size + * + * Allocate host info and debug area. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_allocate_host_attribute(struct ena_com_dev *ena_dev, + u32 debug_area_size); + +/* ena_com_allocate_host_attribute - Free the host attributes resources. + * @ena_dev: ENA communication layer struct + * + * Free the allocate host info and debug area. + */ +void ena_com_delete_host_attribute(struct ena_com_dev *ena_dev); + +/* ena_com_set_host_attributes - Update the device with the host + * attributes base address. 
+ * @ena_dev: ENA communication layer struct + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_set_host_attributes(struct ena_com_dev *ena_dev); + +/* ena_com_create_io_cq - Create io completion queue. + * @ena_dev: ENA communication layer struct + * @io_cq - io completion queue handler + + * Create IO completion queue. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_create_io_cq(struct ena_com_dev *ena_dev, + struct ena_com_io_cq *io_cq); + +/* ena_com_destroy_io_cq - Destroy io completion queue. + * @ena_dev: ENA communication layer struct + * @io_cq - io completion queue handler + + * Destroy IO completion queue. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, + struct ena_com_io_cq *io_cq); + +/* ena_com_execute_admin_command - Execute admin command + * @admin_queue: admin queue. + * @cmd: the admin command to execute. + * @cmd_size: the command size. + * @cmd_completion: command completion return value. + * @cmd_comp_size: command completion size. + + * Submit an admin command and then wait until the device will return a + * completion. + * The completion will be copyed into cmd_comp. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, + struct ena_admin_aq_entry *cmd, + size_t cmd_size, + struct ena_admin_acq_entry *cmd_comp, + size_t cmd_comp_size); + +/* ena_com_init_interrupt_moderation - Init interrupt moderation + * @ena_dev: ENA communication layer struct + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev); + +/* ena_com_destroy_interrupt_moderation - Destroy interrupt moderation resources + * @ena_dev: ENA communication layer struct + */ +void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev); + +/* ena_com_interrupt_moderation_supported - Return if interrupt moderation + * capability is supported by the device. + * + * @return - supported or not. + */ +bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev); + +/* ena_com_config_default_interrupt_moderation_table - Restore the interrupt + * moderation table back to the default parameters. + * @ena_dev: ENA communication layer struct + */ +void +ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev); + +/* ena_com_update_nonadaptive_moderation_interval_tx - Update the + * non-adaptive interval in Tx direction. + * @ena_dev: ENA communication layer struct + * @tx_coalesce_usecs: Interval in usec. + * + * @return - 0 on success, negative value on failure. + */ +int +ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, + u32 tx_coalesce_usecs); + +/* ena_com_update_nonadaptive_moderation_interval_rx - Update the + * non-adaptive interval in Rx direction. + * @ena_dev: ENA communication layer struct + * @rx_coalesce_usecs: Interval in usec. + * + * @return - 0 on success, negative value on failure. + */ +int +ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, + u32 rx_coalesce_usecs); + +/* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the + * non-adaptive interval in Tx direction. 
+ * @ena_dev: ENA communication layer struct + * + * @return - interval in usec + */ +unsigned int +ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev); + +/* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the + * non-adaptive interval in Rx direction. + * @ena_dev: ENA communication layer struct + * + * @return - interval in usec + */ +unsigned int +ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev); + +/* ena_com_init_intr_moderation_entry - Update a single entry in the interrupt + * moderation table. + * @ena_dev: ENA communication layer struct + * @level: Interrupt moderation table level + * @entry: Entry value + * + * Update a single entry in the interrupt moderation table. + */ +void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev, + enum ena_intr_moder_level level, + struct ena_intr_moder_entry *entry); + +/* ena_com_get_intr_moderation_entry - Init ena_intr_moder_entry. + * @ena_dev: ENA communication layer struct + * @level: Interrupt moderation table level + * @entry: Entry to fill. + * + * Initialize the entry according to the adaptive interrupt moderation table. + */ +void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev, + enum ena_intr_moder_level level, + struct ena_intr_moder_entry *entry); + +static inline bool +ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev) +{ + return ena_dev->adaptive_coalescing; +} + +static inline void +ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev) +{ + ena_dev->adaptive_coalescing = true; +} + +static inline void +ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev) +{ + ena_dev->adaptive_coalescing = false; +} + +/* ena_com_calculate_interrupt_delay - Calculate new interrupt delay + * @ena_dev: ENA communication layer struct + * @pkts: Number of packets since the last update + * @bytes: Number of bytes received since the last update. + * @smoothed_interval: Returned interval + * @moder_tbl_idx: Current table level as input update new level as return + * value. + */ +static inline void +ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev, + unsigned int pkts, + unsigned int bytes, + unsigned int *smoothed_interval, + unsigned int *moder_tbl_idx) +{ + enum ena_intr_moder_level curr_moder_idx, new_moder_idx; + struct ena_intr_moder_entry *curr_moder_entry; + struct ena_intr_moder_entry *pred_moder_entry; + struct ena_intr_moder_entry *new_moder_entry; + struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; + unsigned int interval; + + /* We apply adaptive moderation on Rx path only. + * Tx uses static interrupt moderation. 
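+ *
+ * The returned interval is a weighted average of the table value and the
+ * previous interval, rounded to the nearest usec:
+ *
+ *   smoothed = (new * ENA_INTR_DELAY_NEW_VALUE_WEIGHT +
+ *               old * ENA_INTR_DELAY_OLD_VALUE_WEIGHT + 5) / 10
+ *
+ * e.g. with the default 4/6 weights, new = 80 and old = 32 give
+ * (320 + 192 + 5) / 10 = 51 usec.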
+ */ + if (!pkts || !bytes) + /* Tx interrupt, or spurious interrupt, + * in both cases we just use same delay values + */ + return; + + curr_moder_idx = (enum ena_intr_moder_level)*moder_tbl_idx; + if (unlikely(curr_moder_idx >= ENA_INTR_MAX_NUM_OF_LEVELS)) { + ena_trc_err("Wrong moderation index %u\n", curr_moder_idx); + return; + } + + curr_moder_entry = &intr_moder_tbl[curr_moder_idx]; + new_moder_idx = curr_moder_idx; + + if (curr_moder_idx == ENA_INTR_MODER_LOWEST) { + if ((pkts > curr_moder_entry->pkts_per_interval) || + (bytes > curr_moder_entry->bytes_per_interval)) + new_moder_idx = (enum ena_intr_moder_level)(curr_moder_idx + 1); + } else { + pred_moder_entry = &intr_moder_tbl[curr_moder_idx - 1]; + + if ((pkts <= pred_moder_entry->pkts_per_interval) || + (bytes <= pred_moder_entry->bytes_per_interval)) + new_moder_idx = (enum ena_intr_moder_level)(curr_moder_idx - 1); + else if ((pkts > curr_moder_entry->pkts_per_interval) || + (bytes > curr_moder_entry->bytes_per_interval)) { + if (curr_moder_idx != ENA_INTR_MODER_HIGHEST) + new_moder_idx = (enum ena_intr_moder_level)(curr_moder_idx + 1); + } + } + new_moder_entry = &intr_moder_tbl[new_moder_idx]; + + interval = new_moder_entry->intr_moder_interval; + *smoothed_interval = ( + (interval * ENA_INTR_DELAY_NEW_VALUE_WEIGHT + + ENA_INTR_DELAY_OLD_VALUE_WEIGHT * (*smoothed_interval)) + 5) / + 10; + + *moder_tbl_idx = new_moder_idx; +} + +/* ena_com_update_intr_reg - Prepare interrupt register + * @intr_reg: interrupt register to update. + * @rx_delay_interval: Rx interval in usecs + * @tx_delay_interval: Tx interval in usecs + * @unmask: unask enable/disable + * + * Prepare interrupt update register with the supplied parameters. + */ +static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg, + u32 rx_delay_interval, + u32 tx_delay_interval, + bool unmask) +{ + intr_reg->intr_control = 0; + intr_reg->intr_control |= rx_delay_interval & + ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK; + + intr_reg->intr_control |= + (tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT) + & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK; + + if (unmask) + intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK; +} + +int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff, + u32 len); + +int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev, + u32 funct_queue); + +#if defined(__cplusplus) +} +#endif /* __cplusplus */ +#endif /* !(ENA_COM) */ diff --git a/drivers/net/ena/base/ena_defs/ena_admin_defs.h b/drivers/net/ena/base/ena_defs/ena_admin_defs.h new file mode 100644 index 00000000..fe412469 --- /dev/null +++ b/drivers/net/ena/base/ena_defs/ena_admin_defs.h @@ -0,0 +1,1979 @@ +/*- +* BSD LICENSE +* +* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in +* the documentation and/or other materials provided with the +* distribution. +* * Neither the name of copyright holder nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. 
+* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef _ENA_ADMIN_H_ +#define _ENA_ADMIN_H_ + +/* admin commands opcodes */ +enum ena_admin_aq_opcode { + /* create submission queue */ + ENA_ADMIN_CREATE_SQ = 1, + + /* destroy submission queue */ + ENA_ADMIN_DESTROY_SQ = 2, + + /* create completion queue */ + ENA_ADMIN_CREATE_CQ = 3, + + /* destroy completion queue */ + ENA_ADMIN_DESTROY_CQ = 4, + + /* get capabilities of particular feature */ + ENA_ADMIN_GET_FEATURE = 8, + + /* get capabilities of particular feature */ + ENA_ADMIN_SET_FEATURE = 9, + + /* get statistics */ + ENA_ADMIN_GET_STATS = 11, +}; + +/* privileged amdin commands opcodes */ +enum ena_admin_aq_opcode_privileged { + /* get device capabilities */ + ENA_ADMIN_IDENTIFY = 48, + + /* configure device */ + ENA_ADMIN_CONFIGURE_PF_DEVICE = 49, + + /* setup SRIOV PCIe Virtual Function capabilities */ + ENA_ADMIN_SETUP_VF = 50, + + /* load firmware to the controller */ + ENA_ADMIN_LOAD_FIRMWARE = 52, + + /* commit previously loaded firmare */ + ENA_ADMIN_COMMIT_FIRMWARE = 53, + + /* quiesce virtual function */ + ENA_ADMIN_QUIESCE_VF = 54, + + /* load virtual function from migrates context */ + ENA_ADMIN_MIGRATE_VF = 55, +}; + +/* admin command completion status codes */ +enum ena_admin_aq_completion_status { + /* Request completed successfully */ + ENA_ADMIN_SUCCESS = 0, + + /* no resources to satisfy request */ + ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1, + + /* Bad opcode in request descriptor */ + ENA_ADMIN_BAD_OPCODE = 2, + + /* Unsupported opcode in request descriptor */ + ENA_ADMIN_UNSUPPORTED_OPCODE = 3, + + /* Wrong request format */ + ENA_ADMIN_MALFORMED_REQUEST = 4, + + /* One of parameters is not valid. Provided in ACQ entry + * extended_status + */ + ENA_ADMIN_ILLEGAL_PARAMETER = 5, + + /* unexpected error */ + ENA_ADMIN_UNKNOWN_ERROR = 6, +}; + +/* get/set feature subcommands opcodes */ +enum ena_admin_aq_feature_id { + /* list of all supported attributes/capabilities in the ENA */ + ENA_ADMIN_DEVICE_ATTRIBUTES = 1, + + /* max number of supported queues per for every queues type */ + ENA_ADMIN_MAX_QUEUES_NUM = 2, + + /* low latency queues capabilities (max entry size, depth) */ + ENA_ADMIN_LLQ_CONFIG = 3, + + /* power management capabilities */ + ENA_ADMIN_POWER_MANAGEMENT_CONFIG = 4, + + /* MAC address filters support, multicast, broadcast, and + * promiscuous + */ + ENA_ADMIN_MAC_FILTERS_CONFIG = 5, + + /* VLAN membership, frame format, etc. */ + ENA_ADMIN_VLAN_CONFIG = 6, + + /* Available size for various on-chip memory resources, accessible + * by the driver + */ + ENA_ADMIN_ON_DEVICE_MEMORY_CONFIG = 7, + + /* Receive Side Scaling (RSS) function */ + ENA_ADMIN_RSS_HASH_FUNCTION = 10, + + /* stateless TCP/UDP/IP offload capabilities. 
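+ * See struct ena_admin_feature_offload_desc further below for the
+ * reported capability bits.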
*/ + ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11, + + /* Multiple tuples flow table configuration */ + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12, + + /* max MTU, current MTU */ + ENA_ADMIN_MTU = 14, + + /* Receive Side Scaling (RSS) hash input */ + ENA_ADMIN_RSS_HASH_INPUT = 18, + + /* overlay tunnels configuration */ + ENA_ADMIN_TUNNEL_CONFIG = 19, + + /* interrupt moderation parameters */ + ENA_ADMIN_INTERRUPT_MODERATION = 20, + + /* 1588v2 and Timing configuration */ + ENA_ADMIN_1588_CONFIG = 21, + + /* Packet Header format templates configuration for input and + * output parsers + */ + ENA_ADMIN_PKT_HEADER_TEMPLATES_CONFIG = 23, + + /* AENQ configuration */ + ENA_ADMIN_AENQ_CONFIG = 26, + + /* Link configuration */ + ENA_ADMIN_LINK_CONFIG = 27, + + /* Host attributes configuration */ + ENA_ADMIN_HOST_ATTR_CONFIG = 28, + + /* Number of valid opcodes */ + ENA_ADMIN_FEATURES_OPCODE_NUM = 32, +}; + +/* descriptors and headers placement */ +enum ena_admin_placement_policy_type { + /* descriptors and headers are in OS memory */ + ENA_ADMIN_PLACEMENT_POLICY_HOST = 1, + + /* descriptors and headers in device memory (a.k.a Low Latency + * Queue) + */ + ENA_ADMIN_PLACEMENT_POLICY_DEV = 3, +}; + +/* link speeds */ +enum ena_admin_link_types { + ENA_ADMIN_LINK_SPEED_1G = 0x1, + + ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2, + + ENA_ADMIN_LINK_SPEED_5G = 0x4, + + ENA_ADMIN_LINK_SPEED_10G = 0x8, + + ENA_ADMIN_LINK_SPEED_25G = 0x10, + + ENA_ADMIN_LINK_SPEED_40G = 0x20, + + ENA_ADMIN_LINK_SPEED_50G = 0x40, + + ENA_ADMIN_LINK_SPEED_100G = 0x80, + + ENA_ADMIN_LINK_SPEED_200G = 0x100, + + ENA_ADMIN_LINK_SPEED_400G = 0x200, +}; + +/* completion queue update policy */ +enum ena_admin_completion_policy_type { + /* cqe for each sq descriptor */ + ENA_ADMIN_COMPLETION_POLICY_DESC = 0, + + /* cqe upon request in sq descriptor */ + ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1, + + /* current queue head pointer is updated in OS memory upon sq + * descriptor request + */ + ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2, + + /* current queue head pointer is updated in OS memory for each sq + * descriptor + */ + ENA_ADMIN_COMPLETION_POLICY_HEAD = 3, +}; + +/* type of get statistics command */ +enum ena_admin_get_stats_type { + /* Basic statistics */ + ENA_ADMIN_GET_STATS_TYPE_BASIC = 0, + + /* Extended statistics */ + ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1, +}; + +/* scope of get statistics command */ +enum ena_admin_get_stats_scope { + ENA_ADMIN_SPECIFIC_QUEUE = 0, + + ENA_ADMIN_ETH_TRAFFIC = 1, +}; + +/* ENA Admin Queue (AQ) common descriptor */ +struct ena_admin_aq_common_desc { + /* word 0 : */ + /* command identificator to associate it with the completion + * 11:0 : command_id + * 15:12 : reserved12 + */ + uint16_t command_id; + + /* as appears in ena_aq_opcode */ + uint8_t opcode; + + /* 0 : phase + * 1 : ctrl_data - control buffer address valid + * 2 : ctrl_data_indirect - control buffer address + * points to list of pages with addresses of control + * buffers + * 7:3 : reserved3 + */ + uint8_t flags; +}; + +/* used in ena_aq_entry. Can point directly to control data, or to a page + * list chunk. Used also at the end of indirect mode page list chunks, for + * chaining. + */ +struct ena_admin_ctrl_buff_info { + /* word 0 : indicates length of the buffer pointed by + * control_buffer_address. 
+ */ + uint32_t length; + + /* words 1:2 : points to control buffer (direct or indirect) */ + struct ena_common_mem_addr address; +}; + +/* submission queue full identification */ +struct ena_admin_sq { + /* word 0 : */ + /* queue id */ + uint16_t sq_idx; + + /* 4:0 : reserved + * 7:5 : sq_direction - 0x1 - Tx; 0x2 - Rx + */ + uint8_t sq_identity; + + uint8_t reserved1; +}; + +/* AQ entry format */ +struct ena_admin_aq_entry { + /* words 0 : */ + struct ena_admin_aq_common_desc aq_common_descriptor; + + /* words 1:3 : */ + union { + /* command specific inline data */ + uint32_t inline_data_w1[3]; + + /* words 1:3 : points to control buffer (direct or + * indirect, chained if needed) + */ + struct ena_admin_ctrl_buff_info control_buffer; + } u; + + /* command specific inline data */ + uint32_t inline_data_w4[12]; +}; + +/* ENA Admin Completion Queue (ACQ) common descriptor */ +struct ena_admin_acq_common_desc { + /* word 0 : */ + /* command identifier to associate it with the aq descriptor + * 11:0 : command_id + * 15:12 : reserved12 + */ + uint16_t command; + + /* status of request execution */ + uint8_t status; + + /* 0 : phase + * 7:1 : reserved1 + */ + uint8_t flags; + + /* word 1 : */ + /* provides additional info */ + uint16_t extended_status; + + /* submission queue head index, serves as a hint what AQ entries can + * be revoked + */ + uint16_t sq_head_indx; +}; + +/* ACQ entry format */ +struct ena_admin_acq_entry { + /* words 0:1 : */ + struct ena_admin_acq_common_desc acq_common_descriptor; + + /* response type specific data */ + uint32_t response_specific_data[14]; +}; + +/* ENA AQ Create Submission Queue command. Placed in control buffer pointed + * by AQ entry + */ +struct ena_admin_aq_create_sq_cmd { + /* words 0 : */ + struct ena_admin_aq_common_desc aq_common_descriptor; + + /* word 1 : */ + /* 4:0 : reserved0_w1 + * 7:5 : sq_direction - 0x1 - Tx, 0x2 - Rx + */ + uint8_t sq_identity; + + uint8_t reserved8_w1; + + /* 3:0 : placement_policy - Describing where the SQ + * descriptor ring and the SQ packet headers reside: + * 0x1 - descriptors and headers are in OS memory, + * 0x3 - descriptors and headers in device memory + * (a.k.a Low Latency Queue) + * 6:4 : completion_policy - Describing what policy + * to use for generation completion entry (cqe) in + * the CQ associated with this SQ: 0x0 - cqe for each + * sq descriptor, 0x1 - cqe upon request in sq + * descriptor, 0x2 - current queue head pointer is + * updated in OS memory upon sq descriptor request + * 0x3 - current queue head pointer is updated in OS + * memory for each sq descriptor + * 7 : reserved15_w1 + */ + uint8_t sq_caps_2; + + /* 0 : is_physically_contiguous - Described if the + * queue ring memory is allocated in physical + * contiguous pages or split. + * 7:1 : reserved17_w1 + */ + uint8_t sq_caps_3; + + /* word 2 : */ + /* associated completion queue id. This CQ must be created prior to + * SQ creation + */ + uint16_t cq_idx; + + /* submission queue depth in entries */ + uint16_t sq_depth; + + /* words 3:4 : SQ physical base address in OS memory. This field + * should not be used for Low Latency queues. Has to be page + * aligned. + */ + struct ena_common_mem_addr sq_ba; + + /* words 5:6 : specifies queue head writeback location in OS + * memory. Valid if completion_policy is set to + * completion_policy_head_on_demand or completion_policy_head. 
Has + * to be cache aligned + */ + struct ena_common_mem_addr sq_head_writeback; + + /* word 7 : reserved word */ + uint32_t reserved0_w7; + + /* word 8 : reserved word */ + uint32_t reserved0_w8; +}; + +/* submission queue direction */ +enum ena_admin_sq_direction { + ENA_ADMIN_SQ_DIRECTION_TX = 1, + + ENA_ADMIN_SQ_DIRECTION_RX = 2, +}; + +/* ENA Response for Create SQ Command. Appears in ACQ entry as + * response_specific_data + */ +struct ena_admin_acq_create_sq_resp_desc { + /* words 0:1 : Common Admin Queue completion descriptor */ + struct ena_admin_acq_common_desc acq_common_desc; + + /* word 2 : */ + /* sq identifier */ + uint16_t sq_idx; + + uint16_t reserved; + + /* word 3 : queue doorbell address as and offset to PCIe MMIO REG + * BAR + */ + uint32_t sq_doorbell_offset; + + /* word 4 : low latency queue ring base address as an offset to + * PCIe MMIO LLQ_MEM BAR + */ + uint32_t llq_descriptors_offset; + + /* word 5 : low latency queue headers' memory as an offset to PCIe + * MMIO LLQ_MEM BAR + */ + uint32_t llq_headers_offset; +}; + +/* ENA AQ Destroy Submission Queue command. Placed in control buffer + * pointed by AQ entry + */ +struct ena_admin_aq_destroy_sq_cmd { + /* words 0 : */ + struct ena_admin_aq_common_desc aq_common_descriptor; + + /* words 1 : */ + struct ena_admin_sq sq; +}; + +/* ENA Response for Destroy SQ Command. Appears in ACQ entry as + * response_specific_data + */ +struct ena_admin_acq_destroy_sq_resp_desc { + /* words 0:1 : Common Admin Queue completion descriptor */ + struct ena_admin_acq_common_desc acq_common_desc; +}; + +/* ENA AQ Create Completion Queue command */ +struct ena_admin_aq_create_cq_cmd { + /* words 0 : */ + struct ena_admin_aq_common_desc aq_common_descriptor; + + /* word 1 : */ + /* 4:0 : reserved5 + * 5 : interrupt_mode_enabled - if set, cq operates + * in interrupt mode, otherwise - polling + * 7:6 : reserved6 + */ + uint8_t cq_caps_1; + + /* 4:0 : cq_entry_size_words - size of CQ entry in + * 32-bit words, valid values: 4, 8. + * 7:5 : reserved7 + */ + uint8_t cq_caps_2; + + /* completion queue depth in # of entries. must be power of 2 */ + uint16_t cq_depth; + + /* word 2 : msix vector assigned to this cq */ + uint32_t msix_vector; + + /* words 3:4 : cq physical base address in OS memory. CQ must be + * physically contiguous + */ + struct ena_common_mem_addr cq_ba; +}; + +/* ENA Response for Create CQ Command. Appears in ACQ entry as response + * specific data + */ +struct ena_admin_acq_create_cq_resp_desc { + /* words 0:1 : Common Admin Queue completion descriptor */ + struct ena_admin_acq_common_desc acq_common_desc; + + /* word 2 : */ + /* cq identifier */ + uint16_t cq_idx; + + /* actual cq depth in # of entries */ + uint16_t cq_actual_depth; + + /* word 3 : doorbell address as an offset to PCIe MMIO REG BAR */ + uint32_t cq_doorbell_offset; + + /* word 4 : completion head doorbell address as an offset to PCIe + * MMIO REG BAR + */ + uint32_t cq_head_db_offset; + + /* word 5 : interrupt unmask register address as an offset into + * PCIe MMIO REG BAR + */ + uint32_t cq_interrupt_unmask_register; +}; + +/* ENA AQ Destroy Completion Queue command. Placed in control buffer + * pointed by AQ entry + */ +struct ena_admin_aq_destroy_cq_cmd { + /* words 0 : */ + struct ena_admin_aq_common_desc aq_common_descriptor; + + /* word 1 : */ + /* associated queue id. */ + uint16_t cq_idx; + + uint16_t reserved1; +}; + +/* ENA Response for Destroy CQ Command. 
Appears in ACQ entry as + * response_specific_data + */ +struct ena_admin_acq_destroy_cq_resp_desc { + /* words 0:1 : Common Admin Queue completion descriptor */ + struct ena_admin_acq_common_desc acq_common_desc; +}; + +/* ENA AQ Get Statistics command. Extended statistics are placed in control + * buffer pointed by AQ entry + */ +struct ena_admin_aq_get_stats_cmd { + /* words 0 : */ + struct ena_admin_aq_common_desc aq_common_descriptor; + + /* words 1:3 : */ + union { + /* command specific inline data */ + uint32_t inline_data_w1[3]; + + /* words 1:3 : points to control buffer (direct or + * indirect, chained if needed) + */ + struct ena_admin_ctrl_buff_info control_buffer; + } u; + + /* word 4 : */ + /* stats type as defined in enum ena_admin_get_stats_type */ + uint8_t type; + + /* stats scope defined in enum ena_admin_get_stats_scope */ + uint8_t scope; + + uint16_t reserved3; + + /* word 5 : */ + /* queue id. used when scope is specific_queue */ + uint16_t queue_idx; + + /* device id, value 0xFFFF means mine. only privileged device can get + * stats of other device + */ + uint16_t device_id; +}; + +/* Basic Statistics Command. */ +struct ena_admin_basic_stats { + /* word 0 : */ + uint32_t tx_bytes_low; + + /* word 1 : */ + uint32_t tx_bytes_high; + + /* word 2 : */ + uint32_t tx_pkts_low; + + /* word 3 : */ + uint32_t tx_pkts_high; + + /* word 4 : */ + uint32_t rx_bytes_low; + + /* word 5 : */ + uint32_t rx_bytes_high; + + /* word 6 : */ + uint32_t rx_pkts_low; + + /* word 7 : */ + uint32_t rx_pkts_high; + + /* word 8 : */ + uint32_t rx_drops_low; + + /* word 9 : */ + uint32_t rx_drops_high; +}; + +/* ENA Response for Get Statistics Command. Appears in ACQ entry as + * response_specific_data + */ +struct ena_admin_acq_get_stats_resp { + /* words 0:1 : Common Admin Queue completion descriptor */ + struct ena_admin_acq_common_desc acq_common_desc; + + /* words 2:11 : */ + struct ena_admin_basic_stats basic_stats; +}; + +/* ENA Get/Set Feature common descriptor. Appears as inline word in + * ena_aq_entry + */ +struct ena_admin_get_set_feature_common_desc { + /* word 0 : */ + /* 1:0 : select - 0x1 - current value; 0x3 - default + * value + * 7:3 : reserved3 + */ + uint8_t flags; + + /* as appears in ena_feature_id */ + uint8_t feature_id; + + /* reserved16 */ + uint16_t reserved16; +}; + +/* ENA Device Attributes Feature descriptor. */ +struct ena_admin_device_attr_feature_desc { + /* word 0 : implementation id */ + uint32_t impl_id; + + /* word 1 : device version */ + uint32_t device_version; + + /* word 2 : bit map of which bits are supported value of 1 + * indicated that this feature is supported and can perform SET/GET + * for it + */ + uint32_t supported_features; + + /* word 3 : */ + uint32_t reserved3; + + /* word 4 : Indicates how many bits are used physical address + * access. + */ + uint32_t phys_addr_width; + + /* word 5 : Indicates how many bits are used virtual address access. */ + uint32_t virt_addr_width; + + /* unicast MAC address (in Network byte order) */ + uint8_t mac_addr[6]; + + uint8_t reserved7[2]; + + /* word 8 : Max supported MTU value */ + uint32_t max_mtu; +}; + +/* ENA Max Queues Feature descriptor. 
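+ * Corresponds to the ENA_ADMIN_MAX_QUEUES_NUM feature and is returned in
+ * ena_admin_get_feat_resp.u.max_queue.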
*/ +struct ena_admin_queue_feature_desc { + /* word 0 : Max number of submission queues (including LLQs) */ + uint32_t max_sq_num; + + /* word 1 : Max submission queue depth */ + uint32_t max_sq_depth; + + /* word 2 : Max number of completion queues */ + uint32_t max_cq_num; + + /* word 3 : Max completion queue depth */ + uint32_t max_cq_depth; + + /* word 4 : Max number of LLQ submission queues */ + uint32_t max_llq_num; + + /* word 5 : Max submission queue depth of LLQ */ + uint32_t max_llq_depth; + + /* word 6 : Max header size */ + uint32_t max_header_size; + + /* word 7 : */ + /* Maximum Descriptors number, including meta descriptors, allowed + * for a single Tx packet + */ + uint16_t max_packet_tx_descs; + + /* Maximum Descriptors number allowed for a single Rx packet */ + uint16_t max_packet_rx_descs; +}; + +/* ENA MTU Set Feature descriptor. */ +struct ena_admin_set_feature_mtu_desc { + /* word 0 : mtu size including L2 */ + uint32_t mtu; +}; + +/* ENA host attributes Set Feature descriptor. */ +struct ena_admin_set_feature_host_attr_desc { + /* words 0:1 : host OS info base address in OS memory. host info is + * 4KB of physically contiguous + */ + struct ena_common_mem_addr os_info_ba; + + /* words 2:3 : host debug area base address in OS memory. debug + * area must be physically contiguous + */ + struct ena_common_mem_addr debug_ba; + + /* word 4 : debug area size */ + uint32_t debug_area_size; +}; + +/* ENA Interrupt Moderation Get Feature descriptor. */ +struct ena_admin_feature_intr_moder_desc { + /* word 0 : */ + /* interrupt delay granularity in usec */ + uint16_t intr_delay_resolution; + + uint16_t reserved; +}; + +/* ENA Link Get Feature descriptor. */ +struct ena_admin_get_feature_link_desc { + /* word 0 : Link speed in Mb */ + uint32_t speed; + + /* word 1 : supported speeds (bit field of enum ena_admin_link + * types) + */ + uint32_t supported; + + /* word 2 : */ + /* 0 : autoneg - auto negotiation + * 1 : duplex - Full Duplex + * 31:2 : reserved2 + */ + uint32_t flags; +}; + +/* ENA AENQ Feature descriptor. */ +struct ena_admin_feature_aenq_desc { + /* word 0 : bitmask for AENQ groups the device can report */ + uint32_t supported_groups; + + /* word 1 : bitmask for AENQ groups to report */ + uint32_t enabled_groups; +}; + +/* ENA Stateless Offload Feature descriptor. 
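+ * Corresponds to the ENA_ADMIN_STATELESS_OFFLOAD_CONFIG feature; the tx,
+ * rx_supported and rx_enabled words are decoded with the
+ * ENA_ADMIN_FEATURE_OFFLOAD_DESC_* masks at the end of this file.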
*/ +struct ena_admin_feature_offload_desc { + /* word 0 : */ + /* Trasmit side stateless offload + * 0 : TX_L3_csum_ipv4 - IPv4 checksum + * 1 : TX_L4_ipv4_csum_part - TCP/UDP over IPv4 + * checksum, the checksum field should be initialized + * with pseudo header checksum + * 2 : TX_L4_ipv4_csum_full - TCP/UDP over IPv4 + * checksum + * 3 : TX_L4_ipv6_csum_part - TCP/UDP over IPv6 + * checksum, the checksum field should be initialized + * with pseudo header checksum + * 4 : TX_L4_ipv6_csum_full - TCP/UDP over IPv6 + * checksum + * 5 : tso_ipv4 - TCP/IPv4 Segmentation Offloading + * 6 : tso_ipv6 - TCP/IPv6 Segmentation Offloading + * 7 : tso_ecn - TCP Segmentation with ECN + */ + uint32_t tx; + + /* word 1 : */ + /* Receive side supported stateless offload + * 0 : RX_L3_csum_ipv4 - IPv4 checksum + * 1 : RX_L4_ipv4_csum - TCP/UDP/IPv4 checksum + * 2 : RX_L4_ipv6_csum - TCP/UDP/IPv6 checksum + * 3 : RX_hash - Hash calculation + */ + uint32_t rx_supported; + + /* word 2 : */ + /* Receive side enabled stateless offload */ + uint32_t rx_enabled; +}; + +/* hash functions */ +enum ena_admin_hash_functions { + /* Toeplitz hash */ + ENA_ADMIN_TOEPLITZ = 1, + + /* CRC32 hash */ + ENA_ADMIN_CRC32 = 2, +}; + +/* ENA RSS flow hash control buffer structure */ +struct ena_admin_feature_rss_flow_hash_control { + /* word 0 : number of valid keys */ + uint32_t keys_num; + + /* word 1 : */ + uint32_t reserved; + + /* Toeplitz keys */ + uint32_t key[10]; +}; + +/* ENA RSS Flow Hash Function */ +struct ena_admin_feature_rss_flow_hash_function { + /* word 0 : */ + /* supported hash functions + * 7:0 : funcs - supported hash functions (bitmask + * accroding to ena_admin_hash_functions) + */ + uint32_t supported_func; + + /* word 1 : */ + /* selected hash func + * 7:0 : selected_func - selected hash function + * (bitmask accroding to ena_admin_hash_functions) + */ + uint32_t selected_func; + + /* word 2 : initial value */ + uint32_t init_val; +}; + +/* RSS flow hash protocols */ +enum ena_admin_flow_hash_proto { + /* tcp/ipv4 */ + ENA_ADMIN_RSS_TCP4 = 0, + + /* udp/ipv4 */ + ENA_ADMIN_RSS_UDP4 = 1, + + /* tcp/ipv6 */ + ENA_ADMIN_RSS_TCP6 = 2, + + /* udp/ipv6 */ + ENA_ADMIN_RSS_UDP6 = 3, + + /* ipv4 not tcp/udp */ + ENA_ADMIN_RSS_IP4 = 4, + + /* ipv6 not tcp/udp */ + ENA_ADMIN_RSS_IP6 = 5, + + /* fragmented ipv4 */ + ENA_ADMIN_RSS_IP4_FRAG = 6, + + /* not ipv4/6 */ + ENA_ADMIN_RSS_NOT_IP = 7, + + /* max number of protocols */ + ENA_ADMIN_RSS_PROTO_NUM = 16, +}; + +/* RSS flow hash fields */ +enum ena_admin_flow_hash_fields { + /* Ethernet Dest Addr */ + ENA_ADMIN_RSS_L2_DA = 0, + + /* Ethernet Src Addr */ + ENA_ADMIN_RSS_L2_SA = 1, + + /* ipv4/6 Dest Addr */ + ENA_ADMIN_RSS_L3_DA = 2, + + /* ipv4/6 Src Addr */ + ENA_ADMIN_RSS_L3_SA = 5, + + /* tcp/udp Dest Port */ + ENA_ADMIN_RSS_L4_DP = 6, + + /* tcp/udp Src Port */ + ENA_ADMIN_RSS_L4_SP = 7, +}; + +/* hash input fields for flow protocol */ +struct ena_admin_proto_input { + /* word 0 : */ + /* flow hash fields (bitwise according to ena_admin_flow_hash_fields) */ + uint16_t fields; + + /* 0 : inner - for tunneled packet, select the fields + * from inner header + */ + uint16_t flags; +}; + +/* ENA RSS hash control buffer structure */ +struct ena_admin_feature_rss_hash_control { + /* supported input fields */ + struct ena_admin_proto_input supported_fields[ENA_ADMIN_RSS_PROTO_NUM]; + + /* selected input fields */ + struct ena_admin_proto_input selected_fields[ENA_ADMIN_RSS_PROTO_NUM]; + + /* supported input fields for inner header */ + struct ena_admin_proto_input 
supported_inner_fields[ENA_ADMIN_RSS_PROTO_NUM]; + + /* selected input fields */ + struct ena_admin_proto_input selected_inner_fields[ENA_ADMIN_RSS_PROTO_NUM]; +}; + +/* ENA RSS flow hash input */ +struct ena_admin_feature_rss_flow_hash_input { + /* word 0 : */ + /* supported hash input sorting + * 1 : L3_sort - support swap L3 addresses if DA + * smaller than SA + * 2 : L4_sort - support swap L4 ports if DP smaller + * SP + */ + uint16_t supported_input_sort; + + /* enabled hash input sorting + * 1 : enable_L3_sort - enable swap L3 addresses if + * DA smaller than SA + * 2 : enable_L4_sort - enable swap L4 ports if DP + * smaller than SP + */ + uint16_t enabled_input_sort; +}; + +/* Operating system type */ +enum ena_admin_os_type { + /* Linux OS */ + ENA_ADMIN_OS_LINUX = 1, + + /* Windows OS */ + ENA_ADMIN_OS_WIN = 2, + + /* DPDK OS */ + ENA_ADMIN_OS_DPDK = 3, + + /* FreeBSD OS */ + ENA_ADMIN_OS_FREE_BSD = 4, + + /* PXE OS */ + ENA_ADMIN_OS_PXE = 5, +}; + +/* host info */ +struct ena_admin_host_info { + /* word 0 : OS type defined in enum ena_os_type */ + uint32_t os_type; + + /* os distribution string format */ + uint8_t os_dist_str[128]; + + /* word 33 : OS distribution numeric format */ + uint32_t os_dist; + + /* kernel version string format */ + uint8_t kernel_ver_str[32]; + + /* word 42 : Kernel version numeric format */ + uint32_t kernel_ver; + + /* word 43 : */ + /* driver version + * 7:0 : major - major + * 15:8 : minor - minor + * 23:16 : sub_minor - sub minor + */ + uint32_t driver_version; + + /* features bitmap */ + uint32_t supported_network_features[4]; +}; + +/* ENA RSS indirection table entry */ +struct ena_admin_rss_ind_table_entry { + /* word 0 : */ + /* cq identifier */ + uint16_t cq_idx; + + uint16_t reserved; +}; + +/* ENA RSS indirection table */ +struct ena_admin_feature_rss_ind_table { + /* word 0 : */ + /* min supported table size (2^min_size) */ + uint16_t min_size; + + /* max supported table size (2^max_size) */ + uint16_t max_size; + + /* word 1 : */ + /* table size (2^size) */ + uint16_t size; + + uint16_t reserved; + + /* word 2 : index of the inline entry. 0xFFFFFFFF means invalid */ + uint32_t inline_index; + + /* words 3 : used for updating single entry, ignored when setting + * the entire table through the control buffer. 
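+ * E.g. a single-entry update presumably sets inline_index above and fills
+ * inline_entry below with the target cq_idx.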
+ */ + struct ena_admin_rss_ind_table_entry inline_entry; +}; + +/* ENA Get Feature command */ +struct ena_admin_get_feat_cmd { + /* words 0 : */ + struct ena_admin_aq_common_desc aq_common_descriptor; + + /* words 1:3 : points to control buffer (direct or indirect, + * chained if needed) + */ + struct ena_admin_ctrl_buff_info control_buffer; + + /* words 4 : */ + struct ena_admin_get_set_feature_common_desc feat_common; + + /* words 5:15 : */ + union { + /* raw words */ + uint32_t raw[11]; + } u; +}; + +/* ENA Get Feature command response */ +struct ena_admin_get_feat_resp { + /* words 0:1 : */ + struct ena_admin_acq_common_desc acq_common_desc; + + /* words 2:15 : */ + union { + /* raw words */ + uint32_t raw[14]; + + /* words 2:10 : Get Device Attributes */ + struct ena_admin_device_attr_feature_desc dev_attr; + + /* words 2:5 : Max queues num */ + struct ena_admin_queue_feature_desc max_queue; + + /* words 2:3 : AENQ configuration */ + struct ena_admin_feature_aenq_desc aenq; + + /* words 2:4 : Get Link configuration */ + struct ena_admin_get_feature_link_desc link; + + /* words 2:4 : offload configuration */ + struct ena_admin_feature_offload_desc offload; + + /* words 2:4 : rss flow hash function */ + struct ena_admin_feature_rss_flow_hash_function flow_hash_func; + + /* words 2 : rss flow hash input */ + struct ena_admin_feature_rss_flow_hash_input flow_hash_input; + + /* words 2:3 : rss indirection table */ + struct ena_admin_feature_rss_ind_table ind_table; + + /* words 2 : interrupt moderation configuration */ + struct ena_admin_feature_intr_moder_desc intr_moderation; + } u; +}; + +/* ENA Set Feature command */ +struct ena_admin_set_feat_cmd { + /* words 0 : */ + struct ena_admin_aq_common_desc aq_common_descriptor; + + /* words 1:3 : points to control buffer (direct or indirect, + * chained if needed) + */ + struct ena_admin_ctrl_buff_info control_buffer; + + /* words 4 : */ + struct ena_admin_get_set_feature_common_desc feat_common; + + /* words 5:15 : */ + union { + /* raw words */ + uint32_t raw[11]; + + /* words 5 : mtu size */ + struct ena_admin_set_feature_mtu_desc mtu; + + /* words 5:7 : host attributes */ + struct ena_admin_set_feature_host_attr_desc host_attr; + + /* words 5:6 : AENQ configuration */ + struct ena_admin_feature_aenq_desc aenq; + + /* words 5:7 : rss flow hash function */ + struct ena_admin_feature_rss_flow_hash_function flow_hash_func; + + /* words 5 : rss flow hash input */ + struct ena_admin_feature_rss_flow_hash_input flow_hash_input; + + /* words 5:6 : rss indirection table */ + struct ena_admin_feature_rss_ind_table ind_table; + } u; +}; + +/* ENA Set Feature command response */ +struct ena_admin_set_feat_resp { + /* words 0:1 : */ + struct ena_admin_acq_common_desc acq_common_desc; + + /* words 2:15 : */ + union { + /* raw words */ + uint32_t raw[14]; + } u; +}; + +/* ENA Asynchronous Event Notification Queue descriptor. 
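+ * Every AENQ entry begins with this common descriptor; its group field
+ * selects the handler in struct ena_aenq_handlers, and group specific
+ * layouts such as ena_admin_aenq_link_change_desc embed it as their first
+ * words.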
*/ +struct ena_admin_aenq_common_desc { + /* word 0 : */ + uint16_t group; + + uint16_t syndrom; + + /* word 1 : */ + /* 0 : phase */ + uint8_t flags; + + uint8_t reserved1[3]; + + /* word 2 : Timestamp LSB */ + uint32_t timestamp_low; + + /* word 3 : Timestamp MSB */ + uint32_t timestamp_high; +}; + +/* asynchronous event notification groups */ +enum ena_admin_aenq_group { + /* Link State Change */ + ENA_ADMIN_LINK_CHANGE = 0, + + ENA_ADMIN_FATAL_ERROR = 1, + + ENA_ADMIN_WARNING = 2, + + ENA_ADMIN_NOTIFICATION = 3, + + ENA_ADMIN_KEEP_ALIVE = 4, + + ENA_ADMIN_AENQ_GROUPS_NUM = 5, +}; + +/* syndorm of AENQ notification group */ +enum ena_admin_aenq_notification_syndrom { + ENA_ADMIN_SUSPEND = 0, + + ENA_ADMIN_RESUME = 1, +}; + +/* ENA Asynchronous Event Notification generic descriptor. */ +struct ena_admin_aenq_entry { + /* words 0:3 : */ + struct ena_admin_aenq_common_desc aenq_common_desc; + + /* command specific inline data */ + uint32_t inline_data_w4[12]; +}; + +/* ENA Asynchronous Event Notification Queue Link Change descriptor. */ +struct ena_admin_aenq_link_change_desc { + /* words 0:3 : */ + struct ena_admin_aenq_common_desc aenq_common_desc; + + /* word 4 : */ + /* 0 : link_status */ + uint32_t flags; +}; + +/* ENA MMIO Readless response interface */ +struct ena_admin_ena_mmio_req_read_less_resp { + /* word 0 : */ + /* request id */ + uint16_t req_id; + + /* register offset */ + uint16_t reg_off; + + /* word 1 : value is valid when poll is cleared */ + uint32_t reg_val; +}; + +/* aq_common_desc */ +#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) +#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0) +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1 +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1) +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2 +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2) + +/* sq */ +#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5 +#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5) + +/* acq_common_desc */ +#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) +#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0) + +/* aq_create_sq_cmd */ +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5 +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5) +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0) +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4 +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4) +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK BIT(0) + +/* aq_create_cq_cmd */ +#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT 5 +#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5) +#define ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0) + +/* get_set_feature_common_desc */ +#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0) + +/* get_feature_link_desc */ +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0) +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1 +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1) + +/* feature_offload_desc */ +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK BIT(0) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT 1 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK BIT(1) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT 2 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK BIT(2) +#define 
ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT 3 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK BIT(3) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT 4 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK BIT(4) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK BIT(0) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT 1 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK BIT(1) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT 2 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK BIT(2) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3) + +/* feature_rss_flow_hash_function */ +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0) +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK \ + GENMASK(7, 0) + +/* proto_input */ +#define ENA_ADMIN_PROTO_INPUT_INNER_MASK BIT(0) + +/* feature_rss_flow_hash_input */ +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1 +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1) +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT 2 +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2) +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT 1 +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK BIT(1) +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT 2 +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK BIT(2) + +/* host_info */ +#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0) +#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8 +#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8) +#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16 +#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16) + +/* aenq_common_desc */ +#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0) + +/* aenq_link_change_desc */ +#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0) + +#if !defined(ENA_DEFS_LINUX_MAINLINE) +static inline uint16_t +get_ena_admin_aq_common_desc_command_id( + const struct ena_admin_aq_common_desc *p) +{ + return p->command_id & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK; +} + +static inline void +set_ena_admin_aq_common_desc_command_id(struct ena_admin_aq_common_desc *p, + uint16_t val) +{ + p->command_id |= val & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK; +} + +static inline uint8_t +get_ena_admin_aq_common_desc_phase(const struct ena_admin_aq_common_desc *p) +{ + return p->flags & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK; +} + +static inline void +set_ena_admin_aq_common_desc_phase(struct ena_admin_aq_common_desc *p, + uint8_t val) +{ + p->flags |= val & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK; +} + +static inline uint8_t +get_ena_admin_aq_common_desc_ctrl_data( + const struct ena_admin_aq_common_desc *p) +{ + return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK) >> + ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT; +} + +static inline void +set_ena_admin_aq_common_desc_ctrl_data(struct ena_admin_aq_common_desc *p, + uint8_t val) +{ + p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT) + & 
ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK; +} + +static inline uint8_t +get_ena_admin_aq_common_desc_ctrl_data_indirect( + const struct ena_admin_aq_common_desc *p) +{ + return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK) + >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT; +} + +static inline void +set_ena_admin_aq_common_desc_ctrl_data_indirect( + struct ena_admin_aq_common_desc *p, + uint8_t val) +{ + p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT) + & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; +} + +static inline uint8_t +get_ena_admin_sq_sq_direction(const struct ena_admin_sq *p) +{ + return (p->sq_identity & ENA_ADMIN_SQ_SQ_DIRECTION_MASK) + >> ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT; +} + +static inline void +set_ena_admin_sq_sq_direction(struct ena_admin_sq *p, uint8_t val) +{ + p->sq_identity |= (val << ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) & + ENA_ADMIN_SQ_SQ_DIRECTION_MASK; +} + +static inline uint16_t +get_ena_admin_acq_common_desc_command_id( + const struct ena_admin_acq_common_desc *p) +{ + return p->command & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK; +} + +static inline void +set_ena_admin_acq_common_desc_command_id(struct ena_admin_acq_common_desc *p, + uint16_t val) +{ + p->command |= val & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK; +} + +static inline uint8_t +get_ena_admin_acq_common_desc_phase(const struct ena_admin_acq_common_desc *p) +{ + return p->flags & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK; +} + +static inline void +set_ena_admin_acq_common_desc_phase(struct ena_admin_acq_common_desc *p, + uint8_t val) +{ + p->flags |= val & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK; +} + +static inline uint8_t +get_ena_admin_aq_create_sq_cmd_sq_direction( + const struct ena_admin_aq_create_sq_cmd *p) +{ + return (p->sq_identity & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK) + >> ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT; +} + +static inline void +set_ena_admin_aq_create_sq_cmd_sq_direction( + struct ena_admin_aq_create_sq_cmd *p, + uint8_t val) +{ + p->sq_identity |= (val << + ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) + & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK; +} + +static inline uint8_t +get_ena_admin_aq_create_sq_cmd_placement_policy( + const struct ena_admin_aq_create_sq_cmd *p) +{ + return p->sq_caps_2 & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK; +} + +static inline void +set_ena_admin_aq_create_sq_cmd_placement_policy( + struct ena_admin_aq_create_sq_cmd *p, + uint8_t val) +{ + p->sq_caps_2 |= val & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK; +} + +static inline uint8_t +get_ena_admin_aq_create_sq_cmd_completion_policy( + const struct ena_admin_aq_create_sq_cmd *p) +{ + return (p->sq_caps_2 + & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK) + >> ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT; +} + +static inline void +set_ena_admin_aq_create_sq_cmd_completion_policy( + struct ena_admin_aq_create_sq_cmd *p, + uint8_t val) +{ + p->sq_caps_2 |= + (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) + & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK; +} + +static inline uint8_t +get_ena_admin_aq_create_sq_cmd_is_physically_contiguous( + const struct ena_admin_aq_create_sq_cmd *p) +{ + return p->sq_caps_3 & + ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK; +} + +static inline void +set_ena_admin_aq_create_sq_cmd_is_physically_contiguous( + struct ena_admin_aq_create_sq_cmd *p, + uint8_t val) +{ + p->sq_caps_3 |= val & + ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK; +} + +static 
inline uint8_t +get_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled( + const struct ena_admin_aq_create_cq_cmd *p) +{ + return (p->cq_caps_1 & + ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK) + >> ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT; +} + +static inline void +set_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled( + struct ena_admin_aq_create_cq_cmd *p, + uint8_t val) +{ + p->cq_caps_1 |= + (val << ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT) + & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK; +} + +static inline uint8_t +get_ena_admin_aq_create_cq_cmd_cq_entry_size_words( + const struct ena_admin_aq_create_cq_cmd *p) +{ + return p->cq_caps_2 + & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK; +} + +static inline void +set_ena_admin_aq_create_cq_cmd_cq_entry_size_words( + struct ena_admin_aq_create_cq_cmd *p, + uint8_t val) +{ + p->cq_caps_2 |= + val & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK; +} + +static inline uint8_t +get_ena_admin_get_set_feature_common_desc_select( + const struct ena_admin_get_set_feature_common_desc *p) +{ + return p->flags & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK; +} + +static inline void +set_ena_admin_get_set_feature_common_desc_select( + struct ena_admin_get_set_feature_common_desc *p, + uint8_t val) +{ + p->flags |= val & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK; +} + +static inline uint32_t +get_ena_admin_get_feature_link_desc_autoneg( + const struct ena_admin_get_feature_link_desc *p) +{ + return p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK; +} + +static inline void +set_ena_admin_get_feature_link_desc_autoneg( + struct ena_admin_get_feature_link_desc *p, + uint32_t val) +{ + p->flags |= val & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK; +} + +static inline uint32_t +get_ena_admin_get_feature_link_desc_duplex( + const struct ena_admin_get_feature_link_desc *p) +{ + return (p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK) + >> ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT; +} + +static inline void +set_ena_admin_get_feature_link_desc_duplex( + struct ena_admin_get_feature_link_desc *p, + uint32_t val) +{ + p->flags |= (val << ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT) + & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK; +} + +static inline uint32_t +get_ena_admin_feature_offload_desc_TX_L3_csum_ipv4( + const struct ena_admin_feature_offload_desc *p) +{ + return p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK; +} + +static inline void +set_ena_admin_feature_offload_desc_TX_L3_csum_ipv4( + struct ena_admin_feature_offload_desc *p, + uint32_t val) +{ + p->tx |= val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK; +} + +static inline uint32_t +get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part( + const struct ena_admin_feature_offload_desc *p) +{ + return (p->tx & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) + >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT; +} + +static inline void +set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part( + struct ena_admin_feature_offload_desc *p, + uint32_t val) +{ + p->tx |= (val << + ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT) + & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK; +} + +static inline uint32_t +get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full( + const struct ena_admin_feature_offload_desc *p) +{ + return (p->tx & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK) + >> 
ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT; +} + +static inline void +set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full( + struct ena_admin_feature_offload_desc *p, + uint32_t val) +{ + p->tx |= (val << + ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT) + & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK; +} + +static inline uint32_t +get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part( + const struct ena_admin_feature_offload_desc *p) +{ + return (p->tx & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) + >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT; +} + +static inline void +set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part( + struct ena_admin_feature_offload_desc *p, + uint32_t val) +{ + p->tx |= (val << + ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT) + & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK; +} + +static inline uint32_t +get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full( + const struct ena_admin_feature_offload_desc *p) +{ + return (p->tx & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK) + >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT; +} + +static inline void +set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full( + struct ena_admin_feature_offload_desc *p, + uint32_t val) +{ + p->tx |= (val << + ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT) + & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK; +} + +static inline uint32_t +get_ena_admin_feature_offload_desc_tso_ipv4( + const struct ena_admin_feature_offload_desc *p) +{ + return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) + >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT; +} + +static inline void +set_ena_admin_feature_offload_desc_tso_ipv4( + struct ena_admin_feature_offload_desc *p, + uint32_t val) +{ + p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT) + & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK; +} + +static inline uint32_t +get_ena_admin_feature_offload_desc_tso_ipv6( + const struct ena_admin_feature_offload_desc *p) +{ + return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) + >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT; +} + +static inline void +set_ena_admin_feature_offload_desc_tso_ipv6( + struct ena_admin_feature_offload_desc *p, + uint32_t val) +{ + p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT) + & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK; +} + +static inline uint32_t +get_ena_admin_feature_offload_desc_tso_ecn( + const struct ena_admin_feature_offload_desc *p) +{ + return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK) + >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT; +} + +static inline void +set_ena_admin_feature_offload_desc_tso_ecn( + struct ena_admin_feature_offload_desc *p, + uint32_t val) +{ + p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT) + & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK; +} + +static inline uint32_t +get_ena_admin_feature_offload_desc_RX_L3_csum_ipv4( + const struct ena_admin_feature_offload_desc *p) +{ + return p->rx_supported & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK; +} + +static inline void +set_ena_admin_feature_offload_desc_RX_L3_csum_ipv4( + struct ena_admin_feature_offload_desc *p, + uint32_t val) +{ + p->rx_supported |= + val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK; +} + +static inline uint32_t +get_ena_admin_feature_offload_desc_RX_L4_ipv4_csum( + const struct ena_admin_feature_offload_desc *p) +{ + return 
(p->rx_supported & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) + >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT; +} + +static inline void +set_ena_admin_feature_offload_desc_RX_L4_ipv4_csum( + struct ena_admin_feature_offload_desc *p, + uint32_t val) +{ + p->rx_supported |= + (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT) + & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK; +} + +static inline uint32_t +get_ena_admin_feature_offload_desc_RX_L4_ipv6_csum( + const struct ena_admin_feature_offload_desc *p) +{ + return (p->rx_supported & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) + >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT; +} + +static inline void +set_ena_admin_feature_offload_desc_RX_L4_ipv6_csum( + struct ena_admin_feature_offload_desc *p, + uint32_t val) +{ + p->rx_supported |= + (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT) + & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK; +} + +static inline uint32_t +get_ena_admin_feature_offload_desc_RX_hash( + const struct ena_admin_feature_offload_desc *p) +{ + return (p->rx_supported & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK) + >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT; +} + +static inline void +set_ena_admin_feature_offload_desc_RX_hash( + struct ena_admin_feature_offload_desc *p, + uint32_t val) +{ + p->rx_supported |= + (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT) + & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK; +} + +static inline uint32_t +get_ena_admin_feature_rss_flow_hash_function_funcs( + const struct ena_admin_feature_rss_flow_hash_function *p) +{ + return p->supported_func & + ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK; +} + +static inline void +set_ena_admin_feature_rss_flow_hash_function_funcs( + struct ena_admin_feature_rss_flow_hash_function *p, + uint32_t val) +{ + p->supported_func |= + val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK; +} + +static inline uint32_t +get_ena_admin_feature_rss_flow_hash_function_selected_func( + const struct ena_admin_feature_rss_flow_hash_function *p) +{ + return p->selected_func & + ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK; +} + +static inline void +set_ena_admin_feature_rss_flow_hash_function_selected_func( + struct ena_admin_feature_rss_flow_hash_function *p, + uint32_t val) +{ + p->selected_func |= + val & + ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK; +} + +static inline uint16_t +get_ena_admin_proto_input_inner(const struct ena_admin_proto_input *p) +{ + return p->flags & ENA_ADMIN_PROTO_INPUT_INNER_MASK; +} + +static inline void +set_ena_admin_proto_input_inner(struct ena_admin_proto_input *p, uint16_t val) +{ + p->flags |= val & ENA_ADMIN_PROTO_INPUT_INNER_MASK; +} + +static inline uint16_t +get_ena_admin_feature_rss_flow_hash_input_L3_sort( + const struct ena_admin_feature_rss_flow_hash_input *p) +{ + return (p->supported_input_sort & + ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK) + >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT; +} + +static inline void +set_ena_admin_feature_rss_flow_hash_input_L3_sort( + struct ena_admin_feature_rss_flow_hash_input *p, + uint16_t val) +{ + p->supported_input_sort |= + (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT) + & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK; +} + +static inline uint16_t +get_ena_admin_feature_rss_flow_hash_input_L4_sort( + const struct ena_admin_feature_rss_flow_hash_input *p) +{ + return (p->supported_input_sort & + 
ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK) + >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT; +} + +static inline void +set_ena_admin_feature_rss_flow_hash_input_L4_sort( + struct ena_admin_feature_rss_flow_hash_input *p, + uint16_t val) +{ + p->supported_input_sort |= + (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT) + & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK; +} + +static inline uint16_t +get_ena_admin_feature_rss_flow_hash_input_enable_L3_sort( + const struct ena_admin_feature_rss_flow_hash_input *p) +{ + return (p->enabled_input_sort & + ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK) + >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT; +} + +static inline void +set_ena_admin_feature_rss_flow_hash_input_enable_L3_sort( + struct ena_admin_feature_rss_flow_hash_input *p, + uint16_t val) +{ + p->enabled_input_sort |= + (val << + ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT) + & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK; +} + +static inline uint16_t +get_ena_admin_feature_rss_flow_hash_input_enable_L4_sort( + const struct ena_admin_feature_rss_flow_hash_input *p) +{ + return (p->enabled_input_sort & + ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK) + >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT; +} + +static inline void +set_ena_admin_feature_rss_flow_hash_input_enable_L4_sort( + struct ena_admin_feature_rss_flow_hash_input *p, + uint16_t val) +{ + p->enabled_input_sort |= + (val << + ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT) + & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK; +} + +static inline uint32_t +get_ena_admin_host_info_major(const struct ena_admin_host_info *p) +{ + return p->driver_version & ENA_ADMIN_HOST_INFO_MAJOR_MASK; +} + +static inline void +set_ena_admin_host_info_major(struct ena_admin_host_info *p, uint32_t val) +{ + p->driver_version |= val & ENA_ADMIN_HOST_INFO_MAJOR_MASK; +} + +static inline uint32_t +get_ena_admin_host_info_minor(const struct ena_admin_host_info *p) +{ + return (p->driver_version & ENA_ADMIN_HOST_INFO_MINOR_MASK) + >> ENA_ADMIN_HOST_INFO_MINOR_SHIFT; +} + +static inline void +set_ena_admin_host_info_minor(struct ena_admin_host_info *p, uint32_t val) +{ + p->driver_version |= (val << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) + & ENA_ADMIN_HOST_INFO_MINOR_MASK; +} + +static inline uint32_t +get_ena_admin_host_info_sub_minor(const struct ena_admin_host_info *p) +{ + return (p->driver_version & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK) + >> ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT; +} + +static inline void +set_ena_admin_host_info_sub_minor(struct ena_admin_host_info *p, uint32_t val) +{ + p->driver_version |= (val << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) + & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK; +} + +static inline uint8_t +get_ena_admin_aenq_common_desc_phase( + const struct ena_admin_aenq_common_desc *p) +{ + return p->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK; +} + +static inline void +set_ena_admin_aenq_common_desc_phase( + struct ena_admin_aenq_common_desc *p, + uint8_t val) +{ + p->flags |= val & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK; +} + +static inline uint32_t +get_ena_admin_aenq_link_change_desc_link_status( + const struct ena_admin_aenq_link_change_desc *p) +{ + return p->flags & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; +} + +static inline void +set_ena_admin_aenq_link_change_desc_link_status( + struct ena_admin_aenq_link_change_desc *p, + uint32_t val) +{ + p->flags |= val & 
ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; +} + +#endif /* !defined(ENA_DEFS_LINUX_MAINLINE) */ +#endif /*_ENA_ADMIN_H_ */ diff --git a/drivers/net/ena/base/ena_defs/ena_common_defs.h b/drivers/net/ena/base/ena_defs/ena_common_defs.h new file mode 100644 index 00000000..95e0f389 --- /dev/null +++ b/drivers/net/ena/base/ena_defs/ena_common_defs.h @@ -0,0 +1,54 @@ +/*- +* BSD LICENSE +* +* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in +* the documentation and/or other materials provided with the +* distribution. +* * Neither the name of copyright holder nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef _ENA_COMMON_H_ +#define _ENA_COMMON_H_ + +/* spec version */ +#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* spec version major */ +#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* spec version minor */ + +/* ENA operates with 48-bit memory addresses. ena_mem_addr_t */ +struct ena_common_mem_addr { + /* word 0 : low 32 bit of the memory address */ + uint32_t mem_addr_low; + + /* word 1 : */ + /* high 16 bits of the memory address */ + uint16_t mem_addr_high; + + /* MBZ */ + uint16_t reserved16; +}; + +#endif /*_ENA_COMMON_H_ */ diff --git a/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h new file mode 100644 index 00000000..a547033d --- /dev/null +++ b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h @@ -0,0 +1,1488 @@ +/*- +* BSD LICENSE +* +* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in +* the documentation and/or other materials provided with the +* distribution. 
+* * Neither the name of copyright holder nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef _ENA_ETH_IO_H_ +#define _ENA_ETH_IO_H_ + +/* Layer 3 protocol index */ +enum ena_eth_io_l3_proto_index { + ENA_ETH_IO_L3_PROTO_UNKNOWN = 0, + + ENA_ETH_IO_L3_PROTO_IPV4 = 8, + + ENA_ETH_IO_L3_PROTO_IPV6 = 11, + + ENA_ETH_IO_L3_PROTO_FCOE = 21, + + ENA_ETH_IO_L3_PROTO_ROCE = 22, +}; + +/* Layer 4 protocol index */ +enum ena_eth_io_l4_proto_index { + ENA_ETH_IO_L4_PROTO_UNKNOWN = 0, + + ENA_ETH_IO_L4_PROTO_TCP = 12, + + ENA_ETH_IO_L4_PROTO_UDP = 13, + + ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23, +}; + +/* ENA IO Queue Tx descriptor */ +struct ena_eth_io_tx_desc { + /* word 0 : */ + /* length, request id and control flags + * 15:0 : length - Buffer length in bytes, must + * include any packet trailers that the ENA supposed + * to update like End-to-End CRC, Authentication GMAC + * etc. This length must not include the + * 'Push_Buffer' length. This length must not include + * the 4-byte added in the end for 802.3 Ethernet FCS + * 21:16 : req_id_hi - Request ID[15:10] + * 22 : reserved22 - MBZ + * 23 : meta_desc - MBZ + * 24 : phase + * 25 : reserved1 - MBZ + * 26 : first - Indicates first descriptor in + * transaction + * 27 : last - Indicates last descriptor in + * transaction + * 28 : comp_req - Indicates whether completion + * should be posted, after packet is transmitted. + * Valid only for first descriptor + * 30:29 : reserved29 - MBZ + * 31 : reserved31 - MBZ + */ + uint32_t len_ctrl; + + /* word 1 : */ + /* ethernet control + * 3:0 : l3_proto_idx - L3 protocol, if + * tunnel_ctrl[0] is set, then this is the inner + * packet L3. This field required when + * l3_csum_en,l3_csum or tso_en are set. + * 4 : DF - IPv4 DF, must be 0 if packet is IPv4 and + * DF flags of the IPv4 header is 0. Otherwise must + * be set to 1 + * 6:5 : reserved5 + * 7 : tso_en - Enable TSO, For TCP only. For packets + * with tunnel (tunnel_ctrl[0]=1), then the inner + * packet will be segmented while the outer tunnel is + * duplicated + * 12:8 : l4_proto_idx - L4 protocol, if + * tunnel_ctrl[0] is set, then this is the inner + * packet L4. This field need to be set when + * l4_csum_en or tso_en are set. + * 13 : l3_csum_en - enable IPv4 header checksum. if + * tunnel_ctrl[0] is set, then this will enable + * checksum for the inner packet IPv4 + * 14 : l4_csum_en - enable TCP/UDP checksum. 
if + * tunnel_ctrl[0] is set, then this will enable + * checksum on the inner packet TCP/UDP checksum + * 15 : ethernet_fcs_dis - when set, the controller + * will not append the 802.3 Ethernet Frame Check + * Sequence to the packet + * 16 : reserved16 + * 17 : l4_csum_partial - L4 partial checksum. when + * set to 0, the ENA calculates the L4 checksum, + * where the Destination Address required for the + * TCP/UDP pseudo-header is taken from the actual + * packet L3 header. when set to 1, the ENA doesn't + * calculate the sum of the pseudo-header, instead, + * the checksum field of the L4 is used instead. When + * TSO enabled, the checksum of the pseudo-header + * must not include the tcp length field. L4 partial + * checksum should be used for IPv6 packet that + * contains Routing Headers. + * 20:18 : tunnel_ctrl - Bit 0: tunneling exists, Bit + * 1: tunnel packet actually uses UDP as L4, Bit 2: + * tunnel packet L3 protocol: 0: IPv4 1: IPv6 + * 21 : ts_req - Indicates that the packet is IEEE + * 1588v2 packet requiring the timestamp + * 31:22 : req_id_lo - Request ID[9:0] + */ + uint32_t meta_ctrl; + + /* word 2 : Buffer address bits[31:0] */ + uint32_t buff_addr_lo; + + /* word 3 : */ + /* address high and header size + * 15:0 : addr_hi - Buffer Pointer[47:32] + * 23:16 : reserved16_w2 + * 31:24 : header_length - Header length. For Low + * Latency Queues, this fields indicates the number + * of bytes written to the headers' memory. For + * normal queues, if packet is TCP or UDP, and longer + * than max_header_size, then this field should be + * set to the sum of L4 header offset and L4 header + * size(without options), otherwise, this field + * should be set to 0. For both modes, this field + * must not exceed the max_header_size. + * max_header_size value is reported by the Max + * Queues Feature descriptor + */ + uint32_t buff_addr_hi_hdr_sz; +}; + +/* ENA IO Queue Tx Meta descriptor */ +struct ena_eth_io_tx_meta_desc { + /* word 0 : */ + /* length, request id and control flags + * 9:0 : req_id_lo - Request ID[9:0] + * 11:10 : outr_l3_off_hi - valid if + * tunnel_ctrl[0]=1. bits[4:3] of outer packet L3 + * offset + * 12 : reserved12 - MBZ + * 13 : reserved13 - MBZ + * 14 : ext_valid - if set, offset fields in Word2 + * are valid Also MSS High in Word 0 and Outer L3 + * Offset High in WORD 0 and bits [31:24] in Word 3 + * 15 : word3_valid - If set Crypto Info[23:0] of + * Word 3 is valid + * 19:16 : mss_hi_ptp + * 20 : eth_meta_type - 0: Tx Metadata Descriptor, 1: + * Extended Metadata Descriptor + * 21 : meta_store - Store extended metadata in queue + * cache + * 22 : reserved22 - MBZ + * 23 : meta_desc - MBO + * 24 : phase + * 25 : reserved25 - MBZ + * 26 : first - Indicates first descriptor in + * transaction + * 27 : last - Indicates last descriptor in + * transaction + * 28 : comp_req - Indicates whether completion + * should be posted, after packet is transmitted. + * Valid only for first descriptor + * 30:29 : reserved29 - MBZ + * 31 : reserved31 - MBZ + */ + uint32_t len_ctrl; + + /* word 1 : */ + /* word 1 + * 5:0 : req_id_hi + * 31:6 : reserved6 - MBZ + */ + uint32_t word1; + + /* word 2 : */ + /* word 2 + * 7:0 : l3_hdr_len - the header length L3 IP header. + * if tunnel_ctrl[0]=1, this is the IP header length + * of the inner packet. FIXME - check if includes IP + * options hdr_len + * 15:8 : l3_hdr_off - the offset of the first byte + * in the L3 header from the beginning of the to-be + * transmitted packet. 
if tunnel_ctrl[0]=1, this is + * the offset the L3 header of the inner packet + * 21:16 : l4_hdr_len_in_words - counts the L4 header + * length in words. there is an explicit assumption + * that L4 header appears right after L3 header and + * L4 offset is based on l3_hdr_off+l3_hdr_len FIXME + * - pls confirm + * 31:22 : mss_lo + */ + uint32_t word2; + + /* word 3 : */ + /* word 3 + * 23:0 : crypto_info + * 28:24 : outr_l3_hdr_len_words - valid if + * tunnel_ctrl[0]=1. Counts in words + * 31:29 : outr_l3_off_lo - valid if + * tunnel_ctrl[0]=1. bits[2:0] of outer packet L3 + * offset. Counts the offset of the tunnel IP header + * from beginning of the packet. NOTE: if the tunnel + * header requires CRC or checksum, it is expected to + * be done by the driver as it is not done by the HW + */ + uint32_t word3; +}; + +/* ENA IO Queue Tx completions descriptor */ +struct ena_eth_io_tx_cdesc { + /* word 0 : */ + /* Request ID[15:0] */ + uint16_t req_id; + + uint8_t status; + + /* flags + * 0 : phase + * 7:1 : reserved1 + */ + uint8_t flags; + + /* word 1 : */ + uint16_t sub_qid; + + /* indicates location of submission queue head */ + uint16_t sq_head_idx; +}; + +/* ENA IO Queue Rx descriptor */ +struct ena_eth_io_rx_desc { + /* word 0 : */ + /* In bytes. 0 means 64KB */ + uint16_t length; + + /* MBZ */ + uint8_t reserved2; + + /* control flags + * 0 : phase + * 1 : reserved1 - MBZ + * 2 : first - Indicates first descriptor in + * transaction + * 3 : last - Indicates last descriptor in transaction + * 4 : comp_req + * 5 : reserved5 - MBO + * 7:6 : reserved6 - MBZ + */ + uint8_t ctrl; + + /* word 1 : */ + uint16_t req_id; + + /* MBZ */ + uint16_t reserved6; + + /* word 2 : Buffer address bits[31:0] */ + uint32_t buff_addr_lo; + + /* word 3 : */ + /* Buffer Address bits[47:16] */ + uint16_t buff_addr_hi; + + /* MBZ */ + uint16_t reserved16_w3; +}; + +/* ENA IO Queue Rx Completion Base Descriptor (4-word format). Note: all + * ethernet parsing information are valid only when last=1 + */ +struct ena_eth_io_rx_cdesc_base { + /* word 0 : */ + /* 4:0 : l3_proto_idx - L3 protocol index + * 6:5 : src_vlan_cnt - Source VLAN count + * 7 : tunnel - Tunnel exists + * 12:8 : l4_proto_idx - L4 protocol index + * 13 : l3_csum_err - when set, either the L3 + * checksum error detected, or, the controller didn't + * validate the checksum, If tunnel exists, this + * result is for the inner packet. This bit is valid + * only when l3_proto_idx indicates IPv4 packet + * 14 : l4_csum_err - when set, either the L4 + * checksum error detected, or, the controller didn't + * validate the checksum. If tunnel exists, this + * result is for the inner packet. This bit is valid + * only when l4_proto_idx indicates TCP/UDP packet, + * and, ipv4_frag is not set + * 15 : ipv4_frag - Indicates IPv4 fragmented packet + * 17:16 : reserved16 + * 19:18 : reserved18 + * 20 : secured_pkt - Set if packet was handled by + * inline crypto engine + * 22:21 : crypto_status - bit 0 secured direction: + * 0: decryption, 1: encryption. bit 1 reserved + * 23 : reserved23 + * 24 : phase + * 25 : l3_csum2 - second checksum engine result + * 26 : first - Indicates first descriptor in + * transaction + * 27 : last - Indicates last descriptor in + * transaction + * 28 : inr_l4_csum - TCP/UDP checksum results for + * inner packet + * 29 : reserved29 + * 30 : buffer - 0: Metadata descriptor. 
1: Buffer + * Descriptor was used + * 31 : reserved31 + */ + uint32_t status; + + /* word 1 : */ + uint16_t length; + + uint16_t req_id; + + /* word 2 : 32-bit hash result */ + uint32_t hash; + + /* word 3 : */ + /* submission queue number */ + uint16_t sub_qid; + + uint16_t reserved; +}; + +/* ENA IO Queue Rx Completion Descriptor (8-word format) */ +struct ena_eth_io_rx_cdesc_ext { + /* words 0:3 : Rx Completion Extended */ + struct ena_eth_io_rx_cdesc_base base; + + /* word 4 : Completed Buffer address bits[31:0] */ + uint32_t buff_addr_lo; + + /* word 5 : */ + /* the buffer address used bits[47:32] */ + uint16_t buff_addr_hi; + + uint16_t reserved16; + + /* word 6 : Reserved */ + uint32_t reserved_w6; + + /* word 7 : Reserved */ + uint32_t reserved_w7; +}; + +/* ENA Interrupt Unmask Register */ +struct ena_eth_io_intr_reg { + /* word 0 : */ + /* 14:0 : rx_intr_delay - rx interrupt delay value + * 29:15 : tx_intr_delay - tx interrupt delay value + * 30 : intr_unmask - if set, unmasks interrupt + * 31 : reserved + */ + uint32_t intr_control; +}; + +/* tx_desc */ +#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0) +#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16 +#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16) +#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23 +#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23) +#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24 +#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24) +#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26 +#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26) +#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27 +#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27) +#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28 +#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28) +#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0) +#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4 +#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4) +#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7 +#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7) +#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8 +#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8) +#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13 +#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13) +#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14 +#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14) +#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15 +#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15) +#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17 +#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17) +#define ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_SHIFT 18 +#define ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_MASK GENMASK(20, 18) +#define ENA_ETH_IO_TX_DESC_TS_REQ_SHIFT 21 +#define ENA_ETH_IO_TX_DESC_TS_REQ_MASK BIT(21) +#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22 +#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22) +#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0) +#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24 +#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24) + +/* tx_meta_desc */ +#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0) +#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_SHIFT 10 +#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_MASK GENMASK(11, 10) +#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14 +#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14) +#define ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT 15 +#define ENA_ETH_IO_TX_META_DESC_WORD3_VALID_MASK BIT(15) +#define ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT 16 +#define ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK GENMASK(19, 16) +#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20 
+#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20) +#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21 +#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21) +#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23 +#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23) +#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24 +#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24) +#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26 +#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26) +#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27 +#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27) +#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28 +#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28) +#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0) +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0) +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8 +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8) +#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16 +#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16) +#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22 +#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22) +#define ENA_ETH_IO_TX_META_DESC_CRYPTO_INFO_MASK GENMASK(23, 0) +#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_SHIFT 24 +#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_MASK GENMASK(28, 24) +#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_SHIFT 29 +#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_MASK GENMASK(31, 29) + +/* tx_cdesc */ +#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0) + +/* rx_desc */ +#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0) +#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2 +#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2) +#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3 +#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3) +#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4 +#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4) + +/* rx_cdesc_base */ +#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0) +#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5 +#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5) +#define ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_SHIFT 7 +#define ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_MASK BIT(7) +#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8 +#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8) +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13 +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13) +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14 +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14) +#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15 +#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15) +#define ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_SHIFT 20 +#define ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_MASK BIT(20) +#define ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_SHIFT 21 +#define ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_MASK GENMASK(22, 21) +#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24 +#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24) +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25 +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25) +#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26 +#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26) +#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27 +#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27) +#define ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_SHIFT 28 +#define ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_MASK BIT(28) +#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30 +#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30) + +/* intr_reg 
*/ +#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0) +#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15 +#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15) +#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30 +#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30) + +#if !defined(ENA_DEFS_LINUX_MAINLINE) +static inline uint32_t get_ena_eth_io_tx_desc_length( + const struct ena_eth_io_tx_desc *p) +{ + return p->len_ctrl & ENA_ETH_IO_TX_DESC_LENGTH_MASK; +} + +static inline void set_ena_eth_io_tx_desc_length( + struct ena_eth_io_tx_desc *p, + uint32_t val) +{ + p->len_ctrl |= val & ENA_ETH_IO_TX_DESC_LENGTH_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_req_id_hi( + const struct ena_eth_io_tx_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK) + >> ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_req_id_hi( + struct ena_eth_io_tx_desc *p, + uint32_t val) +{ + p->len_ctrl |= + (val << ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) + & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_meta_desc( + const struct ena_eth_io_tx_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_DESC_META_DESC_MASK) + >> ENA_ETH_IO_TX_DESC_META_DESC_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_meta_desc( + struct ena_eth_io_tx_desc *p, + uint32_t val) +{ + p->len_ctrl |= + (val << ENA_ETH_IO_TX_DESC_META_DESC_SHIFT) + & ENA_ETH_IO_TX_DESC_META_DESC_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_phase( + const struct ena_eth_io_tx_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_DESC_PHASE_MASK) + >> ENA_ETH_IO_TX_DESC_PHASE_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_phase( + struct ena_eth_io_tx_desc *p, + uint32_t val) +{ + p->len_ctrl |= + (val << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) + & ENA_ETH_IO_TX_DESC_PHASE_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_first( + const struct ena_eth_io_tx_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_DESC_FIRST_MASK) + >> ENA_ETH_IO_TX_DESC_FIRST_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_first( + struct ena_eth_io_tx_desc *p, + uint32_t val) +{ + p->len_ctrl |= + (val << ENA_ETH_IO_TX_DESC_FIRST_SHIFT) + & ENA_ETH_IO_TX_DESC_FIRST_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_last( + const struct ena_eth_io_tx_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_DESC_LAST_MASK) + >> ENA_ETH_IO_TX_DESC_LAST_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_last( + struct ena_eth_io_tx_desc *p, + uint32_t val) +{ + p->len_ctrl |= + (val << ENA_ETH_IO_TX_DESC_LAST_SHIFT) + & ENA_ETH_IO_TX_DESC_LAST_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_comp_req( + const struct ena_eth_io_tx_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK) + >> ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_comp_req( + struct ena_eth_io_tx_desc *p, + uint32_t val) +{ + p->len_ctrl |= + (val << ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT) + & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_l3_proto_idx( + const struct ena_eth_io_tx_desc *p) +{ + return p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK; +} + +static inline void set_ena_eth_io_tx_desc_l3_proto_idx( + struct ena_eth_io_tx_desc *p, + uint32_t val) +{ + p->meta_ctrl |= val & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_DF( + const struct ena_eth_io_tx_desc *p) +{ + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_DF_MASK) + >> 
ENA_ETH_IO_TX_DESC_DF_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_DF( + struct ena_eth_io_tx_desc *p, + uint32_t val) +{ + p->meta_ctrl |= + (val << ENA_ETH_IO_TX_DESC_DF_SHIFT) + & ENA_ETH_IO_TX_DESC_DF_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_tso_en( + const struct ena_eth_io_tx_desc *p) +{ + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TSO_EN_MASK) + >> ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_tso_en( + struct ena_eth_io_tx_desc *p, + uint32_t val) +{ + p->meta_ctrl |= + (val << ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) + & ENA_ETH_IO_TX_DESC_TSO_EN_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_l4_proto_idx( + const struct ena_eth_io_tx_desc *p) +{ + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK) + >> ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_l4_proto_idx( + struct ena_eth_io_tx_desc *p, + uint32_t val) +{ + p->meta_ctrl |= + (val << ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) + & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_l3_csum_en( + const struct ena_eth_io_tx_desc *p) +{ + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK) + >> ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_l3_csum_en( + struct ena_eth_io_tx_desc *p, + uint32_t val) +{ + p->meta_ctrl |= + (val << ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) + & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_en( + const struct ena_eth_io_tx_desc *p) +{ + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK) + >> ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_l4_csum_en( + struct ena_eth_io_tx_desc *p, + uint32_t val) +{ + p->meta_ctrl |= + (val << ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) + & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_ethernet_fcs_dis( + const struct ena_eth_io_tx_desc *p) +{ + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK) + >> ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_ethernet_fcs_dis( + struct ena_eth_io_tx_desc *p, + uint32_t val) +{ + p->meta_ctrl |= + (val << ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT) + & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_partial( + const struct ena_eth_io_tx_desc *p) +{ + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK) + >> ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_l4_csum_partial( + struct ena_eth_io_tx_desc *p, + uint32_t val) +{ + p->meta_ctrl |= + (val << ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) + & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_tunnel_ctrl( + const struct ena_eth_io_tx_desc *p) +{ + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_MASK) + >> ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_tunnel_ctrl( + struct ena_eth_io_tx_desc *p, + uint32_t val) +{ + p->meta_ctrl |= + (val << ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_SHIFT) + & ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_ts_req( + const struct ena_eth_io_tx_desc *p) +{ + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TS_REQ_MASK) + >> ENA_ETH_IO_TX_DESC_TS_REQ_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_ts_req( + struct ena_eth_io_tx_desc *p, + uint32_t val) +{ + 
p->meta_ctrl |= + (val << ENA_ETH_IO_TX_DESC_TS_REQ_SHIFT) + & ENA_ETH_IO_TX_DESC_TS_REQ_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_req_id_lo( + const struct ena_eth_io_tx_desc *p) +{ + return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK) + >> ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_req_id_lo( + struct ena_eth_io_tx_desc *p, + uint32_t val) +{ + p->meta_ctrl |= + (val << ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) + & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_addr_hi( + const struct ena_eth_io_tx_desc *p) +{ + return p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK; +} + +static inline void set_ena_eth_io_tx_desc_addr_hi( + struct ena_eth_io_tx_desc *p, + uint32_t val) +{ + p->buff_addr_hi_hdr_sz |= val & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_desc_header_length( + const struct ena_eth_io_tx_desc *p) +{ + return (p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK) + >> ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT; +} + +static inline void set_ena_eth_io_tx_desc_header_length( + struct ena_eth_io_tx_desc *p, + uint32_t val) +{ + p->buff_addr_hi_hdr_sz |= + (val << ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) + & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_lo( + const struct ena_eth_io_tx_meta_desc *p) +{ + return p->len_ctrl & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK; +} + +static inline void set_ena_eth_io_tx_meta_desc_req_id_lo( + struct ena_eth_io_tx_meta_desc *p, + uint32_t val) +{ + p->len_ctrl |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_outr_l3_off_hi( + const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_MASK) + >> ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_outr_l3_off_hi( + struct ena_eth_io_tx_meta_desc *p, + uint32_t val) +{ + p->len_ctrl |= + (val << ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_SHIFT) + & ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_ext_valid( + const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK) + >> ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_ext_valid( + struct ena_eth_io_tx_meta_desc *p, + uint32_t val) +{ + p->len_ctrl |= + (val << ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT) + & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_word3_valid( + const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_WORD3_VALID_MASK) + >> ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_word3_valid( + struct ena_eth_io_tx_meta_desc *p, + uint32_t val) +{ + p->len_ctrl |= + (val << ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT) + & ENA_ETH_IO_TX_META_DESC_WORD3_VALID_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_hi_ptp( + const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK) + >> ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_mss_hi_ptp( + struct ena_eth_io_tx_meta_desc *p, + uint32_t val) +{ + p->len_ctrl |= + (val << ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT) + & ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK; +} 
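
Every set_* helper above only ORs the masked value into its word, so a descriptor must start out zeroed before these accessors are applied. A minimal sketch of filling a single-buffer Tx descriptor with the accessors defined above, assuming <stdint.h> and <string.h>; example_fill_tx_desc and its parameters are hypothetical names introduced here for illustration, not part of this header:

/* Sketch only: relies on the descriptor being cleared first, because the
 * set_* helpers never clear previously set bits.
 */
static inline void example_fill_tx_desc(struct ena_eth_io_tx_desc *desc,
					uint8_t phase, uint16_t req_id,
					uint64_t buf, uint16_t len,
					uint8_t hdr_len)
{
	memset(desc, 0, sizeof(*desc));

	/* word 0: length, request id high bits and control flags */
	set_ena_eth_io_tx_desc_length(desc, len);
	set_ena_eth_io_tx_desc_req_id_hi(desc, req_id >> 10);   /* ID[15:10] */
	set_ena_eth_io_tx_desc_phase(desc, phase);
	set_ena_eth_io_tx_desc_first(desc, 1);
	set_ena_eth_io_tx_desc_last(desc, 1);
	set_ena_eth_io_tx_desc_comp_req(desc, 1);

	/* word 1: request id low bits (offload fields left disabled here) */
	set_ena_eth_io_tx_desc_req_id_lo(desc, req_id & 0x3ff); /* ID[9:0] */

	/* words 2-3: 48-bit buffer address and header length */
	desc->buff_addr_lo = (uint32_t)buf;                      /* bits 31:0 */
	set_ena_eth_io_tx_desc_addr_hi(desc, (uint32_t)(buf >> 32)); /* 47:32 */
	set_ena_eth_io_tx_desc_header_length(desc, hdr_len);
}
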
+ +static inline uint32_t get_ena_eth_io_tx_meta_desc_eth_meta_type( + const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK) + >> ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_eth_meta_type( + struct ena_eth_io_tx_meta_desc *p, + uint32_t val) +{ + p->len_ctrl |= + (val << ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT) + & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_store( + const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK) + >> ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_meta_store( + struct ena_eth_io_tx_meta_desc *p, + uint32_t val) +{ + p->len_ctrl |= + (val << ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT) + & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_desc( + const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK) + >> ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_meta_desc( + struct ena_eth_io_tx_meta_desc *p, + uint32_t val) +{ + p->len_ctrl |= + (val << ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT) + & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_phase( + const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_PHASE_MASK) + >> ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_phase( + struct ena_eth_io_tx_meta_desc *p, + uint32_t val) +{ + p->len_ctrl |= + (val << ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) + & ENA_ETH_IO_TX_META_DESC_PHASE_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_first( + const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_FIRST_MASK) + >> ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_first( + struct ena_eth_io_tx_meta_desc *p, + uint32_t val) +{ + p->len_ctrl |= + (val << ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT) + & ENA_ETH_IO_TX_META_DESC_FIRST_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_last( + const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_LAST_MASK) + >> ENA_ETH_IO_TX_META_DESC_LAST_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_last( + struct ena_eth_io_tx_meta_desc *p, + uint32_t val) +{ + p->len_ctrl |= + (val << ENA_ETH_IO_TX_META_DESC_LAST_SHIFT) + & ENA_ETH_IO_TX_META_DESC_LAST_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_comp_req( + const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK) + >> ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_comp_req( + struct ena_eth_io_tx_meta_desc *p, + uint32_t val) +{ + p->len_ctrl |= + (val << ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT) + & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_hi( + const struct ena_eth_io_tx_meta_desc *p) +{ + return p->word1 & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK; +} + +static inline void set_ena_eth_io_tx_meta_desc_req_id_hi( + struct ena_eth_io_tx_meta_desc *p, + uint32_t val) +{ + p->word1 |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK; +} + +static inline uint32_t 
get_ena_eth_io_tx_meta_desc_l3_hdr_len( + const struct ena_eth_io_tx_meta_desc *p) +{ + return p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK; +} + +static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_len( + struct ena_eth_io_tx_meta_desc *p, + uint32_t val) +{ + p->word2 |= val & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_off( + const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK) + >> ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_off( + struct ena_eth_io_tx_meta_desc *p, + uint32_t val) +{ + p->word2 |= + (val << ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) + & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words( + const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->word2 & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK) + >> ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words( + struct ena_eth_io_tx_meta_desc *p, + uint32_t val) +{ + p->word2 |= + (val << ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) + & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_lo( + const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->word2 & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK) + >> ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_mss_lo( + struct ena_eth_io_tx_meta_desc *p, + uint32_t val) +{ + p->word2 |= + (val << ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) + & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_crypto_info( + const struct ena_eth_io_tx_meta_desc *p) +{ + return p->word3 & ENA_ETH_IO_TX_META_DESC_CRYPTO_INFO_MASK; +} + +static inline void set_ena_eth_io_tx_meta_desc_crypto_info( + struct ena_eth_io_tx_meta_desc *p, + uint32_t val) +{ + p->word3 |= val & ENA_ETH_IO_TX_META_DESC_CRYPTO_INFO_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_outr_l3_hdr_len_words( + const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->word3 & ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_MASK) + >> ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_outr_l3_hdr_len_words( + struct ena_eth_io_tx_meta_desc *p, + uint32_t val) +{ + p->word3 |= + (val << ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_SHIFT) + & ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_MASK; +} + +static inline uint32_t get_ena_eth_io_tx_meta_desc_outr_l3_off_lo( + const struct ena_eth_io_tx_meta_desc *p) +{ + return (p->word3 & ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_MASK) + >> ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_SHIFT; +} + +static inline void set_ena_eth_io_tx_meta_desc_outr_l3_off_lo( + struct ena_eth_io_tx_meta_desc *p, + uint32_t val) +{ + p->word3 |= + (val << ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_SHIFT) + & ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_MASK; +} + +static inline uint8_t get_ena_eth_io_tx_cdesc_phase( + const struct ena_eth_io_tx_cdesc *p) +{ + return p->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK; +} + +static inline void set_ena_eth_io_tx_cdesc_phase( + struct ena_eth_io_tx_cdesc *p, + uint8_t val) +{ + p->flags |= val & ENA_ETH_IO_TX_CDESC_PHASE_MASK; +} + +static inline uint8_t get_ena_eth_io_rx_desc_phase( + const struct ena_eth_io_rx_desc *p) +{ + return p->ctrl & ENA_ETH_IO_RX_DESC_PHASE_MASK; +} 
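
The interrupt register described earlier packs both moderation delays and the unmask flag into a single 32-bit intr_control word. A minimal sketch of composing that word from the ENA_ETH_IO_INTR_REG_* masks defined above; example_update_intr_reg and its arguments are hypothetical names used only for this illustration:

/* Sketch only: builds intr_control from scratch. rx_delay and tx_delay are
 * caller-supplied values in the device's interrupt-moderation units.
 */
static inline void example_update_intr_reg(struct ena_eth_io_intr_reg *reg,
					   uint32_t rx_delay,
					   uint32_t tx_delay,
					   int unmask)
{
	uint32_t val = 0;

	val |= rx_delay & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK; /* 14:0 */
	val |= (tx_delay << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
		& ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;          /* 29:15 */
	if (unmask)
		val |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;       /* bit 30 */

	reg->intr_control = val;
}
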
+ +static inline void set_ena_eth_io_rx_desc_phase( + struct ena_eth_io_rx_desc *p, + uint8_t val) +{ + p->ctrl |= val & ENA_ETH_IO_RX_DESC_PHASE_MASK; +} + +static inline uint8_t get_ena_eth_io_rx_desc_first( + const struct ena_eth_io_rx_desc *p) +{ + return (p->ctrl & ENA_ETH_IO_RX_DESC_FIRST_MASK) + >> ENA_ETH_IO_RX_DESC_FIRST_SHIFT; +} + +static inline void set_ena_eth_io_rx_desc_first( + struct ena_eth_io_rx_desc *p, + uint8_t val) +{ + p->ctrl |= + (val << ENA_ETH_IO_RX_DESC_FIRST_SHIFT) + & ENA_ETH_IO_RX_DESC_FIRST_MASK; +} + +static inline uint8_t get_ena_eth_io_rx_desc_last( + const struct ena_eth_io_rx_desc *p) +{ + return (p->ctrl & ENA_ETH_IO_RX_DESC_LAST_MASK) + >> ENA_ETH_IO_RX_DESC_LAST_SHIFT; +} + +static inline void set_ena_eth_io_rx_desc_last( + struct ena_eth_io_rx_desc *p, + uint8_t val) +{ + p->ctrl |= + (val << ENA_ETH_IO_RX_DESC_LAST_SHIFT) + & ENA_ETH_IO_RX_DESC_LAST_MASK; +} + +static inline uint8_t get_ena_eth_io_rx_desc_comp_req( + const struct ena_eth_io_rx_desc *p) +{ + return (p->ctrl & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK) + >> ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT; +} + +static inline void set_ena_eth_io_rx_desc_comp_req( + struct ena_eth_io_rx_desc *p, + uint8_t val) +{ + p->ctrl |= + (val << ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT) + & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_proto_idx( + const struct ena_eth_io_rx_cdesc_base *p) +{ + return p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK; +} + +static inline void set_ena_eth_io_rx_cdesc_base_l3_proto_idx( + struct ena_eth_io_rx_cdesc_base *p, + uint32_t val) +{ + p->status |= val & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_src_vlan_cnt( + const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK) + >> ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_src_vlan_cnt( + struct ena_eth_io_rx_cdesc_base *p, + uint32_t val) +{ + p->status |= + (val << ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT) + & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_tunnel( + const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_MASK) + >> ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_tunnel( + struct ena_eth_io_rx_cdesc_base *p, + uint32_t val) +{ + p->status |= + (val << ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_SHIFT) + & ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_proto_idx( + const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) + >> ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_l4_proto_idx( + struct ena_eth_io_rx_cdesc_base *p, + uint32_t val) +{ + p->status |= + (val << ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT) + & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum_err( + const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) + >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_l3_csum_err( + struct ena_eth_io_rx_cdesc_base *p, + uint32_t val) +{ + p->status |= + (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT) + & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK; 
+} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_csum_err( + const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) + >> ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_l4_csum_err( + struct ena_eth_io_rx_cdesc_base *p, + uint32_t val) +{ + p->status |= + (val << ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT) + & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_ipv4_frag( + const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) + >> ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_ipv4_frag( + struct ena_eth_io_rx_cdesc_base *p, + uint32_t val) +{ + p->status |= + (val << ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT) + & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_secured_pkt( + const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_MASK) + >> ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_secured_pkt( + struct ena_eth_io_rx_cdesc_base *p, + uint32_t val) +{ + p->status |= + (val << ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_SHIFT) + & ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_crypto_status( + const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_MASK) + >> ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_crypto_status( + struct ena_eth_io_rx_cdesc_base *p, + uint32_t val) +{ + p->status |= + (val << ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_SHIFT) + & ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_phase( + const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) + >> ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_phase( + struct ena_eth_io_rx_cdesc_base *p, + uint32_t val) +{ + p->status |= + (val << ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT) + & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum2( + const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK) + >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_l3_csum2( + struct ena_eth_io_rx_cdesc_base *p, + uint32_t val) +{ + p->status |= + (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT) + & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_first( + const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) + >> ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_first( + struct ena_eth_io_rx_cdesc_base *p, + uint32_t val) +{ + p->status |= + (val << ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT) + & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_last( + const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) + >> ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_last( + struct ena_eth_io_rx_cdesc_base *p, + uint32_t val) +{ + p->status 
|= + (val << ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT) + & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_inr_l4_csum( + const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_MASK) + >> ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_inr_l4_csum( + struct ena_eth_io_rx_cdesc_base *p, + uint32_t val) +{ + p->status |= + (val << ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_SHIFT) + & ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_MASK; +} + +static inline uint32_t get_ena_eth_io_rx_cdesc_base_buffer( + const struct ena_eth_io_rx_cdesc_base *p) +{ + return (p->status & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK) + >> ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT; +} + +static inline void set_ena_eth_io_rx_cdesc_base_buffer( + struct ena_eth_io_rx_cdesc_base *p, + uint32_t val) +{ + p->status |= + (val << ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT) + & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK; +} + +static inline uint32_t get_ena_eth_io_intr_reg_rx_intr_delay( + const struct ena_eth_io_intr_reg *p) +{ + return p->intr_control & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK; +} + +static inline void set_ena_eth_io_intr_reg_rx_intr_delay( + struct ena_eth_io_intr_reg *p, + uint32_t val) +{ + p->intr_control |= val & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK; +} + +static inline uint32_t get_ena_eth_io_intr_reg_tx_intr_delay( + const struct ena_eth_io_intr_reg *p) +{ + return (p->intr_control & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK) + >> ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT; +} + +static inline void set_ena_eth_io_intr_reg_tx_intr_delay( + struct ena_eth_io_intr_reg *p, + uint32_t val) +{ + p->intr_control |= + (val << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT) + & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK; +} + +static inline uint32_t get_ena_eth_io_intr_reg_intr_unmask( + const struct ena_eth_io_intr_reg *p) +{ + return (p->intr_control & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK) + >> ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT; +} + +static inline void set_ena_eth_io_intr_reg_intr_unmask( + struct ena_eth_io_intr_reg *p, + uint32_t val) +{ + p->intr_control |= + (val << ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT) + & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK; +} + +#endif /* !defined(ENA_DEFS_LINUX_MAINLINE) */ +#endif /*_ENA_ETH_IO_H_ */ diff --git a/drivers/net/ena/base/ena_defs/ena_gen_info.h b/drivers/net/ena/base/ena_defs/ena_gen_info.h new file mode 100644 index 00000000..4abdffed --- /dev/null +++ b/drivers/net/ena/base/ena_defs/ena_gen_info.h @@ -0,0 +1,35 @@ +/*- +* BSD LICENSE +* +* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in +* the documentation and/or other materials provided with the +* distribution. +* * Neither the name of copyright holder nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. 
+* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#define ENA_GEN_DATE "Mon Feb 15 14:33:08 IST 2016" +#define ENA_GEN_COMMIT "c71ec25" diff --git a/drivers/net/ena/base/ena_defs/ena_includes.h b/drivers/net/ena/base/ena_defs/ena_includes.h new file mode 100644 index 00000000..a86c876f --- /dev/null +++ b/drivers/net/ena/base/ena_defs/ena_includes.h @@ -0,0 +1,39 @@ +/*- +* BSD LICENSE +* +* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in +* the documentation and/or other materials provided with the +* distribution. +* * Neither the name of copyright holder nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include "ena_common_defs.h" +#include "ena_regs_defs.h" +#include "ena_admin_defs.h" +#include "ena_eth_io_defs.h" +#include "ena_efa_admin_defs.h" +#include "ena_efa_io_defs.h" diff --git a/drivers/net/ena/base/ena_defs/ena_regs_defs.h b/drivers/net/ena/base/ena_defs/ena_regs_defs.h new file mode 100644 index 00000000..d0241278 --- /dev/null +++ b/drivers/net/ena/base/ena_defs/ena_regs_defs.h @@ -0,0 +1,135 @@ +/*- +* BSD LICENSE +* +* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. 
+* * Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in +* the documentation and/or other materials provided with the +* distribution. +* * Neither the name of copyright holder nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef _ENA_REGS_H_ +#define _ENA_REGS_H_ + +/* ena_registers offsets */ +#define ENA_REGS_VERSION_OFF 0x0 +#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4 +#define ENA_REGS_CAPS_OFF 0x8 +#define ENA_REGS_CAPS_EXT_OFF 0xc +#define ENA_REGS_AQ_BASE_LO_OFF 0x10 +#define ENA_REGS_AQ_BASE_HI_OFF 0x14 +#define ENA_REGS_AQ_CAPS_OFF 0x18 +#define ENA_REGS_ACQ_BASE_LO_OFF 0x20 +#define ENA_REGS_ACQ_BASE_HI_OFF 0x24 +#define ENA_REGS_ACQ_CAPS_OFF 0x28 +#define ENA_REGS_AQ_DB_OFF 0x2c +#define ENA_REGS_ACQ_TAIL_OFF 0x30 +#define ENA_REGS_AENQ_CAPS_OFF 0x34 +#define ENA_REGS_AENQ_BASE_LO_OFF 0x38 +#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c +#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40 +#define ENA_REGS_AENQ_TAIL_OFF 0x44 +#define ENA_REGS_INTR_MASK_OFF 0x4c +#define ENA_REGS_DEV_CTL_OFF 0x54 +#define ENA_REGS_DEV_STS_OFF 0x58 +#define ENA_REGS_MMIO_REG_READ_OFF 0x5c +#define ENA_REGS_MMIO_RESP_LO_OFF 0x60 +#define ENA_REGS_MMIO_RESP_HI_OFF 0x64 +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68 + +/* version register */ +#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff +#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8 +#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00 + +/* controller_version register */ +#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff +#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8 +#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00 +#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16 +#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000 +#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24 +#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000 + +/* caps register */ +#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1 +#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1 +#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e +#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8 +#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00 + +/* aq_caps register */ +#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff +#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16 +#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000 + +/* acq_caps register */ +#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff +#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16 +#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000 + +/* aenq_caps register */ +#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff +#define 
ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16 +#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000 + +/* dev_ctl register */ +#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1 +#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1 +#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2 +#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2 +#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4 +#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3 +#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8 + +/* dev_sts register */ +#define ENA_REGS_DEV_STS_READY_MASK 0x1 +#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1 +#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2 +#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2 +#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4 +#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3 +#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8 +#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4 +#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10 +#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5 +#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80 + +/* mmio_reg_read register */ +#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff +#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16 +#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000 + +/* rss_ind_entry_update register */ +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16 +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000 + +#endif /*_ENA_REGS_H_ */ diff --git a/drivers/net/ena/base/ena_eth_com.c b/drivers/net/ena/base/ena_eth_com.c new file mode 100644 index 00000000..459e0bbb --- /dev/null +++ b/drivers/net/ena/base/ena_eth_com.c @@ -0,0 +1,508 @@ +/*- +* BSD LICENSE +* +* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in +* the documentation and/or other materials provided with the +* distribution. +* * Neither the name of copyright holder nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include "ena_eth_com.h" + +static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc( + struct ena_com_io_cq *io_cq) +{ + struct ena_eth_io_rx_cdesc_base *cdesc; + u16 expected_phase, head_masked; + u16 desc_phase; + + head_masked = io_cq->head & (io_cq->q_depth - 1); + expected_phase = io_cq->phase; + + cdesc = (struct ena_eth_io_rx_cdesc_base *) + ((unsigned char *)io_cq->cdesc_addr.virt_addr + + (head_masked * io_cq->cdesc_entry_size_in_bytes)); + + desc_phase = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT; + + if (desc_phase != expected_phase) + return NULL; + + return cdesc; +} + +static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq) +{ + io_cq->head++; + + /* Switch phase bit in case of wrap around */ + if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0)) + io_cq->phase = 1 - io_cq->phase; +} + +static inline void *get_sq_desc(struct ena_com_io_sq *io_sq) +{ + u16 tail_masked; + u32 offset; + + tail_masked = io_sq->tail & (io_sq->q_depth - 1); + + offset = tail_masked * io_sq->desc_entry_size; + + return (unsigned char *)io_sq->desc_addr.virt_addr + offset; +} + +static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq) +{ + u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1); + u32 offset = tail_masked * io_sq->desc_entry_size; + + /* In case this queue isn't a LLQ */ + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) + return; + + memcpy_toio((unsigned char *)io_sq->desc_addr.pbuf_dev_addr + offset, + (unsigned char *)io_sq->desc_addr.virt_addr + offset, + io_sq->desc_entry_size); +} + +static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq) +{ + io_sq->tail++; + + /* Switch phase bit in case of wrap around */ + if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) + io_sq->phase = 1 - io_sq->phase; +} + +static inline int ena_com_write_header(struct ena_com_io_sq *io_sq, + u8 *head_src, u16 header_len) +{ + u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1); + u8 __iomem *dev_head_addr = + io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size); + + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) + return 0; + + ENA_ASSERT(io_sq->header_addr, "header address is NULL\n"); + + memcpy_toio(dev_head_addr, head_src, header_len); + + return 0; +} + +static inline struct ena_eth_io_rx_cdesc_base * + ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx) +{ + idx &= (io_cq->q_depth - 1); + return (struct ena_eth_io_rx_cdesc_base *) + ((unsigned char *)io_cq->cdesc_addr.virt_addr + + idx * io_cq->cdesc_entry_size_in_bytes); +} + +static inline int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq, + u16 *first_cdesc_idx, + u16 *nb_hw_desc) +{ + struct ena_eth_io_rx_cdesc_base *cdesc; + u16 count = 0, head_masked; + u32 last = 0; + + do { + cdesc = ena_com_get_next_rx_cdesc(io_cq); + if (!cdesc) + break; + + ena_com_cq_inc_head(io_cq); + count++; + last = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT; + } while (!last); + + if (last) { + *first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx; + count += io_cq->cur_rx_pkt_cdesc_count; + + head_masked = io_cq->head & (io_cq->q_depth - 1); + + io_cq->cur_rx_pkt_cdesc_count = 0; + io_cq->cur_rx_pkt_cdesc_start_idx = head_masked; + + ena_trc_dbg("ena q_id: %d packets were completed. 
first desc idx %u descs# %d\n", + io_cq->qid, *first_cdesc_idx, count); + } else { + io_cq->cur_rx_pkt_cdesc_count += count; + count = 0; + } + + *nb_hw_desc = count; + return 0; +} + +static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq, + struct ena_com_tx_ctx *ena_tx_ctx) +{ + int rc; + + if (ena_tx_ctx->meta_valid) { + rc = memcmp(&io_sq->cached_tx_meta, + &ena_tx_ctx->ena_meta, + sizeof(struct ena_com_tx_meta)); + + if (unlikely(rc != 0)) + return true; + } + + return false; +} + +static inline void ena_com_create_and_store_tx_meta_desc( + struct ena_com_io_sq *io_sq, + struct ena_com_tx_ctx *ena_tx_ctx) +{ + struct ena_eth_io_tx_meta_desc *meta_desc = NULL; + struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; + + meta_desc = get_sq_desc(io_sq); + memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc)); + + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK; + + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK; + + /* bits 0-9 of the mss */ + meta_desc->word2 |= (ena_meta->mss << + ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) & + ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK; + /* bits 10-13 of the mss */ + meta_desc->len_ctrl |= ((ena_meta->mss >> 10) << + ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT) & + ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK; + + /* Extended meta desc */ + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK; + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK; + meta_desc->len_ctrl |= (io_sq->phase << + ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) & + ENA_ETH_IO_TX_META_DESC_PHASE_MASK; + + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK; + meta_desc->word2 |= ena_meta->l3_hdr_len & + ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK; + meta_desc->word2 |= (ena_meta->l3_hdr_offset << + ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) & + ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK; + + meta_desc->word2 |= (ena_meta->l4_hdr_len << + ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) & + ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK; + + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK; + + /* Cached the meta desc */ + memcpy(&io_sq->cached_tx_meta, ena_meta, + sizeof(struct ena_com_tx_meta)); + + ena_com_copy_curr_sq_desc_to_dev(io_sq); + ena_com_sq_update_tail(io_sq); +} + +static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx, + struct ena_eth_io_rx_cdesc_base *cdesc) +{ + ena_rx_ctx->l3_proto = (enum ena_eth_io_l3_proto_index)(cdesc->status & + ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK); + ena_rx_ctx->l4_proto = (enum ena_eth_io_l4_proto_index) + ((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT); + ena_rx_ctx->l3_csum_err = + (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT; + ena_rx_ctx->l4_csum_err = + (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT; + ena_rx_ctx->hash = cdesc->hash; + ena_rx_ctx->frag = + (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT; + + ena_trc_dbg("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash frag %d frag: %d cdesc_status: %x\n", + ena_rx_ctx->l3_proto, + ena_rx_ctx->l4_proto, + ena_rx_ctx->l3_csum_err, + ena_rx_ctx->l4_csum_err, + ena_rx_ctx->hash, + ena_rx_ctx->frag, + cdesc->status); +} + 
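ena_com_get_next_rx_cdesc() and ena_com_cdesc_rx_pkt_get() above rest on the completion-queue phase bit: the device stamps every completion descriptor with the phase of the lap in which it was written, and the driver flips its expected phase each time the masked head wraps, so descriptors left over from the previous lap are never mistaken for new completions. A stand-alone model of that convention, offered as a sketch only; the phase_model_* names are invented here and do not exist in the driver:

struct phase_model_cq {
	unsigned int head;
	unsigned int q_depth;	/* power of two, as required by the masking above */
	unsigned int phase;	/* expected phase for the current lap */
};

/* A descriptor is new only if the device wrote the current lap's phase. */
static inline int phase_model_cdesc_ready(const struct phase_model_cq *cq,
					  unsigned int cdesc_phase)
{
	return cdesc_phase == cq->phase;
}

/* Mirrors ena_com_cq_inc_head(): flip the expected phase on wrap-around. */
static inline void phase_model_inc_head(struct phase_model_cq *cq)
{
	cq->head++;
	if ((cq->head & (cq->q_depth - 1)) == 0)
		cq->phase = 1 - cq->phase;
}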
+/*****************************************************************************/ +/***************************** API **********************************/ +/*****************************************************************************/ + +int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, + struct ena_com_tx_ctx *ena_tx_ctx, + int *nb_hw_desc) +{ + struct ena_eth_io_tx_desc *desc = NULL; + struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs; + void *push_header = ena_tx_ctx->push_header; + u16 header_len = ena_tx_ctx->header_len; + u16 num_bufs = ena_tx_ctx->num_bufs; + int total_desc, i, rc; + bool have_meta; + u64 addr_hi; + + ENA_ASSERT(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX, + "wrong Q type"); + + /* num_bufs +1 for potential meta desc */ + if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) { + ena_trc_err("Not enough space in the tx queue\n"); + return ENA_COM_NO_MEM; + } + + if (unlikely(header_len > io_sq->tx_max_header_size)) { + ena_trc_err("header size is too large %d max header: %d\n", + header_len, io_sq->tx_max_header_size); + return ENA_COM_INVAL; + } + + /* start with pushing the header (if needed) */ + rc = ena_com_write_header(io_sq, push_header, header_len); + if (unlikely(rc)) + return rc; + + have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq, + ena_tx_ctx); + if (have_meta) + ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx); + + /* If the caller doesn't want send packets */ + if (unlikely(!num_bufs && !header_len)) { + *nb_hw_desc = have_meta ? 0 : 1; + return 0; + } + + desc = get_sq_desc(io_sq); + memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc)); + + /* Set first desc when we don't have meta descriptor */ + if (!have_meta) + desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK; + + desc->buff_addr_hi_hdr_sz |= (header_len << + ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) & + ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK; + desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) & + ENA_ETH_IO_TX_DESC_PHASE_MASK; + + desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK; + + /* Bits 0-9 */ + desc->meta_ctrl |= (ena_tx_ctx->req_id << + ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) & + ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK; + + desc->meta_ctrl |= (ena_tx_ctx->df << + ENA_ETH_IO_TX_DESC_DF_SHIFT) & + ENA_ETH_IO_TX_DESC_DF_MASK; + + /* Bits 10-15 */ + desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) << + ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) & + ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK; + + if (ena_tx_ctx->meta_valid) { + desc->meta_ctrl |= (ena_tx_ctx->tso_enable << + ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) & + ENA_ETH_IO_TX_DESC_TSO_EN_MASK; + desc->meta_ctrl |= ena_tx_ctx->l3_proto & + ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK; + desc->meta_ctrl |= (ena_tx_ctx->l4_proto << + ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) & + ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK; + desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable << + ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) & + ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK; + desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable << + ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) & + ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK; + desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial << + ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) & + ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK; + } + + for (i = 0; i < num_bufs; i++) { + /* The first desc share the same desc as the header */ + if (likely(i != 0)) { + ena_com_copy_curr_sq_desc_to_dev(io_sq); + ena_com_sq_update_tail(io_sq); + + desc = get_sq_desc(io_sq); + memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc)); + + desc->len_ctrl |= (io_sq->phase << + 
ENA_ETH_IO_TX_DESC_PHASE_SHIFT) & + ENA_ETH_IO_TX_DESC_PHASE_MASK; + } + + desc->len_ctrl |= ena_bufs->len & + ENA_ETH_IO_TX_DESC_LENGTH_MASK; + + addr_hi = ((ena_bufs->paddr & + GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32); + + desc->buff_addr_lo = (u32)ena_bufs->paddr; + desc->buff_addr_hi_hdr_sz |= addr_hi & + ENA_ETH_IO_TX_DESC_ADDR_HI_MASK; + ena_bufs++; + } + + /* set the last desc indicator */ + desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK; + + ena_com_copy_curr_sq_desc_to_dev(io_sq); + + ena_com_sq_update_tail(io_sq); + + total_desc = ENA_MAX16(num_bufs, 1); + total_desc += have_meta ? 1 : 0; + + *nb_hw_desc = total_desc; + return 0; +} + +int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, + struct ena_com_io_sq *io_sq, + struct ena_com_rx_ctx *ena_rx_ctx) +{ + struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0]; + struct ena_eth_io_rx_cdesc_base *cdesc = NULL; + u16 cdesc_idx = 0; + u16 nb_hw_desc; + u16 i; + int rc; + + ENA_ASSERT(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_RX, + "wrong Q type"); + + rc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx, &nb_hw_desc); + if (rc || (nb_hw_desc == 0)) { + ena_rx_ctx->descs = nb_hw_desc; + return rc; + } + + ena_trc_dbg("fetch rx packet: queue %d completed desc: %d\n", + io_cq->qid, nb_hw_desc); + + if (unlikely(nb_hw_desc >= ena_rx_ctx->max_bufs)) { + ena_trc_err("Too many RX cdescs (%d) > MAX(%d)\n", + nb_hw_desc, ena_rx_ctx->max_bufs); + return ENA_COM_NO_SPACE; + } + + for (i = 0; i < nb_hw_desc; i++) { + cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i); + + ena_buf->len = cdesc->length; + ena_buf->req_id = cdesc->req_id; + ena_buf++; + } + + /* Update SQ head ptr */ + io_sq->next_to_comp += nb_hw_desc; + + ena_trc_dbg("[%s][QID#%d] Updating SQ head to: %d\n", __func__, + io_sq->qid, io_sq->next_to_comp); + + /* Get rx flags from the last pkt */ + ena_com_rx_set_flags(ena_rx_ctx, cdesc); + + ena_rx_ctx->descs = nb_hw_desc; + return 0; +} + +int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, + struct ena_com_buf *ena_buf, + u16 req_id) +{ + struct ena_eth_io_rx_desc *desc; + + ENA_ASSERT(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_RX, + "wrong Q type"); + + if (unlikely(ena_com_sq_empty_space(io_sq) == 0)) + return -1; + + desc = get_sq_desc(io_sq); + memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc)); + + desc->length = ena_buf->len; + + desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK; + desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK; + desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK; + desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK; + + desc->req_id = req_id; + + desc->buff_addr_lo = (u32)ena_buf->paddr; + desc->buff_addr_hi = + ((ena_buf->paddr & + GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32); + + ena_com_sq_update_tail(io_sq); + + return 0; +} + +int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id) +{ + u8 expected_phase, cdesc_phase; + struct ena_eth_io_tx_cdesc *cdesc; + u16 masked_head; + + masked_head = io_cq->head & (io_cq->q_depth - 1); + expected_phase = io_cq->phase; + + cdesc = (struct ena_eth_io_tx_cdesc *) + ((unsigned char *)io_cq->cdesc_addr.virt_addr + + (masked_head * io_cq->cdesc_entry_size_in_bytes)); + + cdesc_phase = cdesc->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK; + if (cdesc_phase != expected_phase) + return -1; + + ena_com_cq_inc_head(io_cq); + + *req_id = cdesc->req_id; + + return 0; +} diff --git a/drivers/net/ena/base/ena_eth_com.h b/drivers/net/ena/base/ena_eth_com.h new file mode 100644 index 00000000..325d69c0 --- /dev/null +++ 
b/drivers/net/ena/base/ena_eth_com.h @@ -0,0 +1,153 @@ +/*- +* BSD LICENSE +* +* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in +* the documentation and/or other materials provided with the +* distribution. +* * Neither the name of copyright holder nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef ENA_ETH_COM_H_ +#define ENA_ETH_COM_H_ + +#if defined(__cplusplus) +extern "C" { +#endif +#include "ena_com.h" + +/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */ +#define ENA_COMP_HEAD_THRESH 4 + +struct ena_com_tx_ctx { + struct ena_com_tx_meta ena_meta; + struct ena_com_buf *ena_bufs; + /* For LLQ, header buffer - pushed to the device mem space */ + void *push_header; + + enum ena_eth_io_l3_proto_index l3_proto; + enum ena_eth_io_l4_proto_index l4_proto; + u16 num_bufs; + u16 req_id; + /* For regular queue, indicate the size of the header + * For LLQ, indicate the size of the pushed buffer + */ + u16 header_len; + + u8 meta_valid; + u8 tso_enable; + u8 l3_csum_enable; + u8 l4_csum_enable; + u8 l4_csum_partial; + u8 df; /* Don't fragment */ +}; + +struct ena_com_rx_ctx { + struct ena_com_rx_buf_info *ena_bufs; + enum ena_eth_io_l3_proto_index l3_proto; + enum ena_eth_io_l4_proto_index l4_proto; + bool l3_csum_err; + bool l4_csum_err; + /* fragmented packet */ + bool frag; + u32 hash; + u16 descs; + int max_bufs; +}; + +int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, + struct ena_com_tx_ctx *ena_tx_ctx, + int *nb_hw_desc); + +int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, + struct ena_com_io_sq *io_sq, + struct ena_com_rx_ctx *ena_rx_ctx); + +int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, + struct ena_com_buf *ena_buf, + u16 req_id); + +int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id); + +static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq, + struct ena_eth_io_intr_reg *intr_reg) +{ + ENA_REG_WRITE32(intr_reg->intr_control, io_cq->unmask_reg); +} + +static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq) +{ + u16 tail, next_to_comp, cnt; + + next_to_comp = io_sq->next_to_comp; + tail = io_sq->tail; + cnt = tail - next_to_comp; + + return 
io_sq->q_depth - 1 - cnt; +} + +static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq) +{ + u16 tail; + + tail = io_sq->tail; + + ena_trc_dbg("write submission queue doorbell for queue: %d tail: %d\n", + io_sq->qid, tail); + + ENA_REG_WRITE32(tail, io_sq->db_addr); + + return 0; +} + +static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq) +{ + u16 unreported_comp, head; + bool need_update; + + head = io_cq->head; + unreported_comp = head - io_cq->last_head_update; + need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH); + + if (io_cq->cq_head_db_reg && need_update) { + ena_trc_dbg("Write completion queue doorbell for queue %d: head: %d\n", + io_cq->qid, head); + ENA_REG_WRITE32(head, io_cq->cq_head_db_reg); + io_cq->last_head_update = head; + } + + return 0; +} + +static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem) +{ + io_sq->next_to_comp += elem; +} + +#if defined(__cplusplus) +} +#endif +#endif /* ENA_ETH_COM_H_ */ diff --git a/drivers/net/ena/base/ena_plat.h b/drivers/net/ena/base/ena_plat.h new file mode 100644 index 00000000..b5b64545 --- /dev/null +++ b/drivers/net/ena/base/ena_plat.h @@ -0,0 +1,53 @@ +/*- +* BSD LICENSE +* +* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in +* the documentation and/or other materials provided with the +* distribution. +* * Neither the name of copyright holder nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef ENA_PLAT_H_ +#define ENA_PLAT_H_ + +#if defined(ENA_IPXE) +#include "ena_plat_ipxe.h" +#elif defined(__linux__) +#if defined(__KERNEL__) +#include "ena_plat_linux.h" +#else +#include "ena_plat_dpdk.h" +#endif +#elif defined(__FreeBSD__) +#include "ena_plat_dpdk.h" +#elif defined(_WIN32) +#include "ena_plat_windows.h" +#else +#error "Invalid platform" +#endif + +#endif /* ENA_PLAT_H_ */ diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h new file mode 100644 index 00000000..aab2ac86 --- /dev/null +++ b/drivers/net/ena/base/ena_plat_dpdk.h @@ -0,0 +1,220 @@ +/*- +* BSD LICENSE +* +* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates. +* All rights reserved. 
+* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in +* the documentation and/or other materials provided with the +* distribution. +* * Neither the name of copyright holder nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef DPDK_ENA_COM_ENA_PLAT_DPDK_H_ +#define DPDK_ENA_COM_ENA_PLAT_DPDK_H_ + +#include <stdbool.h> +#include <stdlib.h> +#include <pthread.h> +#include <stdint.h> +#include <string.h> +#include <errno.h> + +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_cycles.h> +#include <rte_log.h> +#include <rte_malloc.h> +#include <rte_memzone.h> +#include <rte_spinlock.h> + +#include <sys/time.h> + +typedef uint64_t u64; +typedef uint32_t u32; +typedef uint16_t u16; +typedef uint8_t u8; + +typedef uint64_t dma_addr_t; +#ifndef ETIME +#define ETIME ETIMEDOUT +#endif + +#define ena_atomic32_t rte_atomic32_t +#define ena_mem_handle_t void * + +#define SZ_256 (256) +#define SZ_4K (4096) + +#define ENA_COM_OK 0 +#define ENA_COM_NO_MEM -ENOMEM +#define ENA_COM_INVAL -EINVAL +#define ENA_COM_NO_SPACE -ENOSPC +#define ENA_COM_NO_DEVICE -ENODEV +#define ENA_COM_PERMISSION -EPERM +#define ENA_COM_TIMER_EXPIRED -ETIME +#define ENA_COM_FAULT -EFAULT + +#define ____cacheline_aligned __rte_cache_aligned + +#define ENA_ABORT() abort() + +#define ENA_MSLEEP(x) rte_delay_ms(x) +#define ENA_UDELAY(x) rte_delay_us(x) + +#define memcpy_toio memcpy +#define wmb rte_wmb +#define rmb rte_wmb +#define mb rte_mb +#define __iomem + +#define US_PER_S 1000000 +#define ENA_GET_SYSTEM_USECS() \ + (rte_get_timer_cycles() * US_PER_S / rte_get_timer_hz()) + +#define ENA_ASSERT(cond, format, arg...) 
\ + do { \ + if (unlikely(!(cond))) { \ + printf("Assertion failed on %s:%s:%d: " format, \ + __FILE__, __func__, __LINE__, ##arg); \ + rte_exit(EXIT_FAILURE, "ASSERTION FAILED\n"); \ + } \ + } while (0) + +#define ENA_MAX32(x, y) RTE_MAX((x), (y)) +#define ENA_MAX16(x, y) RTE_MAX((x), (y)) +#define ENA_MAX8(x, y) RTE_MAX((x), (y)) +#define ENA_MIN32(x, y) RTE_MIN((x), (y)) +#define ENA_MIN16(x, y) RTE_MIN((x), (y)) +#define ENA_MIN8(x, y) RTE_MIN((x), (y)) + +#define U64_C(x) x ## ULL +#define BIT(nr) (1UL << (nr)) +#define BITS_PER_LONG (__SIZEOF_LONG__ * 8) +#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) +#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l)) + +#ifdef RTE_LIBRTE_ENA_COM_DEBUG +#define ena_trc_dbg(format, arg...) \ + RTE_LOG(DEBUG, PMD, "[ENA_COM: %s] " format, __func__, ##arg) +#define ena_trc_info(format, arg...) \ + RTE_LOG(INFO, PMD, "[ENA_COM: %s] " format, __func__, ##arg) +#define ena_trc_warn(format, arg...) \ + RTE_LOG(ERR, PMD, "[ENA_COM: %s] " format, __func__, ##arg) +#define ena_trc_err(format, arg...) \ + RTE_LOG(ERR, PMD, "[ENA_COM: %s] " format, __func__, ##arg) +#else +#define ena_trc_dbg(format, arg...) do { } while (0) +#define ena_trc_info(format, arg...) do { } while (0) +#define ena_trc_warn(format, arg...) do { } while (0) +#define ena_trc_err(format, arg...) do { } while (0) +#endif /* RTE_LIBRTE_ENA_COM_DEBUG */ + +/* Spinlock related methods */ +#define ena_spinlock_t rte_spinlock_t +#define ENA_SPINLOCK_INIT(spinlock) rte_spinlock_init(&spinlock) +#define ENA_SPINLOCK_LOCK(spinlock, flags) \ + ({(void)flags; rte_spinlock_lock(&spinlock); }) +#define ENA_SPINLOCK_UNLOCK(spinlock, flags) \ + ({(void)flags; rte_spinlock_unlock(&(spinlock)); }) + +#define q_waitqueue_t \ + struct { \ + pthread_cond_t cond; \ + pthread_mutex_t mutex; \ + } + +#define ena_wait_queue_t q_waitqueue_t + +#define ENA_WAIT_EVENT_INIT(waitqueue) \ + do { \ + pthread_mutex_init(&(waitqueue).mutex, NULL); \ + pthread_cond_init(&(waitqueue).cond, NULL); \ + } while (0) + +#define ENA_WAIT_EVENT_WAIT(waitevent, timeout) \ + do { \ + struct timespec wait; \ + struct timeval now; \ + unsigned long timeout_us; \ + gettimeofday(&now, NULL); \ + wait.tv_sec = now.tv_sec + timeout / 1000000UL; \ + timeout_us = timeout % 1000000UL; \ + wait.tv_nsec = (now.tv_usec + timeout_us) * 1000UL; \ + pthread_mutex_lock(&waitevent.mutex); \ + pthread_cond_timedwait(&waitevent.cond, \ + &waitevent.mutex, &wait); \ + pthread_mutex_unlock(&waitevent.mutex); \ + } while (0) +#define ENA_WAIT_EVENT_SIGNAL(waitevent) pthread_cond_signal(&waitevent.cond) +/* pthread condition doesn't need to be rearmed after usage */ +#define ENA_WAIT_EVENT_CLEAR(...) 
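ENA_WAIT_EVENT_WAIT() above computes wait.tv_nsec as (now.tv_usec + timeout_us) * 1000UL; both terms can be just under one million, so tv_nsec can approach two billion, and POSIX allows pthread_cond_timedwait() to reject a timespec whose tv_nsec is one billion or more with EINVAL. A carry-normalized sketch of the same computation, assuming <time.h> for struct timespec; example_abs_timeout() is an illustrative name, not something the patch provides:

static inline void example_abs_timeout(struct timespec *wait,
				       unsigned long timeout_us_total)
{
	struct timeval now;

	gettimeofday(&now, NULL);
	wait->tv_sec = now.tv_sec + timeout_us_total / 1000000UL;
	/* Fold the microsecond remainder together with the current microseconds. */
	timeout_us_total = now.tv_usec + timeout_us_total % 1000000UL;
	wait->tv_sec += timeout_us_total / 1000000UL;	/* carry whole seconds */
	wait->tv_nsec = (timeout_us_total % 1000000UL) * 1000UL;	/* always < 1e9 */
}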
+ +#define ena_wait_event_t ena_wait_queue_t +#define ENA_MIGHT_SLEEP() + +#define ENA_MEM_ALLOC_COHERENT(dmadev, size, virt, phys, handle) \ + do { \ + const struct rte_memzone *mz; \ + char z_name[RTE_MEMZONE_NAMESIZE]; \ + (void)dmadev; (void)handle; \ + snprintf(z_name, sizeof(z_name), \ + "ena_alloc_%d", ena_alloc_cnt++); \ + mz = rte_memzone_reserve(z_name, size, SOCKET_ID_ANY, 0); \ + virt = mz->addr; \ + phys = mz->phys_addr; \ + } while (0) +#define ENA_MEM_FREE_COHERENT(dmadev, size, virt, phys, handle) \ + ({(void)size; rte_free(virt); }) +#define ENA_MEM_ALLOC(dmadev, size) rte_zmalloc(NULL, size, 1) +#define ENA_MEM_FREE(dmadev, ptr) ({(void)dmadev; rte_free(ptr); }) + +static inline void writel(u32 value, volatile void *addr) +{ + *(volatile u32 *)addr = value; +} + +static inline u32 readl(const volatile void *addr) +{ + return *(const volatile u32 *)addr; +} + +#define ENA_REG_WRITE32(value, reg) writel((value), (reg)) +#define ENA_REG_READ32(reg) readl((reg)) + +#define ATOMIC32_INC(i32_ptr) rte_atomic32_inc(i32_ptr) +#define ATOMIC32_DEC(i32_ptr) rte_atomic32_dec(i32_ptr) +#define ATOMIC32_SET(i32_ptr, val) rte_atomic32_set(i32_ptr, val) +#define ATOMIC32_READ(i32_ptr) rte_atomic32_read(i32_ptr) + +#define msleep(x) rte_delay_us(x * 1000) +#define udelay(x) rte_delay_us(x) + +#define MAX_ERRNO 4095 +#define IS_ERR(x) (((unsigned long)x) >= (unsigned long)-MAX_ERRNO) +#define ERR_PTR(error) ((void *)(long)error) +#define PTR_ERR(error) ((long)(void *)error) +#define might_sleep() + +#endif /* DPDK_ENA_COM_ENA_PLAT_DPDK_H_ */ diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c new file mode 100644 index 00000000..02af67a2 --- /dev/null +++ b/drivers/net/ena/ena_ethdev.c @@ -0,0 +1,1455 @@ +/*- +* BSD LICENSE +* +* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in +* the documentation and/or other materials provided with the +* distribution. +* * Neither the name of copyright holder nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_tcp.h> +#include <rte_atomic.h> +#include <rte_dev.h> +#include <rte_errno.h> + +#include "ena_ethdev.h" +#include "ena_logs.h" +#include "ena_platform.h" +#include "ena_com.h" +#include "ena_eth_com.h" + +#include <ena_common_defs.h> +#include <ena_regs_defs.h> +#include <ena_admin_defs.h> +#include <ena_eth_io_defs.h> + +#define ENA_IO_TXQ_IDX(q) (2 * (q)) +#define ENA_IO_RXQ_IDX(q) (2 * (q) + 1) +/*reverse version of ENA_IO_RXQ_IDX*/ +#define ENA_IO_RXQ_IDX_REV(q) ((q - 1) / 2) + +/* While processing submitted and completed descriptors (rx and tx path + * respectively) in a loop it is desired to: + * - perform batch submissions while populating sumbissmion queue + * - avoid blocking transmission of other packets during cleanup phase + * Hence the utilization ratio of 1/8 of a queue size. + */ +#define ENA_RING_DESCS_RATIO(ring_size) (ring_size / 8) + +#define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l) +#define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift)) + +#define GET_L4_HDR_LEN(mbuf) \ + ((rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, \ + mbuf->l3_len + mbuf->l2_len)->data_off) >> 4) + +#define ENA_RX_RSS_TABLE_LOG_SIZE 7 +#define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE) +#define ENA_HASH_KEY_SIZE 40 + +/** Vendor ID used by Amazon devices */ +#define PCI_VENDOR_ID_AMAZON 0x1D0F +/** Amazon devices */ +#define PCI_DEVICE_ID_ENA_VF 0xEC20 +#define PCI_DEVICE_ID_ENA_LLQ_VF 0xEC21 + +static struct rte_pci_id pci_id_ena_map[] = { +#define RTE_PCI_DEV_ID_DECL_ENA(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, + + RTE_PCI_DEV_ID_DECL_ENA(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) + RTE_PCI_DEV_ID_DECL_ENA(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) + {.device_id = 0}, +}; + +static int ena_device_init(struct ena_com_dev *ena_dev, + struct ena_com_dev_get_features_ctx *get_feat_ctx); +static int ena_dev_configure(struct rte_eth_dev *dev); +static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); +static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp); +static uint16_t eth_ena_recv_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts); +static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count); +static void ena_init_rings(struct ena_adapter *adapter); +static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); +static int ena_start(struct rte_eth_dev *dev); +static void ena_close(struct rte_eth_dev *dev); +static void ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); +static void ena_rx_queue_release_all(struct rte_eth_dev *dev); +static void ena_tx_queue_release_all(struct rte_eth_dev *dev); +static void ena_rx_queue_release(void *queue); +static void ena_tx_queue_release(void *queue); +static void ena_rx_queue_release_bufs(struct ena_ring *ring); +static void ena_tx_queue_release_bufs(struct ena_ring *ring); +static int ena_link_update(struct rte_eth_dev *dev, + __rte_unused int wait_to_complete); +static int ena_queue_restart(struct ena_ring *ring); +static int ena_queue_restart_all(struct rte_eth_dev *dev, + enum ena_ring_type ring_type); +static void ena_stats_restart(struct rte_eth_dev *dev); +static void 
ena_infos_get(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static int ena_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static int ena_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); + +static struct eth_dev_ops ena_dev_ops = { + .dev_configure = ena_dev_configure, + .dev_infos_get = ena_infos_get, + .rx_queue_setup = ena_rx_queue_setup, + .tx_queue_setup = ena_tx_queue_setup, + .dev_start = ena_start, + .link_update = ena_link_update, + .stats_get = ena_stats_get, + .mtu_set = ena_mtu_set, + .rx_queue_release = ena_rx_queue_release, + .tx_queue_release = ena_tx_queue_release, + .dev_close = ena_close, + .reta_update = ena_rss_reta_update, + .reta_query = ena_rss_reta_query, +}; + +static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf, + struct ena_com_rx_ctx *ena_rx_ctx) +{ + uint64_t ol_flags = 0; + + if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) + ol_flags |= PKT_TX_TCP_CKSUM; + else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP) + ol_flags |= PKT_TX_UDP_CKSUM; + + if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) + ol_flags |= PKT_TX_IPV4; + else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) + ol_flags |= PKT_TX_IPV6; + + if (unlikely(ena_rx_ctx->l4_csum_err)) + ol_flags |= PKT_RX_L4_CKSUM_BAD; + if (unlikely(ena_rx_ctx->l3_csum_err)) + ol_flags |= PKT_RX_IP_CKSUM_BAD; + + mbuf->ol_flags = ol_flags; +} + +static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, + struct ena_com_tx_ctx *ena_tx_ctx) +{ + struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; + + if (mbuf->ol_flags & + (PKT_TX_L4_MASK | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG)) { + /* check if TSO is required */ + if (mbuf->ol_flags & PKT_TX_TCP_SEG) { + ena_tx_ctx->tso_enable = true; + + ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf); + } + + /* check if L3 checksum is needed */ + if (mbuf->ol_flags & PKT_TX_IP_CKSUM) + ena_tx_ctx->l3_csum_enable = true; + + if (mbuf->ol_flags & PKT_TX_IPV6) { + ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; + } else { + ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; + + /* set don't fragment (DF) flag */ + if (mbuf->packet_type & + (RTE_PTYPE_L4_NONFRAG + | RTE_PTYPE_INNER_L4_NONFRAG)) + ena_tx_ctx->df = true; + } + + /* check if L4 checksum is needed */ + switch (mbuf->ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_TCP_CKSUM: + ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; + ena_tx_ctx->l4_csum_enable = true; + break; + case PKT_TX_UDP_CKSUM: + ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; + ena_tx_ctx->l4_csum_enable = true; + break; + default: + ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN; + ena_tx_ctx->l4_csum_enable = false; + break; + } + + ena_meta->mss = mbuf->tso_segsz; + ena_meta->l3_hdr_len = mbuf->l3_len; + ena_meta->l3_hdr_offset = mbuf->l2_len; + /* this param needed only for TSO */ + ena_meta->l3_outer_hdr_len = 0; + ena_meta->l3_outer_hdr_offset = 0; + + ena_tx_ctx->meta_valid = true; + } else { + ena_tx_ctx->meta_valid = false; + } +} + +static void ena_close(struct rte_eth_dev *dev) +{ + struct ena_adapter *adapter = + (struct ena_adapter *)(dev->data->dev_private); + + adapter->state = ENA_ADAPTER_STATE_STOPPED; + + ena_rx_queue_release_all(dev); + ena_tx_queue_release_all(dev); +} + +static int ena_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct ena_adapter *adapter = + (struct ena_adapter 
*)(dev->data->dev_private); + struct ena_com_dev *ena_dev = &adapter->ena_dev; + int ret, i; + u16 entry_value; + int conf_idx; + int idx; + + if ((reta_size == 0) || (reta_conf == NULL)) + return -EINVAL; + + if (reta_size > ENA_RX_RSS_TABLE_SIZE) { + RTE_LOG(WARNING, PMD, + "indirection table %d is bigger than supported (%d)\n", + reta_size, ENA_RX_RSS_TABLE_SIZE); + ret = -EINVAL; + goto err; + } + + for (i = 0 ; i < reta_size ; i++) { + /* each reta_conf is for 64 entries. + * to support 128 we use 2 conf of 64 + */ + conf_idx = i / RTE_RETA_GROUP_SIZE; + idx = i % RTE_RETA_GROUP_SIZE; + if (TEST_BIT(reta_conf[conf_idx].mask, idx)) { + entry_value = + ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]); + ret = ena_com_indirect_table_fill_entry(ena_dev, + i, + entry_value); + if (unlikely(ret && (ret != ENA_COM_PERMISSION))) { + RTE_LOG(ERR, PMD, + "Cannot fill indirect table\n"); + ret = -ENOTSUP; + goto err; + } + } + } + + ret = ena_com_indirect_table_set(ena_dev); + if (unlikely(ret && (ret != ENA_COM_PERMISSION))) { + RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n"); + ret = -ENOTSUP; + goto err; + } + + RTE_LOG(DEBUG, PMD, "%s(): RSS configured %d entries for port %d\n", + __func__, reta_size, adapter->rte_dev->data->port_id); +err: + return ret; +} + +/* Query redirection table. */ +static int ena_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct ena_adapter *adapter = + (struct ena_adapter *)(dev->data->dev_private); + struct ena_com_dev *ena_dev = &adapter->ena_dev; + int ret; + int i; + u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0}; + int reta_conf_idx; + int reta_idx; + + if (reta_size == 0 || reta_conf == NULL || + (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL))) + return -EINVAL; + + ret = ena_com_indirect_table_get(ena_dev, indirect_table); + if (unlikely(ret && (ret != ENA_COM_PERMISSION))) { + RTE_LOG(ERR, PMD, "cannot get indirect table\n"); + ret = -ENOTSUP; + goto err; + } + + for (i = 0 ; i < reta_size ; i++) { + reta_conf_idx = i / RTE_RETA_GROUP_SIZE; + reta_idx = i % RTE_RETA_GROUP_SIZE; + if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx)) + reta_conf[reta_conf_idx].reta[reta_idx] = + ENA_IO_RXQ_IDX_REV(indirect_table[i]); + } +err: + return ret; +} + +static int ena_rss_init_default(struct ena_adapter *adapter) +{ + struct ena_com_dev *ena_dev = &adapter->ena_dev; + uint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues; + int rc, i; + u32 val; + + rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); + if (unlikely(rc)) { + RTE_LOG(ERR, PMD, "Cannot init indirect table\n"); + goto err_rss_init; + } + + for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { + val = i % nb_rx_queues; + rc = ena_com_indirect_table_fill_entry(ena_dev, i, + ENA_IO_RXQ_IDX(val)); + if (unlikely(rc && (rc != ENA_COM_PERMISSION))) { + RTE_LOG(ERR, PMD, "Cannot fill indirect table\n"); + goto err_fill_indir; + } + } + + rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, + ENA_HASH_KEY_SIZE, 0xFFFFFFFF); + if (unlikely(rc && (rc != ENA_COM_PERMISSION))) { + RTE_LOG(INFO, PMD, "Cannot fill hash function\n"); + goto err_fill_indir; + } + + rc = ena_com_set_default_hash_ctrl(ena_dev); + if (unlikely(rc && (rc != ENA_COM_PERMISSION))) { + RTE_LOG(INFO, PMD, "Cannot fill hash control\n"); + goto err_fill_indir; + } + + rc = ena_com_indirect_table_set(ena_dev); + if (unlikely(rc && (rc != ENA_COM_PERMISSION))) { + RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n"); + goto 
err_fill_indir; + } + RTE_LOG(DEBUG, PMD, "RSS configured for port %d\n", + adapter->rte_dev->data->port_id); + + return 0; + +err_fill_indir: + ena_com_rss_destroy(ena_dev); +err_rss_init: + + return rc; +} + +static void ena_rx_queue_release_all(struct rte_eth_dev *dev) +{ + struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues; + int nb_queues = dev->data->nb_rx_queues; + int i; + + for (i = 0; i < nb_queues; i++) + ena_rx_queue_release(queues[i]); +} + +static void ena_tx_queue_release_all(struct rte_eth_dev *dev) +{ + struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues; + int nb_queues = dev->data->nb_tx_queues; + int i; + + for (i = 0; i < nb_queues; i++) + ena_tx_queue_release(queues[i]); +} + +static void ena_rx_queue_release(void *queue) +{ + struct ena_ring *ring = (struct ena_ring *)queue; + struct ena_adapter *adapter = ring->adapter; + int ena_qid; + + ena_assert_msg(ring->configured, + "API violation - releasing not configured queue"); + ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING, + "API violation"); + + /* Destroy HW queue */ + ena_qid = ENA_IO_RXQ_IDX(ring->id); + ena_com_destroy_io_queue(&adapter->ena_dev, ena_qid); + + /* Free all bufs */ + ena_rx_queue_release_bufs(ring); + + /* Free ring resources */ + if (ring->rx_buffer_info) + rte_free(ring->rx_buffer_info); + ring->rx_buffer_info = NULL; + + ring->configured = 0; + + RTE_LOG(NOTICE, PMD, "RX Queue %d:%d released\n", + ring->port_id, ring->id); +} + +static void ena_tx_queue_release(void *queue) +{ + struct ena_ring *ring = (struct ena_ring *)queue; + struct ena_adapter *adapter = ring->adapter; + int ena_qid; + + ena_assert_msg(ring->configured, + "API violation. Releasing not configured queue"); + ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING, + "API violation"); + + /* Destroy HW queue */ + ena_qid = ENA_IO_TXQ_IDX(ring->id); + ena_com_destroy_io_queue(&adapter->ena_dev, ena_qid); + + /* Free all bufs */ + ena_tx_queue_release_bufs(ring); + + /* Free ring resources */ + if (ring->tx_buffer_info) + rte_free(ring->tx_buffer_info); + + if (ring->empty_tx_reqs) + rte_free(ring->empty_tx_reqs); + + ring->empty_tx_reqs = NULL; + ring->tx_buffer_info = NULL; + + ring->configured = 0; + + RTE_LOG(NOTICE, PMD, "TX Queue %d:%d released\n", + ring->port_id, ring->id); +} + +static void ena_rx_queue_release_bufs(struct ena_ring *ring) +{ + unsigned int ring_mask = ring->ring_size - 1; + + while (ring->next_to_clean != ring->next_to_use) { + struct rte_mbuf *m = + ring->rx_buffer_info[ring->next_to_clean & ring_mask]; + + if (m) + __rte_mbuf_raw_free(m); + + ring->next_to_clean = + ENA_CIRC_INC(ring->next_to_clean, 1, ring->ring_size); + } +} + +static void ena_tx_queue_release_bufs(struct ena_ring *ring) +{ + unsigned int ring_mask = ring->ring_size - 1; + + while (ring->next_to_clean != ring->next_to_use) { + struct ena_tx_buffer *tx_buf = + &ring->tx_buffer_info[ring->next_to_clean & ring_mask]; + + if (tx_buf->mbuf) + rte_pktmbuf_free(tx_buf->mbuf); + + ring->next_to_clean = + ENA_CIRC_INC(ring->next_to_clean, 1, ring->ring_size); + } +} + +static int ena_link_update(struct rte_eth_dev *dev, + __rte_unused int wait_to_complete) +{ + struct rte_eth_link *link = &dev->data->dev_link; + + link->link_status = 1; + link->link_speed = ETH_SPEED_NUM_10G; + link->link_duplex = ETH_LINK_FULL_DUPLEX; + + return 0; +} + +static int ena_queue_restart_all(struct rte_eth_dev *dev, + enum ena_ring_type ring_type) +{ + struct ena_adapter *adapter = + (struct 
ena_adapter *)(dev->data->dev_private); + struct ena_ring *queues = NULL; + int i = 0; + int rc = 0; + + queues = (ring_type == ENA_RING_TYPE_RX) ? + adapter->rx_ring : adapter->tx_ring; + + for (i = 0; i < adapter->num_queues; i++) { + if (queues[i].configured) { + if (ring_type == ENA_RING_TYPE_RX) { + ena_assert_msg( + dev->data->rx_queues[i] == &queues[i], + "Inconsistent state of rx queues\n"); + } else { + ena_assert_msg( + dev->data->tx_queues[i] == &queues[i], + "Inconsistent state of tx queues\n"); + } + + rc = ena_queue_restart(&queues[i]); + + if (rc) { + PMD_INIT_LOG(ERR, + "failed to restart queue %d type(%d)\n", + i, ring_type); + return -1; + } + } + } + + return 0; +} + +static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter) +{ + uint32_t max_frame_len = adapter->max_mtu; + + if (adapter->rte_eth_dev_data->dev_conf.rxmode.jumbo_frame == 1) + max_frame_len = + adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len; + + return max_frame_len; +} + +static int ena_check_valid_conf(struct ena_adapter *adapter) +{ + uint32_t max_frame_len = ena_get_mtu_conf(adapter); + + if (max_frame_len > adapter->max_mtu) { + PMD_INIT_LOG(ERR, "Unsupported MTU of %d\n", max_frame_len); + return -1; + } + + return 0; +} + +static int +ena_calc_queue_size(struct ena_com_dev *ena_dev, + struct ena_com_dev_get_features_ctx *get_feat_ctx) +{ + uint32_t queue_size = ENA_DEFAULT_RING_SIZE; + + queue_size = RTE_MIN(queue_size, + get_feat_ctx->max_queues.max_cq_depth); + queue_size = RTE_MIN(queue_size, + get_feat_ctx->max_queues.max_sq_depth); + + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) + queue_size = RTE_MIN(queue_size, + get_feat_ctx->max_queues.max_llq_depth); + + /* Round down to power of 2 */ + if (!rte_is_power_of_2(queue_size)) + queue_size = rte_align32pow2(queue_size >> 1); + + if (queue_size == 0) { + PMD_INIT_LOG(ERR, "Invalid queue size\n"); + return -EFAULT; + } + + return queue_size; +} + +static void ena_stats_restart(struct rte_eth_dev *dev) +{ + struct ena_adapter *adapter = + (struct ena_adapter *)(dev->data->dev_private); + + rte_atomic64_init(&adapter->drv_stats->ierrors); + rte_atomic64_init(&adapter->drv_stats->oerrors); + rte_atomic64_init(&adapter->drv_stats->imcasts); + rte_atomic64_init(&adapter->drv_stats->rx_nombuf); +} + +static void ena_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats) +{ + struct ena_admin_basic_stats ena_stats; + struct ena_adapter *adapter = + (struct ena_adapter *)(dev->data->dev_private); + struct ena_com_dev *ena_dev = &adapter->ena_dev; + int rc; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return; + + memset(&ena_stats, 0, sizeof(ena_stats)); + rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats); + if (unlikely(rc)) { + RTE_LOG(ERR, PMD, "Could not retrieve statistics from ENA"); + return; + } + + /* Set of basic statistics from ENA */ + stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high, + ena_stats.rx_pkts_low); + stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high, + ena_stats.tx_pkts_low); + stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high, + ena_stats.rx_bytes_low); + stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high, + ena_stats.tx_bytes_low); + stats->imissed = __MERGE_64B_H_L(ena_stats.rx_drops_high, + ena_stats.rx_drops_low); + + /* Driver related stats */ + stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors); + stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors); + stats->imcasts = 
rte_atomic64_read(&adapter->drv_stats->imcasts); + stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf); +} + +static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct ena_adapter *adapter; + struct ena_com_dev *ena_dev; + int rc = 0; + + ena_assert_msg(dev->data != NULL, "Uninitialized device"); + ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device"); + adapter = (struct ena_adapter *)(dev->data->dev_private); + + ena_dev = &adapter->ena_dev; + ena_assert_msg(ena_dev != NULL, "Uninitialized device"); + + if (mtu > ena_get_mtu_conf(adapter)) { + RTE_LOG(ERR, PMD, + "Given MTU (%d) exceeds maximum MTU supported (%d)\n", + mtu, ena_get_mtu_conf(adapter)); + rc = -EINVAL; + goto err; + } + + rc = ena_com_set_dev_mtu(ena_dev, mtu); + if (rc) + RTE_LOG(ERR, PMD, "Could not set MTU: %d\n", mtu); + else + RTE_LOG(NOTICE, PMD, "Set MTU: %d\n", mtu); + +err: + return rc; +} + +static int ena_start(struct rte_eth_dev *dev) +{ + struct ena_adapter *adapter = + (struct ena_adapter *)(dev->data->dev_private); + int rc = 0; + + if (!(adapter->state == ENA_ADAPTER_STATE_CONFIG || + adapter->state == ENA_ADAPTER_STATE_STOPPED)) { + PMD_INIT_LOG(ERR, "API violation"); + return -1; + } + + rc = ena_check_valid_conf(adapter); + if (rc) + return rc; + + rc = ena_queue_restart_all(dev, ENA_RING_TYPE_RX); + if (rc) + return rc; + + rc = ena_queue_restart_all(dev, ENA_RING_TYPE_TX); + if (rc) + return rc; + + if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode & + ETH_MQ_RX_RSS_FLAG) { + rc = ena_rss_init_default(adapter); + if (rc) + return rc; + } + + ena_stats_restart(dev); + + adapter->state = ENA_ADAPTER_STATE_RUNNING; + + return 0; +} + +static int ena_queue_restart(struct ena_ring *ring) +{ + int rc; + + ena_assert_msg(ring->configured == 1, + "Trying to restart unconfigured queue\n"); + + ring->next_to_clean = 0; + ring->next_to_use = 0; + + if (ring->type == ENA_RING_TYPE_TX) + return 0; + + rc = ena_populate_rx_queue(ring, ring->ring_size - 1); + if ((unsigned int)rc != ring->ring_size - 1) { + PMD_INIT_LOG(ERR, "Failed to populate rx ring !\n"); + return (-1); + } + + return 0; +} + +static int ena_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + __rte_unused unsigned int socket_id, + __rte_unused const struct rte_eth_txconf *tx_conf) +{ + struct ena_ring *txq = NULL; + struct ena_adapter *adapter = + (struct ena_adapter *)(dev->data->dev_private); + unsigned int i; + int ena_qid; + int rc; + struct ena_com_dev *ena_dev = &adapter->ena_dev; + + txq = &adapter->tx_ring[queue_idx]; + + if (txq->configured) { + RTE_LOG(CRIT, PMD, + "API violation. 
Queue %d is already configured\n", + queue_idx); + return -1; + } + + if (nb_desc > adapter->tx_ring_size) { + RTE_LOG(ERR, PMD, + "Unsupported size of TX queue (max size: %d)\n", + adapter->tx_ring_size); + return -EINVAL; + } + + ena_qid = ENA_IO_TXQ_IDX(queue_idx); + rc = ena_com_create_io_queue(ena_dev, ena_qid, + ENA_COM_IO_QUEUE_DIRECTION_TX, + ena_dev->tx_mem_queue_type, + -1 /* admin interrupts is not used */, + nb_desc); + if (rc) { + RTE_LOG(ERR, PMD, + "failed to create io TX queue #%d (qid:%d) rc: %d\n", + queue_idx, ena_qid, rc); + } + txq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid]; + txq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid]; + + txq->port_id = dev->data->port_id; + txq->next_to_clean = 0; + txq->next_to_use = 0; + txq->ring_size = nb_desc; + + txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info", + sizeof(struct ena_tx_buffer) * + txq->ring_size, + RTE_CACHE_LINE_SIZE); + if (!txq->tx_buffer_info) { + RTE_LOG(ERR, PMD, "failed to alloc mem for tx buffer info\n"); + return -ENOMEM; + } + + txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs", + sizeof(u16) * txq->ring_size, + RTE_CACHE_LINE_SIZE); + if (!txq->empty_tx_reqs) { + RTE_LOG(ERR, PMD, "failed to alloc mem for tx reqs\n"); + rte_free(txq->tx_buffer_info); + return -ENOMEM; + } + for (i = 0; i < txq->ring_size; i++) + txq->empty_tx_reqs[i] = i; + + /* Store pointer to this queue in upper layer */ + txq->configured = 1; + dev->data->tx_queues[queue_idx] = txq; + + return rc; +} + +static int ena_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + __rte_unused unsigned int socket_id, + __rte_unused const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + struct ena_adapter *adapter = + (struct ena_adapter *)(dev->data->dev_private); + struct ena_ring *rxq = NULL; + uint16_t ena_qid = 0; + int rc = 0; + struct ena_com_dev *ena_dev = &adapter->ena_dev; + + rxq = &adapter->rx_ring[queue_idx]; + if (rxq->configured) { + RTE_LOG(CRIT, PMD, + "API violation. 
Queue %d is already configured\n", + queue_idx); + return -1; + } + + if (nb_desc > adapter->rx_ring_size) { + RTE_LOG(ERR, PMD, + "Unsupported size of RX queue (max size: %d)\n", + adapter->rx_ring_size); + return -EINVAL; + } + + ena_qid = ENA_IO_RXQ_IDX(queue_idx); + rc = ena_com_create_io_queue(ena_dev, ena_qid, + ENA_COM_IO_QUEUE_DIRECTION_RX, + ENA_ADMIN_PLACEMENT_POLICY_HOST, + -1 /* admin interrupts not used */, + nb_desc); + if (rc) + RTE_LOG(ERR, PMD, "failed to create io RX queue #%d rc: %d\n", + queue_idx, rc); + + rxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid]; + rxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid]; + + rxq->port_id = dev->data->port_id; + rxq->next_to_clean = 0; + rxq->next_to_use = 0; + rxq->ring_size = nb_desc; + rxq->mb_pool = mp; + + rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info", + sizeof(struct rte_mbuf *) * nb_desc, + RTE_CACHE_LINE_SIZE); + if (!rxq->rx_buffer_info) { + RTE_LOG(ERR, PMD, "failed to alloc mem for rx buffer info\n"); + return -ENOMEM; + } + + /* Store pointer to this queue in upper layer */ + rxq->configured = 1; + dev->data->rx_queues[queue_idx] = rxq; + + return rc; +} + +static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) +{ + unsigned int i; + int rc; + unsigned int ring_size = rxq->ring_size; + unsigned int ring_mask = ring_size - 1; + int next_to_use = rxq->next_to_use & ring_mask; + struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0]; + + if (unlikely(!count)) + return 0; + + ena_assert_msg((((ENA_CIRC_COUNT(rxq->next_to_use, rxq->next_to_clean, + rxq->ring_size)) + + count) < rxq->ring_size), "bad ring state"); + + count = RTE_MIN(count, ring_size - next_to_use); + + /* get resources for incoming packets */ + rc = rte_mempool_get_bulk(rxq->mb_pool, + (void **)(&mbufs[next_to_use]), count); + if (unlikely(rc < 0)) { + rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf); + PMD_RX_LOG(DEBUG, "there are no enough free buffers"); + return 0; + } + + for (i = 0; i < count; i++) { + struct rte_mbuf *mbuf = mbufs[next_to_use]; + struct ena_com_buf ebuf; + + rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]); + /* prepare physical address for DMA transaction */ + ebuf.paddr = mbuf->buf_physaddr + RTE_PKTMBUF_HEADROOM; + ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM; + /* pass resource to device */ + rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq, + &ebuf, next_to_use); + if (unlikely(rc)) { + RTE_LOG(WARNING, PMD, "failed adding rx desc\n"); + break; + } + next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, ring_size); + } + + rte_wmb(); + rxq->next_to_use = next_to_use; + /* let HW know that it can fill buffers with data */ + ena_com_write_sq_doorbell(rxq->ena_com_io_sq); + + return i; +} + +static int ena_device_init(struct ena_com_dev *ena_dev, + struct ena_com_dev_get_features_ctx *get_feat_ctx) +{ + int rc; + + /* Initialize mmio registers */ + rc = ena_com_mmio_reg_read_request_init(ena_dev); + if (rc) { + RTE_LOG(ERR, PMD, "failed to init mmio read less\n"); + return rc; + } + + /* reset device */ + rc = ena_com_dev_reset(ena_dev); + if (rc) { + RTE_LOG(ERR, PMD, "cannot reset device\n"); + goto err_mmio_read_less; + } + + /* check FW version */ + rc = ena_com_validate_version(ena_dev); + if (rc) { + RTE_LOG(ERR, PMD, "device version is too low\n"); + goto err_mmio_read_less; + } + + ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev); + + /* ENA device administration layer init */ + rc = ena_com_admin_init(ena_dev, NULL, true); + if (rc) { + RTE_LOG(ERR, PMD, + "cannot initialize ena 
admin queue with device\n"); + goto err_mmio_read_less; + } + + /* To enable the msix interrupts the driver needs to know the number + * of queues. So the driver uses polling mode to retrieve this + * information. + */ + ena_com_set_admin_polling_mode(ena_dev, true); + + /* Get Device Attributes and features */ + rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); + if (rc) { + RTE_LOG(ERR, PMD, + "cannot get attribute for ena device rc= %d\n", rc); + goto err_admin_init; + } + + return 0; + +err_admin_init: + ena_com_admin_destroy(ena_dev); + +err_mmio_read_less: + ena_com_mmio_reg_read_request_destroy(ena_dev); + + return rc; +} + +static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev; + struct ena_adapter *adapter = + (struct ena_adapter *)(eth_dev->data->dev_private); + struct ena_com_dev *ena_dev = &adapter->ena_dev; + struct ena_com_dev_get_features_ctx get_feat_ctx; + int queue_size, rc; + + static int adapters_found; + + memset(adapter, 0, sizeof(struct ena_adapter)); + ena_dev = &adapter->ena_dev; + + eth_dev->dev_ops = &ena_dev_ops; + eth_dev->rx_pkt_burst = ð_ena_recv_pkts; + eth_dev->tx_pkt_burst = ð_ena_xmit_pkts; + adapter->rte_eth_dev_data = eth_dev->data; + adapter->rte_dev = eth_dev; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + pci_dev = eth_dev->pci_dev; + adapter->pdev = pci_dev; + + PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d\n", + pci_dev->addr.domain, + pci_dev->addr.bus, + pci_dev->addr.devid, + pci_dev->addr.function); + + adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr; + adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr; + + /* Present ENA_MEM_BAR indicates available LLQ mode. + * Use corresponding policy + */ + if (adapter->dev_mem_base) + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; + else if (adapter->regs) + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + else + PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n", + ENA_REGS_BAR); + + ena_dev->reg_bar = adapter->regs; + ena_dev->dmadev = adapter->pdev; + + adapter->id_number = adapters_found; + + snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", + adapter->id_number); + + /* device specific initialization routine */ + rc = ena_device_init(ena_dev, &get_feat_ctx); + if (rc) { + PMD_INIT_LOG(CRIT, "Failed to init ENA device\n"); + return -1; + } + + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { + if (get_feat_ctx.max_queues.max_llq_num == 0) { + PMD_INIT_LOG(ERR, + "Trying to use LLQ but llq_num is 0.\n" + "Fall back into regular queues.\n"); + ena_dev->tx_mem_queue_type = + ENA_ADMIN_PLACEMENT_POLICY_HOST; + adapter->num_queues = + get_feat_ctx.max_queues.max_sq_num; + } else { + adapter->num_queues = + get_feat_ctx.max_queues.max_llq_num; + } + } else { + adapter->num_queues = get_feat_ctx.max_queues.max_sq_num; + } + + queue_size = ena_calc_queue_size(ena_dev, &get_feat_ctx); + if ((queue_size <= 0) || (adapter->num_queues <= 0)) + return -EFAULT; + + adapter->tx_ring_size = queue_size; + adapter->rx_ring_size = queue_size; + + /* prepare ring structures */ + ena_init_rings(adapter); + + /* Set max MTU for this device */ + adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; + + /* Copy MAC address and point DPDK to it */ + eth_dev->data->mac_addrs = (struct ether_addr *)adapter->mac_addr; + ether_addr_copy((struct ether_addr *)get_feat_ctx.dev_attr.mac_addr, + (struct ether_addr *)adapter->mac_addr); + + adapter->drv_stats = rte_zmalloc("adapter stats", + 
sizeof(*adapter->drv_stats), + RTE_CACHE_LINE_SIZE); + if (!adapter->drv_stats) { + RTE_LOG(ERR, PMD, "failed to alloc mem for adapter stats\n"); + return -ENOMEM; + } + + adapters_found++; + adapter->state = ENA_ADAPTER_STATE_INIT; + + return 0; +} + +static int ena_dev_configure(struct rte_eth_dev *dev) +{ + struct ena_adapter *adapter = + (struct ena_adapter *)(dev->data->dev_private); + + if (!(adapter->state == ENA_ADAPTER_STATE_INIT || + adapter->state == ENA_ADAPTER_STATE_STOPPED)) { + PMD_INIT_LOG(ERR, "Illegal adapter state: %d\n", + adapter->state); + return -1; + } + + switch (adapter->state) { + case ENA_ADAPTER_STATE_INIT: + case ENA_ADAPTER_STATE_STOPPED: + adapter->state = ENA_ADAPTER_STATE_CONFIG; + break; + case ENA_ADAPTER_STATE_CONFIG: + RTE_LOG(WARNING, PMD, + "Ivalid driver state while trying to configure device\n"); + break; + default: + break; + } + + return 0; +} + +static void ena_init_rings(struct ena_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_queues; i++) { + struct ena_ring *ring = &adapter->tx_ring[i]; + + ring->configured = 0; + ring->type = ENA_RING_TYPE_TX; + ring->adapter = adapter; + ring->id = i; + ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type; + ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size; + } + + for (i = 0; i < adapter->num_queues; i++) { + struct ena_ring *ring = &adapter->rx_ring[i]; + + ring->configured = 0; + ring->type = ENA_RING_TYPE_RX; + ring->adapter = adapter; + ring->id = i; + } +} + +static void ena_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct ena_adapter *adapter; + struct ena_com_dev *ena_dev; + struct ena_com_dev_get_features_ctx feat; + uint32_t rx_feat = 0, tx_feat = 0; + int rc = 0; + + ena_assert_msg(dev->data != NULL, "Uninitialized device"); + ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device"); + adapter = (struct ena_adapter *)(dev->data->dev_private); + + ena_dev = &adapter->ena_dev; + ena_assert_msg(ena_dev != NULL, "Uninitialized device"); + + dev_info->speed_capa = + ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_2_5G | + ETH_LINK_SPEED_5G | + ETH_LINK_SPEED_10G | + ETH_LINK_SPEED_25G | + ETH_LINK_SPEED_40G | + ETH_LINK_SPEED_50G | + ETH_LINK_SPEED_100G; + + /* Get supported features from HW */ + rc = ena_com_get_dev_attr_feat(ena_dev, &feat); + if (unlikely(rc)) { + RTE_LOG(ERR, PMD, + "Cannot get attribute for ena device rc= %d\n", rc); + return; + } + + /* Set Tx & Rx features available for device */ + if (feat.offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) + tx_feat |= DEV_TX_OFFLOAD_TCP_TSO; + + if (feat.offload.tx & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) + tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM; + + if (feat.offload.tx & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) + rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + + /* Inform framework about available features */ + dev_info->rx_offload_capa = rx_feat; + dev_info->tx_offload_capa = tx_feat; + + dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN; + dev_info->max_rx_pktlen = adapter->max_mtu; + dev_info->max_mac_addrs = 1; + + dev_info->max_rx_queues = adapter->num_queues; + dev_info->max_tx_queues = adapter->num_queues; + dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE; +} + +static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue); + 
unsigned int ring_size = rx_ring->ring_size; + unsigned int ring_mask = ring_size - 1; + uint16_t next_to_clean = rx_ring->next_to_clean; + int desc_in_use = 0; + unsigned int recv_idx = 0; + struct rte_mbuf *mbuf = NULL; + struct rte_mbuf *mbuf_head = NULL; + struct rte_mbuf *mbuf_prev = NULL; + struct rte_mbuf **rx_buff_info = rx_ring->rx_buffer_info; + unsigned int completed; + + struct ena_com_rx_ctx ena_rx_ctx; + int rc = 0; + + /* Check adapter state */ + if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { + RTE_LOG(ALERT, PMD, + "Trying to receive pkts while device is NOT running\n"); + return 0; + } + + desc_in_use = ENA_CIRC_COUNT(rx_ring->next_to_use, + next_to_clean, ring_size); + if (unlikely(nb_pkts > desc_in_use)) + nb_pkts = desc_in_use; + + for (completed = 0; completed < nb_pkts; completed++) { + int segments = 0; + + ena_rx_ctx.max_bufs = rx_ring->ring_size; + ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; + ena_rx_ctx.descs = 0; + /* receive packet context */ + rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, + rx_ring->ena_com_io_sq, + &ena_rx_ctx); + if (unlikely(rc)) { + RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc); + return 0; + } + + if (unlikely(ena_rx_ctx.descs == 0)) + break; + + while (segments < ena_rx_ctx.descs) { + mbuf = rx_buff_info[next_to_clean & ring_mask]; + mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len; + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + mbuf->refcnt = 1; + mbuf->next = NULL; + if (segments == 0) { + mbuf->nb_segs = ena_rx_ctx.descs; + mbuf->port = rx_ring->port_id; + mbuf->pkt_len = 0; + mbuf_head = mbuf; + } else { + /* for multi-segment pkts create mbuf chain */ + mbuf_prev->next = mbuf; + } + mbuf_head->pkt_len += mbuf->data_len; + + mbuf_prev = mbuf; + segments++; + next_to_clean = + ENA_RX_RING_IDX_NEXT(next_to_clean, ring_size); + } + + /* fill mbuf attributes if any */ + ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx); + mbuf_head->hash.rss = (uint32_t)rx_ring->id; + + /* pass to DPDK application head mbuf */ + rx_pkts[recv_idx] = mbuf_head; + recv_idx++; + } + + /* Burst refill to save doorbells, memory barriers, const interval */ + if (ring_size - desc_in_use - 1 > ENA_RING_DESCS_RATIO(ring_size)) + ena_populate_rx_queue(rx_ring, ring_size - desc_in_use - 1); + + rx_ring->next_to_clean = next_to_clean & ring_mask; + + return recv_idx; +} + +static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); + unsigned int next_to_use = tx_ring->next_to_use; + struct rte_mbuf *mbuf; + unsigned int ring_size = tx_ring->ring_size; + unsigned int ring_mask = ring_size - 1; + struct ena_com_tx_ctx ena_tx_ctx; + struct ena_tx_buffer *tx_info; + struct ena_com_buf *ebuf; + uint16_t rc, req_id, total_tx_descs = 0; + int sent_idx = 0; + int nb_hw_desc; + + /* Check adapter state */ + if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { + RTE_LOG(ALERT, PMD, + "Trying to xmit pkts while device is NOT running\n"); + return 0; + } + + for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) { + mbuf = tx_pkts[sent_idx]; + + req_id = tx_ring->empty_tx_reqs[next_to_use]; + tx_info = &tx_ring->tx_buffer_info[req_id]; + tx_info->mbuf = mbuf; + tx_info->num_of_bufs = 0; + ebuf = tx_info->bufs; + + /* Prepare TX context */ + memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); + memset(&ena_tx_ctx.ena_meta, 0x0, + sizeof(struct ena_com_tx_meta)); + ena_tx_ctx.ena_bufs = ebuf; + ena_tx_ctx.req_id = req_id; + if 
(tx_ring->tx_mem_queue_type == + ENA_ADMIN_PLACEMENT_POLICY_DEV) { + /* prepare the push buffer with + * virtual address of the data + */ + ena_tx_ctx.header_len = + RTE_MIN(mbuf->data_len, + tx_ring->tx_max_header_size); + ena_tx_ctx.push_header = + (void *)((char *)mbuf->buf_addr + + mbuf->data_off); + } /* there's no else as we take advantage of memset zeroing */ + + /* Set TX offloads flags, if applicable */ + ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx); + + if (unlikely(mbuf->ol_flags & + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD))) + rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors); + + rte_prefetch0(tx_pkts[(sent_idx + 4) & ring_mask]); + + /* Process first segment taking into + * consideration pushed header + */ + if (mbuf->data_len > ena_tx_ctx.header_len) { + ebuf->paddr = mbuf->buf_physaddr + + mbuf->data_off + + ena_tx_ctx.header_len; + ebuf->len = mbuf->data_len - ena_tx_ctx.header_len; + ebuf++; + tx_info->num_of_bufs++; + } + + while ((mbuf = mbuf->next) != NULL) { + ebuf->paddr = mbuf->buf_physaddr + mbuf->data_off; + ebuf->len = mbuf->data_len; + ebuf++; + tx_info->num_of_bufs++; + } + + ena_tx_ctx.num_bufs = tx_info->num_of_bufs; + + /* Write data to device */ + rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, + &ena_tx_ctx, &nb_hw_desc); + if (unlikely(rc)) + break; + + tx_info->tx_descs = nb_hw_desc; + + next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, ring_size); + } + + /* Let HW do it's best :-) */ + rte_wmb(); + ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); + + /* Clear complete packets */ + while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) { + /* Get Tx info & store how many descs were processed */ + tx_info = &tx_ring->tx_buffer_info[req_id]; + total_tx_descs += tx_info->tx_descs; + + /* Free whole mbuf chain */ + mbuf = tx_info->mbuf; + rte_pktmbuf_free(mbuf); + + /* Put back descriptor to the ring for reuse */ + tx_ring->empty_tx_reqs[tx_ring->next_to_clean] = req_id; + tx_ring->next_to_clean = + ENA_TX_RING_IDX_NEXT(tx_ring->next_to_clean, + tx_ring->ring_size); + + /* If too many descs to clean, leave it for another run */ + if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size))) + break; + } + + /* acknowledge completion of sent packets */ + ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs); + tx_ring->next_to_use = next_to_use; + return sent_idx; +} + +static struct eth_driver rte_ena_pmd = { + { + .name = "rte_ena_pmd", + .id_table = pci_id_ena_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + }, + .eth_dev_init = eth_ena_dev_init, + .dev_private_size = sizeof(struct ena_adapter), +}; + +static int +rte_ena_pmd_init(const char *name __rte_unused, + const char *params __rte_unused) +{ + rte_eth_driver_register(&rte_ena_pmd); + return 0; +}; + +struct rte_driver ena_pmd_drv = { + .name = "ena_driver", + .type = PMD_PDEV, + .init = rte_ena_pmd_init, +}; + +PMD_REGISTER_DRIVER(ena_pmd_drv); diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h new file mode 100644 index 00000000..ba6f01e6 --- /dev/null +++ b/drivers/net/ena/ena_ethdev.h @@ -0,0 +1,160 @@ +/*- +* BSD LICENSE +* +* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. 
+* * Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in +* the documentation and/or other materials provided with the +* distribution. +* * Neither the name of copyright holder nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef _ENA_ETHDEV_H_ +#define _ENA_ETHDEV_H_ + +#include <rte_pci.h> + +#include "ena_com.h" + +#define ENA_REGS_BAR 0 +#define ENA_MEM_BAR 2 + +#define ENA_MAX_NUM_QUEUES 128 + +#define ENA_DEFAULT_TX_SW_DESCS (1024) +#define ENA_DEFAULT_TX_HW_DESCS (1024) +#define ENA_DEFAULT_RING_SIZE (1024) + +#define ENA_MIN_FRAME_LEN 64 + +#define ENA_NAME_MAX_LEN 20 +#define ENA_IRQNAME_SIZE 40 + +#define ENA_PKT_MAX_BUFS 17 + +#define ENA_CIRC_COUNT(head, tail, size) \ + (((uint16_t)((uint16_t)(head) - (uint16_t)(tail))) & ((size) - 1)) + +#define ENA_CIRC_INC(index, step, size) \ + ((uint16_t)(index) + (uint16_t)(step)) +#define ENA_CIRC_INC_WRAP(index, step, size) \ + (((uint16_t)(index) + (uint16_t)(step)) & ((size) - 1)) + +#define ENA_TX_RING_IDX_NEXT(idx, ring_size) \ + ENA_CIRC_INC_WRAP(idx, 1, ring_size) +#define ENA_RX_RING_IDX_NEXT(idx, ring_size) \ + ENA_CIRC_INC_WRAP(idx, 1, ring_size) + +struct ena_adapter; + +enum ena_ring_type { + ENA_RING_TYPE_RX = 1, + ENA_RING_TYPE_TX = 2, +}; + +struct ena_tx_buffer { + struct rte_mbuf *mbuf; + unsigned int tx_descs; + unsigned int num_of_bufs; + struct ena_com_buf bufs[ENA_PKT_MAX_BUFS]; +}; + +struct ena_ring { + u16 next_to_use; + u16 next_to_clean; + + enum ena_ring_type type; + enum ena_admin_placement_policy_type tx_mem_queue_type; + /* Holds the empty requests for TX OOO completions */ + uint16_t *empty_tx_reqs; + union { + struct ena_tx_buffer *tx_buffer_info; /* contex of tx packet */ + struct rte_mbuf **rx_buffer_info; /* contex of rx packet */ + }; + unsigned int ring_size; /* number of tx/rx_buffer_info's entries */ + + struct ena_com_io_cq *ena_com_io_cq; + struct ena_com_io_sq *ena_com_io_sq; + + struct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS] + __rte_cache_aligned; + + struct rte_mempool *mb_pool; + unsigned int port_id; + unsigned int id; + /* Max length PMD can push to device for LLQ */ + uint8_t tx_max_header_size; + int configured; + struct ena_adapter *adapter; +} __rte_cache_aligned; + +enum ena_adapter_state { + ENA_ADAPTER_STATE_FREE = 0, + ENA_ADAPTER_STATE_INIT = 1, + ENA_ADAPTER_STATE_RUNNING = 2, + ENA_ADAPTER_STATE_STOPPED = 3, + ENA_ADAPTER_STATE_CONFIG = 4, +}; + +struct ena_driver_stats { + rte_atomic64_t ierrors; + rte_atomic64_t oerrors; + rte_atomic64_t imcasts; + rte_atomic64_t rx_nombuf; +}; + +/* board specific 
private data structure */ +struct ena_adapter { + /* OS defined structs */ + struct rte_pci_device *pdev; + struct rte_eth_dev_data *rte_eth_dev_data; + struct rte_eth_dev *rte_dev; + + struct ena_com_dev ena_dev __rte_cache_aligned; + + /* TX */ + struct ena_ring tx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned; + int tx_ring_size; + + /* RX */ + struct ena_ring rx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned; + int rx_ring_size; + + u16 num_queues; + u16 max_mtu; + + int id_number; + char name[ENA_NAME_MAX_LEN]; + u8 mac_addr[ETHER_ADDR_LEN]; + + void *regs; + void *dev_mem_base; + + struct ena_driver_stats *drv_stats; + enum ena_adapter_state state; + +}; + +#endif /* _ENA_ETHDEV_H_ */ diff --git a/drivers/net/ena/ena_logs.h b/drivers/net/ena/ena_logs.h new file mode 100644 index 00000000..c6c8a41b --- /dev/null +++ b/drivers/net/ena/ena_logs.h @@ -0,0 +1,70 @@ +/*- +* BSD LICENSE +* +* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in +* the documentation and/or other materials provided with the +* distribution. +* * Neither the name of copyright holder nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef _ENA_LOGS_H_ +#define _ENA_LOGS_H_ + +#define RTE_LOGTYPE_ENA RTE_LOGTYPE_USER1 + +#define PMD_INIT_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) + +#ifdef RTE_LIBRTE_ENA_DEBUG_RX +#define PMD_RX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_ENA_DEBUG_TX +#define PMD_TX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_ENA_DEBUG_TX_FREE +#define PMD_TX_FREE_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_ENA_DEBUG_DRIVER +#define PMD_DRV_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_DRV_LOG(level, fmt, args...) 
do { } while (0) +#endif + +#endif /* _ENA_LOGS_H_ */ diff --git a/drivers/net/ena/ena_platform.h b/drivers/net/ena/ena_platform.h new file mode 100644 index 00000000..0df82d6f --- /dev/null +++ b/drivers/net/ena/ena_platform.h @@ -0,0 +1,59 @@ +/*- +* BSD LICENSE +* +* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in +* the documentation and/or other materials provided with the +* distribution. +* * Neither the name of copyright holder nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef __ENA_PLATFORM_H__ +#define __ENA_PLATFORM_H__ + +#define swap16_to_le(x) (x) + +#define swap32_to_le(x) (x) + +#define swap64_to_le(x) (x) + +#define swap16_from_le(x) (x) + +#define swap32_from_le(x) (x) + +#define swap64_from_le(x) (x) + +#define ena_assert_msg(cond, msg) \ + do { \ + if (unlikely(!(cond))) { \ + RTE_LOG(ERR, ENA, \ + "Assert failed on %s:%s:%d: ", \ + __FILE__, __func__, __LINE__); \ + rte_panic(msg); \ + } \ + } while (0) + +#endif /* __ENA_PLATFORM_H__ */ diff --git a/drivers/net/ena/rte_pmd_ena_version.map b/drivers/net/ena/rte_pmd_ena_version.map new file mode 100644 index 00000000..349c6e1c --- /dev/null +++ b/drivers/net/ena/rte_pmd_ena_version.map @@ -0,0 +1,4 @@ +DPDK_16.04 { + + local: *; +}; diff --git a/drivers/net/enic/LICENSE b/drivers/net/enic/LICENSE new file mode 100644 index 00000000..46a27a4e --- /dev/null +++ b/drivers/net/enic/LICENSE @@ -0,0 +1,27 @@ + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile new file mode 100644 index 00000000..f3162741 --- /dev/null +++ b/drivers/net/enic/Makefile @@ -0,0 +1,72 @@ +# +# Copyright 2008-2014 Cisco Systems, Inc. All rights reserved. +# Copyright 2007 Nuova Systems, Inc. All rights reserved. +# +# Copyright (c) 2014, Cisco Systems, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_enic.a + +EXPORT_MAP := rte_pmd_enic_version.map + +LIBABIVER := 1 + +CFLAGS += -I$(SRCDIR)/base/ +CFLAGS += -I$(SRCDIR) +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) -Wno-strict-aliasing + +VPATH += $(SRCDIR)/src + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_main.c +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rx.c +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_clsf.c +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_res.c +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_cq.c +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_wq.c +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_dev.c +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_intr.c +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_rq.c +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_rss.c + +# this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += lib/librte_eal lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += lib/librte_mempool lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += lib/librte_net +DEPDIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += lib/librte_hash + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/enic/base/cq_desc.h b/drivers/net/enic/base/cq_desc.h new file mode 100644 index 00000000..f3ef6bb5 --- /dev/null +++ b/drivers/net/enic/base/cq_desc.h @@ -0,0 +1,125 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef _CQ_DESC_H_ +#define _CQ_DESC_H_ + +/* + * Completion queue descriptor types + */ +enum cq_desc_types { + CQ_DESC_TYPE_WQ_ENET = 0, + CQ_DESC_TYPE_DESC_COPY = 1, + CQ_DESC_TYPE_WQ_EXCH = 2, + CQ_DESC_TYPE_RQ_ENET = 3, + CQ_DESC_TYPE_RQ_FCP = 4, + CQ_DESC_TYPE_IOMMU_MISS = 5, + CQ_DESC_TYPE_SGL = 6, + CQ_DESC_TYPE_CLASSIFIER = 7, + CQ_DESC_TYPE_TEST = 127, +}; + +/* Completion queue descriptor: 16B + * + * All completion queues have this basic layout. The + * type_specfic area is unique for each completion + * queue type. 
+ */ +struct cq_desc { + __le16 completed_index; + __le16 q_number; + u8 type_specfic[11]; + u8 type_color; +}; + +#define CQ_DESC_TYPE_BITS 4 +#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1) +#define CQ_DESC_COLOR_MASK 1 +#define CQ_DESC_COLOR_SHIFT 7 +#define CQ_DESC_Q_NUM_BITS 10 +#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1) +#define CQ_DESC_COMP_NDX_BITS 12 +#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1) + +static inline void cq_color_enc(struct cq_desc *desc, const u8 color) +{ + if (color) + desc->type_color |= (1 << CQ_DESC_COLOR_SHIFT); + else + desc->type_color &= ~(1 << CQ_DESC_COLOR_SHIFT); +} + +static inline void cq_desc_enc(struct cq_desc *desc, + const u8 type, const u8 color, const u16 q_number, + const u16 completed_index) +{ + desc->type_color = (type & CQ_DESC_TYPE_MASK) | + ((color & CQ_DESC_COLOR_MASK) << CQ_DESC_COLOR_SHIFT); + desc->q_number = cpu_to_le16(q_number & CQ_DESC_Q_NUM_MASK); + desc->completed_index = cpu_to_le16(completed_index & + CQ_DESC_COMP_NDX_MASK); +} + +static inline void cq_desc_dec(const struct cq_desc *desc_arg, + u8 *type, u8 *color, u16 *q_number, u16 *completed_index) +{ + const struct cq_desc *desc = desc_arg; + const u8 type_color = desc->type_color; + + *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK; + + /* + * Make sure color bit is read from desc *before* other fields + * are read from desc. Hardware guarantees color bit is last + * bit (byte) written. Adding the rmb() prevents the compiler + * and/or CPU from reordering the reads which would potentially + * result in reading stale values. + */ + + rmb(); + + *type = type_color & CQ_DESC_TYPE_MASK; + *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK; + *completed_index = le16_to_cpu(desc->completed_index) & + CQ_DESC_COMP_NDX_MASK; +} + +static inline void cq_color_dec(const struct cq_desc *desc_arg, u8 *color) +{ + volatile const struct cq_desc *desc = desc_arg; + + *color = (desc->type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK; +} + +#endif /* _CQ_DESC_H_ */ diff --git a/drivers/net/enic/base/cq_enet_desc.h b/drivers/net/enic/base/cq_enet_desc.h new file mode 100644 index 00000000..f9822a45 --- /dev/null +++ b/drivers/net/enic/base/cq_enet_desc.h @@ -0,0 +1,260 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef _CQ_ENET_DESC_H_ +#define _CQ_ENET_DESC_H_ + +#include "cq_desc.h" + +/* Ethernet completion queue descriptor: 16B */ +struct cq_enet_wq_desc { + __le16 completed_index; + __le16 q_number; + u8 reserved[11]; + u8 type_color; +}; + +static inline void cq_enet_wq_desc_enc(struct cq_enet_wq_desc *desc, + u8 type, u8 color, u16 q_number, u16 completed_index) +{ + cq_desc_enc((struct cq_desc *)desc, type, + color, q_number, completed_index); +} + +static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc, + u8 *type, u8 *color, u16 *q_number, u16 *completed_index) +{ + cq_desc_dec((struct cq_desc *)desc, type, + color, q_number, completed_index); +} + +/* Completion queue descriptor: Ethernet receive queue, 16B */ +struct cq_enet_rq_desc { + __le16 completed_index_flags; + __le16 q_number_rss_type_flags; + __le32 rss_hash; + __le16 bytes_written_flags; + __le16 vlan; + __le16 checksum_fcoe; + u8 flags; + u8 type_color; +}; + +#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12) +#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13) +#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14) +#define CQ_ENET_RQ_DESC_FLAGS_SOP (0x1 << 15) + +#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS 4 +#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \ + ((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1) +#define CQ_ENET_RQ_DESC_RSS_TYPE_NONE 0 +#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4 1 +#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4 2 +#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6 3 +#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6 4 +#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX 5 +#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX 6 + +#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC (0x1 << 14) + +#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS 14 +#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \ + ((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1) +#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14) +#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15) + +#define CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_BITS 12 +#define CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK \ + ((1 << CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_BITS) - 1) +#define CQ_ENET_RQ_DESC_VLAN_TCI_CFI_MASK (0x1 << 12) +#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_BITS 3 +#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_MASK \ + ((1 << CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_BITS) - 1) +#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_SHIFT 13 + +#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 8 +#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \ + ((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1) +#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8 +#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \ + ((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1) +#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT 8 + +#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK (0x1 << 0) +#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK (0x1 << 0) +#define CQ_ENET_RQ_DESC_FLAGS_UDP (0x1 << 1) +#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR (0x1 << 1) +#define CQ_ENET_RQ_DESC_FLAGS_TCP (0x1 << 2) +#define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK (0x1 << 3) +#define CQ_ENET_RQ_DESC_FLAGS_IPV6 (0x1 << 4) +#define 
CQ_ENET_RQ_DESC_FLAGS_IPV4 (0x1 << 5) +#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6) +#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7) + +static inline void cq_enet_rq_desc_enc(struct cq_enet_rq_desc *desc, + u8 type, u8 color, u16 q_number, u16 completed_index, + u8 ingress_port, u8 fcoe, u8 eop, u8 sop, u8 rss_type, u8 csum_not_calc, + u32 rss_hash, u16 bytes_written, u8 packet_error, u8 vlan_stripped, + u16 vlan, u16 checksum, u8 fcoe_sof, u8 fcoe_fc_crc_ok, + u8 fcoe_enc_error, u8 fcoe_eof, u8 tcp_udp_csum_ok, u8 udp, u8 tcp, + u8 ipv4_csum_ok, u8 ipv6, u8 ipv4, u8 ipv4_fragment, u8 fcs_ok) +{ + cq_desc_enc((struct cq_desc *)desc, type, + color, q_number, completed_index); + + desc->completed_index_flags |= cpu_to_le16( + (ingress_port ? CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT : 0) | + (fcoe ? CQ_ENET_RQ_DESC_FLAGS_FCOE : 0) | + (eop ? CQ_ENET_RQ_DESC_FLAGS_EOP : 0) | + (sop ? CQ_ENET_RQ_DESC_FLAGS_SOP : 0)); + + desc->q_number_rss_type_flags |= cpu_to_le16( + ((rss_type & CQ_ENET_RQ_DESC_RSS_TYPE_MASK) << + CQ_DESC_Q_NUM_BITS) | + (csum_not_calc ? CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC : 0)); + + desc->rss_hash = cpu_to_le32(rss_hash); + + desc->bytes_written_flags = cpu_to_le16( + (bytes_written & CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK) | + (packet_error ? CQ_ENET_RQ_DESC_FLAGS_TRUNCATED : 0) | + (vlan_stripped ? CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED : 0)); + + desc->vlan = cpu_to_le16(vlan); + + if (fcoe) { + desc->checksum_fcoe = cpu_to_le16( + (fcoe_sof & CQ_ENET_RQ_DESC_FCOE_SOF_MASK) | + ((fcoe_eof & CQ_ENET_RQ_DESC_FCOE_EOF_MASK) << + CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT)); + } else { + desc->checksum_fcoe = cpu_to_le16(checksum); + } + + desc->flags = + (tcp_udp_csum_ok ? CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK : 0) | + (udp ? CQ_ENET_RQ_DESC_FLAGS_UDP : 0) | + (tcp ? CQ_ENET_RQ_DESC_FLAGS_TCP : 0) | + (ipv4_csum_ok ? CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK : 0) | + (ipv6 ? CQ_ENET_RQ_DESC_FLAGS_IPV6 : 0) | + (ipv4 ? CQ_ENET_RQ_DESC_FLAGS_IPV4 : 0) | + (ipv4_fragment ? CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT : 0) | + (fcs_ok ? CQ_ENET_RQ_DESC_FLAGS_FCS_OK : 0) | + (fcoe_fc_crc_ok ? CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK : 0) | + (fcoe_enc_error ? CQ_ENET_RQ_DESC_FCOE_ENC_ERROR : 0); +} + +static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, + u8 *type, u8 *color, u16 *q_number, u16 *completed_index, + u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type, + u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error, + u8 *vlan_stripped, u16 *vlan_tci, u16 *checksum, u8 *fcoe_sof, + u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof, + u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok, + u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok) +{ + u16 completed_index_flags; + u16 q_number_rss_type_flags; + u16 bytes_written_flags; + + cq_desc_dec((struct cq_desc *)desc, type, + color, q_number, completed_index); + + completed_index_flags = le16_to_cpu(desc->completed_index_flags); + q_number_rss_type_flags = + le16_to_cpu(desc->q_number_rss_type_flags); + bytes_written_flags = le16_to_cpu(desc->bytes_written_flags); + + *ingress_port = (completed_index_flags & + CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0; + *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ? + 1 : 0; + *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ? + 1 : 0; + *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ? 
+ 1 : 0; + + *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) & + CQ_ENET_RQ_DESC_RSS_TYPE_MASK); + *csum_not_calc = (q_number_rss_type_flags & + CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0; + + *rss_hash = le32_to_cpu(desc->rss_hash); + + *bytes_written = bytes_written_flags & + CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; + *packet_error = (bytes_written_flags & + CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0; + *vlan_stripped = (bytes_written_flags & + CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0; + + /* + * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12) + */ + *vlan_tci = le16_to_cpu(desc->vlan); + + if (*fcoe) { + *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) & + CQ_ENET_RQ_DESC_FCOE_SOF_MASK); + *fcoe_fc_crc_ok = (desc->flags & + CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0; + *fcoe_enc_error = (desc->flags & + CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0; + *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >> + CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) & + CQ_ENET_RQ_DESC_FCOE_EOF_MASK); + *checksum = 0; + } else { + *fcoe_sof = 0; + *fcoe_fc_crc_ok = 0; + *fcoe_enc_error = 0; + *fcoe_eof = 0; + *checksum = le16_to_cpu(desc->checksum_fcoe); + } + + *tcp_udp_csum_ok = + (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0; + *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0; + *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0; + *ipv4_csum_ok = + (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0; + *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0; + *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0; + *ipv4_fragment = + (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0; + *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0; +} + +#endif /* _CQ_ENET_DESC_H_ */ diff --git a/drivers/net/enic/base/enic_vnic_wq.h b/drivers/net/enic/base/enic_vnic_wq.h new file mode 100644 index 00000000..b0191093 --- /dev/null +++ b/drivers/net/enic/base/enic_vnic_wq.h @@ -0,0 +1,79 @@ +/* + * Copyright 2008-2015 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2015, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#ifndef _ENIC_VNIC_WQ_H_ +#define _ENIC_VNIC_WQ_H_ + +#include "vnic_dev.h" +#include "vnic_cq.h" + +static inline void enic_vnic_post_wq_index(struct vnic_wq *wq) +{ + struct vnic_wq_buf *buf = wq->to_use; + + /* Adding write memory barrier prevents compiler and/or CPU + * reordering, thus avoiding descriptor posting before + * descriptor is initialized. Otherwise, hardware can read + * stale descriptor fields. + */ + wmb(); + iowrite32(buf->index, &wq->ctrl->posted_index); +} + +static inline void enic_vnic_post_wq(struct vnic_wq *wq, + void *os_buf, dma_addr_t dma_addr, + unsigned int len, int sop, + uint8_t desc_skip_cnt, uint8_t cq_entry, + uint8_t compressed_send, uint64_t wrid) +{ + struct vnic_wq_buf *buf = wq->to_use; + + buf->sop = sop; + buf->cq_entry = cq_entry; + buf->compressed_send = compressed_send; + buf->desc_skip_cnt = desc_skip_cnt; + buf->os_buf = os_buf; + buf->dma_addr = dma_addr; + buf->len = len; + buf->wr_id = wrid; + + buf = buf->next; + wq->ring.desc_avail -= desc_skip_cnt; + wq->to_use = buf; + + if (cq_entry) + enic_vnic_post_wq_index(wq); +} + +#endif /* _ENIC_VNIC_WQ_H_ */ diff --git a/drivers/net/enic/base/rq_enet_desc.h b/drivers/net/enic/base/rq_enet_desc.h new file mode 100644 index 00000000..7292d9dc --- /dev/null +++ b/drivers/net/enic/base/rq_enet_desc.h @@ -0,0 +1,75 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#ifndef _RQ_ENET_DESC_H_ +#define _RQ_ENET_DESC_H_ + +/* Ethernet receive queue descriptor: 16B */ +struct rq_enet_desc { + __le64 address; + __le16 length_type; + u8 reserved[6]; +}; + +enum rq_enet_type_types { + RQ_ENET_TYPE_ONLY_SOP = 0, + RQ_ENET_TYPE_NOT_SOP = 1, + RQ_ENET_TYPE_RESV2 = 2, + RQ_ENET_TYPE_RESV3 = 3, +}; + +#define RQ_ENET_ADDR_BITS 64 +#define RQ_ENET_LEN_BITS 14 +#define RQ_ENET_LEN_MASK ((1 << RQ_ENET_LEN_BITS) - 1) +#define RQ_ENET_TYPE_BITS 2 +#define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1) + +static inline void rq_enet_desc_enc(struct rq_enet_desc *desc, + u64 address, u8 type, u16 length) +{ + desc->address = cpu_to_le64(address); + desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) | + ((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS)); +} + +static inline void rq_enet_desc_dec(struct rq_enet_desc *desc, + u64 *address, u8 *type, u16 *length) +{ + *address = le64_to_cpu(desc->address); + *length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK; + *type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) & + RQ_ENET_TYPE_MASK); +} + +#endif /* _RQ_ENET_DESC_H_ */ diff --git a/drivers/net/enic/base/vnic_cq.c b/drivers/net/enic/base/vnic_cq.c new file mode 100644 index 00000000..2f65f357 --- /dev/null +++ b/drivers/net/enic/base/vnic_cq.c @@ -0,0 +1,116 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include "vnic_dev.h" +#include "vnic_cq.h" + +int vnic_cq_mem_size(struct vnic_cq *cq, unsigned int desc_count, + unsigned int desc_size) +{ + int mem_size; + + mem_size = vnic_dev_desc_ring_size(&cq->ring, desc_count, desc_size); + + return mem_size; +} + +void vnic_cq_free(struct vnic_cq *cq) +{ + vnic_dev_free_desc_ring(cq->vdev, &cq->ring); + + cq->ctrl = NULL; +} + +int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, + unsigned int socket_id, + unsigned int desc_count, unsigned int desc_size) +{ + int err; + char res_name[NAME_MAX]; + static int instance; + + cq->index = index; + cq->vdev = vdev; + + cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index); + if (!cq->ctrl) { + pr_err("Failed to hook CQ[%d] resource\n", index); + return -EINVAL; + } + + snprintf(res_name, sizeof(res_name), "%d-cq-%d", instance++, index); + err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size, + socket_id, res_name); + if (err) + return err; + + return 0; +} + +void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, + unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, + unsigned int cq_tail_color, unsigned int interrupt_enable, + unsigned int cq_entry_enable, unsigned int cq_message_enable, + unsigned int interrupt_offset, u64 cq_message_addr) +{ + u64 paddr; + + paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET; + writeq(paddr, &cq->ctrl->ring_base); + iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size); + iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable); + iowrite32(color_enable, &cq->ctrl->color_enable); + iowrite32(cq_head, &cq->ctrl->cq_head); + iowrite32(cq_tail, &cq->ctrl->cq_tail); + iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color); + iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable); + iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable); + iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable); + iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset); + writeq(cq_message_addr, &cq->ctrl->cq_message_addr); + + cq->interrupt_offset = interrupt_offset; +} + +void vnic_cq_clean(struct vnic_cq *cq) +{ + cq->to_clean = 0; + cq->last_color = 0; + + iowrite32(0, &cq->ctrl->cq_head); + iowrite32(0, &cq->ctrl->cq_tail); + iowrite32(1, &cq->ctrl->cq_tail_color); + + vnic_dev_clear_desc_ring(&cq->ring); +} diff --git a/drivers/net/enic/base/vnic_cq.h b/drivers/net/enic/base/vnic_cq.h new file mode 100644 index 00000000..922391b3 --- /dev/null +++ b/drivers/net/enic/base/vnic_cq.h @@ -0,0 +1,150 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef _VNIC_CQ_H_ +#define _VNIC_CQ_H_ + +#include <rte_mbuf.h> + +#include "cq_desc.h" +#include "vnic_dev.h" + +/* Completion queue control */ +struct vnic_cq_ctrl { + u64 ring_base; /* 0x00 */ + u32 ring_size; /* 0x08 */ + u32 pad0; + u32 flow_control_enable; /* 0x10 */ + u32 pad1; + u32 color_enable; /* 0x18 */ + u32 pad2; + u32 cq_head; /* 0x20 */ + u32 pad3; + u32 cq_tail; /* 0x28 */ + u32 pad4; + u32 cq_tail_color; /* 0x30 */ + u32 pad5; + u32 interrupt_enable; /* 0x38 */ + u32 pad6; + u32 cq_entry_enable; /* 0x40 */ + u32 pad7; + u32 cq_message_enable; /* 0x48 */ + u32 pad8; + u32 interrupt_offset; /* 0x50 */ + u32 pad9; + u64 cq_message_addr; /* 0x58 */ + u32 pad10; +}; + +#ifdef ENIC_AIC +struct vnic_rx_bytes_counter { + unsigned int small_pkt_bytes_cnt; + unsigned int large_pkt_bytes_cnt; +}; +#endif + +struct vnic_cq { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_cq_ctrl __iomem *ctrl; /* memory-mapped */ + struct vnic_dev_ring ring; + unsigned int to_clean; + unsigned int last_color; + unsigned int interrupt_offset; +#ifdef ENIC_AIC + struct vnic_rx_bytes_counter pkt_size_counter; + unsigned int cur_rx_coal_timeval; + unsigned int tobe_rx_coal_timeval; + ktime_t prev_ts; +#endif +}; + +static inline unsigned int vnic_cq_service(struct vnic_cq *cq, + unsigned int work_to_do, + int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc, + u8 type, u16 q_number, u16 completed_index, void *opaque), + void *opaque) +{ + struct cq_desc *cq_desc; + unsigned int work_done = 0; + u16 q_number, completed_index; + u8 type, color; + struct rte_mbuf **rx_pkts = opaque; + unsigned int ret; + + cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + + cq->ring.desc_size * cq->to_clean); + cq_desc_dec(cq_desc, &type, &color, + &q_number, &completed_index); + + while (color != cq->last_color) { + if (opaque) + opaque = (void *)&(rx_pkts[work_done]); + + ret = (*q_service)(cq->vdev, cq_desc, type, + q_number, completed_index, opaque); + cq->to_clean++; + if (cq->to_clean == cq->ring.desc_count) { + cq->to_clean = 0; + cq->last_color = cq->last_color ? 
0 : 1; + } + + cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + + cq->ring.desc_size * cq->to_clean); + cq_desc_dec(cq_desc, &type, &color, + &q_number, &completed_index); + + if (ret) + work_done++; + if (work_done >= work_to_do) + break; + } + + return work_done; +} + +void vnic_cq_free(struct vnic_cq *cq); +int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, + unsigned int socket_id, + unsigned int desc_count, unsigned int desc_size); +void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, + unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, + unsigned int cq_tail_color, unsigned int interrupt_enable, + unsigned int cq_entry_enable, unsigned int message_enable, + unsigned int interrupt_offset, u64 message_addr); +void vnic_cq_clean(struct vnic_cq *cq); +int vnic_cq_mem_size(struct vnic_cq *cq, unsigned int desc_count, + unsigned int desc_size); + +#endif /* _VNIC_CQ_H_ */ diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c new file mode 100644 index 00000000..e8a50287 --- /dev/null +++ b/drivers/net/enic/base/vnic_dev.c @@ -0,0 +1,1051 @@ +/* + * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include <rte_memzone.h> +#include <rte_memcpy.h> +#include <rte_string_fns.h> + +#include "vnic_dev.h" +#include "vnic_resource.h" +#include "vnic_devcmd.h" +#include "vnic_stats.h" + + +enum vnic_proxy_type { + PROXY_NONE, + PROXY_BY_BDF, + PROXY_BY_INDEX, +}; + +struct vnic_res { + void __iomem *vaddr; + dma_addr_t bus_addr; + unsigned int count; +}; + +struct vnic_intr_coal_timer_info { + u32 mul; + u32 div; + u32 max_usec; +}; + +struct vnic_dev { + void *priv; + struct rte_pci_device *pdev; + struct vnic_res res[RES_TYPE_MAX]; + enum vnic_dev_intr_mode intr_mode; + struct vnic_devcmd __iomem *devcmd; + struct vnic_devcmd_notify *notify; + struct vnic_devcmd_notify notify_copy; + dma_addr_t notify_pa; + u32 notify_sz; + dma_addr_t linkstatus_pa; + struct vnic_stats *stats; + dma_addr_t stats_pa; + struct vnic_devcmd_fw_info *fw_info; + dma_addr_t fw_info_pa; + enum vnic_proxy_type proxy; + u32 proxy_index; + u64 args[VNIC_DEVCMD_NARGS]; + u16 split_hdr_size; + int in_reset; + struct vnic_intr_coal_timer_info intr_coal_timer_info; + void *(*alloc_consistent)(void *priv, size_t size, + dma_addr_t *dma_handle, u8 *name); + void (*free_consistent)(struct rte_pci_device *hwdev, + size_t size, void *vaddr, + dma_addr_t dma_handle); +}; + +#define VNIC_MAX_RES_HDR_SIZE \ + (sizeof(struct vnic_resource_header) + \ + sizeof(struct vnic_resource) * RES_TYPE_MAX) +#define VNIC_RES_STRIDE 128 + +void *vnic_dev_priv(struct vnic_dev *vdev) +{ + return vdev->priv; +} + +void vnic_register_cbacks(struct vnic_dev *vdev, + void *(*alloc_consistent)(void *priv, size_t size, + dma_addr_t *dma_handle, u8 *name), + void (*free_consistent)(struct rte_pci_device *hwdev, + size_t size, void *vaddr, + dma_addr_t dma_handle)) +{ + vdev->alloc_consistent = alloc_consistent; + vdev->free_consistent = free_consistent; +} + +static int vnic_dev_discover_res(struct vnic_dev *vdev, + struct vnic_dev_bar *bar, unsigned int num_bars) +{ + struct vnic_resource_header __iomem *rh; + struct mgmt_barmap_hdr __iomem *mrh; + struct vnic_resource __iomem *r; + u8 type; + + if (num_bars == 0) + return -EINVAL; + + if (bar->len < VNIC_MAX_RES_HDR_SIZE) { + pr_err("vNIC BAR0 res hdr length error\n"); + return -EINVAL; + } + + rh = bar->vaddr; + mrh = bar->vaddr; + if (!rh) { + pr_err("vNIC BAR0 res hdr not mem-mapped\n"); + return -EINVAL; + } + + /* Check for mgmt vnic in addition to normal vnic */ + if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) || + (ioread32(&rh->version) != VNIC_RES_VERSION)) { + if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) || + (ioread32(&mrh->version) != MGMTVNIC_VERSION)) { + pr_err("vNIC BAR0 res magic/version error " \ + "exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n", + VNIC_RES_MAGIC, VNIC_RES_VERSION, + MGMTVNIC_MAGIC, MGMTVNIC_VERSION, + ioread32(&rh->magic), ioread32(&rh->version)); + return -EINVAL; + } + } + + if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC) + r = (struct vnic_resource __iomem *)(mrh + 1); + else + r = (struct vnic_resource __iomem *)(rh + 1); + + + while ((type = ioread8(&r->type)) != RES_TYPE_EOL) { + u8 bar_num = ioread8(&r->bar); + u32 bar_offset = ioread32(&r->bar_offset); + u32 count = ioread32(&r->count); + u32 len; + + r++; + + if (bar_num >= num_bars) + continue; + + if (!bar[bar_num].len || !bar[bar_num].vaddr) + continue; + + switch (type) { + case RES_TYPE_WQ: + case RES_TYPE_RQ: + case RES_TYPE_CQ: + case RES_TYPE_INTR_CTRL: + /* each count is stride bytes long */ + len = count * VNIC_RES_STRIDE; + if (len + bar_offset > bar[bar_num].len) { + pr_err("vNIC BAR0 
resource %d " \ + "out-of-bounds, offset 0x%x + " \ + "size 0x%x > bar len 0x%lx\n", + type, bar_offset, + len, + bar[bar_num].len); + return -EINVAL; + } + break; + case RES_TYPE_INTR_PBA_LEGACY: + case RES_TYPE_DEVCMD: + len = count; + break; + default: + continue; + } + + vdev->res[type].count = count; + vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr + + bar_offset; + vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset; + } + + return 0; +} + +unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, + enum vnic_res_type type) +{ + return vdev->res[type].count; +} + +void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, + unsigned int index) +{ + if (!vdev->res[type].vaddr) + return NULL; + + switch (type) { + case RES_TYPE_WQ: + case RES_TYPE_RQ: + case RES_TYPE_CQ: + case RES_TYPE_INTR_CTRL: + return (char __iomem *)vdev->res[type].vaddr + + index * VNIC_RES_STRIDE; + default: + return (char __iomem *)vdev->res[type].vaddr; + } +} + +unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, + unsigned int desc_count, unsigned int desc_size) +{ + /* The base address of the desc rings must be 512 byte aligned. + * Descriptor count is aligned to groups of 32 descriptors. A + * count of 0 means the maximum 4096 descriptors. Descriptor + * size is aligned to 16 bytes. + */ + + unsigned int count_align = 32; + unsigned int desc_align = 16; + + ring->base_align = 512; + + if (desc_count == 0) + desc_count = 4096; + + ring->desc_count = VNIC_ALIGN(desc_count, count_align); + + ring->desc_size = VNIC_ALIGN(desc_size, desc_align); + + ring->size = ring->desc_count * ring->desc_size; + ring->size_unaligned = ring->size + ring->base_align; + + return ring->size_unaligned; +} + +void vnic_set_hdr_split_size(struct vnic_dev *vdev, u16 size) +{ + vdev->split_hdr_size = size; +} + +u16 vnic_get_hdr_split_size(struct vnic_dev *vdev) +{ + return vdev->split_hdr_size; +} + +void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring) +{ + memset(ring->descs, 0, ring->size); +} + +int vnic_dev_alloc_desc_ring(__attribute__((unused)) struct vnic_dev *vdev, + struct vnic_dev_ring *ring, + unsigned int desc_count, unsigned int desc_size, unsigned int socket_id, + char *z_name) +{ + const struct rte_memzone *rz; + + vnic_dev_desc_ring_size(ring, desc_count, desc_size); + + rz = rte_memzone_reserve_aligned(z_name, + ring->size_unaligned, socket_id, + 0, ENIC_ALIGN); + if (!rz) { + pr_err("Failed to allocate ring (size=%d), aborting\n", + (int)ring->size); + return -ENOMEM; + } + + ring->descs_unaligned = rz->addr; + if (!ring->descs_unaligned) { + pr_err("Failed to map allocated ring (size=%d), aborting\n", + (int)ring->size); + return -ENOMEM; + } + + ring->base_addr_unaligned = (dma_addr_t)rz->phys_addr; + + ring->base_addr = VNIC_ALIGN(ring->base_addr_unaligned, + ring->base_align); + ring->descs = (u8 *)ring->descs_unaligned + + (ring->base_addr - ring->base_addr_unaligned); + + vnic_dev_clear_desc_ring(ring); + + ring->desc_avail = ring->desc_count - 1; + + return 0; +} + +void vnic_dev_free_desc_ring(__attribute__((unused)) struct vnic_dev *vdev, + struct vnic_dev_ring *ring) +{ + if (ring->descs) + ring->descs = NULL; +} + +static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + int wait) +{ + struct vnic_devcmd __iomem *devcmd = vdev->devcmd; + unsigned int i; + int delay; + u32 status; + int err; + + status = ioread32(&devcmd->status); + if (status == 0xFFFFFFFF) { + /* PCI-e target device is gone */ + return -ENODEV; + } + if 
(status & STAT_BUSY) { + + pr_err("Busy devcmd %d\n", _CMD_N(cmd)); + return -EBUSY; + } + + if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) { + for (i = 0; i < VNIC_DEVCMD_NARGS; i++) + writeq(vdev->args[i], &devcmd->args[i]); + wmb(); /* complete all writes initiated till now */ + } + + iowrite32(cmd, &devcmd->cmd); + + if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) + return 0; + + for (delay = 0; delay < wait; delay++) { + + udelay(100); + + status = ioread32(&devcmd->status); + if (status == 0xFFFFFFFF) { + /* PCI-e target device is gone */ + return -ENODEV; + } + + if (!(status & STAT_BUSY)) { + if (status & STAT_ERROR) { + err = -(int)readq(&devcmd->args[0]); + if (cmd != CMD_CAPABILITY) + pr_err("Devcmd %d failed " \ + "with error code %d\n", + _CMD_N(cmd), err); + return err; + } + + if (_CMD_DIR(cmd) & _CMD_DIR_READ) { + rmb();/* finish all reads initiated till now */ + for (i = 0; i < VNIC_DEVCMD_NARGS; i++) + vdev->args[i] = readq(&devcmd->args[i]); + } + + return 0; + } + } + + pr_err("Timedout devcmd %d\n", _CMD_N(cmd)); + return -ETIMEDOUT; +} + +static int vnic_dev_cmd_proxy(struct vnic_dev *vdev, + enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd, + u64 *a0, u64 *a1, int wait) +{ + u32 status; + int err; + + memset(vdev->args, 0, sizeof(vdev->args)); + + vdev->args[0] = vdev->proxy_index; + vdev->args[1] = cmd; + vdev->args[2] = *a0; + vdev->args[3] = *a1; + + err = _vnic_dev_cmd(vdev, proxy_cmd, wait); + if (err) + return err; + + status = (u32)vdev->args[0]; + if (status & STAT_ERROR) { + err = (int)vdev->args[1]; + if (err != ERR_ECMDUNKNOWN || + cmd != CMD_CAPABILITY) + pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd)); + return err; + } + + *a0 = vdev->args[1]; + *a1 = vdev->args[2]; + + return 0; +} + +static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev, + enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait) +{ + int err; + + vdev->args[0] = *a0; + vdev->args[1] = *a1; + + err = _vnic_dev_cmd(vdev, cmd, wait); + + *a0 = vdev->args[0]; + *a1 = vdev->args[1]; + + return err; +} + +void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index) +{ + vdev->proxy = PROXY_BY_INDEX; + vdev->proxy_index = index; +} + +void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf) +{ + vdev->proxy = PROXY_BY_BDF; + vdev->proxy_index = bdf; +} + +void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev) +{ + vdev->proxy = PROXY_NONE; + vdev->proxy_index = 0; +} + +int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + u64 *a0, u64 *a1, int wait) +{ + memset(vdev->args, 0, sizeof(vdev->args)); + + switch (vdev->proxy) { + case PROXY_BY_INDEX: + return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd, + a0, a1, wait); + case PROXY_BY_BDF: + return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd, + a0, a1, wait); + case PROXY_NONE: + default: + return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait); + } +} + +static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd) +{ + u64 a0 = (u32)cmd, a1 = 0; + int wait = 1000; + int err; + + err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait); + + return !(err || a0); +} + +int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size, + void *value) +{ + u64 a0, a1; + int wait = 1000; + int err; + + a0 = offset; + a1 = size; + + err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait); + + switch (size) { + case 1: + *(u8 *)value = (u8)a0; + break; + case 2: + *(u16 *)value = (u16)a0; + break; + case 4: + *(u32 *)value = (u32)a0; + break; + case 8: + *(u64 *)value = a0; + break; + 
default: + BUG(); + break; + } + + return err; +} + +int vnic_dev_stats_clear(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + + return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait); +} + +int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats) +{ + u64 a0, a1; + int wait = 1000; + static u32 instance; + char name[NAME_MAX]; + + if (!vdev->stats) { + snprintf((char *)name, sizeof(name), + "vnic_stats-%d", instance++); + vdev->stats = vdev->alloc_consistent(vdev->priv, + sizeof(struct vnic_stats), &vdev->stats_pa, (u8 *)name); + if (!vdev->stats) + return -ENOMEM; + } + + *stats = vdev->stats; + a0 = vdev->stats_pa; + a1 = sizeof(struct vnic_stats); + + return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait); +} + +int vnic_dev_close(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + + return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait); +} + +/** Deprecated. @see vnic_dev_enable_wait */ +int vnic_dev_enable(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + + return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait); +} + +int vnic_dev_enable_wait(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + + if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT)) + return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait); + else + return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait); +} + +int vnic_dev_disable(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + + return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait); +} + +int vnic_dev_open(struct vnic_dev *vdev, int arg) +{ + u64 a0 = (u32)arg, a1 = 0; + int wait = 1000; + + return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait); +} + +int vnic_dev_open_done(struct vnic_dev *vdev, int *done) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + int err; + + *done = 0; + + err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait); + if (err) + return err; + + *done = (a0 == 0); + + return 0; +} + +int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg) +{ + u64 a0 = (u32)arg, a1 = 0; + int wait = 1000; + + return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait); +} + +int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + int err; + + *done = 0; + + err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait); + if (err) + return err; + + *done = (a0 == 0); + + return 0; +} + +int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr) +{ + u64 a0, a1 = 0; + int wait = 1000; + int err, i; + + for (i = 0; i < ETH_ALEN; i++) + mac_addr[i] = 0; + + err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait); + if (err) + return err; + + for (i = 0; i < ETH_ALEN; i++) + mac_addr[i] = ((u8 *)&a0)[i]; + + return 0; +} + +int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, + int broadcast, int promisc, int allmulti) +{ + u64 a0, a1 = 0; + int wait = 1000; + int err; + + a0 = (directed ? CMD_PFILTER_DIRECTED : 0) | + (multicast ? CMD_PFILTER_MULTICAST : 0) | + (broadcast ? CMD_PFILTER_BROADCAST : 0) | + (promisc ? CMD_PFILTER_PROMISCUOUS : 0) | + (allmulti ? 
CMD_PFILTER_ALL_MULTICAST : 0); + + err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait); + if (err) + pr_err("Can't set packet filter\n"); + + return err; +} + +int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + int err; + int i; + + for (i = 0; i < ETH_ALEN; i++) + ((u8 *)&a0)[i] = addr[i]; + + err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); + if (err) + pr_err("Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], + err); + + return err; +} + +int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + int err; + int i; + + for (i = 0; i < ETH_ALEN; i++) + ((u8 *)&a0)[i] = addr[i]; + + err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); + if (err) + pr_err("Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], + err); + + return err; +} + +int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev, + u8 ig_vlan_rewrite_mode) +{ + u64 a0 = ig_vlan_rewrite_mode, a1 = 0; + int wait = 1000; + + if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE)) + return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE, + &a0, &a1, wait); + else + return 0; +} + +int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr) +{ + u64 a0 = intr, a1 = 0; + int wait = 1000; + int err; + + err = vnic_dev_cmd(vdev, CMD_IAR, &a0, &a1, wait); + if (err) + pr_err("Failed to raise INTR[%d], err %d\n", intr, err); + + return err; +} + +void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state) +{ + vdev->in_reset = state; +} + +static inline int vnic_dev_in_reset(struct vnic_dev *vdev) +{ + return vdev->in_reset; +} + +int vnic_dev_notify_setcmd(struct vnic_dev *vdev, + void *notify_addr, dma_addr_t notify_pa, u16 intr) +{ + u64 a0, a1; + int wait = 1000; + int r; + + memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify)); + if (!vnic_dev_in_reset(vdev)) { + vdev->notify = notify_addr; + vdev->notify_pa = notify_pa; + } + + a0 = (u64)notify_pa; + a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL; + a1 += sizeof(struct vnic_devcmd_notify); + + r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); + if (!vnic_dev_in_reset(vdev)) + vdev->notify_sz = (r == 0) ? 
(u32)a1 : 0; + + return r; +} + +int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) +{ + void *notify_addr = NULL; + dma_addr_t notify_pa = 0; + char name[NAME_MAX]; + static u32 instance; + + if (vdev->notify || vdev->notify_pa) { + return vnic_dev_notify_setcmd(vdev, vdev->notify, + vdev->notify_pa, intr); + } + if (!vnic_dev_in_reset(vdev)) { + snprintf((char *)name, sizeof(name), + "vnic_notify-%d", instance++); + notify_addr = vdev->alloc_consistent(vdev->priv, + sizeof(struct vnic_devcmd_notify), + &notify_pa, (u8 *)name); + if (!notify_addr) + return -ENOMEM; + } + + return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr); +} + +int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev) +{ + u64 a0, a1; + int wait = 1000; + int err; + + a0 = 0; /* paddr = 0 to unset notify buffer */ + a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */ + a1 += sizeof(struct vnic_devcmd_notify); + + err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); + if (!vnic_dev_in_reset(vdev)) { + vdev->notify = NULL; + vdev->notify_pa = 0; + vdev->notify_sz = 0; + } + + return err; +} + +int vnic_dev_notify_unset(struct vnic_dev *vdev) +{ + if (vdev->notify && !vnic_dev_in_reset(vdev)) { + vdev->free_consistent(vdev->pdev, + sizeof(struct vnic_devcmd_notify), + vdev->notify, + vdev->notify_pa); + } + + return vnic_dev_notify_unsetcmd(vdev); +} + +static int vnic_dev_notify_ready(struct vnic_dev *vdev) +{ + u32 *words; + unsigned int nwords = vdev->notify_sz / 4; + unsigned int i; + u32 csum; + + if (!vdev->notify || !vdev->notify_sz) + return 0; + + do { + csum = 0; + rte_memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz); + words = (u32 *)&vdev->notify_copy; + for (i = 1; i < nwords; i++) + csum += words[i]; + } while (csum != words[0]); + + return 1; +} + +int vnic_dev_init(struct vnic_dev *vdev, int arg) +{ + u64 a0 = (u32)arg, a1 = 0; + int wait = 1000; + int r = 0; + + if (vnic_dev_capable(vdev, CMD_INIT)) + r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait); + else { + vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait); + if (a0 & CMD_INITF_DEFAULT_MAC) { + /* Emulate these for old CMD_INIT_v1 which + * didn't pass a0 so no CMD_INITF_*. 
+ */ + vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait); + vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); + } + } + return r; +} + +int vnic_dev_deinit(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + + return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait); +} + +void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev) +{ + /* Default: hardware intr coal timer is in units of 1.5 usecs */ + vdev->intr_coal_timer_info.mul = 2; + vdev->intr_coal_timer_info.div = 3; + vdev->intr_coal_timer_info.max_usec = + vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff); +} + +int vnic_dev_link_status(struct vnic_dev *vdev) +{ + if (!vnic_dev_notify_ready(vdev)) + return 0; + + return vdev->notify_copy.link_state; +} + +u32 vnic_dev_port_speed(struct vnic_dev *vdev) +{ + if (!vnic_dev_notify_ready(vdev)) + return 0; + + return vdev->notify_copy.port_speed; +} + +void vnic_dev_set_intr_mode(struct vnic_dev *vdev, + enum vnic_dev_intr_mode intr_mode) +{ + vdev->intr_mode = intr_mode; +} + +enum vnic_dev_intr_mode vnic_dev_get_intr_mode( + struct vnic_dev *vdev) +{ + return vdev->intr_mode; +} + +u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec) +{ + return (usec * vdev->intr_coal_timer_info.mul) / + vdev->intr_coal_timer_info.div; +} + +u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles) +{ + return (hw_cycles * vdev->intr_coal_timer_info.div) / + vdev->intr_coal_timer_info.mul; +} + +u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev) +{ + return vdev->intr_coal_timer_info.max_usec; +} + +void vnic_dev_unregister(struct vnic_dev *vdev) +{ + if (vdev) { + if (vdev->notify) + vdev->free_consistent(vdev->pdev, + sizeof(struct vnic_devcmd_notify), + vdev->notify, + vdev->notify_pa); + if (vdev->stats) + vdev->free_consistent(vdev->pdev, + sizeof(struct vnic_stats), + vdev->stats, vdev->stats_pa); + if (vdev->fw_info) + vdev->free_consistent(vdev->pdev, + sizeof(struct vnic_devcmd_fw_info), + vdev->fw_info, vdev->fw_info_pa); + kfree(vdev); + } +} + +struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, + void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar, + unsigned int num_bars) +{ + if (!vdev) { + vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC); + if (!vdev) + return NULL; + } + + vdev->priv = priv; + vdev->pdev = pdev; + + if (vnic_dev_discover_res(vdev, bar, num_bars)) + goto err_out; + + vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); + if (!vdev->devcmd) + goto err_out; + + return vdev; + +err_out: + vnic_dev_unregister(vdev); + return NULL; +} + +struct rte_pci_device *vnic_dev_get_pdev(struct vnic_dev *vdev) +{ + return vdev->pdev; +} + +int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr) +{ + u64 a0, a1 = 0; + int wait = 1000; + int i; + + for (i = 0; i < ETH_ALEN; i++) + ((u8 *)&a0)[i] = mac_addr[i]; + + return vnic_dev_cmd(vdev, CMD_SET_MAC_ADDR, &a0, &a1, wait); +} + +/* + * vnic_dev_classifier: Add/Delete classifier entries + * @vdev: vdev of the device + * @cmd: CLSF_ADD for Add filter + * CLSF_DEL for Delete filter + * @entry: In case of ADD filter, the caller passes the RQ number in this + * variable. + * This function stores the filter_id returned by the + * firmware in the same variable before return; + * + * In case of DEL filter, the caller passes the RQ number. Return + * value is irrelevant. 
+ * @data: filter data + */ +int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, + struct filter *data) +{ + u64 a0, a1; + int wait = 1000; + dma_addr_t tlv_pa; + int ret = -EINVAL; + struct filter_tlv *tlv, *tlv_va; + struct filter_action *action; + u64 tlv_size; + static unsigned int unique_id; + char z_name[RTE_MEMZONE_NAMESIZE]; + + if (cmd == CLSF_ADD) { + tlv_size = sizeof(struct filter) + + sizeof(struct filter_action) + + 2*sizeof(struct filter_tlv); + snprintf((char *)z_name, sizeof(z_name), + "vnic_clsf_%d", unique_id++); + tlv_va = vdev->alloc_consistent(vdev->priv, + tlv_size, &tlv_pa, (u8 *)z_name); + if (!tlv_va) + return -ENOMEM; + tlv = tlv_va; + a0 = tlv_pa; + a1 = tlv_size; + memset(tlv, 0, tlv_size); + tlv->type = CLSF_TLV_FILTER; + tlv->length = sizeof(struct filter); + *(struct filter *)&tlv->val = *data; + + tlv = (struct filter_tlv *)((char *)tlv + + sizeof(struct filter_tlv) + + sizeof(struct filter)); + + tlv->type = CLSF_TLV_ACTION; + tlv->length = sizeof(struct filter_action); + action = (struct filter_action *)&tlv->val; + action->type = FILTER_ACTION_RQ_STEERING; + action->u.rq_idx = *entry; + + ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait); + *entry = (u16)a0; + vdev->free_consistent(vdev->pdev, tlv_size, tlv_va, tlv_pa); + } else if (cmd == CLSF_DEL) { + a0 = *entry; + ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait); + } + + return ret; +} diff --git a/drivers/net/enic/base/vnic_dev.h b/drivers/net/enic/base/vnic_dev.h new file mode 100644 index 00000000..113d6acc --- /dev/null +++ b/drivers/net/enic/base/vnic_dev.h @@ -0,0 +1,211 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#ifndef _VNIC_DEV_H_ +#define _VNIC_DEV_H_ + +#include "enic_compat.h" +#include "rte_pci.h" +#include "vnic_resource.h" +#include "vnic_devcmd.h" + +#ifndef VNIC_PADDR_TARGET +#define VNIC_PADDR_TARGET 0x0000000000000000ULL +#endif + +#ifndef readq +static inline u64 readq(void __iomem *reg) +{ + return ((u64)readl((char *)reg + 0x4UL) << 32) | + (u64)readl(reg); +} + +static inline void writeq(u64 val, void __iomem *reg) +{ + writel(val & 0xffffffff, reg); + writel((u32)(val >> 32), (char *)reg + 0x4UL); +} +#endif + +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +enum vnic_dev_intr_mode { + VNIC_DEV_INTR_MODE_UNKNOWN, + VNIC_DEV_INTR_MODE_INTX, + VNIC_DEV_INTR_MODE_MSI, + VNIC_DEV_INTR_MODE_MSIX, +}; + +struct vnic_dev_bar { + void __iomem *vaddr; + dma_addr_t bus_addr; + unsigned long len; +}; + +struct vnic_dev_ring { + void *descs; + size_t size; + dma_addr_t base_addr; + size_t base_align; + void *descs_unaligned; + size_t size_unaligned; + dma_addr_t base_addr_unaligned; + unsigned int desc_size; + unsigned int desc_count; + unsigned int desc_avail; +}; + +struct vnic_dev_iomap_info { + dma_addr_t bus_addr; + unsigned long len; + void __iomem *vaddr; +}; + +struct vnic_dev; +struct vnic_stats; + +void *vnic_dev_priv(struct vnic_dev *vdev); +unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, + enum vnic_res_type type); +void vnic_register_cbacks(struct vnic_dev *vdev, + void *(*alloc_consistent)(void *priv, size_t size, + dma_addr_t *dma_handle, u8 *name), + void (*free_consistent)(struct rte_pci_device *hwdev, + size_t size, void *vaddr, + dma_addr_t dma_handle)); +void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, + unsigned int index); +dma_addr_t vnic_dev_get_res_bus_addr(struct vnic_dev *vdev, + enum vnic_res_type type, unsigned int index); +uint8_t vnic_dev_get_res_bar(struct vnic_dev *vdev, + enum vnic_res_type type); +uint32_t vnic_dev_get_res_offset(struct vnic_dev *vdev, + enum vnic_res_type type, unsigned int index); +unsigned long vnic_dev_get_res_type_len(struct vnic_dev *vdev, + enum vnic_res_type type); +unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, + unsigned int desc_count, unsigned int desc_size); +void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring); +void vnic_set_hdr_split_size(struct vnic_dev *vdev, u16 size); +u16 vnic_get_hdr_split_size(struct vnic_dev *vdev); +int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, + unsigned int desc_count, unsigned int desc_size, unsigned int socket_id, + char *z_name); +void vnic_dev_free_desc_ring(struct vnic_dev *vdev, + struct vnic_dev_ring *ring); +int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + u64 *a0, u64 *a1, int wait); +int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + u64 *args, int nargs, int wait); +void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index); +void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf); +void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev); +int vnic_dev_fw_info(struct vnic_dev *vdev, + struct vnic_devcmd_fw_info **fw_info); +int vnic_dev_asic_info(struct vnic_dev *vdev, u16 *asic_type, u16 *asic_rev); +int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size, + void *value); +int vnic_dev_stats_clear(struct vnic_dev *vdev); +int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats); +int vnic_dev_hang_notify(struct vnic_dev *vdev); +int vnic_dev_packet_filter(struct 
vnic_dev *vdev, int directed, int multicast, + int broadcast, int promisc, int allmulti); +int vnic_dev_packet_filter_all(struct vnic_dev *vdev, int directed, + int multicast, int broadcast, int promisc, int allmulti); +int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr); +int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr); +int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); +int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr); +int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr); +void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state); +int vnic_dev_notify_unset(struct vnic_dev *vdev); +int vnic_dev_notify_setcmd(struct vnic_dev *vdev, + void *notify_addr, dma_addr_t notify_pa, u16 intr); +int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev); +int vnic_dev_link_status(struct vnic_dev *vdev); +u32 vnic_dev_port_speed(struct vnic_dev *vdev); +u32 vnic_dev_msg_lvl(struct vnic_dev *vdev); +u32 vnic_dev_mtu(struct vnic_dev *vdev); +u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev); +u32 vnic_dev_notify_status(struct vnic_dev *vdev); +u32 vnic_dev_uif(struct vnic_dev *vdev); +int vnic_dev_close(struct vnic_dev *vdev); +int vnic_dev_enable(struct vnic_dev *vdev); +int vnic_dev_enable_wait(struct vnic_dev *vdev); +int vnic_dev_disable(struct vnic_dev *vdev); +int vnic_dev_open(struct vnic_dev *vdev, int arg); +int vnic_dev_open_done(struct vnic_dev *vdev, int *done); +int vnic_dev_init(struct vnic_dev *vdev, int arg); +int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err); +int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len); +int vnic_dev_deinit(struct vnic_dev *vdev); +void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev); +int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev); +int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg); +int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done); +int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg); +int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done); +void vnic_dev_set_intr_mode(struct vnic_dev *vdev, + enum vnic_dev_intr_mode intr_mode); +enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev); +u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec); +u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles); +u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev); +void vnic_dev_unregister(struct vnic_dev *vdev); +int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev, + u8 ig_vlan_rewrite_mode); +struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, + void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar, + unsigned int num_bars); +struct rte_pci_device *vnic_dev_get_pdev(struct vnic_dev *vdev); +int vnic_dev_cmd_init(struct vnic_dev *vdev, int fallback); +int vnic_dev_get_size(void); +int vnic_dev_int13(struct vnic_dev *vdev, u64 arg, u32 op); +int vnic_dev_perbi(struct vnic_dev *vdev, u64 arg, u32 op); +u32 vnic_dev_perbi_rebuild_cnt(struct vnic_dev *vdev); +int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len); +int vnic_dev_enable2(struct vnic_dev *vdev, int active); +int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status); +int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status); +int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); +int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, + struct filter *data); +#ifdef ENIC_VXLAN +int vnic_dev_overlay_offload_enable_disable(struct vnic_dev *vdev, + u8 overlay, u8 config); 
+int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay, + u16 vxlan_udp_port_number); +#endif +#endif /* _VNIC_DEV_H_ */ diff --git a/drivers/net/enic/base/vnic_devcmd.h b/drivers/net/enic/base/vnic_devcmd.h new file mode 100644 index 00000000..b3d5a6cc --- /dev/null +++ b/drivers/net/enic/base/vnic_devcmd.h @@ -0,0 +1,773 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef _VNIC_DEVCMD_H_ +#define _VNIC_DEVCMD_H_ + +#define _CMD_NBITS 14 +#define _CMD_VTYPEBITS 10 +#define _CMD_FLAGSBITS 6 +#define _CMD_DIRBITS 2 + +#define _CMD_NMASK ((1 << _CMD_NBITS)-1) +#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1) +#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1) +#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1) + +#define _CMD_NSHIFT 0 +#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS) +#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS) +#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS) + +/* + * Direction bits (from host perspective). + */ +#define _CMD_DIR_NONE 0U +#define _CMD_DIR_WRITE 1U +#define _CMD_DIR_READ 2U +#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ) + +/* + * Flag bits. + */ +#define _CMD_FLAGS_NONE 0U +#define _CMD_FLAGS_NOWAIT 1U + +/* + * vNIC type bits. + */ +#define _CMD_VTYPE_NONE 0U +#define _CMD_VTYPE_ENET 1U +#define _CMD_VTYPE_FC 2U +#define _CMD_VTYPE_SCSI 4U +#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI) + +/* + * Used to create cmds.. + */ +#define _CMDCF(dir, flags, vtype, nr) \ + (((dir) << _CMD_DIRSHIFT) | \ + ((flags) << _CMD_FLAGSSHIFT) | \ + ((vtype) << _CMD_VTYPESHIFT) | \ + ((nr) << _CMD_NSHIFT)) +#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr) +#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr) + +/* + * Used to decode cmds.. 
+ */ +#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK) +#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK) +#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK) +#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK) + +enum vnic_devcmd_cmd { + CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0), + + /* + * mcpu fw info in mem: + * in: + * (u64)a0=paddr to struct vnic_devcmd_fw_info + * action: + * Fills in struct vnic_devcmd_fw_info (128 bytes) + * note: + * An old definition of CMD_MCPU_FW_INFO + */ + CMD_MCPU_FW_INFO_OLD = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1), + + /* + * mcpu fw info in mem: + * in: + * (u64)a0=paddr to struct vnic_devcmd_fw_info + * (u16)a1=size of the structure + * out: + * (u16)a1=0 for in:a1 = 0, + * data size actually written for other values. + * action: + * Fills in first 128 bytes of vnic_devcmd_fw_info for in:a1 = 0, + * first in:a1 bytes for 0 < in:a1 <= 132, + * 132 bytes for other values of in:a1. + * note: + * CMD_MCPU_FW_INFO and CMD_MCPU_FW_INFO_OLD have the same enum 1 + * for source compatibility. + */ + CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 1), + + /* dev-specific block member: + * in: (u16)a0=offset,(u8)a1=size + * out: a0=value */ + CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2), + + /* stats clear */ + CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3), + + /* stats dump in mem: (u64)a0=paddr to stats area, + * (u16)a1=sizeof stats area */ + CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4), + + /* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */ + CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7), + + /* set Rx packet filter for all: (u32)a0=filters (see CMD_PFILTER_*) */ + CMD_PACKET_FILTER_ALL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 7), + + /* hang detection notification */ + CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8), + + /* MAC address in (u48)a0 */ + CMD_GET_MAC_ADDR = _CMDC(_CMD_DIR_READ, + _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9), + + /* add addr from (u48)a0 */ + CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE, + _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12), + + /* del addr from (u48)a0 */ + CMD_ADDR_DEL = _CMDCNW(_CMD_DIR_WRITE, + _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13), + + /* add VLAN id in (u16)a0 */ + CMD_VLAN_ADD = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14), + + /* del VLAN id in (u16)a0 */ + CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15), + + /* nic_cfg in (u32)a0 */ + CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16), + + /* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */ + CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17), + + /* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */ + CMD_RSS_CPU = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18), + + /* initiate softreset */ + CMD_SOFT_RESET = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19), + + /* softreset status: + * out: a0=0 reset complete, a0=1 reset in progress */ + CMD_SOFT_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20), + + /* set struct vnic_devcmd_notify buffer in mem: + * in: + * (u64)a0=paddr to notify (set paddr=0 to unset) + * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify) + * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr) + * out: + * (u32)a1 = effective size + */ + CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21), + + /* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct, + * (u8)a1=PXENV_UNDI_xxx */ + CMD_UNDI = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22), + + /* initiate open sequence 
(u32)a0=flags (see CMD_OPENF_*) */ + CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23), + + /* open status: + * out: a0=0 open complete, a0=1 open in progress */ + CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24), + + /* close vnic */ + CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25), + + /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */ +/***** Replaced by CMD_INIT *****/ + CMD_INIT_v1 = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26), + + /* variant of CMD_INIT, with provisioning info + * (u64)a0=paddr of vnic_devcmd_provinfo + * (u32)a1=sizeof provision info */ + CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27), + + /* enable virtual link */ + CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28), + + /* enable virtual link, waiting variant. */ + CMD_ENABLE_WAIT = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28), + + /* disable virtual link */ + CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29), + + /* stats dump sum of all vnic stats on same uplink in mem: + * (u64)a0=paddr + * (u16)a1=sizeof stats area */ + CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30), + + /* init status: + * out: a0=0 init complete, a0=1 init in progress + * if a0=0, a1=errno */ + CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31), + + /* INT13 API: (u64)a0=paddr to vnic_int13_params struct + * (u32)a1=INT13_CMD_xxx */ + CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32), + + /* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */ + CMD_LOGICAL_UPLINK = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33), + + /* undo initialize of virtual link */ + CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34), + + /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */ + CMD_INIT = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 35), + + /* check fw capability of a cmd: + * in: (u32)a0=cmd + * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */ + CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36), + + /* persistent binding info + * in: (u64)a0=paddr of arg + * (u32)a1=CMD_PERBI_XXX */ + CMD_PERBI = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37), + + /* Interrupt Assert Register functionality + * in: (u16)a0=interrupt number to assert + */ + CMD_IAR = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38), + + /* initiate hangreset, like softreset after hang detected */ + CMD_HANG_RESET = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39), + + /* hangreset status: + * out: a0=0 reset complete, a0=1 reset in progress */ + CMD_HANG_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40), + + /* + * Set hw ingress packet vlan rewrite mode: + * in: (u32)a0=new vlan rewrite mode + * out: (u32)a0=old vlan rewrite mode */ + CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41), + + /* + * in: (u16)a0=bdf of target vnic + * (u32)a1=cmd to proxy + * a2-a15=args to cmd in a1 + * out: (u32)a0=status of proxied cmd + * a1-a15=out args of proxied cmd */ + CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42), + + /* + * As for BY_BDF except a0 is index of hvnlink subordinate vnic + * or SR-IOV virtual vnic + */ + CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43), + + /* + * For HPP toggle: + * adapter-info-get + * in: (u64)a0=phsical address of buffer passed in from caller. + * (u16)a1=size of buffer specified in a0. + * out: (u64)a0=phsical address of buffer passed in from caller. + * (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or + * 0 if no VIF-CONFIG-INFO TLV was ever received. 
*/ + CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44), + + /* + * INT13 API: (u64)a0=paddr to vnic_int13_params struct + * (u32)a1=INT13_CMD_xxx + */ + CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45), + + /* + * Set default vlan: + * in: (u16)a0=new default vlan + * (u16)a1=zero for overriding vlan with param a0, + * non-zero for resetting vlan to the default + * out: (u16)a0=old default vlan + */ + CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46), + + /* init_prov_info2: + * Variant of CMD_INIT_PROV_INFO, where it will not try to enable + * the vnic until CMD_ENABLE2 is issued. + * (u64)a0=paddr of vnic_devcmd_provinfo + * (u32)a1=sizeof provision info */ + CMD_INIT_PROV_INFO2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47), + + /* enable2: + * (u32)a0=0 ==> standby + * =CMD_ENABLE2_ACTIVE ==> active + */ + CMD_ENABLE2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 48), + + /* + * cmd_status: + * Returns the status of the specified command + * Input: + * a0 = command for which status is being queried. + * Possible values are: + * CMD_SOFT_RESET + * CMD_HANG_RESET + * CMD_OPEN + * CMD_INIT + * CMD_INIT_PROV_INFO + * CMD_DEINIT + * CMD_INIT_PROV_INFO2 + * CMD_ENABLE2 + * Output: + * if status == STAT_ERROR + * a0 = ERR_ENOTSUPPORTED - status for command in a0 is + * not supported + * if status == STAT_NONE + * a0 = status of the devcmd specified in a0 as follows. + * ERR_SUCCESS - command in a0 completed successfully + * ERR_EINPROGRESS - command in a0 is still in progress + */ + CMD_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 49), + + /* + * Returns interrupt coalescing timer conversion factors. + * After calling this devcmd, ENIC driver can convert + * interrupt coalescing timer in usec into CPU cycles as follows: + * + * intr_timer_cycles = intr_timer_usec * multiplier / divisor + * + * Interrupt coalescing timer in usecs can be be converted/obtained + * from CPU cycles as follows: + * + * intr_timer_usec = intr_timer_cycles * divisor / multiplier + * + * in: none + * out: (u32)a0 = multiplier + * (u32)a1 = divisor + * (u32)a2 = maximum timer value in usec + */ + CMD_INTR_COAL_CONVERT = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 50), + + /* + * ISCSI DUMP API: + * in: (u64)a0=paddr of the param or param itself + * (u32)a1=ISCSI_CMD_xxx + */ + CMD_ISCSI_DUMP_REQ = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 51), + + /* + * ISCSI DUMP STATUS API: + * in: (u32)a0=cmd tag + * in: (u32)a1=ISCSI_CMD_xxx + * out: (u32)a0=cmd status + */ + CMD_ISCSI_DUMP_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 52), + + /* + * Subvnic migration from MQ <--> VF. + * Enable the LIF migration from MQ to VF and vice versa. MQ and VF + * indexes are statically bound at the time of initialization. + * Based on the + * direction of migration, the resources of either MQ or the VF shall + * be attached to the LIF. 
+ * in: (u32)a0=Direction of Migration + * 0=> Migrate to VF + * 1=> Migrate to MQ + * (u32)a1=VF index (MQ index) + */ + CMD_MIGRATE_SUBVNIC = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 53), + + + /* + * Register / Deregister the notification block for MQ subvnics + * in: + * (u64)a0=paddr to notify (set paddr=0 to unset) + * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify) + * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr) + * out: + * (u32)a1 = effective size + */ + CMD_SUBVNIC_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 54), + + /* + * Set the predefined mac address as default + * in: + * (u48)a0=mac addr + */ + CMD_SET_MAC_ADDR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 55), + + /* Update the provisioning info of the given VIF + * (u64)a0=paddr of vnic_devcmd_provinfo + * (u32)a1=sizeof provision info */ + CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56), + + /* + * Initialization for the devcmd2 interface. + * in: (u64) a0=host result buffer physical address + * in: (u16) a1=number of entries in result buffer + */ + CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57), + + /* + * Add a filter. + * in: (u64) a0= filter address + * (u32) a1= size of filter + * out: (u32) a0=filter identifier + */ + CMD_ADD_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 58), + + /* + * Delete a filter. + * in: (u32) a0=filter identifier + */ + CMD_DEL_FILTER = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 59), + + /* + * Enable a Queue Pair in User space NIC + * in: (u32) a0=Queue Pair number + * (u32) a1= command + */ + CMD_QP_ENABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 60), + + /* + * Disable a Queue Pair in User space NIC + * in: (u32) a0=Queue Pair number + * (u32) a1= command + */ + CMD_QP_DISABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 61), + + /* + * Stats dump Queue Pair in User space NIC + * in: (u32) a0=Queue Pair number + * (u64) a1=host buffer addr for status dump + * (u32) a2=length of the buffer + */ + CMD_QP_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 62), + + /* + * Clear stats for Queue Pair in User space NIC + * in: (u32) a0=Queue Pair number + */ + CMD_QP_STATS_CLEAR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 63), + + /* + * Enable/Disable overlay offloads on the given vnic + * in: (u8) a0 = OVERLAY_FEATURE_NVGRE : NVGRE + * a0 = OVERLAY_FEATURE_VXLAN : VxLAN + * in: (u8) a1 = OVERLAY_OFFLOAD_ENABLE : Enable + * a1 = OVERLAY_OFFLOAD_DISABLE : Disable + */ + CMD_OVERLAY_OFFLOAD_ENABLE_DISABLE = + _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 72), + + /* + * Configuration of overlay offloads feature on a given vNIC + * in: (u8) a0 = DEVCMD_OVERLAY_NVGRE : NVGRE + * a0 = DEVCMD_OVERLAY_VXLAN : VxLAN + * in: (u8) a1 = VXLAN_PORT_UPDATE : VxLAN + * in: (u16) a2 = unsigned short int port information + */ + CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73), +}; + +/* CMD_ENABLE2 flags */ +#define CMD_ENABLE2_STANDBY 0x0 +#define CMD_ENABLE2_ACTIVE 0x1 + +/* flags for CMD_OPEN */ +#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */ + +/* flags for CMD_INIT */ +#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */ + +/* flags for CMD_PACKET_FILTER */ +#define CMD_PFILTER_DIRECTED 0x01 +#define CMD_PFILTER_MULTICAST 0x02 +#define CMD_PFILTER_BROADCAST 0x04 +#define CMD_PFILTER_PROMISCUOUS 0x08 +#define CMD_PFILTER_ALL_MULTICAST 0x10 + +/* Commands for CMD_QP_ENABLE/CM_QP_DISABLE */ +#define CMD_QP_RQWQ 0x0 + +/* rewrite modes for CMD_IG_VLAN_REWRITE_MODE */ +#define IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK 0 +#define 
IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN 1 +#define IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN 2 +#define IG_VLAN_REWRITE_MODE_PASS_THRU 3 + +enum vnic_devcmd_status { + STAT_NONE = 0, + STAT_BUSY = 1 << 0, /* cmd in progress */ + STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */ +}; + +enum vnic_devcmd_error { + ERR_SUCCESS = 0, + ERR_EINVAL = 1, + ERR_EFAULT = 2, + ERR_EPERM = 3, + ERR_EBUSY = 4, + ERR_ECMDUNKNOWN = 5, + ERR_EBADSTATE = 6, + ERR_ENOMEM = 7, + ERR_ETIMEDOUT = 8, + ERR_ELINKDOWN = 9, + ERR_EMAXRES = 10, + ERR_ENOTSUPPORTED = 11, + ERR_EINPROGRESS = 12, + ERR_MAX +}; + +/* + * note: hw_version and asic_rev refer to the same thing, + * but have different formats. hw_version is + * a 32-byte string (e.g. "A2") and asic_rev is + * a 16-bit integer (e.g. 0xA2). + */ +struct vnic_devcmd_fw_info { + char fw_version[32]; + char fw_build[32]; + char hw_version[32]; + char hw_serial_number[32]; + u16 asic_type; + u16 asic_rev; +}; + +enum fwinfo_asic_type { + FWINFO_ASIC_TYPE_UNKNOWN, + FWINFO_ASIC_TYPE_PALO, + FWINFO_ASIC_TYPE_SERENO, +}; + + +struct vnic_devcmd_notify { + u32 csum; /* checksum over following words */ + + u32 link_state; /* link up == 1 */ + u32 port_speed; /* effective port speed (rate limit) */ + u32 mtu; /* MTU */ + u32 msglvl; /* requested driver msg lvl */ + u32 uif; /* uplink interface */ + u32 status; /* status bits (see VNIC_STF_*) */ + u32 error; /* error code (see ERR_*) for first ERR */ + u32 link_down_cnt; /* running count of link down transitions */ + u32 perbi_rebuild_cnt; /* running count of perbi rebuilds */ +}; +#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */ +#define VNIC_STF_STD_PAUSE 0x0002 /* standard link-level pause on */ +#define VNIC_STF_PFC_PAUSE 0x0004 /* priority flow control pause on */ +/* all supported status flags */ +#define VNIC_STF_ALL (VNIC_STF_FATAL_ERR |\ + VNIC_STF_STD_PAUSE |\ + VNIC_STF_PFC_PAUSE |\ + 0) + +struct vnic_devcmd_provinfo { + u8 oui[3]; + u8 type; + u8 data[0]; +}; + +/* + * These are used in flags field of different filters to denote + * valid fields used. + */ +#define FILTER_FIELD_VALID(fld) (1 << (fld - 1)) + +#define FILTER_FIELDS_USNIC (FILTER_FIELD_VALID(1) | \ + FILTER_FIELD_VALID(2) | \ + FILTER_FIELD_VALID(3) | \ + FILTER_FIELD_VALID(4)) + +#define FILTER_FIELDS_IPV4_5TUPLE (FILTER_FIELD_VALID(1) | \ + FILTER_FIELD_VALID(2) | \ + FILTER_FIELD_VALID(3) | \ + FILTER_FIELD_VALID(4) | \ + FILTER_FIELD_VALID(5)) + +#define FILTER_FIELDS_MAC_VLAN (FILTER_FIELD_VALID(1) | \ + FILTER_FIELD_VALID(2)) + +#define FILTER_FIELD_USNIC_VLAN FILTER_FIELD_VALID(1) +#define FILTER_FIELD_USNIC_ETHTYPE FILTER_FIELD_VALID(2) +#define FILTER_FIELD_USNIC_PROTO FILTER_FIELD_VALID(3) +#define FILTER_FIELD_USNIC_ID FILTER_FIELD_VALID(4) + +struct filter_usnic_id { + u32 flags; + u16 vlan; + u16 ethtype; + u8 proto_version; + u32 usnic_id; +} __attribute__((packed)); + +#define FILTER_FIELD_5TUP_PROTO FILTER_FIELD_VALID(1) +#define FILTER_FIELD_5TUP_SRC_AD FILTER_FIELD_VALID(2) +#define FILTER_FIELD_5TUP_DST_AD FILTER_FIELD_VALID(3) +#define FILTER_FIELD_5TUP_SRC_PT FILTER_FIELD_VALID(4) +#define FILTER_FIELD_5TUP_DST_PT FILTER_FIELD_VALID(5) + +/* Enums for the protocol field. 
*/ +enum protocol_e { + PROTO_UDP = 0, + PROTO_TCP = 1, +}; + +struct filter_ipv4_5tuple { + u32 flags; + u32 protocol; + u32 src_addr; + u32 dst_addr; + u16 src_port; + u16 dst_port; +} __attribute__((packed)); + +#define FILTER_FIELD_VMQ_VLAN FILTER_FIELD_VALID(1) +#define FILTER_FIELD_VMQ_MAC FILTER_FIELD_VALID(2) + +struct filter_mac_vlan { + u32 flags; + u16 vlan; + u8 mac_addr[6]; +} __attribute__((packed)); + +/* Specifies the filter_action type. */ +enum { + FILTER_ACTION_RQ_STEERING = 0, + FILTER_ACTION_MAX +}; + +struct filter_action { + u32 type; + union { + u32 rq_idx; + } u; +} __attribute__((packed)); + +/* Specifies the filter type. */ +enum filter_type { + FILTER_USNIC_ID = 0, + FILTER_IPV4_5TUPLE = 1, + FILTER_MAC_VLAN = 2, + FILTER_MAX +}; + +struct filter { + u32 type; + union { + struct filter_usnic_id usnic; + struct filter_ipv4_5tuple ipv4; + struct filter_mac_vlan mac_vlan; + } u; +} __attribute__((packed)); + +enum { + CLSF_TLV_FILTER = 0, + CLSF_TLV_ACTION = 1, +}; + +#define FILTER_MAX_BUF_SIZE 100 /* Maximum size of buffer to CMD_ADD_FILTER */ + +struct filter_tlv { + uint32_t type; + uint32_t length; + uint32_t val[0]; +}; + +enum { + CLSF_ADD = 0, + CLSF_DEL = 1, +}; + +/* + * Writing cmd register causes STAT_BUSY to get set in status register. + * When cmd completes, STAT_BUSY will be cleared. + * + * If cmd completed successfully STAT_ERROR will be clear + * and args registers contain cmd-specific results. + * + * If cmd error, STAT_ERROR will be set and args[0] contains error code. + * + * status register is read-only. While STAT_BUSY is set, + * all other register contents are read-only. + */ + +/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */ +#define VNIC_DEVCMD_NARGS 15 +struct vnic_devcmd { + u32 status; /* RO */ + u32 cmd; /* RW */ + u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */ +}; + +/* + * Version 2 of the interface. + * + * Some things are carried over, notably the vnic_devcmd_cmd enum. + */ + +/* + * Flags for vnic_devcmd2.flags + */ + +#define DEVCMD2_FNORESULT 0x1 /* Don't copy result to host */ + +#define VNIC_DEVCMD2_NARGS VNIC_DEVCMD_NARGS +struct vnic_devcmd2 { + u16 pad; + u16 flags; + u32 cmd; /* same command #defines as original */ + u64 args[VNIC_DEVCMD2_NARGS]; +}; + +#define VNIC_DEVCMD2_NRESULTS VNIC_DEVCMD_NARGS +struct devcmd2_result { + u64 results[VNIC_DEVCMD2_NRESULTS]; + u32 pad; + u16 completed_index; /* into copy WQ */ + u8 error; /* same error codes as original */ + u8 color; /* 0 or 1 as with completion queues */ +}; + +#define DEVCMD2_RING_SIZE 32 +#define DEVCMD2_DESC_SIZE 128 + +#define DEVCMD2_RESULTS_SIZE_MAX ((1 << 16) - 1) + +/* Overlay related definitions */ + +/* + * This enum lists the flag associated with each of the overlay features + */ +typedef enum { + OVERLAY_FEATURE_NVGRE = 1, + OVERLAY_FEATURE_VXLAN, + OVERLAY_FEATURE_MAX, +} overlay_feature_t; + +#define OVERLAY_OFFLOAD_ENABLE 0 +#define OVERLAY_OFFLOAD_DISABLE 1 + +#define OVERLAY_CFG_VXLAN_PORT_UPDATE 0 +#endif /* _VNIC_DEVCMD_H_ */ diff --git a/drivers/net/enic/base/vnic_enet.h b/drivers/net/enic/base/vnic_enet.h new file mode 100644 index 00000000..cc34998f --- /dev/null +++ b/drivers/net/enic/base/vnic_enet.h @@ -0,0 +1,77 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. 
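An illustrative sketch, not part of this header, of how a CMD_ADD_FILTER payload can be assembled from the TLV structures above. It assumes the filter TLV is immediately followed by the action TLV inside the FILTER_MAX_BUF_SIZE buffer; the actual driver code, not shown here, is authoritative.

/* Sketch only: steer an IPv4/UDP 5-tuple match to receive queue 0. */
u8 buf[FILTER_MAX_BUF_SIZE] = {0};
struct filter_tlv *tlv = (struct filter_tlv *)buf;
struct filter *flt = (struct filter *)tlv->val;
struct filter_action *act;

tlv->type = CLSF_TLV_FILTER;
tlv->length = sizeof(struct filter);
flt->type = FILTER_IPV4_5TUPLE;
flt->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
flt->u.ipv4.protocol = PROTO_UDP;
/* ... src/dst address and port fields ... */

tlv = (struct filter_tlv *)((u8 *)tlv->val + tlv->length);
tlv->type = CLSF_TLV_ACTION;
tlv->length = sizeof(struct filter_action);
act = (struct filter_action *)tlv->val;
act->type = FILTER_ACTION_RQ_STEERING;
act->u.rq_idx = 0;
/* The buffer address and total length are then supplied as a0/a1 of CMD_ADD_FILTER. */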
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef _VNIC_ENIC_H_ +#define _VNIC_ENIC_H_ + +/* Device-specific region: enet configuration */ +struct vnic_enet_config { + u32 flags; + u32 wq_desc_count; + u32 rq_desc_count; + u16 mtu; + u16 intr_timer_deprecated; + u8 intr_timer_type; + u8 intr_mode; + char devname[16]; + u32 intr_timer_usec; + u16 loop_tag; + u16 vf_rq_count; + u16 num_arfs; + u64 mem_paddr; +}; + +#define VENETF_TSO 0x1 /* TSO enabled */ +#define VENETF_LRO 0x2 /* LRO enabled */ +#define VENETF_RXCSUM 0x4 /* RX csum enabled */ +#define VENETF_TXCSUM 0x8 /* TX csum enabled */ +#define VENETF_RSS 0x10 /* RSS enabled */ +#define VENETF_RSSHASH_IPV4 0x20 /* Hash on IPv4 fields */ +#define VENETF_RSSHASH_TCPIPV4 0x40 /* Hash on TCP + IPv4 fields */ +#define VENETF_RSSHASH_IPV6 0x80 /* Hash on IPv6 fields */ +#define VENETF_RSSHASH_TCPIPV6 0x100 /* Hash on TCP + IPv6 fields */ +#define VENETF_RSSHASH_IPV6_EX 0x200 /* Hash on IPv6 extended fields */ +#define VENETF_RSSHASH_TCPIPV6_EX 0x400 /* Hash on TCP + IPv6 ext. fields */ +#define VENETF_LOOP 0x800 /* Loopback enabled */ +#define VENETF_VMQ 0x4000 /* using VMQ flag for VMware NETQ */ +#define VENETF_VXLAN 0x10000 /* VxLAN offload */ +#define VENETF_NVGRE 0x20000 /* NVGRE offload */ +#define VENET_INTR_TYPE_MIN 0 /* Timer specs min interrupt spacing */ +#define VENET_INTR_TYPE_IDLE 1 /* Timer specs idle time before irq */ + +#define VENET_INTR_MODE_ANY 0 /* Try MSI-X, then MSI, then INTx */ +#define VENET_INTR_MODE_MSI 1 /* Try MSI then INTx */ +#define VENET_INTR_MODE_INTX 2 /* Try INTx only */ + +#endif /* _VNIC_ENIC_H_ */ diff --git a/drivers/net/enic/base/vnic_intr.c b/drivers/net/enic/base/vnic_intr.c new file mode 100644 index 00000000..04bb4261 --- /dev/null +++ b/drivers/net/enic/base/vnic_intr.c @@ -0,0 +1,77 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include "vnic_dev.h" +#include "vnic_intr.h" + +void vnic_intr_free(struct vnic_intr *intr) +{ + intr->ctrl = NULL; +} + +int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, + unsigned int index) +{ + intr->index = index; + intr->vdev = vdev; + + intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index); + if (!intr->ctrl) { + pr_err("Failed to hook INTR[%d].ctrl resource\n", index); + return -EINVAL; + } + + return 0; +} + +void vnic_intr_init(struct vnic_intr *intr, u32 coalescing_timer, + unsigned int coalescing_type, unsigned int mask_on_assertion) +{ + vnic_intr_coalescing_timer_set(intr, coalescing_timer); + iowrite32(coalescing_type, &intr->ctrl->coalescing_type); + iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion); + iowrite32(0, &intr->ctrl->int_credits); +} + +void vnic_intr_coalescing_timer_set(struct vnic_intr *intr, + u32 coalescing_timer) +{ + iowrite32(vnic_dev_intr_coal_timer_usec_to_hw(intr->vdev, + coalescing_timer), &intr->ctrl->coalescing_timer); +} + +void vnic_intr_clean(struct vnic_intr *intr) +{ + iowrite32(0, &intr->ctrl->int_credits); +} diff --git a/drivers/net/enic/base/vnic_intr.h b/drivers/net/enic/base/vnic_intr.h new file mode 100644 index 00000000..da089bcf --- /dev/null +++ b/drivers/net/enic/base/vnic_intr.h @@ -0,0 +1,125 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
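A brief usage sketch, illustrative only: bringing up one interrupt resource with the helpers above. The coalescing timer argument is in microseconds and is converted to hardware units inside vnic_intr_coalescing_timer_set(); the vdev pointer and the timer-type constant (declared in vnic_intr.h below) are assumed to be in scope.

struct vnic_intr intr;

if (vnic_intr_alloc(vdev, &intr, 0))         /* hooks RES_TYPE_INTR_CTRL[0] */
        return -EINVAL;

vnic_intr_init(&intr,
               125,                          /* coalescing timer, usec      */
               VNIC_INTR_TIMER_TYPE_ABS,     /* coalescing type             */
               1);                           /* mask_on_assertion           */

/* ... on shutdown ... */
vnic_intr_clean(&intr);
vnic_intr_free(&intr);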
IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef _VNIC_INTR_H_ +#define _VNIC_INTR_H_ + + +#include "vnic_dev.h" + +#define VNIC_INTR_TIMER_TYPE_ABS 0 +#define VNIC_INTR_TIMER_TYPE_QUIET 1 + +/* Interrupt control */ +struct vnic_intr_ctrl { + u32 coalescing_timer; /* 0x00 */ + u32 pad0; + u32 coalescing_value; /* 0x08 */ + u32 pad1; + u32 coalescing_type; /* 0x10 */ + u32 pad2; + u32 mask_on_assertion; /* 0x18 */ + u32 pad3; + u32 mask; /* 0x20 */ + u32 pad4; + u32 int_credits; /* 0x28 */ + u32 pad5; + u32 int_credit_return; /* 0x30 */ + u32 pad6; +}; + +struct vnic_intr { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */ +}; + +static inline void vnic_intr_unmask(struct vnic_intr *intr) +{ + iowrite32(0, &intr->ctrl->mask); +} + +static inline void vnic_intr_mask(struct vnic_intr *intr) +{ + iowrite32(1, &intr->ctrl->mask); +} + +static inline int vnic_intr_masked(struct vnic_intr *intr) +{ + return ioread32(&intr->ctrl->mask); +} + +static inline void vnic_intr_return_credits(struct vnic_intr *intr, + unsigned int credits, int unmask, int reset_timer) +{ +#define VNIC_INTR_UNMASK_SHIFT 16 +#define VNIC_INTR_RESET_TIMER_SHIFT 17 + + u32 int_credit_return = (credits & 0xffff) | + (unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) | + (reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0); + + iowrite32(int_credit_return, &intr->ctrl->int_credit_return); +} + +static inline unsigned int vnic_intr_credits(struct vnic_intr *intr) +{ + return ioread32(&intr->ctrl->int_credits); +} + +static inline void vnic_intr_return_all_credits(struct vnic_intr *intr) +{ + unsigned int credits = vnic_intr_credits(intr); + int unmask = 1; + int reset_timer = 1; + + vnic_intr_return_credits(intr, credits, unmask, reset_timer); +} + +static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba) +{ + /* read PBA without clearing */ + return ioread32(legacy_pba); +} + +void vnic_intr_free(struct vnic_intr *intr); +int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, + unsigned int index); +void vnic_intr_init(struct vnic_intr *intr, u32 coalescing_timer, + unsigned int coalescing_type, unsigned int mask_on_assertion); +void vnic_intr_coalescing_timer_set(struct vnic_intr *intr, + u32 coalescing_timer); +void vnic_intr_clean(struct vnic_intr *intr); + +#endif /* _VNIC_INTR_H_ */ diff --git a/drivers/net/enic/base/vnic_nic.h b/drivers/net/enic/base/vnic_nic.h new file mode 100644 index 00000000..88907c00 --- /dev/null +++ b/drivers/net/enic/base/vnic_nic.h @@ -0,0 +1,87 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef _VNIC_NIC_H_ +#define _VNIC_NIC_H_ + +#define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD 0xffUL +#define NIC_CFG_RSS_DEFAULT_CPU_SHIFT 0 +#define NIC_CFG_RSS_HASH_TYPE (0xffUL << 8) +#define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD 0xffUL +#define NIC_CFG_RSS_HASH_TYPE_SHIFT 8 +#define NIC_CFG_RSS_HASH_BITS (7UL << 16) +#define NIC_CFG_RSS_HASH_BITS_MASK_FIELD 7UL +#define NIC_CFG_RSS_HASH_BITS_SHIFT 16 +#define NIC_CFG_RSS_BASE_CPU (7UL << 19) +#define NIC_CFG_RSS_BASE_CPU_MASK_FIELD 7UL +#define NIC_CFG_RSS_BASE_CPU_SHIFT 19 +#define NIC_CFG_RSS_ENABLE (1UL << 22) +#define NIC_CFG_RSS_ENABLE_MASK_FIELD 1UL +#define NIC_CFG_RSS_ENABLE_SHIFT 22 +#define NIC_CFG_TSO_IPID_SPLIT_EN (1UL << 23) +#define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD 1UL +#define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT 23 +#define NIC_CFG_IG_VLAN_STRIP_EN (1UL << 24) +#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL +#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24 + +#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 1) +#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 2) +#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 3) +#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4) +#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 5) +#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 6) + +static inline void vnic_set_nic_cfg(u32 *nic_cfg, + u8 rss_default_cpu, u8 rss_hash_type, + u8 rss_hash_bits, u8 rss_base_cpu, + u8 rss_enable, u8 tso_ipid_split_en, + u8 ig_vlan_strip_en) +{ + *nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) | + ((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD) + << NIC_CFG_RSS_HASH_TYPE_SHIFT) | + ((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD) + << NIC_CFG_RSS_HASH_BITS_SHIFT) | + ((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD) + << NIC_CFG_RSS_BASE_CPU_SHIFT) | + ((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD) + << NIC_CFG_RSS_ENABLE_SHIFT) | + ((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD) + << NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) | + ((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD) + << NIC_CFG_IG_VLAN_STRIP_EN_SHIFT); +} + +#endif /* _VNIC_NIC_H_ */ diff --git a/drivers/net/enic/base/vnic_resource.h b/drivers/net/enic/base/vnic_resource.h new file mode 100644 index 00000000..b7a9b612 --- /dev/null +++ b/drivers/net/enic/base/vnic_resource.h @@ -0,0 +1,96 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. 
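For illustration only, not part of the header: how the shift/mask definitions above combine when vnic_set_nic_cfg() is asked to enable RSS on IPv4 and TCP/IPv4 with a 7-bit (128-entry) indirection table and ingress VLAN stripping.

u32 nic_cfg;

vnic_set_nic_cfg(&nic_cfg,
                 0,                               /* rss_default_cpu   */
                 NIC_CFG_RSS_HASH_TYPE_IPV4 |
                 NIC_CFG_RSS_HASH_TYPE_TCP_IPV4,  /* rss_hash_type     */
                 7,                               /* rss_hash_bits     */
                 0,                               /* rss_base_cpu      */
                 1,                               /* rss_enable        */
                 0,                               /* tso_ipid_split_en */
                 1);                              /* ig_vlan_strip_en  */

/* Resulting layout: hash type 0x06 in bits 8-15, hash bits 7 in bits 16-18,
 * RSS enable in bit 22, VLAN strip in bit 24; the word is then handed to
 * firmware via CMD_NIC_CFG ((u32)a0). */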
All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef _VNIC_RESOURCE_H_ +#define _VNIC_RESOURCE_H_ + +#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */ +#define VNIC_RES_VERSION 0x00000000L +#define MGMTVNIC_MAGIC 0x544d474dL /* 'MGMT' */ +#define MGMTVNIC_VERSION 0x00000000L + +/* The MAC address assigned to the CFG vNIC is fixed. */ +#define MGMTVNIC_MAC { 0x02, 0x00, 0x54, 0x4d, 0x47, 0x4d } + +/* vNIC resource types */ +enum vnic_res_type { + RES_TYPE_EOL, /* End-of-list */ + RES_TYPE_WQ, /* Work queues */ + RES_TYPE_RQ, /* Receive queues */ + RES_TYPE_CQ, /* Completion queues */ + RES_TYPE_MEM, /* Window to dev memory */ + RES_TYPE_NIC_CFG, /* Enet NIC config registers */ + RES_TYPE_RSS_KEY, /* Enet RSS secret key */ + RES_TYPE_RSS_CPU, /* Enet RSS indirection table */ + RES_TYPE_TX_STATS, /* Netblock Tx statistic regs */ + RES_TYPE_RX_STATS, /* Netblock Rx statistic regs */ + RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */ + RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */ + RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */ + RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status */ + RES_TYPE_DEBUG, /* Debug-only info */ + RES_TYPE_DEV, /* Device-specific region */ + RES_TYPE_DEVCMD, /* Device command region */ + RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */ + RES_TYPE_SUBVNIC, /* subvnic resource type */ + RES_TYPE_MQ_WQ, /* MQ Work queues */ + RES_TYPE_MQ_RQ, /* MQ Receive queues */ + RES_TYPE_MQ_CQ, /* MQ Completion queues */ + RES_TYPE_DEPRECATED1, /* Old version of devcmd 2 */ + RES_TYPE_DEVCMD2, /* Device control region */ + RES_TYPE_MAX, /* Count of resource types */ +}; + +struct vnic_resource_header { + u32 magic; + u32 version; +}; + +struct mgmt_barmap_hdr { + u32 magic; /* magic number */ + u32 version; /* header format version */ + u16 lif; /* loopback lif for mgmt frames */ + u16 pci_slot; /* installed pci slot */ + char serial[16]; /* card serial number */ +}; + +struct vnic_resource { + u8 type; + u8 bar; + u8 pad[2]; + u32 bar_offset; + u32 count; +}; + +#endif /* _VNIC_RESOURCE_H_ */ diff --git a/drivers/net/enic/base/vnic_rq.c b/drivers/net/enic/base/vnic_rq.c new file mode 100644 index 00000000..cb62c5e5 --- 
/dev/null +++ b/drivers/net/enic/base/vnic_rq.c @@ -0,0 +1,175 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include "vnic_dev.h" +#include "vnic_rq.h" + +void vnic_rq_free(struct vnic_rq *rq) +{ + struct vnic_dev *vdev; + + vdev = rq->vdev; + + vnic_dev_free_desc_ring(vdev, &rq->ring); + + rq->ctrl = NULL; +} + +int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, + unsigned int desc_count, unsigned int desc_size) +{ + int rc; + char res_name[NAME_MAX]; + static int instance; + + rq->index = index; + rq->vdev = vdev; + + rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index); + if (!rq->ctrl) { + pr_err("Failed to hook RQ[%d] resource\n", index); + return -EINVAL; + } + + vnic_rq_disable(rq); + + snprintf(res_name, sizeof(res_name), "%d-rq-%d", instance++, index); + rc = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size, + rq->socket_id, res_name); + return rc; +} + +void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index, + unsigned int fetch_index, unsigned int posted_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + u64 paddr; + unsigned int count = rq->ring.desc_count; + + paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET; + writeq(paddr, &rq->ctrl->ring_base); + iowrite32(count, &rq->ctrl->ring_size); + iowrite32(cq_index, &rq->ctrl->cq_index); + iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable); + iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset); + iowrite32(0, &rq->ctrl->dropped_packet_count); + iowrite32(0, &rq->ctrl->error_status); + iowrite32(fetch_index, &rq->ctrl->fetch_index); + iowrite32(posted_index, &rq->ctrl->posted_index); + +} + +void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + u32 fetch_index = 0; + /* Use current fetch_index as the ring starting point */ + fetch_index = ioread32(&rq->ctrl->fetch_index); + + if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ 
+ /* Hardware surprise removal: reset fetch_index */ + fetch_index = 0; + } + + vnic_rq_init_start(rq, cq_index, + fetch_index, fetch_index, + error_interrupt_enable, + error_interrupt_offset); + rq->rxst_idx = 0; + rq->tot_pkts = 0; +} + +void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error) +{ + iowrite32(error, &rq->ctrl->error_status); +} + +unsigned int vnic_rq_error_status(struct vnic_rq *rq) +{ + return ioread32(&rq->ctrl->error_status); +} + +void vnic_rq_enable(struct vnic_rq *rq) +{ + iowrite32(1, &rq->ctrl->enable); +} + +int vnic_rq_disable(struct vnic_rq *rq) +{ + unsigned int wait; + + iowrite32(0, &rq->ctrl->enable); + + /* Wait for HW to ACK disable request */ + for (wait = 0; wait < 1000; wait++) { + if (!(ioread32(&rq->ctrl->running))) + return 0; + udelay(10); + } + + pr_err("Failed to disable RQ[%d]\n", rq->index); + + return -ETIMEDOUT; +} + +void vnic_rq_clean(struct vnic_rq *rq, + void (*buf_clean)(struct rte_mbuf **buf)) +{ + struct rte_mbuf **buf; + u32 fetch_index, i; + unsigned int count = rq->ring.desc_count; + + buf = &rq->mbuf_ring[0]; + + for (i = 0; i < count; i++) { + (*buf_clean)(buf); + buf++; + } + rq->ring.desc_avail = count - 1; + rq->rx_nb_hold = 0; + + /* Use current fetch_index as the ring starting point */ + fetch_index = ioread32(&rq->ctrl->fetch_index); + + if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ + /* Hardware surprise removal: reset fetch_index */ + fetch_index = 0; + } + + iowrite32(fetch_index, &rq->ctrl->posted_index); + + vnic_dev_clear_desc_ring(&rq->ring); +} diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h new file mode 100644 index 00000000..e083ccc2 --- /dev/null +++ b/drivers/net/enic/base/vnic_rq.h @@ -0,0 +1,152 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
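A teardown sketch, illustrative only: vnic_rq_disable() polls the running register for up to 10 ms (1000 x 10 usec) before reporting failure, after which the mbufs still posted on the ring can be released through the vnic_rq_clean() callback. The callback name below is hypothetical; rte_pktmbuf_free() is the standard DPDK mbuf release call.

/* Hypothetical buf_clean callback: drop one posted mbuf slot. */
static void free_rq_buf_sketch(struct rte_mbuf **mbuf)
{
        if (*mbuf) {
                rte_pktmbuf_free(*mbuf);
                *mbuf = NULL;
        }
}

/* ... per receive queue, on shutdown ... */
vnic_rq_disable(rq);                   /* best effort; logs on timeout */
vnic_rq_clean(rq, free_rq_buf_sketch);
vnic_rq_free(rq);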
+ * + */ + +#ifndef _VNIC_RQ_H_ +#define _VNIC_RQ_H_ + + +#include "vnic_dev.h" +#include "vnic_cq.h" + +/* Receive queue control */ +struct vnic_rq_ctrl { + u64 ring_base; /* 0x00 */ + u32 ring_size; /* 0x08 */ + u32 pad0; + u32 posted_index; /* 0x10 */ + u32 pad1; + u32 cq_index; /* 0x18 */ + u32 pad2; + u32 enable; /* 0x20 */ + u32 pad3; + u32 running; /* 0x28 */ + u32 pad4; + u32 fetch_index; /* 0x30 */ + u32 pad5; + u32 error_interrupt_enable; /* 0x38 */ + u32 pad6; + u32 error_interrupt_offset; /* 0x40 */ + u32 pad7; + u32 error_status; /* 0x48 */ + u32 pad8; + u32 dropped_packet_count; /* 0x50 */ + u32 pad9; + u32 dropped_packet_count_rc; /* 0x58 */ + u32 pad10; +}; + +struct vnic_rq { + unsigned int index; + unsigned int posted_index; + struct vnic_dev *vdev; + struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */ + struct vnic_dev_ring ring; + struct rte_mbuf **mbuf_ring; /* array of allocated mbufs */ + unsigned int mbuf_next_idx; /* next mb to consume */ + void *os_buf_head; + unsigned int pkts_outstanding; + uint16_t rx_nb_hold; + uint16_t rx_free_thresh; + unsigned int socket_id; + struct rte_mempool *mp; + uint16_t rxst_idx; + uint32_t tot_pkts; +}; + +static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) +{ + /* how many does SW own? */ + return rq->ring.desc_avail; +} + +static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) +{ + /* how many does HW own? */ + return rq->ring.desc_count - rq->ring.desc_avail - 1; +} + + + +enum desc_return_options { + VNIC_RQ_RETURN_DESC, + VNIC_RQ_DEFER_RETURN_DESC, +}; + +static inline int vnic_rq_fill(struct vnic_rq *rq, + int (*buf_fill)(struct vnic_rq *rq)) +{ + int err; + + while (vnic_rq_desc_avail(rq) > 0) { + + err = (*buf_fill)(rq); + if (err) + return err; + } + + return 0; +} + +static inline int vnic_rq_fill_count(struct vnic_rq *rq, + int (*buf_fill)(struct vnic_rq *rq), unsigned int count) +{ + int err; + + while ((vnic_rq_desc_avail(rq) > 0) && (count--)) { + + err = (*buf_fill)(rq); + if (err) + return err; + } + + return 0; +} + +void vnic_rq_free(struct vnic_rq *rq); +int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, + unsigned int desc_count, unsigned int desc_size); +void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index, + unsigned int fetch_index, unsigned int posted_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset); +void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset); +void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error); +unsigned int vnic_rq_error_status(struct vnic_rq *rq); +void vnic_rq_enable(struct vnic_rq *rq); +int vnic_rq_disable(struct vnic_rq *rq); +void vnic_rq_clean(struct vnic_rq *rq, + void (*buf_clean)(struct rte_mbuf **buf)); +#endif /* _VNIC_RQ_H_ */ diff --git a/drivers/net/enic/base/vnic_rss.c b/drivers/net/enic/base/vnic_rss.c new file mode 100644 index 00000000..1cf055b0 --- /dev/null +++ b/drivers/net/enic/base/vnic_rss.c @@ -0,0 +1,84 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include "enic_compat.h" +#include "vnic_rss.h" + +void vnic_set_rss_key(union vnic_rss_key *rss_key, u8 *key) +{ + u32 i; + u32 *p; + u16 *q; + + for (i = 0; i < 4; ++i) { + p = (u32 *)(key + (10 * i)); + iowrite32(*p++, &rss_key->key[i].b[0]); + iowrite32(*p++, &rss_key->key[i].b[4]); + q = (u16 *)p; + iowrite32(*q, &rss_key->key[i].b[8]); + } +} + +void vnic_set_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu) +{ + u32 i; + u32 *p = (u32 *)cpu; + + for (i = 0; i < 32; ++i) + iowrite32(*p++, &rss_cpu->cpu[i].b[0]); +} + +void vnic_get_rss_key(union vnic_rss_key *rss_key, u8 *key) +{ + u32 i; + u32 *p; + u16 *q; + + for (i = 0; i < 4; ++i) { + p = (u32 *)(key + (10 * i)); + *p++ = ioread32(&rss_key->key[i].b[0]); + *p++ = ioread32(&rss_key->key[i].b[4]); + q = (u16 *)p; + *q = (u16)ioread32(&rss_key->key[i].b[8]); + } +} + +void vnic_get_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu) +{ + u32 i; + u32 *p = (u32 *)cpu; + + for (i = 0; i < 32; ++i) + *p++ = ioread32(&rss_cpu->cpu[i].b[0]); +} diff --git a/drivers/net/enic/base/vnic_rss.h b/drivers/net/enic/base/vnic_rss.h new file mode 100644 index 00000000..ebb18b59 --- /dev/null +++ b/drivers/net/enic/base/vnic_rss.h @@ -0,0 +1,60 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _VNIC_RSS_H_ +#define _VNIC_RSS_H_ + +/* RSS key array */ +union vnic_rss_key { + struct { + u8 b[10]; + u8 b_pad[6]; + } key[4]; + u64 raw[8]; +}; + +/* RSS cpu array */ +union vnic_rss_cpu { + struct { + u8 b[4]; + u8 b_pad[4]; + } cpu[32]; + u64 raw[32]; +}; + +void vnic_set_rss_key(union vnic_rss_key *rss_key, u8 *key); +void vnic_set_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu); +void vnic_get_rss_key(union vnic_rss_key *rss_key, u8 *key); +void vnic_get_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu); + +#endif /* _VNIC_RSS_H_ */ diff --git a/drivers/net/enic/base/vnic_stats.h b/drivers/net/enic/base/vnic_stats.h new file mode 100644 index 00000000..0c779d8a --- /dev/null +++ b/drivers/net/enic/base/vnic_stats.h @@ -0,0 +1,85 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
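For reference (an editorial note, not in the source): the flat 40-byte RSS key handed to vnic_set_rss_key() maps onto the padded union above as four 10-byte segments, which is why the copy loop issues two full 32-bit writes plus one write carrying the remaining two bytes per segment.

/* u8 key[40]  ->  union vnic_rss_key (b_pad bytes are never written):
 *   key[ 0.. 9] -> rss_key->key[0].b[0..9]
 *   key[10..19] -> rss_key->key[1].b[0..9]
 *   key[20..29] -> rss_key->key[2].b[0..9]
 *   key[30..39] -> rss_key->key[3].b[0..9]
 * i.e. each segment is written as 4 + 4 + 2 bytes, matching vnic_set_rss_key().
 */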
+ * + */ + +#ifndef _VNIC_STATS_H_ +#define _VNIC_STATS_H_ + +/* Tx statistics */ +struct vnic_tx_stats { + u64 tx_frames_ok; + u64 tx_unicast_frames_ok; + u64 tx_multicast_frames_ok; + u64 tx_broadcast_frames_ok; + u64 tx_bytes_ok; + u64 tx_unicast_bytes_ok; + u64 tx_multicast_bytes_ok; + u64 tx_broadcast_bytes_ok; + u64 tx_drops; + u64 tx_errors; + u64 tx_tso; + u64 rsvd[16]; +}; + +/* Rx statistics */ +struct vnic_rx_stats { + u64 rx_frames_ok; + u64 rx_frames_total; + u64 rx_unicast_frames_ok; + u64 rx_multicast_frames_ok; + u64 rx_broadcast_frames_ok; + u64 rx_bytes_ok; + u64 rx_unicast_bytes_ok; + u64 rx_multicast_bytes_ok; + u64 rx_broadcast_bytes_ok; + u64 rx_drop; + u64 rx_no_bufs; + u64 rx_errors; + u64 rx_rss; + u64 rx_crc_errors; + u64 rx_frames_64; + u64 rx_frames_127; + u64 rx_frames_255; + u64 rx_frames_511; + u64 rx_frames_1023; + u64 rx_frames_1518; + u64 rx_frames_to_max; + u64 rsvd[16]; +}; + +struct vnic_stats { + struct vnic_tx_stats tx; + struct vnic_rx_stats rx; +}; + +#endif /* _VNIC_STATS_H_ */ diff --git a/drivers/net/enic/base/vnic_wq.c b/drivers/net/enic/base/vnic_wq.c new file mode 100644 index 00000000..a3ef4170 --- /dev/null +++ b/drivers/net/enic/base/vnic_wq.c @@ -0,0 +1,244 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include "vnic_dev.h" +#include "vnic_wq.h" + +static inline +int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int index, enum vnic_res_type res_type) +{ + wq->ctrl = vnic_dev_get_res(vdev, res_type, index); + if (!wq->ctrl) + return -EINVAL; + return 0; +} + +static inline +int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int desc_count, unsigned int desc_size) +{ + char res_name[NAME_MAX]; + static int instance; + + snprintf(res_name, sizeof(res_name), "%d-wq-%d", instance++, wq->index); + return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size, + wq->socket_id, res_name); +} + +static int vnic_wq_alloc_bufs(struct vnic_wq *wq) +{ + struct vnic_wq_buf *buf; + unsigned int i, j, count = wq->ring.desc_count; + unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count); + + for (i = 0; i < blks; i++) { + wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC); + if (!wq->bufs[i]) + return -ENOMEM; + } + + for (i = 0; i < blks; i++) { + buf = wq->bufs[i]; + for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) { + buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j; + buf->desc = (u8 *)wq->ring.descs + + wq->ring.desc_size * buf->index; + if (buf->index + 1 == count) { + buf->next = wq->bufs[0]; + break; + } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) { + buf->next = wq->bufs[i + 1]; + } else { + buf->next = buf + 1; + buf++; + } + } + } + + wq->to_use = wq->to_clean = wq->bufs[0]; + + return 0; +} + +void vnic_wq_free(struct vnic_wq *wq) +{ + struct vnic_dev *vdev; + unsigned int i; + + vdev = wq->vdev; + + vnic_dev_free_desc_ring(vdev, &wq->ring); + + for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) { + if (wq->bufs[i]) { + kfree(wq->bufs[i]); + wq->bufs[i] = NULL; + } + } + + wq->ctrl = NULL; +} + +int vnic_wq_mem_size(struct vnic_wq *wq, unsigned int desc_count, + unsigned int desc_size) +{ + int mem_size = 0; + + mem_size += vnic_dev_desc_ring_size(&wq->ring, desc_count, desc_size); + + mem_size += VNIC_WQ_BUF_BLKS_NEEDED(wq->ring.desc_count) * + VNIC_WQ_BUF_BLK_SZ(wq->ring.desc_count); + + return mem_size; +} + + +int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, + unsigned int desc_count, unsigned int desc_size) +{ + int err; + + wq->index = index; + wq->vdev = vdev; + + err = vnic_wq_get_ctrl(vdev, wq, index, RES_TYPE_WQ); + if (err) { + pr_err("Failed to hook WQ[%d] resource, err %d\n", index, err); + return err; + } + + vnic_wq_disable(wq); + + err = vnic_wq_alloc_ring(vdev, wq, desc_count, desc_size); + if (err) + return err; + + err = vnic_wq_alloc_bufs(wq); + if (err) { + vnic_wq_free(wq); + return err; + } + + return 0; +} + +void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, + unsigned int fetch_index, unsigned int posted_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + u64 paddr; + unsigned int count = wq->ring.desc_count; + + paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; + writeq(paddr, &wq->ctrl->ring_base); + iowrite32(count, &wq->ctrl->ring_size); + iowrite32(fetch_index, &wq->ctrl->fetch_index); + iowrite32(posted_index, &wq->ctrl->posted_index); + iowrite32(cq_index, &wq->ctrl->cq_index); + iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable); + iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); + iowrite32(0, &wq->ctrl->error_status); + + wq->to_use = wq->to_clean = + &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)] + [fetch_index % 
VNIC_WQ_BUF_BLK_ENTRIES(count)]; +} + +void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + vnic_wq_init_start(wq, cq_index, 0, 0, + error_interrupt_enable, + error_interrupt_offset); +} + +void vnic_wq_error_out(struct vnic_wq *wq, unsigned int error) +{ + iowrite32(error, &wq->ctrl->error_status); +} + +unsigned int vnic_wq_error_status(struct vnic_wq *wq) +{ + return ioread32(&wq->ctrl->error_status); +} + +void vnic_wq_enable(struct vnic_wq *wq) +{ + iowrite32(1, &wq->ctrl->enable); +} + +int vnic_wq_disable(struct vnic_wq *wq) +{ + unsigned int wait; + + iowrite32(0, &wq->ctrl->enable); + + /* Wait for HW to ACK disable request */ + for (wait = 0; wait < 1000; wait++) { + if (!(ioread32(&wq->ctrl->running))) + return 0; + udelay(10); + } + + pr_err("Failed to disable WQ[%d]\n", wq->index); + + return -ETIMEDOUT; +} + +void vnic_wq_clean(struct vnic_wq *wq, + void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)) +{ + struct vnic_wq_buf *buf; + + buf = wq->to_clean; + + while (vnic_wq_desc_used(wq) > 0) { + + (*buf_clean)(wq, buf); + + buf = wq->to_clean = buf->next; + wq->ring.desc_avail++; + } + + wq->to_use = wq->to_clean = wq->bufs[0]; + + iowrite32(0, &wq->ctrl->fetch_index); + iowrite32(0, &wq->ctrl->posted_index); + iowrite32(0, &wq->ctrl->error_status); + + vnic_dev_clear_desc_ring(&wq->ring); +} diff --git a/drivers/net/enic/base/vnic_wq.h b/drivers/net/enic/base/vnic_wq.h new file mode 100644 index 00000000..c23de625 --- /dev/null +++ b/drivers/net/enic/base/vnic_wq.h @@ -0,0 +1,282 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
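+ *
+ * Note on the cached posted-index helpers defined below: with 32-byte
+ * cache lines (PI_LOG2_CACHE_LINE_SIZE = 5), num_cache_lines_touched()
+ * rounds (addr % 32) + len up to the next 32-byte boundary, so addr = 12,
+ * len = 22 gives equiv_len = 34, rounded up to 64, i.e. 2 cache lines.
+ * vnic_cached_posted_index() then packs the posted index into bits 0-11,
+ * the prefetch length (in cache lines) into bits 16-20, and addr >> 5 into
+ * bits 21-63 of the 64-bit value written to the posted_index register.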
+ * + */ + +#ifndef _VNIC_WQ_H_ +#define _VNIC_WQ_H_ + + +#include "vnic_dev.h" +#include "vnic_cq.h" + +/* Work queue control */ +struct vnic_wq_ctrl { + u64 ring_base; /* 0x00 */ + u32 ring_size; /* 0x08 */ + u32 pad0; + u32 posted_index; /* 0x10 */ + u32 pad1; + u32 cq_index; /* 0x18 */ + u32 pad2; + u32 enable; /* 0x20 */ + u32 pad3; + u32 running; /* 0x28 */ + u32 pad4; + u32 fetch_index; /* 0x30 */ + u32 pad5; + u32 dca_value; /* 0x38 */ + u32 pad6; + u32 error_interrupt_enable; /* 0x40 */ + u32 pad7; + u32 error_interrupt_offset; /* 0x48 */ + u32 pad8; + u32 error_status; /* 0x50 */ + u32 pad9; +}; + +struct vnic_wq_buf { + struct vnic_wq_buf *next; + dma_addr_t dma_addr; + void *os_buf; + unsigned int len; + unsigned int index; + int sop; + void *desc; + uint64_t wr_id; /* Cookie */ + uint8_t cq_entry; /* Gets completion event from hw */ + uint8_t desc_skip_cnt; /* Num descs to occupy */ + uint8_t compressed_send; /* Both hdr and payload in one desc */ +}; + +/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */ +#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32 +#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64 +#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \ + ((unsigned int)((entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \ + VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES)) +#define VNIC_WQ_BUF_BLK_SZ(entries) \ + (VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf)) +#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \ + DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries)) +#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096) + +struct vnic_wq { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */ + struct vnic_dev_ring ring; + struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX]; + struct vnic_wq_buf *to_use; + struct vnic_wq_buf *to_clean; + unsigned int pkts_outstanding; + unsigned int socket_id; +}; + +static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) +{ + /* how many does SW own? */ + return wq->ring.desc_avail; +} + +static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) +{ + /* how many does HW own? */ + return wq->ring.desc_count - wq->ring.desc_avail - 1; +} + +static inline void *vnic_wq_next_desc(struct vnic_wq *wq) +{ + return wq->to_use->desc; +} + +#define PI_LOG2_CACHE_LINE_SIZE 5 +#define PI_INDEX_BITS 12 +#define PI_INDEX_MASK ((1U << PI_INDEX_BITS) - 1) +#define PI_PREFETCH_LEN_MASK ((1U << PI_LOG2_CACHE_LINE_SIZE) - 1) +#define PI_PREFETCH_LEN_OFF 16 +#define PI_PREFETCH_ADDR_BITS 43 +#define PI_PREFETCH_ADDR_MASK ((1ULL << PI_PREFETCH_ADDR_BITS) - 1) +#define PI_PREFETCH_ADDR_OFF 21 + +/** How many cache lines are touched by buffer (addr, len). */ +static inline unsigned int num_cache_lines_touched(dma_addr_t addr, + unsigned int len) +{ + const unsigned long mask = PI_PREFETCH_LEN_MASK; + const unsigned long laddr = (unsigned long)addr; + unsigned long lines, equiv_len; + /* A. If addr is aligned, our solution is just to round up len to the + next boundary. + + e.g. addr = 0, len = 48 + +--------------------+ + |XXXXXXXXXXXXXXXXXXXX| 32-byte cacheline a + +--------------------+ + |XXXXXXXXXX | cacheline b + +--------------------+ + + B. If addr is not aligned, however, we may use an extra + cacheline. e.g. 
addr = 12, len = 22 + + +--------------------+ + | XXXXXXXXXXXXX| + +--------------------+ + |XX | + +--------------------+ + + Our solution is to make the problem equivalent to case A + above by adding the empty space in the first cacheline to the length: + unsigned long len; + + +--------------------+ + |eeeeeeeXXXXXXXXXXXXX| "e" is empty space, which we add to len + +--------------------+ + |XX | + +--------------------+ + + */ + equiv_len = len + (laddr & mask); + + /* Now we can just round up this len to the next 32-byte boundary. */ + lines = (equiv_len + mask) & (~mask); + + /* Scale bytes -> cachelines. */ + return lines >> PI_LOG2_CACHE_LINE_SIZE; +} + +static inline u64 vnic_cached_posted_index(dma_addr_t addr, unsigned int len, + unsigned int index) +{ + unsigned int num_cache_lines = num_cache_lines_touched(addr, len); + /* Wish we could avoid a branch here. We could have separate + * vnic_wq_post() and vinc_wq_post_inline(), the latter + * only supporting < 1k (2^5 * 2^5) sends, I suppose. This would + * eliminate the if (eop) branch as well. + */ + if (num_cache_lines > PI_PREFETCH_LEN_MASK) + num_cache_lines = 0; + return (index & PI_INDEX_MASK) | + ((num_cache_lines & PI_PREFETCH_LEN_MASK) << PI_PREFETCH_LEN_OFF) | + (((addr >> PI_LOG2_CACHE_LINE_SIZE) & + PI_PREFETCH_ADDR_MASK) << PI_PREFETCH_ADDR_OFF); +} + +static inline void vnic_wq_post(struct vnic_wq *wq, + void *os_buf, dma_addr_t dma_addr, + unsigned int len, int sop, int eop, + uint8_t desc_skip_cnt, uint8_t cq_entry, + uint8_t compressed_send, uint64_t wrid) +{ + struct vnic_wq_buf *buf = wq->to_use; + + buf->sop = sop; + buf->cq_entry = cq_entry; + buf->compressed_send = compressed_send; + buf->desc_skip_cnt = desc_skip_cnt; + buf->os_buf = os_buf; + buf->dma_addr = dma_addr; + buf->len = len; + buf->wr_id = wrid; + + buf = buf->next; + if (eop) { +#ifdef DO_PREFETCH + uint64_t wr = vnic_cached_posted_index(dma_addr, len, + buf->index); +#endif + /* Adding write memory barrier prevents compiler and/or CPU + * reordering, thus avoiding descriptor posting before + * descriptor is initialized. Otherwise, hardware can read + * stale descriptor fields. + */ + wmb(); +#ifdef DO_PREFETCH + /* Intel chipsets seem to limit the rate of PIOs that we can + * push on the bus. Thus, it is very important to do a single + * 64 bit write here. With two 32-bit writes, my maximum + * pkt/sec rate was cut almost in half. 
-AJF + */ + iowrite64((uint64_t)wr, &wq->ctrl->posted_index); +#else + iowrite32(buf->index, &wq->ctrl->posted_index); +#endif + } + wq->to_use = buf; + + wq->ring.desc_avail -= desc_skip_cnt; +} + +static inline void vnic_wq_service(struct vnic_wq *wq, + struct cq_desc *cq_desc, u16 completed_index, + void (*buf_service)(struct vnic_wq *wq, + struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque), + void *opaque) +{ + struct vnic_wq_buf *buf; + + buf = wq->to_clean; + while (1) { + + (*buf_service)(wq, cq_desc, buf, opaque); + + wq->ring.desc_avail++; + + wq->to_clean = buf->next; + + if (buf->index == completed_index) + break; + + buf = wq->to_clean; + } +} + +void vnic_wq_free(struct vnic_wq *wq); +int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, + unsigned int desc_count, unsigned int desc_size); +void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, + unsigned int fetch_index, unsigned int posted_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset); +void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset); +void vnic_wq_error_out(struct vnic_wq *wq, unsigned int error); +unsigned int vnic_wq_error_status(struct vnic_wq *wq); +void vnic_wq_enable(struct vnic_wq *wq); +int vnic_wq_disable(struct vnic_wq *wq); +void vnic_wq_clean(struct vnic_wq *wq, + void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)); +int vnic_wq_mem_size(struct vnic_wq *wq, unsigned int desc_count, + unsigned int desc_size); + +#endif /* _VNIC_WQ_H_ */ diff --git a/drivers/net/enic/base/wq_enet_desc.h b/drivers/net/enic/base/wq_enet_desc.h new file mode 100644 index 00000000..db41d00e --- /dev/null +++ b/drivers/net/enic/base/wq_enet_desc.h @@ -0,0 +1,113 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
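+ *
+ * Layout reminder for the 16-byte descriptor defined below: length and mss
+ * are 14-bit fields, loopback lives in bit 1 of mss_loopback, and
+ * header_length_flags packs the 10-bit header length into bits 0-9, the
+ * 2-bit offload mode into bits 10-11, and the EOP, CQ-entry, FCoE-encap
+ * and VLAN-insert flags into bits 12-15. For instance, wq_enet_desc_enc()
+ * with offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM_L4, eop = 1, cq_entry = 1
+ * and header_length = 0 yields header_length_flags = 0x3800.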
+ * + */ + +#ifndef _WQ_ENET_DESC_H_ +#define _WQ_ENET_DESC_H_ + +/* Ethernet work queue descriptor: 16B */ +struct wq_enet_desc { + __le64 address; + __le16 length; + __le16 mss_loopback; + __le16 header_length_flags; + __le16 vlan_tag; +}; + +#define WQ_ENET_ADDR_BITS 64 +#define WQ_ENET_LEN_BITS 14 +#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1) +#define WQ_ENET_MSS_BITS 14 +#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1) +#define WQ_ENET_MSS_SHIFT 2 +#define WQ_ENET_LOOPBACK_SHIFT 1 +#define WQ_ENET_HDRLEN_BITS 10 +#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1) +#define WQ_ENET_FLAGS_OM_BITS 2 +#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1) +#define WQ_ENET_FLAGS_EOP_SHIFT 12 +#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13 +#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14 +#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15 + +#define WQ_ENET_OFFLOAD_MODE_CSUM 0 +#define WQ_ENET_OFFLOAD_MODE_RESERVED 1 +#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2 +#define WQ_ENET_OFFLOAD_MODE_TSO 3 + +static inline void wq_enet_desc_enc(struct wq_enet_desc *desc, + u64 address, u16 length, u16 mss, u16 header_length, + u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap, + u8 vlan_tag_insert, u16 vlan_tag, u8 loopback) +{ + desc->address = cpu_to_le64(address); + desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK); + desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) << + WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT); + desc->header_length_flags = cpu_to_le16( + (header_length & WQ_ENET_HDRLEN_MASK) | + (offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS | + (eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT | + (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT | + (fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT | + (vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT); + desc->vlan_tag = cpu_to_le16(vlan_tag); +} + +static inline void wq_enet_desc_dec(struct wq_enet_desc *desc, + u64 *address, u16 *length, u16 *mss, u16 *header_length, + u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap, + u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback) +{ + *address = le64_to_cpu(desc->address); + *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK; + *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) & + WQ_ENET_MSS_MASK; + *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >> + WQ_ENET_LOOPBACK_SHIFT) & 1); + *header_length = le16_to_cpu(desc->header_length_flags) & + WQ_ENET_HDRLEN_MASK; + *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK); + *eop = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_FLAGS_EOP_SHIFT) & 1); + *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1); + *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1); + *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1); + *vlan_tag = le16_to_cpu(desc->vlan_tag); +} + +#endif /* _WQ_ENET_DESC_H_ */ diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h new file mode 100644 index 00000000..8c914f5b --- /dev/null +++ b/drivers/net/enic/enic.h @@ -0,0 +1,213 @@ +/* + * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef _ENIC_H_ +#define _ENIC_H_ + +#include "vnic_enet.h" +#include "vnic_dev.h" +#include "vnic_wq.h" +#include "vnic_rq.h" +#include "vnic_cq.h" +#include "vnic_intr.h" +#include "vnic_stats.h" +#include "vnic_nic.h" +#include "vnic_rss.h" +#include "enic_res.h" +#include "cq_enet_desc.h" + +#define DRV_NAME "enic_pmd" +#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Poll-mode Driver" +#define DRV_VERSION "1.0.0.6" +#define DRV_COPYRIGHT "Copyright 2008-2015 Cisco Systems, Inc" + +#define ENIC_WQ_MAX 8 +#define ENIC_RQ_MAX 8 +#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX) +#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2) + +#define VLAN_ETH_HLEN 18 + +#define ENICPMD_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 
1 : 0) + +#define ENICPMD_BDF_LENGTH 13 /* 0000:00:00.0'\0' */ +#define PKT_TX_TCP_UDP_CKSUM 0x6000 +#define ENIC_CALC_IP_CKSUM 1 +#define ENIC_CALC_TCP_UDP_CKSUM 2 +#define ENIC_MAX_MTU 9000 +#define ENIC_PAGE_SIZE 4096 +#define PAGE_ROUND_UP(x) \ + ((((unsigned long)(x)) + ENIC_PAGE_SIZE-1) & (~(ENIC_PAGE_SIZE-1))) + +#define ENICPMD_VFIO_PATH "/dev/vfio/vfio" +/*#define ENIC_DESC_COUNT_MAKE_ODD (x) do{if ((~(x)) & 1) { (x)--; } }while(0)*/ + +#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */ +#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */ + + +#define ENICPMD_FDIR_MAX 64 + +struct enic_fdir_node { + struct rte_eth_fdir_filter filter; + u16 fltr_id; + u16 rq_index; +}; + +struct enic_fdir { + struct rte_eth_fdir_stats stats; + struct rte_hash *hash; + struct enic_fdir_node *nodes[ENICPMD_FDIR_MAX]; +}; + +/* Per-instance private data structure */ +struct enic { + struct enic *next; + struct rte_pci_device *pdev; + struct vnic_enet_config config; + struct vnic_dev_bar bar0; + struct vnic_dev *vdev; + + unsigned int port_id; + struct rte_eth_dev *rte_dev; + struct enic_fdir fdir; + char bdf_name[ENICPMD_BDF_LENGTH]; + int dev_fd; + int iommu_group_fd; + int iommu_groupid; + int eventfd; + uint8_t mac_addr[ETH_ALEN]; + pthread_t err_intr_thread; + int promisc; + int allmulti; + u8 ig_vlan_strip_en; + int link_status; + u8 hw_ip_checksum; + + unsigned int flags; + unsigned int priv_flags; + + /* work queue */ + struct vnic_wq wq[ENIC_WQ_MAX]; + unsigned int wq_count; + + /* receive queue */ + struct vnic_rq rq[ENIC_RQ_MAX]; + unsigned int rq_count; + + /* completion queue */ + struct vnic_cq cq[ENIC_CQ_MAX]; + unsigned int cq_count; + + /* interrupt resource */ + struct vnic_intr intr; + unsigned int intr_count; +}; + +static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq) +{ + return rq; +} + +static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq) +{ + return enic->rq_count + wq; +} + +static inline unsigned int enic_msix_err_intr(__rte_unused struct enic *enic) +{ + return 0; +} + +static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev) +{ + return (struct enic *)eth_dev->data->dev_private; +} + +#define RTE_LIBRTE_ENIC_ASSERT_ENABLE +#ifdef RTE_LIBRTE_ENIC_ASSERT_ENABLE +#define ASSERT(x) do { \ + if (!(x)) \ + rte_panic("ENIC: x"); \ +} while (0) +#else +#define ASSERT(x) +#endif + +extern void enic_fdir_stats_get(struct enic *enic, + struct rte_eth_fdir_stats *stats); +extern int enic_fdir_add_fltr(struct enic *enic, + struct rte_eth_fdir_filter *params); +extern int enic_fdir_del_fltr(struct enic *enic, + struct rte_eth_fdir_filter *params); +extern void enic_free_wq(void *txq); +extern int enic_alloc_intr_resources(struct enic *enic); +extern int enic_setup_finish(struct enic *enic); +extern int enic_alloc_wq(struct enic *enic, uint16_t queue_idx, + unsigned int socket_id, uint16_t nb_desc); +extern void enic_start_wq(struct enic *enic, uint16_t queue_idx); +extern int enic_stop_wq(struct enic *enic, uint16_t queue_idx); +extern void enic_start_rq(struct enic *enic, uint16_t queue_idx); +extern int enic_stop_rq(struct enic *enic, uint16_t queue_idx); +extern void enic_free_rq(void *rxq); +extern int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, + unsigned int socket_id, struct rte_mempool *mp, + uint16_t nb_desc); +extern int enic_set_rss_nic_cfg(struct enic *enic); +extern int enic_set_vnic_res(struct enic *enic); +extern void enic_set_hdr_split_size(struct enic *enic, u16 
split_hdr_size); +extern int enic_enable(struct enic *enic); +extern int enic_disable(struct enic *enic); +extern void enic_remove(struct enic *enic); +extern int enic_get_link_status(struct enic *enic); +extern void enic_dev_stats_get(struct enic *enic, + struct rte_eth_stats *r_stats); +extern void enic_dev_stats_clear(struct enic *enic); +extern void enic_add_packet_filter(struct enic *enic); +extern void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr); +extern void enic_del_mac_address(struct enic *enic); +extern unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq); +extern void enic_send_pkt(struct enic *enic, struct vnic_wq *wq, + struct rte_mbuf *tx_pkt, unsigned short len, + uint8_t sop, uint8_t eop, uint8_t cq_entry, + uint16_t ol_flags, uint16_t vlan_tag); + +extern void enic_post_wq_index(struct vnic_wq *wq); +extern int enic_probe(struct enic *enic); +extern int enic_clsf_init(struct enic *enic); +extern void enic_clsf_destroy(struct enic *enic); +uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +#endif /* _ENIC_H_ */ diff --git a/drivers/net/enic/enic_clsf.c b/drivers/net/enic/enic_clsf.c new file mode 100644 index 00000000..edb56e1d --- /dev/null +++ b/drivers/net/enic/enic_clsf.c @@ -0,0 +1,255 @@ +/* + * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include <libgen.h> + +#include <rte_ethdev.h> +#include <rte_malloc.h> +#include <rte_hash.h> +#include <rte_byteorder.h> + +#include "enic_compat.h" +#include "enic.h" +#include "wq_enet_desc.h" +#include "rq_enet_desc.h" +#include "cq_enet_desc.h" +#include "vnic_enet.h" +#include "vnic_dev.h" +#include "vnic_wq.h" +#include "vnic_rq.h" +#include "vnic_cq.h" +#include "vnic_intr.h" +#include "vnic_nic.h" + +#ifdef RTE_MACHINE_CPUFLAG_SSE4_2 +#include <rte_hash_crc.h> +#define DEFAULT_HASH_FUNC rte_hash_crc +#else +#include <rte_jhash.h> +#define DEFAULT_HASH_FUNC rte_jhash +#endif + +#define ENICPMD_CLSF_HASH_ENTRIES ENICPMD_FDIR_MAX + +void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats *stats) +{ + *stats = enic->fdir.stats; +} + +int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params) +{ + int32_t pos; + struct enic_fdir_node *key; + /* See if the key is in the table */ + pos = rte_hash_del_key(enic->fdir.hash, params); + switch (pos) { + case -EINVAL: + case -ENOENT: + enic->fdir.stats.f_remove++; + return -EINVAL; + default: + /* The entry is present in the table */ + key = enic->fdir.nodes[pos]; + + /* Delete the filter */ + vnic_dev_classifier(enic->vdev, CLSF_DEL, + &key->fltr_id, NULL); + rte_free(key); + enic->fdir.nodes[pos] = NULL; + enic->fdir.stats.free++; + enic->fdir.stats.remove++; + break; + } + return 0; +} + +int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params) +{ + struct enic_fdir_node *key; + struct filter fltr = {0}; + int32_t pos; + u8 do_free = 0; + u16 old_fltr_id = 0; + u32 flowtype_supported; + u16 flex_bytes; + u16 queue; + + flowtype_supported = ( + (RTE_ETH_FLOW_NONFRAG_IPV4_TCP == params->input.flow_type) || + (RTE_ETH_FLOW_NONFRAG_IPV4_UDP == params->input.flow_type)); + + flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) | + (params->input.flow_ext.flexbytes[0] & 0xFF)); + + if (!enic->fdir.hash || + (params->input.flow_ext.vlan_tci & 0xFFF) || + !flowtype_supported || flex_bytes || + params->action.behavior /* drop */) { + enic->fdir.stats.f_add++; + return -ENOTSUP; + } + + queue = params->action.rx_queue; + /* See if the key is already there in the table */ + pos = rte_hash_del_key(enic->fdir.hash, params); + switch (pos) { + case -EINVAL: + enic->fdir.stats.f_add++; + return -EINVAL; + case -ENOENT: + /* Add a new classifier entry */ + if (!enic->fdir.stats.free) { + enic->fdir.stats.f_add++; + return -ENOSPC; + } + key = rte_zmalloc("enic_fdir_node", + sizeof(struct enic_fdir_node), 0); + if (!key) { + enic->fdir.stats.f_add++; + return -ENOMEM; + } + break; + default: + /* The entry is already present in the table. + * Check if there is a change in queue + */ + key = enic->fdir.nodes[pos]; + enic->fdir.nodes[pos] = NULL; + if (unlikely(key->rq_index == queue)) { + /* Nothing to be done */ + pos = rte_hash_add_key(enic->fdir.hash, params); + enic->fdir.nodes[pos] = key; + enic->fdir.stats.f_add++; + dev_warning(enic, + "FDIR rule is already present\n"); + return 0; + } + + if (likely(enic->fdir.stats.free)) { + /* Add the filter and then delete the old one. + * This is to avoid packets from going into the + * default queue during the window between + * delete and add + */ + do_free = 1; + old_fltr_id = key->fltr_id; + } else { + /* No free slots in the classifier. 
+ * Delete the filter and add the modified one later + */ + vnic_dev_classifier(enic->vdev, CLSF_DEL, + &key->fltr_id, NULL); + enic->fdir.stats.free++; + } + + break; + } + + key->filter = *params; + key->rq_index = queue; + + fltr.type = FILTER_IPV4_5TUPLE; + fltr.u.ipv4.src_addr = rte_be_to_cpu_32( + params->input.flow.ip4_flow.src_ip); + fltr.u.ipv4.dst_addr = rte_be_to_cpu_32( + params->input.flow.ip4_flow.dst_ip); + fltr.u.ipv4.src_port = rte_be_to_cpu_16( + params->input.flow.udp4_flow.src_port); + fltr.u.ipv4.dst_port = rte_be_to_cpu_16( + params->input.flow.udp4_flow.dst_port); + + if (RTE_ETH_FLOW_NONFRAG_IPV4_TCP == params->input.flow_type) + fltr.u.ipv4.protocol = PROTO_TCP; + else + fltr.u.ipv4.protocol = PROTO_UDP; + + fltr.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE; + + if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr)) { + key->fltr_id = queue; + } else { + dev_err(enic, "Add classifier entry failed\n"); + enic->fdir.stats.f_add++; + rte_free(key); + return -1; + } + + if (do_free) + vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL); + else{ + enic->fdir.stats.free--; + enic->fdir.stats.add++; + } + + pos = rte_hash_add_key(enic->fdir.hash, params); + enic->fdir.nodes[pos] = key; + return 0; +} + +void enic_clsf_destroy(struct enic *enic) +{ + u32 index; + struct enic_fdir_node *key; + /* delete classifier entries */ + for (index = 0; index < ENICPMD_FDIR_MAX; index++) { + key = enic->fdir.nodes[index]; + if (key) { + vnic_dev_classifier(enic->vdev, CLSF_DEL, + &key->fltr_id, NULL); + rte_free(key); + } + } + + if (enic->fdir.hash) { + rte_hash_free(enic->fdir.hash); + enic->fdir.hash = NULL; + } +} + +int enic_clsf_init(struct enic *enic) +{ + struct rte_hash_parameters hash_params = { + .name = "enicpmd_clsf_hash", + .entries = ENICPMD_CLSF_HASH_ENTRIES, + .key_len = sizeof(struct rte_eth_fdir_filter), + .hash_func = DEFAULT_HASH_FUNC, + .hash_func_init_val = 0, + .socket_id = SOCKET_ID_ANY, + }; + + enic->fdir.hash = rte_hash_create(&hash_params); + memset(&enic->fdir.stats, 0, sizeof(enic->fdir.stats)); + enic->fdir.stats.free = ENICPMD_FDIR_MAX; + return NULL == enic->fdir.hash; +} diff --git a/drivers/net/enic/enic_compat.h b/drivers/net/enic/enic_compat.h new file mode 100644 index 00000000..5dbd983b --- /dev/null +++ b/drivers/net/enic/enic_compat.h @@ -0,0 +1,146 @@ +/* + * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef _ENIC_COMPAT_H_ +#define _ENIC_COMPAT_H_ + +#include <stdio.h> +#include <unistd.h> + +#include <rte_atomic.h> +#include <rte_malloc.h> +#include <rte_log.h> + +#define ENIC_PAGE_ALIGN 4096UL +#define ENIC_ALIGN ENIC_PAGE_ALIGN +#define NAME_MAX 255 +#define ETH_ALEN 6 + +#define __iomem + +#define rmb() rte_rmb() /* dpdk rte provided rmb */ +#define wmb() rte_wmb() /* dpdk rte provided wmb */ + +#define le16_to_cpu +#define le32_to_cpu +#define le64_to_cpu +#define cpu_to_le16 +#define cpu_to_le32 +#define cpu_to_le64 + +#ifndef offsetof +#define offsetof(t, m) ((size_t) &((t *)0)->m) +#endif + +#define pr_err(y, args...) dev_err(0, y, ##args) +#define pr_warn(y, args...) dev_warning(0, y, ##args) +#define BUG() pr_err("BUG at %s:%d", __func__, __LINE__) + +#define VNIC_ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1) +#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) +#define udelay usleep +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) + +#define kzalloc(size, flags) calloc(1, size) +#define kfree(x) free(x) + +#define dev_printk(level, fmt, args...) \ + RTE_LOG(level, PMD, "rte_enic_pmd: " fmt, ## args) + +#define dev_err(x, args...) dev_printk(ERR, args) +#define dev_info(x, args...) dev_printk(INFO, args) +#define dev_warning(x, args...) dev_printk(WARNING, args) +#define dev_debug(x, args...) dev_printk(DEBUG, args) + +#define __le16 u16 +#define __le32 u32 +#define __le64 u64 + +typedef unsigned char u8; +typedef unsigned short u16; +typedef unsigned int u32; +typedef unsigned long long u64; +typedef unsigned long long dma_addr_t; + +static inline uint32_t ioread32(volatile void *addr) +{ + return *(volatile uint32_t *)addr; +} + +static inline uint16_t ioread16(volatile void *addr) +{ + return *(volatile uint16_t *)addr; +} + +static inline uint8_t ioread8(volatile void *addr) +{ + return *(volatile uint8_t *)addr; +} + +static inline void iowrite32(uint32_t val, volatile void *addr) +{ + *(volatile uint32_t *)addr = val; +} + +static inline void iowrite16(uint16_t val, volatile void *addr) +{ + *(volatile uint16_t *)addr = val; +} + +static inline void iowrite8(uint8_t val, volatile void *addr) +{ + *(volatile uint8_t *)addr = val; +} + +static inline unsigned int readl(volatile void __iomem *addr) +{ + return *(volatile unsigned int *)addr; +} + +static inline void writel(unsigned int val, volatile void __iomem *addr) +{ + *(volatile unsigned int *)addr = val; +} + +#define min_t(type, x, y) ({ \ + type __min1 = (x); \ + type __min2 = (y); \ + __min1 < __min2 ? __min1 : __min2; }) + +#define max_t(type, x, y) ({ \ + type __max1 = (x); \ + type __max2 = (y); \ + __max1 > __max2 ? __max1 : __max2; }) + +#endif /* _ENIC_COMPAT_H_ */ diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c new file mode 100644 index 00000000..6bea9405 --- /dev/null +++ b/drivers/net/enic/enic_ethdev.c @@ -0,0 +1,687 @@ +/* + * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved. 
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include <stdio.h> +#include <stdint.h> + +#include <rte_dev.h> +#include <rte_pci.h> +#include <rte_ethdev.h> +#include <rte_string_fns.h> + +#include "vnic_intr.h" +#include "vnic_cq.h" +#include "vnic_wq.h" +#include "vnic_rq.h" +#include "vnic_enet.h" +#include "enic.h" + +#ifdef RTE_LIBRTE_ENIC_DEBUG +#define ENICPMD_FUNC_TRACE() \ + RTE_LOG(DEBUG, PMD, "ENICPMD trace: %s\n", __func__) +#else +#define ENICPMD_FUNC_TRACE() (void)0 +#endif + +/* + * The set of PCI devices this driver supports + */ +static const struct rte_pci_id pci_id_enic_map[] = { +#define RTE_PCI_DEV_ID_DECL_ENIC(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, +#ifndef PCI_VENDOR_ID_CISCO +#define PCI_VENDOR_ID_CISCO 0x1137 +#endif +#include "rte_pci_dev_ids.h" +RTE_PCI_DEV_ID_DECL_ENIC(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) +RTE_PCI_DEV_ID_DECL_ENIC(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) +{.vendor_id = 0, /* Sentinal */}, +}; + +static int +enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev, + enum rte_filter_op filter_op, void *arg) +{ + struct enic *enic = pmd_priv(eth_dev); + int ret = 0; + + ENICPMD_FUNC_TRACE(); + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH) + return -EINVAL; + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + case RTE_ETH_FILTER_UPDATE: + ret = enic_fdir_add_fltr(enic, + (struct rte_eth_fdir_filter *)arg); + break; + + case RTE_ETH_FILTER_DELETE: + ret = enic_fdir_del_fltr(enic, + (struct rte_eth_fdir_filter *)arg); + break; + + case RTE_ETH_FILTER_STATS: + enic_fdir_stats_get(enic, (struct rte_eth_fdir_stats *)arg); + break; + + case RTE_ETH_FILTER_FLUSH: + case RTE_ETH_FILTER_INFO: + dev_warning(enic, "unsupported operation %u", filter_op); + ret = -ENOTSUP; + break; + default: + dev_err(enic, "unknown operation %u", filter_op); + ret = -EINVAL; + break; + } + return ret; +} + +static int +enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + int ret = -EINVAL; + + if 
(RTE_ETH_FILTER_FDIR == filter_type) + ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg); + else + dev_warning(enic, "Filter type (%d) not supported", + filter_type); + + return ret; +} + +static void enicpmd_dev_tx_queue_release(void *txq) +{ + ENICPMD_FUNC_TRACE(); + enic_free_wq(txq); +} + +static int enicpmd_dev_setup_intr(struct enic *enic) +{ + int ret; + unsigned int index; + + ENICPMD_FUNC_TRACE(); + + /* Are we done with the init of all the queues? */ + for (index = 0; index < enic->cq_count; index++) { + if (!enic->cq[index].ctrl) + break; + } + + if (enic->cq_count != index) + return 0; + + ret = enic_alloc_intr_resources(enic); + if (ret) { + dev_err(enic, "alloc intr failed\n"); + return ret; + } + enic_init_vnic_resources(enic); + + ret = enic_setup_finish(enic); + if (ret) + dev_err(enic, "setup could not be finished\n"); + + return ret; +} + +static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + __rte_unused const struct rte_eth_txconf *tx_conf) +{ + int ret; + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + if (queue_idx >= ENIC_WQ_MAX) { + dev_err(enic, + "Max number of TX queues exceeded. Max is %d\n", + ENIC_WQ_MAX); + return -EINVAL; + } + + eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx]; + + ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc); + if (ret) { + dev_err(enic, "error in allocating wq\n"); + return ret; + } + + return enicpmd_dev_setup_intr(enic); +} + +static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev, + uint16_t queue_idx) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + + enic_start_wq(enic, queue_idx); + eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, + uint16_t queue_idx) +{ + int ret; + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + + ret = enic_stop_wq(enic, queue_idx); + if (ret) + dev_err(enic, "error in stopping wq %d\n", queue_idx); + else + eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED; + + return ret; +} + +static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev, + uint16_t queue_idx) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + + enic_start_rq(enic, queue_idx); + eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, + uint16_t queue_idx) +{ + int ret; + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + + ret = enic_stop_rq(enic, queue_idx); + if (ret) + dev_err(enic, "error in stopping rq %d\n", queue_idx); + else + eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED; + + return ret; +} + +static void enicpmd_dev_rx_queue_release(void *rxq) +{ + ENICPMD_FUNC_TRACE(); + enic_free_rq(rxq); +} + +static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + int ret; + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + if (queue_idx >= ENIC_RQ_MAX) { + dev_err(enic, + "Max number of RX queues exceeded. 
Max is %d\n", + ENIC_RQ_MAX); + return -EINVAL; + } + + eth_dev->data->rx_queues[queue_idx] = (void *)&enic->rq[queue_idx]; + + ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc); + if (ret) { + dev_err(enic, "error in allocating rq\n"); + return ret; + } + + enic->rq[queue_idx].rx_free_thresh = rx_conf->rx_free_thresh; + dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx, + enic->rq[queue_idx].rx_free_thresh); + + return enicpmd_dev_setup_intr(enic); +} + +static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev, + uint16_t vlan_id, int on) +{ + struct enic *enic = pmd_priv(eth_dev); + int err; + + ENICPMD_FUNC_TRACE(); + if (on) + err = enic_add_vlan(enic, vlan_id); + else + err = enic_del_vlan(enic, vlan_id); + return err; +} + +static void enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + + if (mask & ETH_VLAN_STRIP_MASK) { + if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip) + enic->ig_vlan_strip_en = 1; + else + enic->ig_vlan_strip_en = 0; + } + enic_set_rss_nic_cfg(enic); + + + if (mask & ETH_VLAN_FILTER_MASK) { + dev_warning(enic, + "Configuration of VLAN filter is not supported\n"); + } + + if (mask & ETH_VLAN_EXTEND_MASK) { + dev_warning(enic, + "Configuration of extended VLAN is not supported\n"); + } +} + +static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev) +{ + int ret; + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + ret = enic_set_vnic_res(enic); + if (ret) { + dev_err(enic, "Set vNIC resource num failed, aborting\n"); + return ret; + } + + if (eth_dev->data->dev_conf.rxmode.split_hdr_size && + eth_dev->data->dev_conf.rxmode.header_split) { + /* Enable header-data-split */ + enic_set_hdr_split_size(enic, + eth_dev->data->dev_conf.rxmode.split_hdr_size); + } + + enic->hw_ip_checksum = eth_dev->data->dev_conf.rxmode.hw_ip_checksum; + return 0; +} + +/* Start the device. + * It returns 0 on success. + */ +static int enicpmd_dev_start(struct rte_eth_dev *eth_dev) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + return enic_enable(enic); +} + +/* + * Stop device: disable rx and tx functions to allow for reconfiguring. + */ +static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev) +{ + struct rte_eth_link link; + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + enic_disable(enic); + memset(&link, 0, sizeof(link)); + rte_atomic64_cmpset((uint64_t *)ð_dev->data->dev_link, + *(uint64_t *)ð_dev->data->dev_link, + *(uint64_t *)&link); +} + +/* + * Stop device. 
+ */ +static void enicpmd_dev_close(struct rte_eth_dev *eth_dev) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + enic_remove(enic); +} + +static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev, + __rte_unused int wait_to_complete) +{ + struct enic *enic = pmd_priv(eth_dev); + int ret; + int link_status = 0; + + ENICPMD_FUNC_TRACE(); + link_status = enic_get_link_status(enic); + ret = (link_status == enic->link_status); + enic->link_status = link_status; + eth_dev->data->dev_link.link_status = link_status; + eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; + eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev); + return ret; +} + +static void enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev, + struct rte_eth_stats *stats) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + enic_dev_stats_get(enic, stats); +} + +static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + enic_dev_stats_clear(enic); +} + +static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev, + struct rte_eth_dev_info *device_info) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + device_info->max_rx_queues = enic->rq_count; + device_info->max_tx_queues = enic->wq_count; + device_info->min_rx_bufsize = ENIC_MIN_MTU; + device_info->max_rx_pktlen = enic->config.mtu; + device_info->max_mac_addrs = 1; + device_info->rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + device_info->tx_offload_capa = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM; + device_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH + }; +} + +static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == enic_recv_pkts) + return ptypes; + return NULL; +} + +static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + enic->promisc = 1; + enic_add_packet_filter(enic); +} + +static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + enic->promisc = 0; + enic_add_packet_filter(enic); +} + +static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + enic->allmulti = 1; + enic_add_packet_filter(enic); +} + +static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + enic->allmulti = 0; + enic_add_packet_filter(enic); +} + +static void enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev, + struct ether_addr *mac_addr, + __rte_unused uint32_t index, __rte_unused uint32_t pool) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + enic_set_mac_address(enic, mac_addr->addr_bytes); +} + +static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, __rte_unused uint32_t index) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + enic_del_mac_address(enic); +} + + +static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + 
uint16_t index; + unsigned int frags; + unsigned int pkt_len; + unsigned int seg_len; + unsigned int inc_len; + unsigned int nb_segs; + struct rte_mbuf *tx_pkt, *next_tx_pkt; + struct vnic_wq *wq = (struct vnic_wq *)tx_queue; + struct enic *enic = vnic_dev_priv(wq->vdev); + unsigned short vlan_id; + unsigned short ol_flags; + uint8_t last_seg, eop; + unsigned int host_tx_descs = 0; + + for (index = 0; index < nb_pkts; index++) { + tx_pkt = *tx_pkts++; + inc_len = 0; + nb_segs = tx_pkt->nb_segs; + if (nb_segs > vnic_wq_desc_avail(wq)) { + if (index > 0) + enic_post_wq_index(wq); + + /* wq cleanup and try again */ + if (!enic_cleanup_wq(enic, wq) || + (nb_segs > vnic_wq_desc_avail(wq))) { + return index; + } + } + + pkt_len = tx_pkt->pkt_len; + vlan_id = tx_pkt->vlan_tci; + ol_flags = tx_pkt->ol_flags; + for (frags = 0; inc_len < pkt_len; frags++) { + if (!tx_pkt) + break; + next_tx_pkt = tx_pkt->next; + seg_len = tx_pkt->data_len; + inc_len += seg_len; + + host_tx_descs++; + last_seg = 0; + eop = 0; + if ((pkt_len == inc_len) || !next_tx_pkt) { + eop = 1; + /* post if last packet in batch or > thresh */ + if ((index == (nb_pkts - 1)) || + (host_tx_descs > ENIC_TX_POST_THRESH)) { + last_seg = 1; + host_tx_descs = 0; + } + } + enic_send_pkt(enic, wq, tx_pkt, (unsigned short)seg_len, + !frags, eop, last_seg, ol_flags, vlan_id); + tx_pkt = next_tx_pkt; + } + } + + enic_cleanup_wq(enic, wq); + return index; +} + +static const struct eth_dev_ops enicpmd_eth_dev_ops = { + .dev_configure = enicpmd_dev_configure, + .dev_start = enicpmd_dev_start, + .dev_stop = enicpmd_dev_stop, + .dev_set_link_up = NULL, + .dev_set_link_down = NULL, + .dev_close = enicpmd_dev_close, + .promiscuous_enable = enicpmd_dev_promiscuous_enable, + .promiscuous_disable = enicpmd_dev_promiscuous_disable, + .allmulticast_enable = enicpmd_dev_allmulticast_enable, + .allmulticast_disable = enicpmd_dev_allmulticast_disable, + .link_update = enicpmd_dev_link_update, + .stats_get = enicpmd_dev_stats_get, + .stats_reset = enicpmd_dev_stats_reset, + .queue_stats_mapping_set = NULL, + .dev_infos_get = enicpmd_dev_info_get, + .dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get, + .mtu_set = NULL, + .vlan_filter_set = enicpmd_vlan_filter_set, + .vlan_tpid_set = NULL, + .vlan_offload_set = enicpmd_vlan_offload_set, + .vlan_strip_queue_set = NULL, + .rx_queue_start = enicpmd_dev_rx_queue_start, + .rx_queue_stop = enicpmd_dev_rx_queue_stop, + .tx_queue_start = enicpmd_dev_tx_queue_start, + .tx_queue_stop = enicpmd_dev_tx_queue_stop, + .rx_queue_setup = enicpmd_dev_rx_queue_setup, + .rx_queue_release = enicpmd_dev_rx_queue_release, + .rx_queue_count = NULL, + .rx_descriptor_done = NULL, + .tx_queue_setup = enicpmd_dev_tx_queue_setup, + .tx_queue_release = enicpmd_dev_tx_queue_release, + .dev_led_on = NULL, + .dev_led_off = NULL, + .flow_ctrl_get = NULL, + .flow_ctrl_set = NULL, + .priority_flow_ctrl_set = NULL, + .mac_addr_add = enicpmd_add_mac_addr, + .mac_addr_remove = enicpmd_remove_mac_addr, + .filter_ctrl = enicpmd_dev_filter_ctrl, +}; + +struct enic *enicpmd_list_head = NULL; +/* Initialize the driver + * It returns 0 on success. 
+ */ +static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pdev; + struct rte_pci_addr *addr; + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + + enic->port_id = eth_dev->data->port_id; + enic->rte_dev = eth_dev; + eth_dev->dev_ops = &enicpmd_eth_dev_ops; + eth_dev->rx_pkt_burst = &enic_recv_pkts; + eth_dev->tx_pkt_burst = &enicpmd_xmit_pkts; + + pdev = eth_dev->pci_dev; + rte_eth_copy_pci_info(eth_dev, pdev); + enic->pdev = pdev; + addr = &pdev->addr; + + snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x", + addr->domain, addr->bus, addr->devid, addr->function); + + return enic_probe(enic); +} + +static struct eth_driver rte_enic_pmd = { + .pci_drv = { + .name = "rte_enic_pmd", + .id_table = pci_id_enic_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + }, + .eth_dev_init = eth_enicpmd_dev_init, + .dev_private_size = sizeof(struct enic), +}; + +/* Driver initialization routine. + * Invoked once at EAL init time. + * Register as the [Poll Mode] Driver of Cisco ENIC device. + */ +static int +rte_enic_pmd_init(__rte_unused const char *name, + __rte_unused const char *params) +{ + ENICPMD_FUNC_TRACE(); + + rte_eth_driver_register(&rte_enic_pmd); + return 0; +} + +static struct rte_driver rte_enic_driver = { + .type = PMD_PDEV, + .init = rte_enic_pmd_init, +}; + +PMD_REGISTER_DRIVER(rte_enic_driver); diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c new file mode 100644 index 00000000..e3da51db --- /dev/null +++ b/drivers/net/enic/enic_main.c @@ -0,0 +1,978 @@ +/* + * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
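+ *
+ * Note on the transmit path below: enic_send_pkt() always posts descriptors
+ * in WQ_ENET_OFFLOAD_MODE_CSUM mode, where the mss field carries per-packet
+ * offload requests rather than an MSS: ENIC_CALC_IP_CKSUM and
+ * ENIC_CALC_TCP_UDP_CKSUM are OR'd in from the mbuf ol_flags when
+ * hw_ip_checksum is enabled, and vlan_tag_insert is set for PKT_TX_VLAN_PKT.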
+ * + */ + +#include <stdio.h> + +#include <sys/stat.h> +#include <sys/mman.h> +#include <fcntl.h> +#include <libgen.h> + +#include <rte_pci.h> +#include <rte_memzone.h> +#include <rte_malloc.h> +#include <rte_mbuf.h> +#include <rte_string_fns.h> +#include <rte_ethdev.h> + +#include "enic_compat.h" +#include "enic.h" +#include "wq_enet_desc.h" +#include "rq_enet_desc.h" +#include "cq_enet_desc.h" +#include "vnic_enet.h" +#include "vnic_dev.h" +#include "vnic_wq.h" +#include "vnic_rq.h" +#include "vnic_cq.h" +#include "vnic_intr.h" +#include "vnic_nic.h" +#include "enic_vnic_wq.h" + +static inline struct rte_mbuf * +rte_rxmbuf_alloc(struct rte_mempool *mp) +{ + struct rte_mbuf *m; + + m = __rte_mbuf_raw_alloc(mp); + __rte_mbuf_sanity_check_raw(m, 0); + return m; +} + + +static inline int enic_is_sriov_vf(struct enic *enic) +{ + return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF; +} + +static int is_zero_addr(uint8_t *addr) +{ + return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); +} + +static int is_mcast_addr(uint8_t *addr) +{ + return addr[0] & 1; +} + +static int is_eth_addr_valid(uint8_t *addr) +{ + return !is_mcast_addr(addr) && !is_zero_addr(addr); +} + +static void +enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq) +{ + uint16_t i; + + if (!rq || !rq->mbuf_ring) { + dev_debug(enic, "Pointer to rq or mbuf_ring is NULL"); + return; + } + + for (i = 0; i < enic->config.rq_desc_count; i++) { + if (rq->mbuf_ring[i]) { + rte_pktmbuf_free_seg(rq->mbuf_ring[i]); + rq->mbuf_ring[i] = NULL; + } + } +} + + +void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size) +{ + vnic_set_hdr_split_size(enic->vdev, split_hdr_size); +} + +static void enic_free_wq_buf(__rte_unused struct vnic_wq *wq, struct vnic_wq_buf *buf) +{ + struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->os_buf; + + rte_mempool_put(mbuf->pool, mbuf); + buf->os_buf = NULL; +} + +static void enic_wq_free_buf(struct vnic_wq *wq, + __rte_unused struct cq_desc *cq_desc, + struct vnic_wq_buf *buf, + __rte_unused void *opaque) +{ + enic_free_wq_buf(wq, buf); +} + +static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, + __rte_unused u8 type, u16 q_number, u16 completed_index, void *opaque) +{ + struct enic *enic = vnic_dev_priv(vdev); + + vnic_wq_service(&enic->wq[q_number], cq_desc, + completed_index, enic_wq_free_buf, + opaque); + + return 0; +} + +static void enic_log_q_error(struct enic *enic) +{ + unsigned int i; + u32 error_status; + + for (i = 0; i < enic->wq_count; i++) { + error_status = vnic_wq_error_status(&enic->wq[i]); + if (error_status) + dev_err(enic, "WQ[%d] error_status %d\n", i, + error_status); + } + + for (i = 0; i < enic->rq_count; i++) { + error_status = vnic_rq_error_status(&enic->rq[i]); + if (error_status) + dev_err(enic, "RQ[%d] error_status %d\n", i, + error_status); + } +} + +unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq) +{ + unsigned int cq = enic_cq_wq(enic, wq->index); + + /* Return the work done */ + return vnic_cq_service(&enic->cq[cq], + -1 /*wq_work_to_do*/, enic_wq_service, NULL); +} + +void enic_post_wq_index(struct vnic_wq *wq) +{ + enic_vnic_post_wq_index(wq); +} + +void enic_send_pkt(struct enic *enic, struct vnic_wq *wq, + struct rte_mbuf *tx_pkt, unsigned short len, + uint8_t sop, uint8_t eop, uint8_t cq_entry, + uint16_t ol_flags, uint16_t vlan_tag) +{ + struct wq_enet_desc *desc = vnic_wq_next_desc(wq); + uint16_t mss = 0; + uint8_t vlan_tag_insert = 0; + uint64_t bus_addr = (dma_addr_t) + 
(tx_pkt->buf_physaddr + tx_pkt->data_off); + + if (sop) { + if (ol_flags & PKT_TX_VLAN_PKT) + vlan_tag_insert = 1; + + if (enic->hw_ip_checksum) { + if (ol_flags & PKT_TX_IP_CKSUM) + mss |= ENIC_CALC_IP_CKSUM; + + if (ol_flags & PKT_TX_TCP_UDP_CKSUM) + mss |= ENIC_CALC_TCP_UDP_CKSUM; + } + } + + wq_enet_desc_enc(desc, + bus_addr, + len, + mss, + 0 /* header_length */, + 0 /* offload_mode WQ_ENET_OFFLOAD_MODE_CSUM */, + eop, + cq_entry, + 0 /* fcoe_encap */, + vlan_tag_insert, + vlan_tag, + 0 /* loopback */); + + enic_vnic_post_wq(wq, (void *)tx_pkt, bus_addr, len, + sop, + 1 /*desc_skip_cnt*/, + cq_entry, + 0 /*compressed send*/, + 0 /*wrid*/); +} + +void enic_dev_stats_clear(struct enic *enic) +{ + if (vnic_dev_stats_clear(enic->vdev)) + dev_err(enic, "Error in clearing stats\n"); +} + +void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats) +{ + struct vnic_stats *stats; + + if (vnic_dev_stats_dump(enic->vdev, &stats)) { + dev_err(enic, "Error in getting stats\n"); + return; + } + + r_stats->ipackets = stats->rx.rx_frames_ok; + r_stats->opackets = stats->tx.tx_frames_ok; + + r_stats->ibytes = stats->rx.rx_bytes_ok; + r_stats->obytes = stats->tx.tx_bytes_ok; + + r_stats->ierrors = stats->rx.rx_errors; + r_stats->oerrors = stats->tx.tx_errors; + + r_stats->imissed = stats->rx.rx_drop; + + r_stats->imcasts = stats->rx.rx_multicast_frames_ok; + r_stats->rx_nombuf = stats->rx.rx_no_bufs; +} + +void enic_del_mac_address(struct enic *enic) +{ + if (vnic_dev_del_addr(enic->vdev, enic->mac_addr)) + dev_err(enic, "del mac addr failed\n"); +} + +void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr) +{ + int err; + + if (!is_eth_addr_valid(mac_addr)) { + dev_err(enic, "invalid mac address\n"); + return; + } + + err = vnic_dev_del_addr(enic->vdev, mac_addr); + if (err) { + dev_err(enic, "del mac addr failed\n"); + return; + } + + ether_addr_copy((struct ether_addr *)mac_addr, + (struct ether_addr *)enic->mac_addr); + + err = vnic_dev_add_addr(enic->vdev, mac_addr); + if (err) { + dev_err(enic, "add mac addr failed\n"); + return; + } +} + +static void +enic_free_rq_buf(struct rte_mbuf **mbuf) +{ + if (*mbuf == NULL) + return; + + rte_pktmbuf_free(*mbuf); + mbuf = NULL; +} + +void enic_init_vnic_resources(struct enic *enic) +{ + unsigned int error_interrupt_enable = 1; + unsigned int error_interrupt_offset = 0; + unsigned int index = 0; + + for (index = 0; index < enic->rq_count; index++) { + vnic_rq_init(&enic->rq[index], + enic_cq_rq(enic, index), + error_interrupt_enable, + error_interrupt_offset); + } + + for (index = 0; index < enic->wq_count; index++) { + vnic_wq_init(&enic->wq[index], + enic_cq_wq(enic, index), + error_interrupt_enable, + error_interrupt_offset); + } + + vnic_dev_stats_clear(enic->vdev); + + for (index = 0; index < enic->cq_count; index++) { + vnic_cq_init(&enic->cq[index], + 0 /* flow_control_enable */, + 1 /* color_enable */, + 0 /* cq_head */, + 0 /* cq_tail */, + 1 /* cq_tail_color */, + 0 /* interrupt_enable */, + 1 /* cq_entry_enable */, + 0 /* cq_message_enable */, + 0 /* interrupt offset */, + 0 /* cq_message_addr */); + } + + vnic_intr_init(&enic->intr, + enic->config.intr_timer_usec, + enic->config.intr_timer_type, + /*mask_on_assertion*/1); +} + + +static int +enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq) +{ + struct rte_mbuf *mb; + struct rq_enet_desc *rqd = rq->ring.descs; + unsigned i; + dma_addr_t dma_addr; + + dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index, + rq->ring.desc_count); + + for (i = 
0; i < rq->ring.desc_count; i++, rqd++) { + mb = rte_rxmbuf_alloc(rq->mp); + if (mb == NULL) { + dev_err(enic, "RX mbuf alloc failed queue_id=%u\n", + (unsigned)rq->index); + return -ENOMEM; + } + + dma_addr = (dma_addr_t)(mb->buf_physaddr + mb->data_off); + + rq_enet_desc_enc(rqd, dma_addr, RQ_ENET_TYPE_ONLY_SOP, + mb->buf_len); + rq->mbuf_ring[i] = mb; + } + + /* make sure all prior writes are complete before doing the PIO write */ + rte_rmb(); + + /* Post all but the last 2 cache lines' worth of descriptors */ + rq->posted_index = rq->ring.desc_count - (2 * RTE_CACHE_LINE_SIZE + / sizeof(struct rq_enet_desc)); + rq->rx_nb_hold = 0; + + dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n", + enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold); + iowrite32(rq->posted_index, &rq->ctrl->posted_index); + rte_rmb(); + + return 0; + +} + +static void * +enic_alloc_consistent(__rte_unused void *priv, size_t size, + dma_addr_t *dma_handle, u8 *name) +{ + void *vaddr; + const struct rte_memzone *rz; + *dma_handle = 0; + + rz = rte_memzone_reserve_aligned((const char *)name, + size, SOCKET_ID_ANY, 0, ENIC_ALIGN); + if (!rz) { + pr_err("%s : Failed to allocate memory requested for %s\n", + __func__, name); + return NULL; + } + + vaddr = rz->addr; + *dma_handle = (dma_addr_t)rz->phys_addr; + + return vaddr; +} + +static void +enic_free_consistent(__rte_unused struct rte_pci_device *hwdev, + __rte_unused size_t size, + __rte_unused void *vaddr, + __rte_unused dma_addr_t dma_handle) +{ + /* Nothing to be done */ +} + +static void +enic_intr_handler(__rte_unused struct rte_intr_handle *handle, + void *arg) +{ + struct enic *enic = pmd_priv((struct rte_eth_dev *)arg); + + vnic_intr_return_all_credits(&enic->intr); + + enic_log_q_error(enic); +} + +int enic_enable(struct enic *enic) +{ + unsigned int index; + int err; + struct rte_eth_dev *eth_dev = enic->rte_dev; + + eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev); + eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; + vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */ + + if (enic_clsf_init(enic)) + dev_warning(enic, "Init of hash table for clsf failed."\ + "Flow director feature will not work\n"); + + for (index = 0; index < enic->rq_count; index++) { + err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[index]); + if (err) { + dev_err(enic, "Failed to alloc RX queue mbufs\n"); + return err; + } + } + + for (index = 0; index < enic->wq_count; index++) + vnic_wq_enable(&enic->wq[index]); + for (index = 0; index < enic->rq_count; index++) + vnic_rq_enable(&enic->rq[index]); + + vnic_dev_enable_wait(enic->vdev); + + /* Register and enable error interrupt */ + rte_intr_callback_register(&(enic->pdev->intr_handle), + enic_intr_handler, (void *)enic->rte_dev); + + rte_intr_enable(&(enic->pdev->intr_handle)); + vnic_intr_unmask(&enic->intr); + + return 0; +} + +int enic_alloc_intr_resources(struct enic *enic) +{ + int err; + + dev_info(enic, "vNIC resources used: "\ + "wq %d rq %d cq %d intr %d\n", + enic->wq_count, enic->rq_count, + enic->cq_count, enic->intr_count); + + err = vnic_intr_alloc(enic->vdev, &enic->intr, 0); + if (err) + enic_free_vnic_resources(enic); + + return err; +} + +void enic_free_rq(void *rxq) +{ + struct vnic_rq *rq = (struct vnic_rq *)rxq; + struct enic *enic = vnic_dev_priv(rq->vdev); + + enic_rxmbuf_queue_release(enic, rq); + rte_free(rq->mbuf_ring); + rq->mbuf_ring = NULL; + vnic_rq_free(rq); + vnic_cq_free(&enic->cq[rq->index]); +} + +void enic_start_wq(struct enic 
*enic, uint16_t queue_idx) +{ + vnic_wq_enable(&enic->wq[queue_idx]); +} + +int enic_stop_wq(struct enic *enic, uint16_t queue_idx) +{ + return vnic_wq_disable(&enic->wq[queue_idx]); +} + +void enic_start_rq(struct enic *enic, uint16_t queue_idx) +{ + vnic_rq_enable(&enic->rq[queue_idx]); +} + +int enic_stop_rq(struct enic *enic, uint16_t queue_idx) +{ + return vnic_rq_disable(&enic->rq[queue_idx]); +} + +int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, + unsigned int socket_id, struct rte_mempool *mp, + uint16_t nb_desc) +{ + int rc; + struct vnic_rq *rq = &enic->rq[queue_idx]; + + rq->socket_id = socket_id; + rq->mp = mp; + + if (nb_desc) { + if (nb_desc > enic->config.rq_desc_count) { + dev_warning(enic, + "RQ %d - number of rx desc in cmd line (%d)"\ + "is greater than that in the UCSM/CIMC adapter"\ + "policy. Applying the value in the adapter "\ + "policy (%d).\n", + queue_idx, nb_desc, enic->config.rq_desc_count); + nb_desc = enic->config.rq_desc_count; + } + dev_info(enic, "RX Queues - effective number of descs:%d\n", + nb_desc); + } + + /* Allocate queue resources */ + rc = vnic_rq_alloc(enic->vdev, rq, queue_idx, + nb_desc, sizeof(struct rq_enet_desc)); + if (rc) { + dev_err(enic, "error in allocation of rq\n"); + goto err_exit; + } + + rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx, + socket_id, nb_desc, + sizeof(struct cq_enet_rq_desc)); + if (rc) { + dev_err(enic, "error in allocation of cq for rq\n"); + goto err_free_rq_exit; + } + + /* Allocate the mbuf ring */ + rq->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring", + sizeof(struct rte_mbuf *) * nb_desc, + RTE_CACHE_LINE_SIZE, rq->socket_id); + + if (rq->mbuf_ring != NULL) + return 0; + + /* cleanup on error */ + vnic_cq_free(&enic->cq[queue_idx]); +err_free_rq_exit: + vnic_rq_free(rq); +err_exit: + return -ENOMEM; +} + +void enic_free_wq(void *txq) +{ + struct vnic_wq *wq = (struct vnic_wq *)txq; + struct enic *enic = vnic_dev_priv(wq->vdev); + + vnic_wq_free(wq); + vnic_cq_free(&enic->cq[enic->rq_count + wq->index]); +} + +int enic_alloc_wq(struct enic *enic, uint16_t queue_idx, + unsigned int socket_id, uint16_t nb_desc) +{ + int err; + struct vnic_wq *wq = &enic->wq[queue_idx]; + unsigned int cq_index = enic_cq_wq(enic, queue_idx); + + wq->socket_id = socket_id; + if (nb_desc) { + if (nb_desc > enic->config.wq_desc_count) { + dev_warning(enic, + "WQ %d - number of tx desc in cmd line (%d)"\ + "is greater than that in the UCSM/CIMC adapter"\ + "policy. 
Applying the value in the adapter "\ + "policy (%d)\n", + queue_idx, nb_desc, enic->config.wq_desc_count); + } else if (nb_desc != enic->config.wq_desc_count) { + enic->config.wq_desc_count = nb_desc; + dev_info(enic, + "TX Queues - effective number of descs:%d\n", + nb_desc); + } + } + + /* Allocate queue resources */ + err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx, + enic->config.wq_desc_count, + sizeof(struct wq_enet_desc)); + if (err) { + dev_err(enic, "error in allocation of wq\n"); + return err; + } + + err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index, + socket_id, enic->config.wq_desc_count, + sizeof(struct cq_enet_wq_desc)); + if (err) { + vnic_wq_free(wq); + dev_err(enic, "error in allocation of cq for wq\n"); + } + + return err; +} + +int enic_disable(struct enic *enic) +{ + unsigned int i; + int err; + + vnic_intr_mask(&enic->intr); + (void)vnic_intr_masked(&enic->intr); /* flush write */ + + vnic_dev_disable(enic->vdev); + + enic_clsf_destroy(enic); + + if (!enic_is_sriov_vf(enic)) + vnic_dev_del_addr(enic->vdev, enic->mac_addr); + + for (i = 0; i < enic->wq_count; i++) { + err = vnic_wq_disable(&enic->wq[i]); + if (err) + return err; + } + for (i = 0; i < enic->rq_count; i++) { + err = vnic_rq_disable(&enic->rq[i]); + if (err) + return err; + } + + vnic_dev_set_reset_flag(enic->vdev, 1); + vnic_dev_notify_unset(enic->vdev); + + for (i = 0; i < enic->wq_count; i++) + vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); + + for (i = 0; i < enic->rq_count; i++) + vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); + for (i = 0; i < enic->cq_count; i++) + vnic_cq_clean(&enic->cq[i]); + vnic_intr_clean(&enic->intr); + + return 0; +} + +static int enic_dev_wait(struct vnic_dev *vdev, + int (*start)(struct vnic_dev *, int), + int (*finished)(struct vnic_dev *, int *), + int arg) +{ + int done; + int err; + int i; + + err = start(vdev, arg); + if (err) + return err; + + /* Wait for func to complete...2 seconds max */ + for (i = 0; i < 2000; i++) { + err = finished(vdev, &done); + if (err) + return err; + if (done) + return 0; + usleep(1000); + } + return -ETIMEDOUT; +} + +static int enic_dev_open(struct enic *enic) +{ + int err; + + err = enic_dev_wait(enic->vdev, vnic_dev_open, + vnic_dev_open_done, 0); + if (err) + dev_err(enic_get_dev(enic), + "vNIC device open failed, err %d\n", err); + + return err; +} + +static int enic_set_rsskey(struct enic *enic) +{ + dma_addr_t rss_key_buf_pa; + union vnic_rss_key *rss_key_buf_va = NULL; + static union vnic_rss_key rss_key = { + .key = { + [0] = {.b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}}, + [1] = {.b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101}}, + [2] = {.b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115}}, + [3] = {.b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108}}, + } + }; + int err; + u8 name[NAME_MAX]; + + snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name); + rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key), + &rss_key_buf_pa, name); + if (!rss_key_buf_va) + return -ENOMEM; + + rte_memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key)); + + err = enic_set_rss_key(enic, + rss_key_buf_pa, + sizeof(union vnic_rss_key)); + + enic_free_consistent(enic->pdev, sizeof(union vnic_rss_key), + rss_key_buf_va, rss_key_buf_pa); + + return err; +} + +static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) +{ + dma_addr_t rss_cpu_buf_pa; + union vnic_rss_cpu *rss_cpu_buf_va = NULL; + int i; + int err; + u8 name[NAME_MAX]; + + snprintf((char *)name, NAME_MAX, 
"rss_cpu-%s", enic->bdf_name); + rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu), + &rss_cpu_buf_pa, name); + if (!rss_cpu_buf_va) + return -ENOMEM; + + for (i = 0; i < (1 << rss_hash_bits); i++) + (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count; + + err = enic_set_rss_cpu(enic, + rss_cpu_buf_pa, + sizeof(union vnic_rss_cpu)); + + enic_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu), + rss_cpu_buf_va, rss_cpu_buf_pa); + + return err; +} + +static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu, + u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable) +{ + const u8 tso_ipid_split_en = 0; + int err; + + /* Enable VLAN tag stripping */ + + err = enic_set_nic_cfg(enic, + rss_default_cpu, rss_hash_type, + rss_hash_bits, rss_base_cpu, + rss_enable, tso_ipid_split_en, + enic->ig_vlan_strip_en); + + return err; +} + +int enic_set_rss_nic_cfg(struct enic *enic) +{ + const u8 rss_default_cpu = 0; + const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 | + NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 | + NIC_CFG_RSS_HASH_TYPE_IPV6 | + NIC_CFG_RSS_HASH_TYPE_TCP_IPV6; + const u8 rss_hash_bits = 7; + const u8 rss_base_cpu = 0; + u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1); + + if (rss_enable) { + if (!enic_set_rsskey(enic)) { + if (enic_set_rsscpu(enic, rss_hash_bits)) { + rss_enable = 0; + dev_warning(enic, "RSS disabled, "\ + "Failed to set RSS cpu indirection table."); + } + } else { + rss_enable = 0; + dev_warning(enic, + "RSS disabled, Failed to set RSS key.\n"); + } + } + + return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type, + rss_hash_bits, rss_base_cpu, rss_enable); +} + +int enic_setup_finish(struct enic *enic) +{ + int ret; + + ret = enic_set_rss_nic_cfg(enic); + if (ret) { + dev_err(enic, "Failed to config nic, aborting.\n"); + return -1; + } + + vnic_dev_add_addr(enic->vdev, enic->mac_addr); + + /* Default conf */ + vnic_dev_packet_filter(enic->vdev, + 1 /* directed */, + 1 /* multicast */, + 1 /* broadcast */, + 0 /* promisc */, + 1 /* allmulti */); + + enic->promisc = 0; + enic->allmulti = 1; + + return 0; +} + +void enic_add_packet_filter(struct enic *enic) +{ + /* Args -> directed, multicast, broadcast, promisc, allmulti */ + vnic_dev_packet_filter(enic->vdev, 1, 1, 1, + enic->promisc, enic->allmulti); +} + +int enic_get_link_status(struct enic *enic) +{ + return vnic_dev_link_status(enic->vdev); +} + +static void enic_dev_deinit(struct enic *enic) +{ + struct rte_eth_dev *eth_dev = enic->rte_dev; + + rte_free(eth_dev->data->mac_addrs); +} + + +int enic_set_vnic_res(struct enic *enic) +{ + struct rte_eth_dev *eth_dev = enic->rte_dev; + + if ((enic->rq_count < eth_dev->data->nb_rx_queues) || + (enic->wq_count < eth_dev->data->nb_tx_queues)) { + dev_err(dev, "Not enough resources configured, aborting\n"); + return -1; + } + + enic->rq_count = eth_dev->data->nb_rx_queues; + enic->wq_count = eth_dev->data->nb_tx_queues; + if (enic->cq_count < (enic->rq_count + enic->wq_count)) { + dev_err(dev, "Not enough resources configured, aborting\n"); + return -1; + } + + enic->cq_count = enic->rq_count + enic->wq_count; + return 0; +} + +static int enic_dev_init(struct enic *enic) +{ + int err; + struct rte_eth_dev *eth_dev = enic->rte_dev; + + vnic_dev_intr_coal_timer_info_default(enic->vdev); + + /* Get vNIC configuration + */ + err = enic_get_vnic_config(enic); + if (err) { + dev_err(dev, "Get vNIC configuration failed, aborting\n"); + return err; + } + + eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN, 0); + 
if (!eth_dev->data->mac_addrs) { + dev_err(enic, "mac addr storage alloc failed, aborting.\n"); + return -1; + } + ether_addr_copy((struct ether_addr *) enic->mac_addr, + &eth_dev->data->mac_addrs[0]); + + + /* Get available resource counts + */ + enic_get_res_counts(enic); + + vnic_dev_set_reset_flag(enic->vdev, 0); + + return 0; + +} + +int enic_probe(struct enic *enic) +{ + struct rte_pci_device *pdev = enic->pdev; + int err = -1; + + dev_debug(enic, " Initializing ENIC PMD version %s\n", DRV_VERSION); + + enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr; + enic->bar0.len = pdev->mem_resource[0].len; + + /* Register vNIC device */ + enic->vdev = vnic_dev_register(NULL, enic, enic->pdev, &enic->bar0, 1); + if (!enic->vdev) { + dev_err(enic, "vNIC registration failed, aborting\n"); + goto err_out; + } + + vnic_register_cbacks(enic->vdev, + enic_alloc_consistent, + enic_free_consistent); + + /* Issue device open to get device in known state */ + err = enic_dev_open(enic); + if (err) { + dev_err(enic, "vNIC dev open failed, aborting\n"); + goto err_out_unregister; + } + + /* Set ingress vlan rewrite mode before vnic initialization */ + err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev, + IG_VLAN_REWRITE_MODE_PASS_THRU); + if (err) { + dev_err(enic, + "Failed to set ingress vlan rewrite mode, aborting.\n"); + goto err_out_dev_close; + } + + /* Issue device init to initialize the vnic-to-switch link. + * We'll start with carrier off and wait for link UP + * notification later to turn on carrier. We don't need + * to wait here for the vnic-to-switch link initialization + * to complete; link UP notification is the indication that + * the process is complete. + */ + + err = vnic_dev_init(enic->vdev, 0); + if (err) { + dev_err(enic, "vNIC dev init failed, aborting\n"); + goto err_out_dev_close; + } + + err = enic_dev_init(enic); + if (err) { + dev_err(enic, "Device initialization failed, aborting\n"); + goto err_out_dev_close; + } + + return 0; + +err_out_dev_close: + vnic_dev_close(enic->vdev); +err_out_unregister: + vnic_dev_unregister(enic->vdev); +err_out: + return err; +} + +void enic_remove(struct enic *enic) +{ + enic_dev_deinit(enic); + vnic_dev_close(enic->vdev); + vnic_dev_unregister(enic->vdev); +} diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c new file mode 100644 index 00000000..ebe379dd --- /dev/null +++ b/drivers/net/enic/enic_res.c @@ -0,0 +1,218 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include "enic_compat.h" +#include "rte_ethdev.h" +#include "wq_enet_desc.h" +#include "rq_enet_desc.h" +#include "cq_enet_desc.h" +#include "vnic_resource.h" +#include "vnic_enet.h" +#include "vnic_dev.h" +#include "vnic_wq.h" +#include "vnic_rq.h" +#include "vnic_cq.h" +#include "vnic_intr.h" +#include "vnic_stats.h" +#include "vnic_nic.h" +#include "vnic_rss.h" +#include "enic_res.h" +#include "enic.h" + +int enic_get_vnic_config(struct enic *enic) +{ + struct vnic_enet_config *c = &enic->config; + int err; + + err = vnic_dev_get_mac_addr(enic->vdev, enic->mac_addr); + if (err) { + dev_err(enic_get_dev(enic), + "Error getting MAC addr, %d\n", err); + return err; + } + +#define GET_CONFIG(m) \ + do { \ + err = vnic_dev_spec(enic->vdev, \ + offsetof(struct vnic_enet_config, m), \ + sizeof(c->m), &c->m); \ + if (err) { \ + dev_err(enic_get_dev(enic), \ + "Error getting %s, %d\n", #m, err); \ + return err; \ + } \ + } while (0) + + GET_CONFIG(flags); + GET_CONFIG(wq_desc_count); + GET_CONFIG(rq_desc_count); + GET_CONFIG(mtu); + GET_CONFIG(intr_timer_type); + GET_CONFIG(intr_mode); + GET_CONFIG(intr_timer_usec); + GET_CONFIG(loop_tag); + GET_CONFIG(num_arfs); + + c->wq_desc_count = + min_t(u32, ENIC_MAX_WQ_DESCS, + max_t(u32, ENIC_MIN_WQ_DESCS, + c->wq_desc_count)); + c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */ + + c->rq_desc_count = + min_t(u32, ENIC_MAX_RQ_DESCS, + max_t(u32, ENIC_MIN_RQ_DESCS, + c->rq_desc_count)); + c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */ + + if (c->mtu == 0) + c->mtu = 1500; + c->mtu = min_t(u16, ENIC_MAX_MTU, + max_t(u16, ENIC_MIN_MTU, + c->mtu)); + + c->intr_timer_usec = min_t(u32, c->intr_timer_usec, + vnic_dev_get_intr_coal_timer_max(enic->vdev)); + + dev_info(enic_get_dev(enic), + "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x " + "wq/rq %d/%d mtu %d\n", + enic->mac_addr[0], enic->mac_addr[1], enic->mac_addr[2], + enic->mac_addr[3], enic->mac_addr[4], enic->mac_addr[5], + c->wq_desc_count, c->rq_desc_count, c->mtu); + dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s " + "rss %s intr mode %s type %s timer %d usec " + "loopback tag 0x%04x\n", + ENIC_SETTING(enic, TXCSUM) ? "yes" : "no", + ENIC_SETTING(enic, RXCSUM) ? "yes" : "no", + ENIC_SETTING(enic, RSS) ? "yes" : "no", + c->intr_mode == VENET_INTR_MODE_INTX ? "INTx" : + c->intr_mode == VENET_INTR_MODE_MSI ? "MSI" : + c->intr_mode == VENET_INTR_MODE_ANY ? "any" : + "unknown", + c->intr_timer_type == VENET_INTR_TYPE_MIN ? "min" : + c->intr_timer_type == VENET_INTR_TYPE_IDLE ? 
"idle" : + "unknown", + c->intr_timer_usec, + c->loop_tag); + + return 0; +} + +int enic_add_vlan(struct enic *enic, u16 vlanid) +{ + u64 a0 = vlanid, a1 = 0; + int wait = 1000; + int err; + + err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait); + if (err) + dev_err(enic_get_dev(enic), "Can't add vlan id, %d\n", err); + + return err; +} + +int enic_del_vlan(struct enic *enic, u16 vlanid) +{ + u64 a0 = vlanid, a1 = 0; + int wait = 1000; + int err; + + err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait); + if (err) + dev_err(enic_get_dev(enic), "Can't delete vlan id, %d\n", err); + + return err; +} + +int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type, + u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en, + u8 ig_vlan_strip_en) +{ + u64 a0, a1; + u32 nic_cfg; + int wait = 1000; + + vnic_set_nic_cfg(&nic_cfg, rss_default_cpu, + rss_hash_type, rss_hash_bits, rss_base_cpu, + rss_enable, tso_ipid_split_en, ig_vlan_strip_en); + + a0 = nic_cfg; + a1 = 0; + + return vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait); +} + +int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len) +{ + u64 a0 = (u64)key_pa, a1 = len; + int wait = 1000; + + return vnic_dev_cmd(enic->vdev, CMD_RSS_KEY, &a0, &a1, wait); +} + +int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, u64 len) +{ + u64 a0 = (u64)cpu_pa, a1 = len; + int wait = 1000; + + return vnic_dev_cmd(enic->vdev, CMD_RSS_CPU, &a0, &a1, wait); +} + +void enic_free_vnic_resources(struct enic *enic) +{ + unsigned int i; + + for (i = 0; i < enic->wq_count; i++) + vnic_wq_free(&enic->wq[i]); + for (i = 0; i < enic->rq_count; i++) + vnic_rq_free(&enic->rq[i]); + for (i = 0; i < enic->cq_count; i++) + vnic_cq_free(&enic->cq[i]); + vnic_intr_free(&enic->intr); +} + +void enic_get_res_counts(struct enic *enic) +{ + enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ); + enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ); + enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ); + enic->intr_count = vnic_dev_get_res_count(enic->vdev, + RES_TYPE_INTR_CTRL); + + dev_info(enic_get_dev(enic), + "vNIC resources avail: wq %d rq %d cq %d intr %d\n", + enic->wq_count, enic->rq_count, + enic->cq_count, enic->intr_count); +} diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h new file mode 100644 index 00000000..00fa71de --- /dev/null +++ b/drivers/net/enic/enic_res.h @@ -0,0 +1,154 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef _ENIC_RES_H_ +#define _ENIC_RES_H_ + +#include "wq_enet_desc.h" +#include "rq_enet_desc.h" +#include "vnic_wq.h" +#include "vnic_rq.h" + +#define ENIC_MIN_WQ_DESCS 64 +#define ENIC_MAX_WQ_DESCS 4096 +#define ENIC_MIN_RQ_DESCS 64 +#define ENIC_MAX_RQ_DESCS 4096 + +#define ENIC_MIN_MTU 68 +#define ENIC_MAX_MTU 9000 + +#define ENIC_MULTICAST_PERFECT_FILTERS 32 +#define ENIC_UNICAST_PERFECT_FILTERS 32 + +#define ENIC_NON_TSO_MAX_DESC 16 +#define ENIC_DEFAULT_RX_FREE_THRESH 32 +#define ENIC_TX_POST_THRESH (ENIC_MIN_WQ_DESCS / 2) + +#define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0) + +static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq, + void *os_buf, dma_addr_t dma_addr, unsigned int len, + unsigned int mss_or_csum_offset, unsigned int hdr_len, + int vlan_tag_insert, unsigned int vlan_tag, + int offload_mode, int cq_entry, int sop, int eop, int loopback) +{ + struct wq_enet_desc *desc = vnic_wq_next_desc(wq); + u8 desc_skip_cnt = 1; + u8 compressed_send = 0; + u64 wrid = 0; + + wq_enet_desc_enc(desc, + (u64)dma_addr | VNIC_PADDR_TARGET, + (u16)len, + (u16)mss_or_csum_offset, + (u16)hdr_len, (u8)offload_mode, + (u8)eop, (u8)cq_entry, + 0, /* fcoe_encap */ + (u8)vlan_tag_insert, + (u16)vlan_tag, + (u8)loopback); + + vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt, + (u8)cq_entry, compressed_send, wrid); +} + +static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq, + void *os_buf, dma_addr_t dma_addr, unsigned int len, + int eop, int loopback) +{ + enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, + 0, 0, 0, 0, 0, + eop, 0 /* !SOP */, eop, loopback); +} + +static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf, + dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert, + unsigned int vlan_tag, int eop, int loopback) +{ + enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, + 0, 0, vlan_tag_insert, vlan_tag, + WQ_ENET_OFFLOAD_MODE_CSUM, + eop, 1 /* SOP */, eop, loopback); +} + +static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq, + void *os_buf, dma_addr_t dma_addr, unsigned int len, + int ip_csum, int tcpudp_csum, int vlan_tag_insert, + unsigned int vlan_tag, int eop, int loopback) +{ + enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, + (ip_csum ? 1 : 0) + (tcpudp_csum ? 
2 : 0), + 0, vlan_tag_insert, vlan_tag, + WQ_ENET_OFFLOAD_MODE_CSUM, + eop, 1 /* SOP */, eop, loopback); +} + +static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq, + void *os_buf, dma_addr_t dma_addr, unsigned int len, + unsigned int csum_offset, unsigned int hdr_len, + int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback) +{ + enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, + csum_offset, hdr_len, vlan_tag_insert, vlan_tag, + WQ_ENET_OFFLOAD_MODE_CSUM_L4, + eop, 1 /* SOP */, eop, loopback); +} + +static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq, + void *os_buf, dma_addr_t dma_addr, unsigned int len, + unsigned int mss, unsigned int hdr_len, int vlan_tag_insert, + unsigned int vlan_tag, int eop, int loopback) +{ + enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, + mss, hdr_len, vlan_tag_insert, vlan_tag, + WQ_ENET_OFFLOAD_MODE_TSO, + eop, 1 /* SOP */, eop, loopback); +} + +struct enic; + +int enic_get_vnic_config(struct enic *); +int enic_add_vlan(struct enic *enic, u16 vlanid); +int enic_del_vlan(struct enic *enic, u16 vlanid); +int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type, + u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en, + u8 ig_vlan_strip_en); +int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len); +int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, u64 len); +void enic_get_res_counts(struct enic *enic); +void enic_init_vnic_resources(struct enic *enic); +int enic_alloc_vnic_resources(struct enic *); +void enic_free_vnic_resources(struct enic *); + +#endif /* _ENIC_RES_H_ */ diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c new file mode 100644 index 00000000..232987a5 --- /dev/null +++ b/drivers/net/enic/enic_rx.c @@ -0,0 +1,359 @@ +/* + * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include <rte_mbuf.h> +#include <rte_ethdev.h> +#include <rte_prefetch.h> + +#include "enic_compat.h" +#include "rq_enet_desc.h" +#include "enic.h" + +#define RTE_PMD_USE_PREFETCH + +#ifdef RTE_PMD_USE_PREFETCH +/* + * Prefetch a cache line into all cache levels. + */ +#define rte_enic_prefetch(p) rte_prefetch0(p) +#else +#define rte_enic_prefetch(p) do {} while (0) +#endif + +#ifdef RTE_PMD_PACKET_PREFETCH +#define rte_packet_prefetch(p) rte_prefetch1(p) +#else +#define rte_packet_prefetch(p) do {} while (0) +#endif + +static inline struct rte_mbuf * +rte_rxmbuf_alloc(struct rte_mempool *mp) +{ + struct rte_mbuf *m; + + m = __rte_mbuf_raw_alloc(mp); + __rte_mbuf_sanity_check_raw(m, 0); + return m; +} + +static inline uint16_t +enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd) +{ + return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK; +} + +static inline uint16_t +enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd) +{ + return(le16_to_cpu(crd->bytes_written_flags) & + ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK); +} + +static inline uint8_t +enic_cq_rx_desc_packet_error(uint16_t bwflags) +{ + return((bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) == + CQ_ENET_RQ_DESC_FLAGS_TRUNCATED); +} + +static inline uint8_t +enic_cq_rx_desc_eop(uint16_t ciflags) +{ + return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP) + == CQ_ENET_RQ_DESC_FLAGS_EOP; +} + +static inline uint8_t +enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd) +{ + return ((le16_to_cpu(cqrd->q_number_rss_type_flags) & + CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) == + CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC); +} + +static inline uint8_t +enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd) +{ + return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) == + CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK); +} + +static inline uint8_t +enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd) +{ + return((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) == + CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK); +} + +static inline uint8_t +enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd) +{ + return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >> + CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK); +} + +static inline uint32_t +enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd) +{ + return le32_to_cpu(cqrd->rss_hash); +} + +static inline uint16_t +enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd) +{ + return le16_to_cpu(cqrd->vlan); +} + +static inline uint16_t +enic_cq_rx_desc_n_bytes(struct cq_desc *cqd) +{ + struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; + return le16_to_cpu(cqrd->bytes_written_flags) & + CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; +} + +static inline uint8_t +enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd, uint64_t *pkt_err_flags_out) +{ + struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; + uint16_t bwflags; + int ret = 0; + uint64_t pkt_err_flags = 0; + + bwflags = enic_cq_rx_desc_bwflags(cqrd); + if (unlikely(enic_cq_rx_desc_packet_error(bwflags))) { + pkt_err_flags = PKT_RX_MAC_ERR; + ret = 1; + } + *pkt_err_flags_out = pkt_err_flags; + return ret; +} + +/* + * Lookup table to translate RX CQ flags to mbuf flags. 
+ */ +static inline uint32_t +enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd) +{ + struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; + uint8_t cqrd_flags = cqrd->flags; + static const uint32_t cq_type_table[128] __rte_cache_aligned = { + [32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4, + [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 + | RTE_PTYPE_L4_UDP, + [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 + | RTE_PTYPE_L4_TCP, + [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 + | RTE_PTYPE_L4_FRAG, + [16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6, + [18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 + | RTE_PTYPE_L4_UDP, + [20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 + | RTE_PTYPE_L4_TCP, + [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 + | RTE_PTYPE_L4_FRAG, + /* All others reserved */ + }; + cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT + | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6 + | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP; + return cq_type_table[cqrd_flags]; +} + +static inline void +enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf) +{ + struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; + uint16_t ciflags, bwflags, pkt_flags = 0; + ciflags = enic_cq_rx_desc_ciflags(cqrd); + bwflags = enic_cq_rx_desc_bwflags(cqrd); + + mbuf->ol_flags = 0; + + /* flags are meaningless if !EOP */ + if (unlikely(!enic_cq_rx_desc_eop(ciflags))) + goto mbuf_flags_done; + + /* VLAN stripping */ + if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) { + pkt_flags |= PKT_RX_VLAN_PKT; + mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd); + } else { + mbuf->vlan_tci = 0; + } + + /* RSS flag */ + if (enic_cq_rx_desc_rss_type(cqrd)) { + pkt_flags |= PKT_RX_RSS_HASH; + mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd); + } + + /* checksum flags */ + if (!enic_cq_rx_desc_csum_not_calc(cqrd) && + (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) { + if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd))) + pkt_flags |= PKT_RX_IP_CKSUM_BAD; + if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) { + if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))) + pkt_flags |= PKT_RX_L4_CKSUM_BAD; + } + } + + mbuf_flags_done: + mbuf->ol_flags = pkt_flags; +} + +static inline uint32_t +enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1) +{ + uint32_t d = i0 + i1; + ASSERT(i0 < n_descriptors); + ASSERT(i1 < n_descriptors); + d -= (d >= n_descriptors) ? 
n_descriptors : 0; + return d; +} + + +uint16_t +enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct vnic_rq *rq = rx_queue; + struct enic *enic = vnic_dev_priv(rq->vdev); + unsigned int rx_id; + struct rte_mbuf *nmb, *rxmb; + uint16_t nb_rx = 0; + uint16_t nb_hold; + struct vnic_cq *cq; + volatile struct cq_desc *cqd_ptr; + uint8_t color; + + cq = &enic->cq[enic_cq_rq(enic, rq->index)]; + rx_id = cq->to_clean; /* index of cqd, rqd, mbuf_table */ + cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id; + + nb_hold = rq->rx_nb_hold; /* mbufs held by software */ + + while (nb_rx < nb_pkts) { + volatile struct rq_enet_desc *rqd_ptr; + dma_addr_t dma_addr; + struct cq_desc cqd; + uint64_t ol_err_flags; + uint8_t packet_error; + + /* Check for pkts available */ + color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT) + & CQ_DESC_COLOR_MASK; + if (color == cq->last_color) + break; + + /* Get the cq descriptor and rq pointer */ + cqd = *cqd_ptr; + rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id; + + /* allocate a new mbuf */ + nmb = rte_rxmbuf_alloc(rq->mp); + if (nmb == NULL) { + dev_err(enic, "RX mbuf alloc failed port=%u qid=%u", + enic->port_id, (unsigned)rq->index); + rte_eth_devices[enic->port_id]. + data->rx_mbuf_alloc_failed++; + break; + } + + /* A packet error means descriptor and data are untrusted */ + packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags); + + /* Get the mbuf to return and replace with one just allocated */ + rxmb = rq->mbuf_ring[rx_id]; + rq->mbuf_ring[rx_id] = nmb; + + /* Increment cqd, rqd, mbuf_table index */ + rx_id++; + if (unlikely(rx_id == rq->ring.desc_count)) { + rx_id = 0; + cq->last_color = cq->last_color ? 0 : 1; + } + + /* Prefetch next mbuf & desc while processing current one */ + cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id; + rte_enic_prefetch(cqd_ptr); + rte_enic_prefetch(rq->mbuf_ring[rx_id]); + rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs) + + rx_id); + + /* Push descriptor for newly allocated mbuf */ + dma_addr = (dma_addr_t)(nmb->buf_physaddr + nmb->data_off); + rqd_ptr->address = rte_cpu_to_le_64(dma_addr); + rqd_ptr->length_type = cpu_to_le16(nmb->buf_len); + + /* Fill in the rest of the mbuf */ + rxmb->data_off = RTE_PKTMBUF_HEADROOM; + rxmb->nb_segs = 1; + rxmb->next = NULL; + rxmb->port = enic->port_id; + if (!packet_error) { + rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd); + rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd); + enic_cq_rx_to_pkt_flags(&cqd, rxmb); + } else { + rxmb->pkt_len = 0; + rxmb->packet_type = 0; + rxmb->ol_flags = 0; + } + rxmb->data_len = rxmb->pkt_len; + + /* prefetch mbuf data for caller */ + rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr, + RTE_PKTMBUF_HEADROOM)); + + /* store the mbuf address into the next entry of the array */ + rx_pkts[nb_rx++] = rxmb; + } + + nb_hold += nb_rx; + cq->to_clean = rx_id; + + if (nb_hold > rq->rx_free_thresh) { + rq->posted_index = enic_ring_add(rq->ring.desc_count, + rq->posted_index, nb_hold); + nb_hold = 0; + rte_mb(); + iowrite32(rq->posted_index, &rq->ctrl->posted_index); + } + + rq->rx_nb_hold = nb_hold; + + return nb_rx; +} diff --git a/drivers/net/enic/rte_pmd_enic_version.map b/drivers/net/enic/rte_pmd_enic_version.map new file mode 100644 index 00000000..ef353984 --- /dev/null +++ b/drivers/net/enic/rte_pmd_enic_version.map @@ -0,0 +1,4 @@ +DPDK_2.0 { + + local: *; +}; diff --git a/drivers/net/fm10k/Makefile b/drivers/net/fm10k/Makefile new file mode 100644 index 00000000..602a2d2d --- 
/dev/null +++ b/drivers/net/fm10k/Makefile @@ -0,0 +1,103 @@ +# BSD LICENSE +# +# Copyright(c) 2013-2015 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_fm10k.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +EXPORT_MAP := rte_pmd_fm10k_version.map + +LIBABIVER := 1 + +ifeq ($(CC), icc) +# +# CFLAGS for icc +# +CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259 + +else ifeq ($(CC), clang) +# +## CFLAGS for clang +# +CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value +CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args +CFLAGS_BASE_DRIVER += -Wno-unused-variable +CFLAGS_BASE_DRIVER += -Wno-missing-field-initializers + +else +# +# CFLAGS for gcc +# +CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value +CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args +CFLAGS_BASE_DRIVER += -Wno-unused-variable +CFLAGS_BASE_DRIVER += -Wno-missing-field-initializers + +ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1) +CFLAGS += -Wno-deprecated +CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable +endif +endif + +# +# Add extra flags for base driver source files to disable warnings in them +# +BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))) +$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER))) + +VPATH += $(SRCDIR)/base + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_rxtx.c + +SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_pf.c +SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_tlv.c +SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_common.c +SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_mbx.c +SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_vf.c +SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_api.c +SRCS-$(CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR) += fm10k_rxtx_vec.c + +# this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += lib/librte_eal 
lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += lib/librte_mempool lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += lib/librte_net + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/fm10k/base/fm10k_api.c b/drivers/net/fm10k/base/fm10k_api.c new file mode 100644 index 00000000..c49d20df --- /dev/null +++ b/drivers/net/fm10k/base/fm10k_api.c @@ -0,0 +1,363 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#include "fm10k_api.h" +#include "fm10k_common.h" + +/** + * fm10k_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the mac type of the adapter based on the + * vendor ID and device ID stored in the hw structure. + **/ +s32 fm10k_set_mac_type(struct fm10k_hw *hw) +{ + s32 ret_val = FM10K_SUCCESS; + + DEBUGFUNC("fm10k_set_mac_type"); + + if (hw->vendor_id != FM10K_INTEL_VENDOR_ID) { + ERROR_REPORT2(FM10K_ERROR_UNSUPPORTED, + "Unsupported vendor id: %x\n", hw->vendor_id); + return FM10K_ERR_DEVICE_NOT_SUPPORTED; + } + + switch (hw->device_id) { + case FM10K_DEV_ID_PF: +#ifdef BOULDER_RAPIDS_HW + case FM10K_DEV_ID_SDI_FM10420_QDA2: +#endif /* BOULDER_RAPIDS_HW */ +#ifdef ATWOOD_CHANNEL_HW + case FM10K_DEV_ID_SDI_FM10420_DA2: +#endif /* ATWOOD_CHANNEL_HW */ + hw->mac.type = fm10k_mac_pf; + break; + case FM10K_DEV_ID_VF: + hw->mac.type = fm10k_mac_vf; + break; + default: + ret_val = FM10K_ERR_DEVICE_NOT_SUPPORTED; + ERROR_REPORT2(FM10K_ERROR_UNSUPPORTED, + "Unsupported device id: %x\n", + hw->device_id); + break; + } + + DEBUGOUT2("fm10k_set_mac_type found mac: %d, returns: %d\n", + hw->mac.type, ret_val); + + return ret_val; +} + +/** + * fm10k_init_shared_code - Initialize the shared code + * @hw: pointer to hardware structure + * + * This will assign function pointers and assign the MAC type and PHY code. + * Does not touch the hardware. 
This function must be called prior to any + * other function in the shared code. The fm10k_hw structure should be + * memset to 0 prior to calling this function. The following fields in + * hw structure should be filled in prior to calling this function: + * hw_addr, back, device_id, vendor_id, subsystem_device_id, + * subsystem_vendor_id, and revision_id + **/ +s32 fm10k_init_shared_code(struct fm10k_hw *hw) +{ + s32 status; + + DEBUGFUNC("fm10k_init_shared_code"); + + /* Set the mac type */ + fm10k_set_mac_type(hw); + + switch (hw->mac.type) { + case fm10k_mac_pf: + status = fm10k_init_ops_pf(hw); + break; + case fm10k_mac_vf: + status = fm10k_init_ops_vf(hw); + break; + default: + status = FM10K_ERR_DEVICE_NOT_SUPPORTED; + break; + } + + return status; +} + +#define fm10k_call_func(hw, func, params, error) \ + ((func) ? (func params) : (error)) + +/** + * fm10k_reset_hw - Reset the hardware to known good state + * @hw: pointer to hardware structure + * + * This function should return the hardware to a state similar to the + * one it is in after being powered on. + **/ +s32 fm10k_reset_hw(struct fm10k_hw *hw) +{ + return fm10k_call_func(hw, hw->mac.ops.reset_hw, (hw), + FM10K_NOT_IMPLEMENTED); +} + +/** + * fm10k_init_hw - Initialize the hardware + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting and then starting the hardware + **/ +s32 fm10k_init_hw(struct fm10k_hw *hw) +{ + return fm10k_call_func(hw, hw->mac.ops.init_hw, (hw), + FM10K_NOT_IMPLEMENTED); +} + +/** + * fm10k_stop_hw - Prepares hardware to shutdown Rx/Tx + * @hw: pointer to hardware structure + * + * Disables Rx/Tx queues and disables the DMA engine. + **/ +s32 fm10k_stop_hw(struct fm10k_hw *hw) +{ + return fm10k_call_func(hw, hw->mac.ops.stop_hw, (hw), + FM10K_NOT_IMPLEMENTED); +} + +/** + * fm10k_start_hw - Prepares hardware for Rx/Tx + * @hw: pointer to hardware structure + * + * This function sets the flags indicating that the hardware is ready to + * begin operation. + **/ +s32 fm10k_start_hw(struct fm10k_hw *hw) +{ + return fm10k_call_func(hw, hw->mac.ops.start_hw, (hw), + FM10K_NOT_IMPLEMENTED); +} + +/** + * fm10k_get_bus_info - Set PCI bus info + * @hw: pointer to hardware structure + * + * Sets the PCI bus info (speed, width, type) within the fm10k_hw structure + **/ +s32 fm10k_get_bus_info(struct fm10k_hw *hw) +{ + return fm10k_call_func(hw, hw->mac.ops.get_bus_info, (hw), + FM10K_NOT_IMPLEMENTED); +} + +#ifndef NO_IS_SLOT_APPROPRIATE_CHECK +/** + * fm10k_is_slot_appropriate - Indicate appropriate slot for this SKU + * @hw: pointer to hardware structure + * + * Looks at the PCIe bus info to confirm whether or not this slot can support + * the necessary bandwidth for this device. + **/ +bool fm10k_is_slot_appropriate(struct fm10k_hw *hw) +{ + if (hw->mac.ops.is_slot_appropriate) + return hw->mac.ops.is_slot_appropriate(hw); + return true; +} + +#endif +/** + * fm10k_update_vlan - Clear VLAN ID to VLAN filter table + * @hw: pointer to hardware structure + * @vid: VLAN ID to add to table + * @idx: Index indicating VF ID or PF ID in table + * @set: Indicates if this is a set or clear operation + * + * This function adds or removes the corresponding VLAN ID from the VLAN + * filter table for the corresponding function. 
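The fm10k_call_func() macro above guards every wrapper in this file: if the per-MAC ops pointer was filled in by fm10k_init_ops_pf()/fm10k_init_ops_vf() the call goes through, otherwise the wrapper returns FM10K_NOT_IMPLEMENTED. A reduced, self-contained sketch of that dispatch pattern follows; the structure and values are illustrative, not the real fm10k types.

#include <stdio.h>

typedef int s32;
#define NOT_IMPLEMENTED (-1)

struct mac_ops {
	s32 (*update_vlan)(void *hw, unsigned int vid, int set);
	/* further operations would sit alongside and may be left NULL */
};

#define call_func(func, params, error) ((func) ? (func params) : (error))

static s32 pf_update_vlan(void *hw, unsigned int vid, int set)
{
	(void)hw;
	printf("%s vlan %u\n", set ? "add" : "del", vid);
	return 0;
}

int main(void)
{
	struct mac_ops pf_ops = { .update_vlan = pf_update_vlan };
	struct mac_ops empty_ops = { 0 };

	/* populated pointer: dispatches to the implementation */
	s32 a = call_func(pf_ops.update_vlan, (NULL, 100, 1), NOT_IMPLEMENTED);
	/* missing pointer: falls back to the error code, as in the wrappers above */
	s32 b = call_func(empty_ops.update_vlan, (NULL, 100, 1), NOT_IMPLEMENTED);
	printf("a=%d b=%d\n", a, b);
	return 0;
}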
+ **/ +s32 fm10k_update_vlan(struct fm10k_hw *hw, u32 vid, u8 idx, bool set) +{ + return fm10k_call_func(hw, hw->mac.ops.update_vlan, (hw, vid, idx, set), + FM10K_NOT_IMPLEMENTED); +} + +/** + * fm10k_read_mac_addr - Reads MAC address + * @hw: pointer to hardware structure + * + * Reads the MAC address out of the interface and stores it in the HW + * structures. + **/ +s32 fm10k_read_mac_addr(struct fm10k_hw *hw) +{ + return fm10k_call_func(hw, hw->mac.ops.read_mac_addr, (hw), + FM10K_NOT_IMPLEMENTED); +} + +/** + * fm10k_update_hw_stats - Update hw statistics + * @hw: pointer to hardware structure + * + * This function updates statistics that are related to hardware. + * */ +void fm10k_update_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats) +{ + if (hw->mac.ops.update_hw_stats) + hw->mac.ops.update_hw_stats(hw, stats); +} + +/** + * fm10k_rebind_hw_stats - Reset base for hw statistics + * @hw: pointer to hardware structure + * + * This function resets the base for statistics that are related to hardware. + * */ +void fm10k_rebind_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats) +{ + if (hw->mac.ops.rebind_hw_stats) + hw->mac.ops.rebind_hw_stats(hw, stats); +} + +/** + * fm10k_configure_dglort_map - Configures GLORT entry and queues + * @hw: pointer to hardware structure + * @dglort: pointer to dglort configuration structure + * + * Reads the configuration structure contained in dglort_cfg and uses + * that information to then populate a DGLORTMAP/DEC entry and the queues + * to which it has been assigned. + **/ +s32 fm10k_configure_dglort_map(struct fm10k_hw *hw, + struct fm10k_dglort_cfg *dglort) +{ + return fm10k_call_func(hw, hw->mac.ops.configure_dglort_map, + (hw, dglort), FM10K_NOT_IMPLEMENTED); +} + +/** + * fm10k_set_dma_mask - Configures PhyAddrSpace to limit DMA to system + * @hw: pointer to hardware structure + * @dma_mask: 64 bit DMA mask required for platform + * + * This function configures the endpoint to limit the access to memory + * beyond what is physically in the system. + **/ +void fm10k_set_dma_mask(struct fm10k_hw *hw, u64 dma_mask) +{ + if (hw->mac.ops.set_dma_mask) + hw->mac.ops.set_dma_mask(hw, dma_mask); +} + +/** + * fm10k_get_fault - Record a fault in one of the interface units + * @hw: pointer to hardware structure + * @type: pointer to fault type register offset + * @fault: pointer to memory location to record the fault + * + * Record the fault register contents to the fault data structure and + * clear the entry from the register. + * + * Returns ERR_PARAM if invalid register is specified or no error is present. 
+ **/ +s32 fm10k_get_fault(struct fm10k_hw *hw, int type, struct fm10k_fault *fault) +{ + return fm10k_call_func(hw, hw->mac.ops.get_fault, (hw, type, fault), + FM10K_NOT_IMPLEMENTED); +} + +/** + * fm10k_update_uc_addr - Update device unicast address + * @hw: pointer to the HW structure + * @lport: logical port ID to update - unused + * @mac: MAC address to add/remove from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * @flags: flags field to indicate add and secure - unused + * + * This function is used to add or remove unicast MAC addresses + **/ +s32 fm10k_update_uc_addr(struct fm10k_hw *hw, u16 lport, + const u8 *mac, u16 vid, bool add, u8 flags) +{ + return fm10k_call_func(hw, hw->mac.ops.update_uc_addr, + (hw, lport, mac, vid, add, flags), + FM10K_NOT_IMPLEMENTED); +} + +/** + * fm10k_update_mc_addr - Update device multicast address + * @hw: pointer to the HW structure + * @lport: logical port ID to update - unused + * @mac: MAC address to add/remove from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * + * This function is used to add or remove multicast MAC addresses + **/ +s32 fm10k_update_mc_addr(struct fm10k_hw *hw, u16 lport, + const u8 *mac, u16 vid, bool add) +{ + return fm10k_call_func(hw, hw->mac.ops.update_mc_addr, + (hw, lport, mac, vid, add), + FM10K_NOT_IMPLEMENTED); +} + +/** + * fm10k_adjust_systime - Adjust systime frequency + * @hw: pointer to hardware structure + * @ppb: adjustment rate in parts per billion + * + * This function is meant to update the frequency of the clock represented + * by the SYSTIME register. + **/ +s32 fm10k_adjust_systime(struct fm10k_hw *hw, s32 ppb) +{ + return fm10k_call_func(hw, hw->mac.ops.adjust_systime, + (hw, ppb), FM10K_NOT_IMPLEMENTED); +} + +/** + * fm10k_notify_offset - Notify switch of change in PTP offset + * @hw: pointer to hardware structure + * @offset: 64bit unsigned offset from hardware SYSTIME value + * + * This function is meant to notify switch of change in the PTP offset for + * the hardware SYSTIME registers. + **/ +s32 fm10k_notify_offset(struct fm10k_hw *hw, u64 offset) +{ + return fm10k_call_func(hw, hw->mac.ops.notify_offset, + (hw, offset), FM10K_NOT_IMPLEMENTED); +} diff --git a/drivers/net/fm10k/base/fm10k_api.h b/drivers/net/fm10k/base/fm10k_api.h new file mode 100644 index 00000000..2ab31496 --- /dev/null +++ b/drivers/net/fm10k/base/fm10k_api.h @@ -0,0 +1,64 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
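The comment on fm10k_init_shared_code() earlier in this file spells out the expected bring-up order: zero the fm10k_hw structure, fill in the PCI identity and register mapping, then initialize the shared code before any other call. The sketch below, which is not part of the base driver, strings the wrappers together in that order; how BAR 0 is mapped and which of the optional identity fields are required is platform-specific and treated here as an assumption.

#include <string.h>
#include "fm10k_api.h"

/* bar0: BAR 0 of the device as mapped by the caller; ids read from PCI config space */
static s32 bring_up_fm10k(struct fm10k_hw *hw, void *bar0,
			  u16 vendor_id, u16 device_id)
{
	s32 err;

	memset(hw, 0, sizeof(*hw));	/* the shared code expects a zeroed structure */
	hw->hw_addr = bar0;
	hw->vendor_id = vendor_id;	/* e.g. FM10K_INTEL_VENDOR_ID */
	hw->device_id = device_id;	/* e.g. FM10K_DEV_ID_PF */
	/* the subsystem ids, revision_id and osdep back-pointer listed in the
	 * comment above are omitted here for brevity */

	err = fm10k_init_shared_code(hw);	/* selects PF or VF ops from the ids */
	if (err)
		return err;

	err = fm10k_reset_hw(hw);
	if (err)
		return err;

	err = fm10k_init_hw(hw);
	if (err)
		return err;

	return fm10k_start_hw(hw);
}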
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _FM10K_API_H_ +#define _FM10K_API_H_ + +#include "fm10k_pf.h" +#include "fm10k_vf.h" + +s32 fm10k_set_mac_type(struct fm10k_hw *hw); +s32 fm10k_reset_hw(struct fm10k_hw *hw); +s32 fm10k_init_hw(struct fm10k_hw *hw); +s32 fm10k_stop_hw(struct fm10k_hw *hw); +s32 fm10k_start_hw(struct fm10k_hw *hw); +s32 fm10k_init_shared_code(struct fm10k_hw *hw); +s32 fm10k_get_bus_info(struct fm10k_hw *hw); +#ifndef NO_IS_SLOT_APPROPRIATE_CHECK +bool fm10k_is_slot_appropriate(struct fm10k_hw *hw); +#endif +s32 fm10k_update_vlan(struct fm10k_hw *hw, u32 vid, u8 idx, bool set); +s32 fm10k_read_mac_addr(struct fm10k_hw *hw); +void fm10k_update_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats); +void fm10k_rebind_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats); +s32 fm10k_configure_dglort_map(struct fm10k_hw *hw, + struct fm10k_dglort_cfg *dglort); +void fm10k_set_dma_mask(struct fm10k_hw *hw, u64 dma_mask); +s32 fm10k_get_fault(struct fm10k_hw *hw, int type, struct fm10k_fault *fault); +s32 fm10k_update_uc_addr(struct fm10k_hw *hw, u16 lport, + const u8 *mac, u16 vid, bool add, u8 flags); +s32 fm10k_update_mc_addr(struct fm10k_hw *hw, u16 lport, + const u8 *mac, u16 vid, bool add); +s32 fm10k_adjust_systime(struct fm10k_hw *hw, s32 ppb); +s32 fm10k_notify_offset(struct fm10k_hw *hw, u64 offset); +#endif /* _FM10K_API_H_ */ diff --git a/drivers/net/fm10k/base/fm10k_common.c b/drivers/net/fm10k/base/fm10k_common.c new file mode 100644 index 00000000..a90d2f0b --- /dev/null +++ b/drivers/net/fm10k/base/fm10k_common.c @@ -0,0 +1,572 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#include "fm10k_common.h" + +/** + * fm10k_get_bus_info_generic - Generic set PCI bus info + * @hw: pointer to hardware structure + * + * Gets the PCI bus info (speed, width, type) then calls helper function to + * store this data within the fm10k_hw structure. + **/ +STATIC s32 fm10k_get_bus_info_generic(struct fm10k_hw *hw) +{ + u16 link_cap, link_status, device_cap, device_control; + + DEBUGFUNC("fm10k_get_bus_info_generic"); + + /* Get the maximum link width and speed from PCIe config space */ + link_cap = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_LINK_CAP); + + switch (link_cap & FM10K_PCIE_LINK_WIDTH) { + case FM10K_PCIE_LINK_WIDTH_1: + hw->bus_caps.width = fm10k_bus_width_pcie_x1; + break; + case FM10K_PCIE_LINK_WIDTH_2: + hw->bus_caps.width = fm10k_bus_width_pcie_x2; + break; + case FM10K_PCIE_LINK_WIDTH_4: + hw->bus_caps.width = fm10k_bus_width_pcie_x4; + break; + case FM10K_PCIE_LINK_WIDTH_8: + hw->bus_caps.width = fm10k_bus_width_pcie_x8; + break; + default: + hw->bus_caps.width = fm10k_bus_width_unknown; + break; + } + + switch (link_cap & FM10K_PCIE_LINK_SPEED) { + case FM10K_PCIE_LINK_SPEED_2500: + hw->bus_caps.speed = fm10k_bus_speed_2500; + break; + case FM10K_PCIE_LINK_SPEED_5000: + hw->bus_caps.speed = fm10k_bus_speed_5000; + break; + case FM10K_PCIE_LINK_SPEED_8000: + hw->bus_caps.speed = fm10k_bus_speed_8000; + break; + default: + hw->bus_caps.speed = fm10k_bus_speed_unknown; + break; + } + + /* Get the PCIe maximum payload size for the PCIe function */ + device_cap = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_DEV_CAP); + + switch (device_cap & FM10K_PCIE_DEV_CAP_PAYLOAD) { + case FM10K_PCIE_DEV_CAP_PAYLOAD_128: + hw->bus_caps.payload = fm10k_bus_payload_128; + break; + case FM10K_PCIE_DEV_CAP_PAYLOAD_256: + hw->bus_caps.payload = fm10k_bus_payload_256; + break; + case FM10K_PCIE_DEV_CAP_PAYLOAD_512: + hw->bus_caps.payload = fm10k_bus_payload_512; + break; + default: + hw->bus_caps.payload = fm10k_bus_payload_unknown; + break; + } + + /* Get the negotiated link width and speed from PCIe config space */ + link_status = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_LINK_STATUS); + + switch (link_status & FM10K_PCIE_LINK_WIDTH) { + case FM10K_PCIE_LINK_WIDTH_1: + hw->bus.width = fm10k_bus_width_pcie_x1; + break; + case FM10K_PCIE_LINK_WIDTH_2: + hw->bus.width = fm10k_bus_width_pcie_x2; + break; + case FM10K_PCIE_LINK_WIDTH_4: + hw->bus.width = fm10k_bus_width_pcie_x4; + break; + case FM10K_PCIE_LINK_WIDTH_8: + hw->bus.width = fm10k_bus_width_pcie_x8; + break; + default: + hw->bus.width = fm10k_bus_width_unknown; + break; + } + + switch (link_status & FM10K_PCIE_LINK_SPEED) { + case FM10K_PCIE_LINK_SPEED_2500: + hw->bus.speed = fm10k_bus_speed_2500; + break; + case FM10K_PCIE_LINK_SPEED_5000: + hw->bus.speed = fm10k_bus_speed_5000; + break; + case FM10K_PCIE_LINK_SPEED_8000: + hw->bus.speed = fm10k_bus_speed_8000; + break; + default: + hw->bus.speed = fm10k_bus_speed_unknown; + break; + } + + /* Get the 
negotiated PCIe maximum payload size for the PCIe function */ + device_control = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_DEV_CTRL); + + switch (device_control & FM10K_PCIE_DEV_CTRL_PAYLOAD) { + case FM10K_PCIE_DEV_CTRL_PAYLOAD_128: + hw->bus.payload = fm10k_bus_payload_128; + break; + case FM10K_PCIE_DEV_CTRL_PAYLOAD_256: + hw->bus.payload = fm10k_bus_payload_256; + break; + case FM10K_PCIE_DEV_CTRL_PAYLOAD_512: + hw->bus.payload = fm10k_bus_payload_512; + break; + default: + hw->bus.payload = fm10k_bus_payload_unknown; + break; + } + + return FM10K_SUCCESS; +} + +u16 fm10k_get_pcie_msix_count_generic(struct fm10k_hw *hw) +{ + u16 msix_count; + + DEBUGFUNC("fm10k_get_pcie_msix_count_generic"); + + /* read in value from MSI-X capability register */ + msix_count = FM10K_READ_PCI_WORD(hw, FM10K_PCI_MSIX_MSG_CTRL); + msix_count &= FM10K_PCI_MSIX_MSG_CTRL_TBL_SZ_MASK; + + /* MSI-X count is zero-based in HW */ + msix_count++; + + if (msix_count > FM10K_MAX_MSIX_VECTORS) + msix_count = FM10K_MAX_MSIX_VECTORS; + + return msix_count; +} + +/** + * fm10k_init_ops_generic - Inits function ptrs + * @hw: pointer to the hardware structure + * + * Initialize the function pointers. + **/ +s32 fm10k_init_ops_generic(struct fm10k_hw *hw) +{ + struct fm10k_mac_info *mac = &hw->mac; + + DEBUGFUNC("fm10k_init_ops_generic"); + + /* MAC */ + mac->ops.get_bus_info = &fm10k_get_bus_info_generic; + + /* initialize GLORT state to avoid any false hits */ + mac->dglort_map = FM10K_DGLORTMAP_NONE; + + return FM10K_SUCCESS; +} + +/** + * fm10k_start_hw_generic - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * This function sets the Tx ready flag to indicate that the Tx path has + * been initialized. + **/ +s32 fm10k_start_hw_generic(struct fm10k_hw *hw) +{ + DEBUGFUNC("fm10k_start_hw_generic"); + + /* set flag indicating we are beginning Tx */ + hw->mac.tx_ready = true; + + return FM10K_SUCCESS; +} + +/** + * fm10k_disable_queues_generic - Stop Tx/Rx queues + * @hw: pointer to hardware structure + * @q_cnt: number of queues to be disabled + * + **/ +s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt) +{ + u32 reg; + u16 i, time; + + DEBUGFUNC("fm10k_disable_queues_generic"); + + /* clear tx_ready to prevent any false hits for reset */ + hw->mac.tx_ready = false; + + /* clear the enable bit for all rings */ + for (i = 0; i < q_cnt; i++) { + reg = FM10K_READ_REG(hw, FM10K_TXDCTL(i)); + FM10K_WRITE_REG(hw, FM10K_TXDCTL(i), + reg & ~FM10K_TXDCTL_ENABLE); + reg = FM10K_READ_REG(hw, FM10K_RXQCTL(i)); + FM10K_WRITE_REG(hw, FM10K_RXQCTL(i), + reg & ~FM10K_RXQCTL_ENABLE); + } + + FM10K_WRITE_FLUSH(hw); + usec_delay(1); + + /* loop through all queues to verify that they are all disabled */ + for (i = 0, time = FM10K_QUEUE_DISABLE_TIMEOUT; time;) { + /* if we are at end of rings all rings are disabled */ + if (i == q_cnt) + return FM10K_SUCCESS; + + /* if queue enables cleared, then move to next ring pair */ + reg = FM10K_READ_REG(hw, FM10K_TXDCTL(i)); + if (!~reg || !(reg & FM10K_TXDCTL_ENABLE)) { + reg = FM10K_READ_REG(hw, FM10K_RXQCTL(i)); + if (!~reg || !(reg & FM10K_RXQCTL_ENABLE)) { + i++; + continue; + } + } + + /* decrement time and wait 1 usec */ + time--; + if (time) + usec_delay(1); + } + + return FM10K_ERR_REQUESTS_PENDING; +} + +/** + * fm10k_stop_hw_generic - Stop Tx/Rx units + * @hw: pointer to hardware structure + * + **/ +s32 fm10k_stop_hw_generic(struct fm10k_hw *hw) +{ + DEBUGFUNC("fm10k_stop_hw_generic"); + + return fm10k_disable_queues_generic(hw, hw->mac.max_queues); +} + 
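fm10k_disable_queues_generic() above clears the Tx/Rx enable bits and then polls each ring with a bounded timeout before declaring the queues stopped. A small self-contained sketch of that disable-and-poll idiom, with the hardware registers replaced by a plain array (names and limits here are illustrative):

/* Sketch only: request disable, then poll with a bounded retry budget until
 * every ring reads back as disabled. Registers are simulated with an array.
 */
#include <stdbool.h>
#include <stdio.h>

#define ENABLE_BIT   0x1u
#define POLL_LIMIT   100

static unsigned int regs[4] = { ENABLE_BIT, ENABLE_BIT, ENABLE_BIT, ENABLE_BIT };

static bool disable_rings(unsigned int count)
{
	unsigned int i, tries;

	for (i = 0; i < count; i++)
		regs[i] &= ~ENABLE_BIT;        /* request disable */

	/* walk the rings until all are clear or the retry budget runs out */
	for (i = 0, tries = POLL_LIMIT; tries; ) {
		if (i == count)
			return true;           /* everything disabled */
		if (!(regs[i] & ENABLE_BIT)) {
			i++;                   /* this ring is done, next one */
			continue;
		}
		tries--;                       /* still busy: burn one retry */
	}
	return false;                          /* timed out with work pending */
}

int main(void)
{
	printf("disabled: %s\n", disable_rings(4) ? "yes" : "no");
	return 0;
}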
+/** + * fm10k_read_hw_stats_32b - Reads value of 32-bit registers + * @hw: pointer to the hardware structure + * @addr: address of register containing a 32-bit value + * + * Function reads the content of the register and returns the delta + * between the base and the current value. + * **/ +u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr, + struct fm10k_hw_stat *stat) +{ + u32 delta = FM10K_READ_REG(hw, addr) - stat->base_l; + + DEBUGFUNC("fm10k_read_hw_stats_32b"); + + if (FM10K_REMOVED(hw->hw_addr)) + stat->base_h = 0; + + return delta; +} + +/** + * fm10k_read_hw_stats_48b - Reads value of 48-bit registers + * @hw: pointer to the hardware structure + * @addr: address of register containing the lower 32-bit value + * + * Function reads the content of 2 registers, combined to represent a 48-bit + * statistical value. Extra processing is required to handle overflowing. + * Finally, a delta value is returned representing the difference between the + * values stored in registers and values stored in the statistic counters. + * **/ +STATIC u64 fm10k_read_hw_stats_48b(struct fm10k_hw *hw, u32 addr, + struct fm10k_hw_stat *stat) +{ + u32 count_l; + u32 count_h; + u32 count_tmp; + u64 delta; + + DEBUGFUNC("fm10k_read_hw_stats_48b"); + + count_h = FM10K_READ_REG(hw, addr + 1); + + /* Check for overflow */ + do { + count_tmp = count_h; + count_l = FM10K_READ_REG(hw, addr); + count_h = FM10K_READ_REG(hw, addr + 1); + } while (count_h != count_tmp); + + delta = ((u64)(count_h - stat->base_h) << 32) + count_l; + delta -= stat->base_l; + + return delta & FM10K_48_BIT_MASK; +} + +/** + * fm10k_update_hw_base_48b - Updates 48-bit statistic base value + * @stat: pointer to the hardware statistic structure + * @delta: value to be updated into the hardware statistic structure + * + * Function receives a value and determines if an update is required based on + * a delta calculation. Only the base value will be updated. + **/ +STATIC void fm10k_update_hw_base_48b(struct fm10k_hw_stat *stat, u64 delta) +{ + DEBUGFUNC("fm10k_update_hw_base_48b"); + + if (!delta) + return; + + /* update lower 32 bits */ + delta += stat->base_l; + stat->base_l = (u32)delta; + + /* update upper 32 bits */ + stat->base_h += (u32)(delta >> 32); +} + +/** + * fm10k_update_hw_stats_tx_q - Updates TX queue statistics counters + * @hw: pointer to the hardware structure + * @q: pointer to the ring of hardware statistics queue + * @idx: index pointing to the start of the ring iteration + * + * Function updates the TX queue statistics counters that are related to the + * hardware. 
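fm10k_read_hw_stats_48b() reads the high word, then the low word, then the high word again, retrying until the two high-word samples match, so a rollover between the reads cannot produce a torn 48-bit value. A compilable sketch of that re-read loop against a simulated register pair (the QBTC_L/QBTC_H mention is only a stand-in, not a claim about offsets):

/* Sketch only: consistent snapshot of a counter split across two 32-bit
 * registers, via the high/low/high re-read technique used above.
 */
#include <stdint.h>
#include <stdio.h>

static volatile uint32_t cnt_lo, cnt_hi;   /* simulated low/high halves */

static uint64_t read_48b(void)
{
	uint32_t lo, hi, hi_prev;

	hi = cnt_hi;
	do {                       /* re-read if the high word rolled over */
		hi_prev = hi;
		lo = cnt_lo;
		hi = cnt_hi;
	} while (hi != hi_prev);

	return (((uint64_t)hi << 32) | lo) & 0xFFFFFFFFFFFFULL; /* keep 48 bits */
}

int main(void)
{
	cnt_hi = 0x1234;
	cnt_lo = 0x89ABCDEF;
	printf("0x%llx\n", (unsigned long long)read_48b());
	return 0;
}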
+ **/ +STATIC void fm10k_update_hw_stats_tx_q(struct fm10k_hw *hw, + struct fm10k_hw_stats_q *q, + u32 idx) +{ + u32 id_tx, id_tx_prev, tx_packets; + u64 tx_bytes = 0; + + DEBUGFUNC("fm10k_update_hw_stats_tx_q"); + + /* Retrieve TX Owner Data */ + id_tx = FM10K_READ_REG(hw, FM10K_TXQCTL(idx)); + + /* Process TX Ring */ + do { + tx_packets = fm10k_read_hw_stats_32b(hw, FM10K_QPTC(idx), + &q->tx_packets); + + if (tx_packets) + tx_bytes = fm10k_read_hw_stats_48b(hw, + FM10K_QBTC_L(idx), + &q->tx_bytes); + + /* Re-Check Owner Data */ + id_tx_prev = id_tx; + id_tx = FM10K_READ_REG(hw, FM10K_TXQCTL(idx)); + } while ((id_tx ^ id_tx_prev) & FM10K_TXQCTL_ID_MASK); + + /* drop non-ID bits and set VALID ID bit */ + id_tx &= FM10K_TXQCTL_ID_MASK; + id_tx |= FM10K_STAT_VALID; + + /* update packet counts */ + if (q->tx_stats_idx == id_tx) { + q->tx_packets.count += tx_packets; + q->tx_bytes.count += tx_bytes; + } + + /* update bases and record ID */ + fm10k_update_hw_base_32b(&q->tx_packets, tx_packets); + fm10k_update_hw_base_48b(&q->tx_bytes, tx_bytes); + + q->tx_stats_idx = id_tx; +} + +/** + * fm10k_update_hw_stats_rx_q - Updates RX queue statistics counters + * @hw: pointer to the hardware structure + * @q: pointer to the ring of hardware statistics queue + * @idx: index pointing to the start of the ring iteration + * + * Function updates the RX queue statistics counters that are related to the + * hardware. + **/ +STATIC void fm10k_update_hw_stats_rx_q(struct fm10k_hw *hw, + struct fm10k_hw_stats_q *q, + u32 idx) +{ + u32 id_rx, id_rx_prev, rx_packets, rx_drops; + u64 rx_bytes = 0; + + DEBUGFUNC("fm10k_update_hw_stats_rx_q"); + + /* Retrieve RX Owner Data */ + id_rx = FM10K_READ_REG(hw, FM10K_RXQCTL(idx)); + + /* Process RX Ring */ + do { + rx_drops = fm10k_read_hw_stats_32b(hw, FM10K_QPRDC(idx), + &q->rx_drops); + + rx_packets = fm10k_read_hw_stats_32b(hw, FM10K_QPRC(idx), + &q->rx_packets); + + if (rx_packets) + rx_bytes = fm10k_read_hw_stats_48b(hw, + FM10K_QBRC_L(idx), + &q->rx_bytes); + + /* Re-Check Owner Data */ + id_rx_prev = id_rx; + id_rx = FM10K_READ_REG(hw, FM10K_RXQCTL(idx)); + } while ((id_rx ^ id_rx_prev) & FM10K_RXQCTL_ID_MASK); + + /* drop non-ID bits and set VALID ID bit */ + id_rx &= FM10K_RXQCTL_ID_MASK; + id_rx |= FM10K_STAT_VALID; + + /* update packet counts */ + if (q->rx_stats_idx == id_rx) { + q->rx_drops.count += rx_drops; + q->rx_packets.count += rx_packets; + q->rx_bytes.count += rx_bytes; + } + + /* update bases and record ID */ + fm10k_update_hw_base_32b(&q->rx_drops, rx_drops); + fm10k_update_hw_base_32b(&q->rx_packets, rx_packets); + fm10k_update_hw_base_48b(&q->rx_bytes, rx_bytes); + + q->rx_stats_idx = id_rx; +} + +/** + * fm10k_update_hw_stats_q - Updates queue statistics counters + * @hw: pointer to the hardware structure + * @q: pointer to the ring of hardware statistics queue + * @idx: index pointing to the start of the ring iteration + * @count: number of queues to iterate over + * + * Function updates the queue statistics counters that are related to the + * hardware. 
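The per-queue updaters bracket their counter reads with the queue-ownership register: sample TXQCTL/RXQCTL, read the counters, then re-check the ID bits and retry if ownership changed mid-read, a seqlock-like idea. A minimal sketch of that revalidation loop with simulated registers (the field layout is made up for the example):

/* Sketch only: resample the counters whenever the owner-ID bits change
 * underneath the reader, so packets are never attributed to the wrong owner.
 */
#include <stdint.h>
#include <stdio.h>

#define ID_MASK 0xFFFF0000u

static volatile uint32_t qctl = 0x00010000;  /* owner id in the upper bits */
static volatile uint32_t qpkts = 42;

static uint32_t read_queue_packets(void)
{
	uint32_t id, id_prev, pkts;

	id = qctl;
	do {
		id_prev = id;
		pkts = qpkts;          /* counters sampled under this owner */
		id = qctl;             /* owner changed? then resample */
	} while ((id ^ id_prev) & ID_MASK);

	return pkts;
}

int main(void)
{
	printf("packets: %u\n", (unsigned)read_queue_packets());
	return 0;
}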
+ **/ +void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q, + u32 idx, u32 count) +{ + u32 i; + + DEBUGFUNC("fm10k_update_hw_stats_q"); + + for (i = 0; i < count; i++, idx++, q++) { + fm10k_update_hw_stats_tx_q(hw, q, idx); + fm10k_update_hw_stats_rx_q(hw, q, idx); + } +} + +/** + * fm10k_unbind_hw_stats_q - Unbind the queue counters from their queues + * @hw: pointer to the hardware structure + * @q: pointer to the ring of hardware statistics queue + * @idx: index pointing to the start of the ring iteration + * @count: number of queues to iterate over + * + * Function invalidates the index values for the queues so any updates that + * may have happened are ignored and the base for the queue stats is reset. + **/ +void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count) +{ + u32 i; + + for (i = 0; i < count; i++, idx++, q++) { + q->rx_stats_idx = 0; + q->tx_stats_idx = 0; + } +} + +/** + * fm10k_get_host_state_generic - Returns the state of the host + * @hw: pointer to hardware structure + * @host_ready: pointer to boolean value that will record host state + * + * This function will check the health of the mailbox and Tx queue 0 + * in order to determine if we should report that the link is up or not. + **/ +s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + struct fm10k_mac_info *mac = &hw->mac; + s32 ret_val = FM10K_SUCCESS; + u32 txdctl = FM10K_READ_REG(hw, FM10K_TXDCTL(0)); + + DEBUGFUNC("fm10k_get_host_state_generic"); + + /* process upstream mailbox in case interrupts were disabled */ + mbx->ops.process(hw, mbx); + + /* If Tx is no longer enabled link should come down */ + if (!(~txdctl) || !(txdctl & FM10K_TXDCTL_ENABLE)) + mac->get_host_state = true; + + /* exit if not checking for link, or link cannot be changed */ + if (!mac->get_host_state || !(~txdctl)) + goto out; + + /* if we somehow dropped the Tx enable we should reset */ + if (hw->mac.tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) { + ret_val = FM10K_ERR_RESET_REQUESTED; + goto out; + } + + /* if Mailbox timed out we should request reset */ + if (!mbx->timeout) { + ret_val = FM10K_ERR_RESET_REQUESTED; + goto out; + } + + /* verify Mailbox is still valid */ + if (!mbx->ops.tx_ready(mbx, FM10K_VFMBX_MSG_MTU)) + goto out; + + /* interface cannot receive traffic without logical ports */ + if (mac->dglort_map == FM10K_DGLORTMAP_NONE) + goto out; + + /* if we passed all the tests above then the switch is ready and we no + * longer need to check for link + */ + mac->get_host_state = false; + +out: + *host_ready = !mac->get_host_state; + return ret_val; +} diff --git a/drivers/net/fm10k/base/fm10k_common.h b/drivers/net/fm10k/base/fm10k_common.h new file mode 100644 index 00000000..45fbbc0b --- /dev/null +++ b/drivers/net/fm10k/base/fm10k_common.h @@ -0,0 +1,52 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. 
Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _FM10K_COMMON_H_ +#define _FM10K_COMMON_H_ + +#include "fm10k_type.h" + +u16 fm10k_get_pcie_msix_count_generic(struct fm10k_hw *hw); +s32 fm10k_init_ops_generic(struct fm10k_hw *hw); +s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt); +s32 fm10k_start_hw_generic(struct fm10k_hw *hw); +s32 fm10k_stop_hw_generic(struct fm10k_hw *hw); +u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr, + struct fm10k_hw_stat *stat); +#define fm10k_update_hw_base_32b(stat, delta) ((stat)->base_l += (delta)) +void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q, + u32 idx, u32 count); +#define fm10k_unbind_hw_stats_32b(s) ((s)->base_h = 0) +void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count); +s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready); +#endif /* _FM10K_COMMON_H_ */ diff --git a/drivers/net/fm10k/base/fm10k_mbx.c b/drivers/net/fm10k/base/fm10k_mbx.c new file mode 100644 index 00000000..2e704340 --- /dev/null +++ b/drivers/net/fm10k/base/fm10k_mbx.c @@ -0,0 +1,2250 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#include "fm10k_common.h" + +/** + * fm10k_fifo_init - Initialize a message FIFO + * @fifo: pointer to FIFO + * @buffer: pointer to memory to be used to store FIFO + * @size: maximum message size to store in FIFO, must be 2^n - 1 + **/ +STATIC void fm10k_fifo_init(struct fm10k_mbx_fifo *fifo, u32 *buffer, u16 size) +{ + fifo->buffer = buffer; + fifo->size = size; + fifo->head = 0; + fifo->tail = 0; +} + +/** + * fm10k_fifo_used - Retrieve used space in FIFO + * @fifo: pointer to FIFO + * + * This function returns the number of DWORDs used in the FIFO + **/ +STATIC u16 fm10k_fifo_used(struct fm10k_mbx_fifo *fifo) +{ + return fifo->tail - fifo->head; +} + +/** + * fm10k_fifo_unused - Retrieve unused space in FIFO + * @fifo: pointer to FIFO + * + * This function returns the number of unused DWORDs in the FIFO + **/ +STATIC u16 fm10k_fifo_unused(struct fm10k_mbx_fifo *fifo) +{ + return fifo->size + fifo->head - fifo->tail; +} + +/** + * fm10k_fifo_empty - Test to verify if FIFO is empty + * @fifo: pointer to FIFO + * + * This function returns true if the FIFO is empty, else false + **/ +STATIC bool fm10k_fifo_empty(struct fm10k_mbx_fifo *fifo) +{ + return fifo->head == fifo->tail; +} + +/** + * fm10k_fifo_head_offset - returns indices of head with given offset + * @fifo: pointer to FIFO + * @offset: offset to add to head + * + * This function returns the indices into the FIFO based on head + offset + **/ +STATIC u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset) +{ + return (fifo->head + offset) & (fifo->size - 1); +} + +/** + * fm10k_fifo_tail_offset - returns indices of tail with given offset + * @fifo: pointer to FIFO + * @offset: offset to add to tail + * + * This function returns the indices into the FIFO based on tail + offset + **/ +STATIC u16 fm10k_fifo_tail_offset(struct fm10k_mbx_fifo *fifo, u16 offset) +{ + return (fifo->tail + offset) & (fifo->size - 1); +} + +/** + * fm10k_fifo_head_len - Retrieve length of first message in FIFO + * @fifo: pointer to FIFO + * + * This function returns the size of the first message in the FIFO + **/ +STATIC u16 fm10k_fifo_head_len(struct fm10k_mbx_fifo *fifo) +{ + u32 *head = fifo->buffer + fm10k_fifo_head_offset(fifo, 0); + + /* verify there is at least 1 DWORD in the fifo so *head is valid */ + if (fm10k_fifo_empty(fifo)) + return 0; + + /* retieve the message length */ + return FM10K_TLV_DWORD_LEN(*head); +} + +/** + * fm10k_fifo_head_drop - Drop the first message in FIFO + * @fifo: pointer to FIFO + * + * This function returns the size of the message dropped from the FIFO + **/ +STATIC u16 fm10k_fifo_head_drop(struct fm10k_mbx_fifo *fifo) +{ + u16 len = fm10k_fifo_head_len(fifo); + + /* update head so it is at the start of next frame */ + fifo->head += len; + + return len; +} + +/** + * fm10k_fifo_drop_all - Drop all messages in FIFO + * @fifo: pointer to FIFO + * + * This function resets the head pointer to drop all messages in the 
FIFO and + * ensure the FIFO is empty. + **/ +STATIC void fm10k_fifo_drop_all(struct fm10k_mbx_fifo *fifo) +{ + fifo->head = fifo->tail; +} + +/** + * fm10k_mbx_index_len - Convert a head/tail index into a length value + * @mbx: pointer to mailbox + * @head: head index + * @tail: head index + * + * This function takes the head and tail index and determines the length + * of the data indicated by this pair. + **/ +STATIC u16 fm10k_mbx_index_len(struct fm10k_mbx_info *mbx, u16 head, u16 tail) +{ + u16 len = tail - head; + + /* we wrapped so subtract 2, one for index 0, one for all 1s index */ + if (len > tail) + len -= 2; + + return len & ((mbx->mbmem_len << 1) - 1); +} + +/** + * fm10k_mbx_tail_add - Determine new tail value with added offset + * @mbx: pointer to mailbox + * @offset: length to add to tail offset + * + * This function takes the local tail index and recomputes it for + * a given length added as an offset. + **/ +STATIC u16 fm10k_mbx_tail_add(struct fm10k_mbx_info *mbx, u16 offset) +{ + u16 tail = (mbx->tail + offset + 1) & ((mbx->mbmem_len << 1) - 1); + + /* add/sub 1 because we cannot have offset 0 or all 1s */ + return (tail > mbx->tail) ? --tail : ++tail; +} + +/** + * fm10k_mbx_tail_sub - Determine new tail value with subtracted offset + * @mbx: pointer to mailbox + * @offset: length to add to tail offset + * + * This function takes the local tail index and recomputes it for + * a given length added as an offset. + **/ +STATIC u16 fm10k_mbx_tail_sub(struct fm10k_mbx_info *mbx, u16 offset) +{ + u16 tail = (mbx->tail - offset - 1) & ((mbx->mbmem_len << 1) - 1); + + /* sub/add 1 because we cannot have offset 0 or all 1s */ + return (tail < mbx->tail) ? ++tail : --tail; +} + +/** + * fm10k_mbx_head_add - Determine new head value with added offset + * @mbx: pointer to mailbox + * @offset: length to add to head offset + * + * This function takes the local head index and recomputes it for + * a given length added as an offset. + **/ +STATIC u16 fm10k_mbx_head_add(struct fm10k_mbx_info *mbx, u16 offset) +{ + u16 head = (mbx->head + offset + 1) & ((mbx->mbmem_len << 1) - 1); + + /* add/sub 1 because we cannot have offset 0 or all 1s */ + return (head > mbx->head) ? --head : ++head; +} + +/** + * fm10k_mbx_head_sub - Determine new head value with subtracted offset + * @mbx: pointer to mailbox + * @offset: length to add to head offset + * + * This function takes the local head index and recomputes it for + * a given length added as an offset. + **/ +STATIC u16 fm10k_mbx_head_sub(struct fm10k_mbx_info *mbx, u16 offset) +{ + u16 head = (mbx->head - offset - 1) & ((mbx->mbmem_len << 1) - 1); + + /* sub/add 1 because we cannot have offset 0 or all 1s */ + return (head < mbx->head) ? ++head : --head; +} + +/** + * fm10k_mbx_pushed_tail_len - Retrieve the length of message being pushed + * @mbx: pointer to mailbox + * + * This function will return the length of the message currently being + * pushed onto the tail of the Rx queue. 
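The FIFO helpers above keep head and tail as free-running counters and rely on the size being a power of two, so masking with size - 1 yields buffer offsets and used/unused space falls out of plain subtraction. A small sketch of that bookkeeping, using an 8-DWORD buffer and illustrative names:

/* Sketch only: power-of-two ring bookkeeping in the style of the mailbox
 * FIFO helpers. Indices run freely; offsets come from masking.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_fifo {
	uint32_t buffer[8];
	uint16_t size;     /* must be a power of two */
	uint16_t head;     /* consumer index, free-running */
	uint16_t tail;     /* producer index, free-running */
};

static uint16_t fifo_used(const struct demo_fifo *f)   { return f->tail - f->head; }
static uint16_t fifo_unused(const struct demo_fifo *f) { return f->size + f->head - f->tail; }

static uint16_t fifo_tail_offset(const struct demo_fifo *f, uint16_t off)
{
	return (f->tail + off) & (f->size - 1);   /* wrap via mask */
}

int main(void)
{
	struct demo_fifo f = { .size = 8 };

	f.buffer[fifo_tail_offset(&f, 0)] = 0xDEAD;
	f.tail++;
	printf("used=%u unused=%u\n", (unsigned)fifo_used(&f), (unsigned)fifo_unused(&f));
	return 0;
}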
+ **/ +STATIC u16 fm10k_mbx_pushed_tail_len(struct fm10k_mbx_info *mbx) +{ + u32 *tail = mbx->rx.buffer + fm10k_fifo_tail_offset(&mbx->rx, 0); + + /* pushed tail is only valid if pushed is set */ + if (!mbx->pushed) + return 0; + + return FM10K_TLV_DWORD_LEN(*tail); +} + +/** + * fm10k_fifo_write_copy - pulls data off of msg and places it in FIFO + * @fifo: pointer to FIFO + * @msg: message array to populate + * @tail_offset: additional offset to add to tail pointer + * @len: length of FIFO to copy into message header + * + * This function will take a message and copy it into a section of the + * FIFO. In order to get something into a location other than just + * the tail you can use tail_offset to adjust the pointer. + **/ +STATIC void fm10k_fifo_write_copy(struct fm10k_mbx_fifo *fifo, + const u32 *msg, u16 tail_offset, u16 len) +{ + u16 end = fm10k_fifo_tail_offset(fifo, tail_offset); + u32 *tail = fifo->buffer + end; + + /* track when we should cross the end of the FIFO */ + end = fifo->size - end; + + /* copy end of message before start of message */ + if (end < len) + memcpy(fifo->buffer, msg + end, (len - end) << 2); + else + end = len; + + /* Copy remaining message into Tx FIFO */ + memcpy(tail, msg, end << 2); +} + +/** + * fm10k_fifo_enqueue - Enqueues the message to the tail of the FIFO + * @fifo: pointer to FIFO + * @msg: message array to read + * + * This function enqueues a message up to the size specified by the length + * contained in the first DWORD of the message and will place at the tail + * of the FIFO. It will return 0 on success, or a negative value on error. + **/ +STATIC s32 fm10k_fifo_enqueue(struct fm10k_mbx_fifo *fifo, const u32 *msg) +{ + u16 len = FM10K_TLV_DWORD_LEN(*msg); + + DEBUGFUNC("fm10k_fifo_enqueue"); + + /* verify parameters */ + if (len > fifo->size) + return FM10K_MBX_ERR_SIZE; + + /* verify there is room for the message */ + if (len > fm10k_fifo_unused(fifo)) + return FM10K_MBX_ERR_NO_SPACE; + + /* Copy message into FIFO */ + fm10k_fifo_write_copy(fifo, msg, 0, len); + + /* memory barrier to guarantee FIFO is written before tail update */ + FM10K_WMB(); + + /* Update Tx FIFO tail */ + fifo->tail += len; + + return FM10K_SUCCESS; +} + +/** + * fm10k_mbx_validate_msg_size - Validate incoming message based on size + * @mbx: pointer to mailbox + * @len: length of data pushed onto buffer + * + * This function analyzes the frame and will return a non-zero value when + * the start of a message larger than the mailbox is detected. + **/ +STATIC u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len) +{ + struct fm10k_mbx_fifo *fifo = &mbx->rx; + u16 total_len = 0, msg_len; + u32 *msg; + + DEBUGFUNC("fm10k_mbx_validate_msg_size"); + + /* length should include previous amounts pushed */ + len += mbx->pushed; + + /* offset in message is based off of current message size */ + do { + msg = fifo->buffer + fm10k_fifo_tail_offset(fifo, total_len); + msg_len = FM10K_TLV_DWORD_LEN(*msg); + total_len += msg_len; + } while (total_len < len); + + /* message extends out of pushed section, but fits in FIFO */ + if ((len < total_len) && (msg_len <= mbx->max_size)) + return 0; + + /* return length of invalid section */ + return (len < total_len) ? len : (len - total_len); +} + +/** + * fm10k_mbx_write_copy - pulls data off of Tx FIFO and places it in mbmem + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will take a section of the Tx FIFO and copy it into the + * mailbox memory. 
The offset in mbmem is based on the lower bits of the + * tail and len determines the length to copy. + **/ +STATIC void fm10k_mbx_write_copy(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_mbx_fifo *fifo = &mbx->tx; + u32 mbmem = mbx->mbmem_reg; + u32 *head = fifo->buffer; + u16 end, len, tail, mask; + + DEBUGFUNC("fm10k_mbx_write_copy"); + + if (!mbx->tail_len) + return; + + /* determine data length and mbmem tail index */ + mask = mbx->mbmem_len - 1; + len = mbx->tail_len; + tail = fm10k_mbx_tail_sub(mbx, len); + if (tail > mask) + tail++; + + /* determine offset in the ring */ + end = fm10k_fifo_head_offset(fifo, mbx->pulled); + head += end; + + /* memory barrier to guarantee data is ready to be read */ + FM10K_RMB(); + + /* Copy message from Tx FIFO */ + for (end = fifo->size - end; len; head = fifo->buffer) { + do { + /* adjust tail to match offset for FIFO */ + tail &= mask; + if (!tail) + tail++; + + mbx->tx_mbmem_pulled++; + + /* write message to hardware FIFO */ + FM10K_WRITE_MBX(hw, mbmem + tail++, *(head++)); + } while (--len && --end); + } +} + +/** + * fm10k_mbx_pull_head - Pulls data off of head of Tx FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @head: acknowledgement number last received + * + * This function will push the tail index forward based on the remote + * head index. It will then pull up to mbmem_len DWORDs off of the + * head of the FIFO and will place it in the MBMEM registers + * associated with the mailbox. + **/ +STATIC void fm10k_mbx_pull_head(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, u16 head) +{ + u16 mbmem_len, len, ack = fm10k_mbx_index_len(mbx, head, mbx->tail); + struct fm10k_mbx_fifo *fifo = &mbx->tx; + + /* update number of bytes pulled and update bytes in transit */ + mbx->pulled += mbx->tail_len - ack; + + /* determine length of data to pull, reserve space for mbmem header */ + mbmem_len = mbx->mbmem_len - 1; + len = fm10k_fifo_used(fifo) - mbx->pulled; + if (len > mbmem_len) + len = mbmem_len; + + /* update tail and record number of bytes in transit */ + mbx->tail = fm10k_mbx_tail_add(mbx, len - ack); + mbx->tail_len = len; + + /* drop pulled messages from the FIFO */ + for (len = fm10k_fifo_head_len(fifo); + len && (mbx->pulled >= len); + len = fm10k_fifo_head_len(fifo)) { + mbx->pulled -= fm10k_fifo_head_drop(fifo); + mbx->tx_messages++; + mbx->tx_dwords += len; + } + + /* Copy message out from the Tx FIFO */ + fm10k_mbx_write_copy(hw, mbx); +} + +/** + * fm10k_mbx_read_copy - pulls data off of mbmem and places it in Rx FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will take a section of the mailbox memory and copy it + * into the Rx FIFO. The offset is based on the lower bits of the + * head and len determines the length to copy. 
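When a message straddles the end of the ring, the copy helpers split it into two pieces: the part that fits before the wrap point and the remainder at the start of the buffer. A standalone sketch of that split copy (ring size and names are illustrative, and the copy order is not meant to mirror the driver exactly):

/* Sketch only: two-segment copy into a power-of-two ring, in the spirit of
 * fm10k_fifo_write_copy() above.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define RING_SIZE 8   /* in DWORDs, power of two */

static void ring_write(uint32_t *ring, uint16_t tail, const uint32_t *msg, uint16_t len)
{
	uint16_t start = tail & (RING_SIZE - 1);
	uint16_t first = RING_SIZE - start;        /* room before the wrap point */

	if (first > len)
		first = len;

	memcpy(ring + start, msg, (size_t)first * 4);            /* up to the end */
	memcpy(ring, msg + first, (size_t)(len - first) * 4);     /* wrapped part */
}

int main(void)
{
	uint32_t ring[RING_SIZE] = { 0 };
	uint32_t msg[4] = { 1, 2, 3, 4 };

	ring_write(ring, 6, msg, 4);               /* wraps: slots 6,7 then 0,1 */
	printf("%u %u %u %u\n", (unsigned)ring[0], (unsigned)ring[1],
	       (unsigned)ring[6], (unsigned)ring[7]);
	return 0;
}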
+ **/ +STATIC void fm10k_mbx_read_copy(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_mbx_fifo *fifo = &mbx->rx; + u32 mbmem = mbx->mbmem_reg ^ mbx->mbmem_len; + u32 *tail = fifo->buffer; + u16 end, len, head; + + DEBUGFUNC("fm10k_mbx_read_copy"); + + /* determine data length and mbmem head index */ + len = mbx->head_len; + head = fm10k_mbx_head_sub(mbx, len); + if (head >= mbx->mbmem_len) + head++; + + /* determine offset in the ring */ + end = fm10k_fifo_tail_offset(fifo, mbx->pushed); + tail += end; + + /* Copy message into Rx FIFO */ + for (end = fifo->size - end; len; tail = fifo->buffer) { + do { + /* adjust head to match offset for FIFO */ + head &= mbx->mbmem_len - 1; + if (!head) + head++; + + mbx->rx_mbmem_pushed++; + + /* read message from hardware FIFO */ + *(tail++) = FM10K_READ_MBX(hw, mbmem + head++); + } while (--len && --end); + } + + /* memory barrier to guarantee FIFO is written before tail update */ + FM10K_WMB(); +} + +/** + * fm10k_mbx_push_tail - Pushes up to 15 DWORDs on to tail of FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @tail: tail index of message + * + * This function will first validate the tail index and size for the + * incoming message. It then updates the acknowledgment number and + * copies the data into the FIFO. It will return the number of messages + * dequeued on success and a negative value on error. + **/ +STATIC s32 fm10k_mbx_push_tail(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, + u16 tail) +{ + struct fm10k_mbx_fifo *fifo = &mbx->rx; + u16 len, seq = fm10k_mbx_index_len(mbx, mbx->head, tail); + + DEBUGFUNC("fm10k_mbx_push_tail"); + + /* determine length of data to push */ + len = fm10k_fifo_unused(fifo) - mbx->pushed; + if (len > seq) + len = seq; + + /* update head and record bytes received */ + mbx->head = fm10k_mbx_head_add(mbx, len); + mbx->head_len = len; + + /* nothing to do if there is no data */ + if (!len) + return FM10K_SUCCESS; + + /* Copy msg into Rx FIFO */ + fm10k_mbx_read_copy(hw, mbx); + + /* determine if there are any invalid lengths in message */ + if (fm10k_mbx_validate_msg_size(mbx, len)) + return FM10K_MBX_ERR_SIZE; + + /* Update pushed */ + mbx->pushed += len; + + /* flush any completed messages */ + for (len = fm10k_mbx_pushed_tail_len(mbx); + len && (mbx->pushed >= len); + len = fm10k_mbx_pushed_tail_len(mbx)) { + fifo->tail += len; + mbx->pushed -= len; + mbx->rx_messages++; + mbx->rx_dwords += len; + } + + return FM10K_SUCCESS; +} + +/* pre-generated data for generating the CRC based on the poly 0xAC9A. 
*/ +static const u16 fm10k_crc_16b_table[256] = { + 0x0000, 0x7956, 0xF2AC, 0x8BFA, 0xBC6D, 0xC53B, 0x4EC1, 0x3797, + 0x21EF, 0x58B9, 0xD343, 0xAA15, 0x9D82, 0xE4D4, 0x6F2E, 0x1678, + 0x43DE, 0x3A88, 0xB172, 0xC824, 0xFFB3, 0x86E5, 0x0D1F, 0x7449, + 0x6231, 0x1B67, 0x909D, 0xE9CB, 0xDE5C, 0xA70A, 0x2CF0, 0x55A6, + 0x87BC, 0xFEEA, 0x7510, 0x0C46, 0x3BD1, 0x4287, 0xC97D, 0xB02B, + 0xA653, 0xDF05, 0x54FF, 0x2DA9, 0x1A3E, 0x6368, 0xE892, 0x91C4, + 0xC462, 0xBD34, 0x36CE, 0x4F98, 0x780F, 0x0159, 0x8AA3, 0xF3F5, + 0xE58D, 0x9CDB, 0x1721, 0x6E77, 0x59E0, 0x20B6, 0xAB4C, 0xD21A, + 0x564D, 0x2F1B, 0xA4E1, 0xDDB7, 0xEA20, 0x9376, 0x188C, 0x61DA, + 0x77A2, 0x0EF4, 0x850E, 0xFC58, 0xCBCF, 0xB299, 0x3963, 0x4035, + 0x1593, 0x6CC5, 0xE73F, 0x9E69, 0xA9FE, 0xD0A8, 0x5B52, 0x2204, + 0x347C, 0x4D2A, 0xC6D0, 0xBF86, 0x8811, 0xF147, 0x7ABD, 0x03EB, + 0xD1F1, 0xA8A7, 0x235D, 0x5A0B, 0x6D9C, 0x14CA, 0x9F30, 0xE666, + 0xF01E, 0x8948, 0x02B2, 0x7BE4, 0x4C73, 0x3525, 0xBEDF, 0xC789, + 0x922F, 0xEB79, 0x6083, 0x19D5, 0x2E42, 0x5714, 0xDCEE, 0xA5B8, + 0xB3C0, 0xCA96, 0x416C, 0x383A, 0x0FAD, 0x76FB, 0xFD01, 0x8457, + 0xAC9A, 0xD5CC, 0x5E36, 0x2760, 0x10F7, 0x69A1, 0xE25B, 0x9B0D, + 0x8D75, 0xF423, 0x7FD9, 0x068F, 0x3118, 0x484E, 0xC3B4, 0xBAE2, + 0xEF44, 0x9612, 0x1DE8, 0x64BE, 0x5329, 0x2A7F, 0xA185, 0xD8D3, + 0xCEAB, 0xB7FD, 0x3C07, 0x4551, 0x72C6, 0x0B90, 0x806A, 0xF93C, + 0x2B26, 0x5270, 0xD98A, 0xA0DC, 0x974B, 0xEE1D, 0x65E7, 0x1CB1, + 0x0AC9, 0x739F, 0xF865, 0x8133, 0xB6A4, 0xCFF2, 0x4408, 0x3D5E, + 0x68F8, 0x11AE, 0x9A54, 0xE302, 0xD495, 0xADC3, 0x2639, 0x5F6F, + 0x4917, 0x3041, 0xBBBB, 0xC2ED, 0xF57A, 0x8C2C, 0x07D6, 0x7E80, + 0xFAD7, 0x8381, 0x087B, 0x712D, 0x46BA, 0x3FEC, 0xB416, 0xCD40, + 0xDB38, 0xA26E, 0x2994, 0x50C2, 0x6755, 0x1E03, 0x95F9, 0xECAF, + 0xB909, 0xC05F, 0x4BA5, 0x32F3, 0x0564, 0x7C32, 0xF7C8, 0x8E9E, + 0x98E6, 0xE1B0, 0x6A4A, 0x131C, 0x248B, 0x5DDD, 0xD627, 0xAF71, + 0x7D6B, 0x043D, 0x8FC7, 0xF691, 0xC106, 0xB850, 0x33AA, 0x4AFC, + 0x5C84, 0x25D2, 0xAE28, 0xD77E, 0xE0E9, 0x99BF, 0x1245, 0x6B13, + 0x3EB5, 0x47E3, 0xCC19, 0xB54F, 0x82D8, 0xFB8E, 0x7074, 0x0922, + 0x1F5A, 0x660C, 0xEDF6, 0x94A0, 0xA337, 0xDA61, 0x519B, 0x28CD }; + +/** + * fm10k_crc_16b - Generate a 16 bit CRC for a region of 16 bit data + * @data: pointer to data to process + * @seed: seed value for CRC + * @len: length measured in 16 bits words + * + * This function will generate a CRC based on the polynomial 0xAC9A and + * whatever value is stored in the seed variable. Note that this + * value inverts the local seed and the result in order to capture all + * leading and trailing zeros. 
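The 256-entry table above lets the CRC update cost one shift and one table lookup per byte instead of a bit-by-bit loop. The sketch below shows the general table-driven CRC-16 technique: generate the table from a polynomial, then fold bytes in one at a time. It uses the same 0xAC9A polynomial value in reflected form for the demo, but does not claim to reproduce the driver's exact table contents or its 32-bit word ordering:

/* Sketch only: generic table-driven CRC-16 (table generated at run time). */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define POLY 0xAC9Au   /* reflected-form polynomial, for illustration */

static uint16_t crc_table[256];

static void crc_table_init(void)
{
	for (unsigned int i = 0; i < 256; i++) {
		uint16_t crc = (uint16_t)i;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 1) ? (uint16_t)((crc >> 1) ^ POLY)
					: (uint16_t)(crc >> 1);
		crc_table[i] = crc;
	}
}

static uint16_t crc16(const uint8_t *data, size_t len, uint16_t seed)
{
	uint16_t crc = seed;

	while (len--)   /* one shift + one lookup per byte */
		crc = (uint16_t)((crc >> 8) ^ crc_table[(crc ^ *data++) & 0xFF]);
	return crc;
}

int main(void)
{
	const uint8_t msg[] = "mailbox";

	crc_table_init();
	printf("0x%04x\n", crc16(msg, sizeof(msg) - 1, 0xFFFF));
	return 0;
}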
+ */ +STATIC u16 fm10k_crc_16b(const u32 *data, u16 seed, u16 len) +{ + u32 result = seed; + + while (len--) { + result ^= *(data++); + result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF]; + result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF]; + + if (!(len--)) + break; + + result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF]; + result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF]; + } + + return (u16)result; +} + +/** + * fm10k_fifo_crc - generate a CRC based off of FIFO data + * @fifo: pointer to FIFO + * @offset: offset point for start of FIFO + * @len: number of DWORDS words to process + * @seed: seed value for CRC + * + * This function generates a CRC for some region of the FIFO + **/ +STATIC u16 fm10k_fifo_crc(struct fm10k_mbx_fifo *fifo, u16 offset, + u16 len, u16 seed) +{ + u32 *data = fifo->buffer + offset; + + /* track when we should cross the end of the FIFO */ + offset = fifo->size - offset; + + /* if we are in 2 blocks process the end of the FIFO first */ + if (offset < len) { + seed = fm10k_crc_16b(data, seed, offset * 2); + data = fifo->buffer; + len -= offset; + } + + /* process any remaining bits */ + return fm10k_crc_16b(data, seed, len * 2); +} + +/** + * fm10k_mbx_update_local_crc - Update the local CRC for outgoing data + * @mbx: pointer to mailbox + * @head: head index provided by remote mailbox + * + * This function will generate the CRC for all data from the end of the + * last head update to the current one. It uses the result of the + * previous CRC as the seed for this update. The result is stored in + * mbx->local. + **/ +STATIC void fm10k_mbx_update_local_crc(struct fm10k_mbx_info *mbx, u16 head) +{ + u16 len = mbx->tail_len - fm10k_mbx_index_len(mbx, head, mbx->tail); + + /* determine the offset for the start of the region to be pulled */ + head = fm10k_fifo_head_offset(&mbx->tx, mbx->pulled); + + /* update local CRC to include all of the pulled data */ + mbx->local = fm10k_fifo_crc(&mbx->tx, head, len, mbx->local); +} + +/** + * fm10k_mbx_verify_remote_crc - Verify the CRC is correct for current data + * @mbx: pointer to mailbox + * + * This function will take all data that has been provided from the remote + * end and generate a CRC for it. This is stored in mbx->remote. The + * CRC for the header is then computed and if the result is non-zero this + * is an error and we signal an error dropping all data and resetting the + * connection. + */ +STATIC s32 fm10k_mbx_verify_remote_crc(struct fm10k_mbx_info *mbx) +{ + struct fm10k_mbx_fifo *fifo = &mbx->rx; + u16 len = mbx->head_len; + u16 offset = fm10k_fifo_tail_offset(fifo, mbx->pushed) - len; + u16 crc; + + /* update the remote CRC if new data has been received */ + if (len) + mbx->remote = fm10k_fifo_crc(fifo, offset, len, mbx->remote); + + /* process the full header as we have to validate the CRC */ + crc = fm10k_crc_16b(&mbx->mbx_hdr, mbx->remote, 1); + + /* notify other end if we have a problem */ + return crc ? FM10K_MBX_ERR_CRC : FM10K_SUCCESS; +} + +/** + * fm10k_mbx_rx_ready - Indicates that a message is ready in the Rx FIFO + * @mbx: pointer to mailbox + * + * This function returns true if there is a message in the Rx FIFO to dequeue. 
+ **/ +STATIC bool fm10k_mbx_rx_ready(struct fm10k_mbx_info *mbx) +{ + u16 msg_size = fm10k_fifo_head_len(&mbx->rx); + + return msg_size && (fm10k_fifo_used(&mbx->rx) >= msg_size); +} + +/** + * fm10k_mbx_tx_ready - Indicates that the mailbox is in state ready for Tx + * @mbx: pointer to mailbox + * @len: verify free space is >= this value + * + * This function returns true if the mailbox is in a state ready to transmit. + **/ +STATIC bool fm10k_mbx_tx_ready(struct fm10k_mbx_info *mbx, u16 len) +{ + u16 fifo_unused = fm10k_fifo_unused(&mbx->tx); + + return (mbx->state == FM10K_STATE_OPEN) && (fifo_unused >= len); +} + +/** + * fm10k_mbx_tx_complete - Indicates that the Tx FIFO has been emptied + * @mbx: pointer to mailbox + * + * This function returns true if the Tx FIFO is empty. + **/ +STATIC bool fm10k_mbx_tx_complete(struct fm10k_mbx_info *mbx) +{ + return fm10k_fifo_empty(&mbx->tx); +} + +/** + * fm10k_mbx_deqeueue_rx - Dequeues the message from the head in the Rx FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function dequeues messages and hands them off to the TLV parser. + * It will return the number of messages processed when called. + **/ +STATIC u16 fm10k_mbx_dequeue_rx(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_mbx_fifo *fifo = &mbx->rx; + s32 err; + u16 cnt; + + /* parse Rx messages out of the Rx FIFO to empty it */ + for (cnt = 0; !fm10k_fifo_empty(fifo); cnt++) { + err = fm10k_tlv_msg_parse(hw, fifo->buffer + fifo->head, + mbx, mbx->msg_data); + if (err < 0) + mbx->rx_parse_err++; + + fm10k_fifo_head_drop(fifo); + } + + /* shift remaining bytes back to start of FIFO */ + memmove(fifo->buffer, fifo->buffer + fifo->tail, mbx->pushed << 2); + + /* shift head and tail based on the memory we moved */ + fifo->tail -= fifo->head; + fifo->head = 0; + + return cnt; +} + +/** + * fm10k_mbx_enqueue_tx - Enqueues the message to the tail of the Tx FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @msg: message array to read + * + * This function enqueues a message up to the size specified by the length + * contained in the first DWORD of the message and will place at the tail + * of the FIFO. It will return 0 on success, or a negative value on error. 
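The enqueue path described above retries a full FIFO a bounded number of times, giving the mailbox a chance to drain between attempts before reporting the transmit side as busy. A tiny sketch of that bounded-retry idiom with stubbed-out enqueue and drain steps (all names are illustrative):

/* Sketch only: retry a failing enqueue while a countdown lasts, draining a
 * little between attempts, as in fm10k_mbx_enqueue_tx() below.
 */
#include <stdbool.h>
#include <stdio.h>

static int space_left = 0;          /* pretend the FIFO starts out full */

static bool try_enqueue(void)
{
	if (space_left <= 0)
		return false;
	space_left--;
	return true;
}

static void drain_a_little(void)    /* stands in for processing the mailbox */
{
	space_left++;
}

static bool enqueue_with_retry(unsigned int countdown)
{
	while (!try_enqueue()) {
		if (!countdown--)
			return false;   /* give up: caller records the busy case */
		drain_a_little();
	}
	return true;
}

int main(void)
{
	printf("enqueued: %s\n", enqueue_with_retry(5) ? "yes" : "no");
	return 0;
}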
+ **/ +STATIC s32 fm10k_mbx_enqueue_tx(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, const u32 *msg) +{ + u32 countdown = mbx->timeout; + s32 err; + + switch (mbx->state) { + case FM10K_STATE_CLOSED: + case FM10K_STATE_DISCONNECT: + return FM10K_MBX_ERR_NO_MBX; + default: + break; + } + + /* enqueue the message on the Tx FIFO */ + err = fm10k_fifo_enqueue(&mbx->tx, msg); + + /* if it failed give the FIFO a chance to drain */ + while (err && countdown) { + countdown--; + usec_delay(mbx->usec_delay); + mbx->ops.process(hw, mbx); + err = fm10k_fifo_enqueue(&mbx->tx, msg); + } + + /* if we failed treat the error */ + if (err) { + mbx->timeout = 0; + mbx->tx_busy++; + } + + /* begin processing message, ignore errors as this is just meant + * to start the mailbox flow so we are not concerned if there + * is a bad error, or the mailbox is already busy with a request + */ + if (!mbx->tail_len) + mbx->ops.process(hw, mbx); + + return FM10K_SUCCESS; +} + +/** + * fm10k_mbx_read - Copies the mbmem to local message buffer + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function copies the message from the mbmem to the message array + **/ +STATIC s32 fm10k_mbx_read(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) +{ + DEBUGFUNC("fm10k_mbx_read"); + + /* only allow one reader in here at a time */ + if (mbx->mbx_hdr) + return FM10K_MBX_ERR_BUSY; + + /* read to capture initial interrupt bits */ + if (FM10K_READ_MBX(hw, mbx->mbx_reg) & FM10K_MBX_REQ_INTERRUPT) + mbx->mbx_lock = FM10K_MBX_ACK; + + /* write back interrupt bits to clear */ + FM10K_WRITE_MBX(hw, mbx->mbx_reg, + FM10K_MBX_REQ_INTERRUPT | FM10K_MBX_ACK_INTERRUPT); + + /* read remote header */ + mbx->mbx_hdr = FM10K_READ_MBX(hw, mbx->mbmem_reg ^ mbx->mbmem_len); + + return FM10K_SUCCESS; +} + +/** + * fm10k_mbx_write - Copies the local message buffer to mbmem + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function copies the message from the the message array to mbmem + **/ +STATIC void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) +{ + u32 mbmem = mbx->mbmem_reg; + + DEBUGFUNC("fm10k_mbx_write"); + + /* write new msg header to notify recipient of change */ + FM10K_WRITE_MBX(hw, mbmem, mbx->mbx_hdr); + + /* write mailbox to send interrupt */ + if (mbx->mbx_lock) + FM10K_WRITE_MBX(hw, mbx->mbx_reg, mbx->mbx_lock); + + /* we no longer are using the header so free it */ + mbx->mbx_hdr = 0; + mbx->mbx_lock = 0; +} + +/** + * fm10k_mbx_create_connect_hdr - Generate a connect mailbox header + * @mbx: pointer to mailbox + * + * This function returns a connection mailbox header + **/ +STATIC void fm10k_mbx_create_connect_hdr(struct fm10k_mbx_info *mbx) +{ + mbx->mbx_lock |= FM10K_MBX_REQ; + + mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_CONNECT, TYPE) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD) | + FM10K_MSG_HDR_FIELD_SET(mbx->rx.size - 1, CONNECT_SIZE); +} + +/** + * fm10k_mbx_create_data_hdr - Generate a data mailbox header + * @mbx: pointer to mailbox + * + * This function returns a data mailbox header + **/ +STATIC void fm10k_mbx_create_data_hdr(struct fm10k_mbx_info *mbx) +{ + u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DATA, TYPE) | + FM10K_MSG_HDR_FIELD_SET(mbx->tail, TAIL) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD); + struct fm10k_mbx_fifo *fifo = &mbx->tx; + u16 crc; + + if (mbx->tail_len) + mbx->mbx_lock |= FM10K_MBX_REQ; + + /* generate CRC for data in flight and header */ + crc = fm10k_fifo_crc(fifo, fm10k_fifo_head_offset(fifo, mbx->pulled), + 
mbx->tail_len, mbx->local); + crc = fm10k_crc_16b(&hdr, crc, 1); + + /* load header to memory to be written */ + mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC); +} + +/** + * fm10k_mbx_create_disconnect_hdr - Generate a disconnect mailbox header + * @mbx: pointer to mailbox + * + * This function returns a disconnect mailbox header + **/ +STATIC void fm10k_mbx_create_disconnect_hdr(struct fm10k_mbx_info *mbx) +{ + u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DISCONNECT, TYPE) | + FM10K_MSG_HDR_FIELD_SET(mbx->tail, TAIL) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD); + u16 crc = fm10k_crc_16b(&hdr, mbx->local, 1); + + mbx->mbx_lock |= FM10K_MBX_ACK; + + /* load header to memory to be written */ + mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC); +} + +/** + * fm10k_mbx_create_fake_disconnect_hdr - Generate a false disconnect mbox hdr + * @mbx: pointer to mailbox + * + * This function creates a fake disconnect header for loading into remote + * mailbox header. The primary purpose is to prevent errors on immediate + * start up after mbx->connect. + **/ +STATIC void fm10k_mbx_create_fake_disconnect_hdr(struct fm10k_mbx_info *mbx) +{ + u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DISCONNECT, TYPE) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, TAIL) | + FM10K_MSG_HDR_FIELD_SET(mbx->tail, HEAD); + u16 crc = fm10k_crc_16b(&hdr, mbx->local, 1); + + mbx->mbx_lock |= FM10K_MBX_ACK; + + /* load header to memory to be written */ + mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC); +} + +/** + * fm10k_mbx_create_error_msg - Generate an error message + * @mbx: pointer to mailbox + * @err: local error encountered + * + * This function will interpret the error provided by err, and based on + * that it may shift the message by 1 DWORD and then place an error header + * at the start of the message. + **/ +STATIC void fm10k_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err) +{ + /* only generate an error message for these types */ + switch (err) { + case FM10K_MBX_ERR_TAIL: + case FM10K_MBX_ERR_HEAD: + case FM10K_MBX_ERR_TYPE: + case FM10K_MBX_ERR_SIZE: + case FM10K_MBX_ERR_RSVD0: + case FM10K_MBX_ERR_CRC: + break; + default: + return; + } + + mbx->mbx_lock |= FM10K_MBX_REQ; + + mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_ERROR, TYPE) | + FM10K_MSG_HDR_FIELD_SET(err, ERR_NO) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD); +} + +/** + * fm10k_mbx_validate_msg_hdr - Validate common fields in the message header + * @mbx: pointer to mailbox + * + * This function will parse up the fields in the mailbox header and return + * an error if the header contains any of a number of invalid configurations + * including unrecognized type, invalid route, or a malformed message. 
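The header builders and the validator pack and unpack 32-bit headers with FIELD_SET/FIELD_GET style shift-and-mask macros. A self-contained sketch of that packing scheme; the field layout below is invented for the example and is not the real fm10k header format:

/* Sketch only: shift-and-mask packing of several fields into one 32-bit
 * header word, with matching extraction.
 */
#include <stdint.h>
#include <stdio.h>

#define FIELD_SET(val, shift, width) \
	(((uint32_t)(val) & ((1u << (width)) - 1)) << (shift))
#define FIELD_GET(hdr, shift, width) \
	(((hdr) >> (shift)) & ((1u << (width)) - 1))

/* made-up layout: type in bits 0-3, head index in bits 4-15, tail in 16-27 */
#define TYPE_SHIFT 0
#define TYPE_WIDTH 4
#define HEAD_SHIFT 4
#define HEAD_WIDTH 12
#define TAIL_SHIFT 16
#define TAIL_WIDTH 12

int main(void)
{
	uint32_t hdr = FIELD_SET(2, TYPE_SHIFT, TYPE_WIDTH) |
		       FIELD_SET(0x015, HEAD_SHIFT, HEAD_WIDTH) |
		       FIELD_SET(0x3FA, TAIL_SHIFT, TAIL_WIDTH);

	printf("type=%u head=0x%03x tail=0x%03x\n",
	       (unsigned)FIELD_GET(hdr, TYPE_SHIFT, TYPE_WIDTH),
	       (unsigned)FIELD_GET(hdr, HEAD_SHIFT, HEAD_WIDTH),
	       (unsigned)FIELD_GET(hdr, TAIL_SHIFT, TAIL_WIDTH));
	return 0;
}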
+ **/ +STATIC s32 fm10k_mbx_validate_msg_hdr(struct fm10k_mbx_info *mbx) +{ + u16 type, rsvd0, head, tail, size; + const u32 *hdr = &mbx->mbx_hdr; + + DEBUGFUNC("fm10k_mbx_validate_msg_hdr"); + + type = FM10K_MSG_HDR_FIELD_GET(*hdr, TYPE); + rsvd0 = FM10K_MSG_HDR_FIELD_GET(*hdr, RSVD0); + tail = FM10K_MSG_HDR_FIELD_GET(*hdr, TAIL); + head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); + size = FM10K_MSG_HDR_FIELD_GET(*hdr, CONNECT_SIZE); + + if (rsvd0) + return FM10K_MBX_ERR_RSVD0; + + switch (type) { + case FM10K_MSG_DISCONNECT: + /* validate that all data has been received */ + if (tail != mbx->head) + return FM10K_MBX_ERR_TAIL; + + /* fall through */ + case FM10K_MSG_DATA: + /* validate that head is moving correctly */ + if (!head || (head == FM10K_MSG_HDR_MASK(HEAD))) + return FM10K_MBX_ERR_HEAD; + if (fm10k_mbx_index_len(mbx, head, mbx->tail) > mbx->tail_len) + return FM10K_MBX_ERR_HEAD; + + /* validate that tail is moving correctly */ + if (!tail || (tail == FM10K_MSG_HDR_MASK(TAIL))) + return FM10K_MBX_ERR_TAIL; + if (fm10k_mbx_index_len(mbx, mbx->head, tail) < mbx->mbmem_len) + break; + + return FM10K_MBX_ERR_TAIL; + case FM10K_MSG_CONNECT: + /* validate size is in range and is power of 2 mask */ + if ((size < FM10K_VFMBX_MSG_MTU) || (size & (size + 1))) + return FM10K_MBX_ERR_SIZE; + + /* fall through */ + case FM10K_MSG_ERROR: + if (!head || (head == FM10K_MSG_HDR_MASK(HEAD))) + return FM10K_MBX_ERR_HEAD; + /* neither create nor error include a tail offset */ + if (tail) + return FM10K_MBX_ERR_TAIL; + + break; + default: + return FM10K_MBX_ERR_TYPE; + } + + return FM10K_SUCCESS; +} + +/** + * fm10k_mbx_create_reply - Generate reply based on state and remote head + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @head: acknowledgement number + * + * This function will generate an outgoing message based on the current + * mailbox state and the remote FIFO head. It will return the length + * of the outgoing message excluding header on success, and a negative value + * on error. + **/ +STATIC s32 fm10k_mbx_create_reply(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, u16 head) +{ + switch (mbx->state) { + case FM10K_STATE_OPEN: + case FM10K_STATE_DISCONNECT: + /* update our checksum for the outgoing data */ + fm10k_mbx_update_local_crc(mbx, head); + + /* as long as other end recognizes us keep sending data */ + fm10k_mbx_pull_head(hw, mbx, head); + + /* generate new header based on data */ + if (mbx->tail_len || (mbx->state == FM10K_STATE_OPEN)) + fm10k_mbx_create_data_hdr(mbx); + else + fm10k_mbx_create_disconnect_hdr(mbx); + break; + case FM10K_STATE_CONNECT: + /* send disconnect even if we aren't connected */ + fm10k_mbx_create_connect_hdr(mbx); + break; + case FM10K_STATE_CLOSED: + /* generate new header based on data */ + fm10k_mbx_create_disconnect_hdr(mbx); + default: + break; + } + + return FM10K_SUCCESS; +} + +/** + * fm10k_mbx_reset_work- Reset internal pointers for any pending work + * @mbx: pointer to mailbox + * + * This function will reset all internal pointers so any work in progress + * is dropped. This call should occur every time we transition from the + * open state to the connect state. 
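The CONNECT_SIZE check in the validator above accepts only values of the form 2^n - 1, using the observation that such a mask shares no bits with its successor, i.e. (size & (size + 1)) == 0. A two-line demonstration:

/* Sketch only: test whether a value is a contiguous low-bit mask (2^n - 1). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_pow2_minus_one(uint16_t size)
{
	return (size & (uint16_t)(size + 1)) == 0;
}

int main(void)
{
	printf("0x3FF -> %d, 0x3FE -> %d\n",
	       is_pow2_minus_one(0x3FF), is_pow2_minus_one(0x3FE));
	return 0;
}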
+ **/ +STATIC void fm10k_mbx_reset_work(struct fm10k_mbx_info *mbx) +{ + u16 len, head, ack; + + /* reset our outgoing max size back to Rx limits */ + mbx->max_size = mbx->rx.size - 1; + + /* update mbx->pulled to account for tail_len and ack */ + head = FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, HEAD); + ack = fm10k_mbx_index_len(mbx, head, mbx->tail); + mbx->pulled += mbx->tail_len - ack; + + /* now drop any messages which have started or finished transmitting */ + while (fm10k_fifo_head_len(&mbx->tx) && mbx->pulled) { + len = fm10k_fifo_head_drop(&mbx->tx); + mbx->tx_dropped++; + if (mbx->pulled >= len) + mbx->pulled -= len; + else + mbx->pulled = 0; + } + + /* just do a quick resysnc to start of message */ + mbx->pushed = 0; + mbx->pulled = 0; + mbx->tail_len = 0; + mbx->head_len = 0; + mbx->rx.tail = 0; + mbx->rx.head = 0; +} + +/** + * fm10k_mbx_update_max_size - Update the max_size and drop any large messages + * @mbx: pointer to mailbox + * @size: new value for max_size + * + * This function updates the max_size value and drops any outgoing messages + * at the head of the Tx FIFO if they are larger than max_size. It does not + * drop all messages, as this is too difficult to parse and remove them from + * the FIFO. Instead, rely on the checking to ensure that messages larger + * than max_size aren't pushed into the memory buffer. + **/ +STATIC void fm10k_mbx_update_max_size(struct fm10k_mbx_info *mbx, u16 size) +{ + u16 len; + + DEBUGFUNC("fm10k_mbx_update_max_size"); + + mbx->max_size = size; + + /* flush any oversized messages from the queue */ + for (len = fm10k_fifo_head_len(&mbx->tx); + len > size; + len = fm10k_fifo_head_len(&mbx->tx)) { + fm10k_fifo_head_drop(&mbx->tx); + mbx->tx_dropped++; + } +} + +/** + * fm10k_mbx_connect_reset - Reset following request for reset + * @mbx: pointer to mailbox + * + * This function resets the mailbox to either a disconnected state + * or a connect state depending on the current mailbox state + **/ +STATIC void fm10k_mbx_connect_reset(struct fm10k_mbx_info *mbx) +{ + /* just do a quick resysnc to start of frame */ + fm10k_mbx_reset_work(mbx); + + /* reset CRC seeds */ + mbx->local = FM10K_MBX_CRC_SEED; + mbx->remote = FM10K_MBX_CRC_SEED; + + /* we cannot exit connect until the size is good */ + if (mbx->state == FM10K_STATE_OPEN) + mbx->state = FM10K_STATE_CONNECT; + else + mbx->state = FM10K_STATE_CLOSED; +} + +/** + * fm10k_mbx_process_connect - Process connect header + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will read an incoming connect header and reply with the + * appropriate message. It will return a value indicating the number of + * data DWORDs on success, or will return a negative value on failure. 
+ **/ +STATIC s32 fm10k_mbx_process_connect(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + const enum fm10k_mbx_state state = mbx->state; + const u32 *hdr = &mbx->mbx_hdr; + u16 size, head; + + /* we will need to pull all of the fields for verification */ + size = FM10K_MSG_HDR_FIELD_GET(*hdr, CONNECT_SIZE); + head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); + + switch (state) { + case FM10K_STATE_DISCONNECT: + case FM10K_STATE_OPEN: + /* reset any in-progress work */ + fm10k_mbx_connect_reset(mbx); + break; + case FM10K_STATE_CONNECT: + /* we cannot exit connect until the size is good */ + if (size > mbx->rx.size) { + mbx->max_size = mbx->rx.size - 1; + } else { + /* record the remote system requesting connection */ + mbx->state = FM10K_STATE_OPEN; + + fm10k_mbx_update_max_size(mbx, size); + } + break; + default: + break; + } + + /* align our tail index to remote head index */ + mbx->tail = head; + + return fm10k_mbx_create_reply(hw, mbx, head); +} + +/** + * fm10k_mbx_process_data - Process data header + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will read an incoming data header and reply with the + * appropriate message. It will return a value indicating the number of + * data DWORDs on success, or will return a negative value on failure. + **/ +STATIC s32 fm10k_mbx_process_data(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + const u32 *hdr = &mbx->mbx_hdr; + u16 head, tail; + s32 err; + + DEBUGFUNC("fm10k_mbx_process_data"); + + /* we will need to pull all of the fields for verification */ + head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); + tail = FM10K_MSG_HDR_FIELD_GET(*hdr, TAIL); + + /* if we are in connect just update our data and go */ + if (mbx->state == FM10K_STATE_CONNECT) { + mbx->tail = head; + mbx->state = FM10K_STATE_OPEN; + } + + /* abort on message size errors */ + err = fm10k_mbx_push_tail(hw, mbx, tail); + if (err < 0) + return err; + + /* verify the checksum on the incoming data */ + err = fm10k_mbx_verify_remote_crc(mbx); + if (err) + return err; + + /* process messages if we have received any */ + fm10k_mbx_dequeue_rx(hw, mbx); + + return fm10k_mbx_create_reply(hw, mbx, head); +} + +/** + * fm10k_mbx_process_disconnect - Process disconnect header + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will read an incoming disconnect header and reply with the + * appropriate message. It will return a value indicating the number of + * data DWORDs on success, or will return a negative value on failure. 
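+ *
+ * Illustrative note: the connection is only torn down once our own
+ * transmits have completed; before that the state is left unchanged and
+ * only a reply header is generated, and a HEAD field that disagrees with
+ * our tail at completion is reported as FM10K_MBX_ERR_HEAD.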
+ **/ +STATIC s32 fm10k_mbx_process_disconnect(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + const enum fm10k_mbx_state state = mbx->state; + const u32 *hdr = &mbx->mbx_hdr; + u16 head; + s32 err; + + /* we will need to pull the header field for verification */ + head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); + + /* We should not be receiving disconnect if Rx is incomplete */ + if (mbx->pushed) + return FM10K_MBX_ERR_TAIL; + + /* we have already verified mbx->head == tail so we know this is 0 */ + mbx->head_len = 0; + + /* verify the checksum on the incoming header is correct */ + err = fm10k_mbx_verify_remote_crc(mbx); + if (err) + return err; + + switch (state) { + case FM10K_STATE_DISCONNECT: + case FM10K_STATE_OPEN: + /* state doesn't change if we still have work to do */ + if (!fm10k_mbx_tx_complete(mbx)) + break; + + /* verify the head indicates we completed all transmits */ + if (head != mbx->tail) + return FM10K_MBX_ERR_HEAD; + + /* reset any in-progress work */ + fm10k_mbx_connect_reset(mbx); + break; + default: + break; + } + + return fm10k_mbx_create_reply(hw, mbx, head); +} + +/** + * fm10k_mbx_process_error - Process error header + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will read an incoming error header and reply with the + * appropriate message. It will return a value indicating the number of + * data DWORDs on success, or will return a negative value on failure. + **/ +STATIC s32 fm10k_mbx_process_error(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + const u32 *hdr = &mbx->mbx_hdr; + u16 head; + + /* we will need to pull all of the fields for verification */ + head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); + + switch (mbx->state) { + case FM10K_STATE_OPEN: + case FM10K_STATE_DISCONNECT: + /* flush any uncompleted work */ + fm10k_mbx_reset_work(mbx); + + /* reset CRC seeds */ + mbx->local = FM10K_MBX_CRC_SEED; + mbx->remote = FM10K_MBX_CRC_SEED; + + /* reset tail index and size to prepare for reconnect */ + mbx->tail = head; + + /* if open then reset max_size and go back to connect */ + if (mbx->state == FM10K_STATE_OPEN) { + mbx->state = FM10K_STATE_CONNECT; + break; + } + + /* send a connect message to get data flowing again */ + fm10k_mbx_create_connect_hdr(mbx); + return FM10K_SUCCESS; + default: + break; + } + + return fm10k_mbx_create_reply(hw, mbx, mbx->tail); +} + +/** + * fm10k_mbx_process - Process mailbox interrupt + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will process incoming mailbox events and generate mailbox + * replies. It will return a value indicating the number of DWORDs + * transmitted excluding header on success or a negative value on error. 
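+ *
+ * Minimal caller sketch, for illustration only (the interrupt handling
+ * that normally invokes this lives outside this file and is assumed here):
+ *
+ *     s32 err = mbx->ops.process(hw, mbx);
+ *     if (err < 0)
+ *             DEBUGOUT1("mailbox processing failed: %d\n", err);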
+ **/ +STATIC s32 fm10k_mbx_process(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + s32 err; + + DEBUGFUNC("fm10k_mbx_process"); + + /* we do not read mailbox if closed */ + if (mbx->state == FM10K_STATE_CLOSED) + return FM10K_SUCCESS; + + /* copy data from mailbox */ + err = fm10k_mbx_read(hw, mbx); + if (err) + return err; + + /* validate type, source, and destination */ + err = fm10k_mbx_validate_msg_hdr(mbx); + if (err < 0) + goto msg_err; + + switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, TYPE)) { + case FM10K_MSG_CONNECT: + err = fm10k_mbx_process_connect(hw, mbx); + break; + case FM10K_MSG_DATA: + err = fm10k_mbx_process_data(hw, mbx); + break; + case FM10K_MSG_DISCONNECT: + err = fm10k_mbx_process_disconnect(hw, mbx); + break; + case FM10K_MSG_ERROR: + err = fm10k_mbx_process_error(hw, mbx); + break; + default: + err = FM10K_MBX_ERR_TYPE; + break; + } + +msg_err: + /* notify partner of errors on our end */ + if (err < 0) + fm10k_mbx_create_error_msg(mbx, err); + + /* copy data from mailbox */ + fm10k_mbx_write(hw, mbx); + + return err; +} + +/** + * fm10k_mbx_disconnect - Shutdown mailbox connection + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will shut down the mailbox. It places the mailbox first + * in the disconnect state, it then allows up to a predefined timeout for + * the mailbox to transition to close on its own. If this does not occur + * then the mailbox will be forced into the closed state. + * + * Any mailbox transactions not completed before calling this function + * are not guaranteed to complete and may be dropped. + **/ +STATIC void fm10k_mbx_disconnect(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + int timeout = mbx->timeout ? FM10K_MBX_DISCONNECT_TIMEOUT : 0; + + DEBUGFUNC("fm10k_mbx_disconnect"); + + /* Place mbx in ready to disconnect state */ + mbx->state = FM10K_STATE_DISCONNECT; + + /* trigger interrupt to start shutdown process */ + FM10K_WRITE_MBX(hw, mbx->mbx_reg, FM10K_MBX_REQ | + FM10K_MBX_INTERRUPT_DISABLE); + do { + usec_delay(FM10K_MBX_POLL_DELAY); + mbx->ops.process(hw, mbx); + timeout -= FM10K_MBX_POLL_DELAY; + } while ((timeout > 0) && (mbx->state != FM10K_STATE_CLOSED)); + + /* in case we didn't close, just force the mailbox into shutdown and + * drop all left over messages in the FIFO. + */ + fm10k_mbx_connect_reset(mbx); + fm10k_fifo_drop_all(&mbx->tx); + + FM10K_WRITE_MBX(hw, mbx->mbmem_reg, 0); +} + +/** + * fm10k_mbx_connect - Start mailbox connection + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will initiate a mailbox connection. It will populate the + * mailbox with a broadcast connect message and then initialize the lock. + * This is safe since the connect message is a single DWORD so the mailbox + * transaction is guaranteed to be atomic. + * + * This function will return an error if the mailbox has not been initiated + * or is currently in use. 
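+ *
+ * Illustrative bring-up sequence (the msg_data handler table is assumed to
+ * be prepared by the caller; fm10k_pfvf_mbx_init is defined later in this
+ * file):
+ *
+ *     err = fm10k_pfvf_mbx_init(hw, mbx, msg_data, 0);
+ *     if (!err)
+ *             err = mbx->ops.connect(hw, mbx);
+ *
+ * A connect attempt while not in the CLOSED state fails with
+ * FM10K_MBX_ERR_BUSY, and an uninitialized mailbox fails with
+ * FM10K_MBX_ERR_NO_SPACE.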
+ **/ +STATIC s32 fm10k_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) +{ + DEBUGFUNC("fm10k_mbx_connect"); + + /* we cannot connect an uninitialized mailbox */ + if (!mbx->rx.buffer) + return FM10K_MBX_ERR_NO_SPACE; + + /* we cannot connect an already connected mailbox */ + if (mbx->state != FM10K_STATE_CLOSED) + return FM10K_MBX_ERR_BUSY; + + /* mailbox timeout can now become active */ + mbx->timeout = FM10K_MBX_INIT_TIMEOUT; + + /* Place mbx in ready to connect state */ + mbx->state = FM10K_STATE_CONNECT; + + fm10k_mbx_reset_work(mbx); + + /* initialize header of remote mailbox */ + fm10k_mbx_create_fake_disconnect_hdr(mbx); + FM10K_WRITE_MBX(hw, mbx->mbmem_reg ^ mbx->mbmem_len, mbx->mbx_hdr); + + /* enable interrupt and notify other party of new message */ + mbx->mbx_lock = FM10K_MBX_REQ_INTERRUPT | FM10K_MBX_ACK_INTERRUPT | + FM10K_MBX_INTERRUPT_ENABLE; + + /* generate and load connect header into mailbox */ + fm10k_mbx_create_connect_hdr(mbx); + fm10k_mbx_write(hw, mbx); + + return FM10K_SUCCESS; +} + +/** + * fm10k_mbx_validate_handlers - Validate layout of message parsing data + * @msg_data: handlers for mailbox events + * + * This function validates the layout of the message parsing data. This + * should be mostly static, but it is important to catch any errors that + * are made when constructing the parsers. + **/ +STATIC s32 fm10k_mbx_validate_handlers(const struct fm10k_msg_data *msg_data) +{ + const struct fm10k_tlv_attr *attr; + unsigned int id; + + DEBUGFUNC("fm10k_mbx_validate_handlers"); + + /* Allow NULL mailboxes that transmit but don't receive */ + if (!msg_data) + return FM10K_SUCCESS; + + while (msg_data->id != FM10K_TLV_ERROR) { + /* all messages should have a function handler */ + if (!msg_data->func) + return FM10K_ERR_PARAM; + + /* parser is optional */ + attr = msg_data->attr; + if (attr) { + while (attr->id != FM10K_TLV_ERROR) { + id = attr->id; + attr++; + /* ID should always be increasing */ + if (id >= attr->id) + return FM10K_ERR_PARAM; + /* ID should fit in results array */ + if (id >= FM10K_TLV_RESULTS_MAX) + return FM10K_ERR_PARAM; + } + + /* verify terminator is in the list */ + if (attr->id != FM10K_TLV_ERROR) + return FM10K_ERR_PARAM; + } + + id = msg_data->id; + msg_data++; + /* ID should always be increasing */ + if (id >= msg_data->id) + return FM10K_ERR_PARAM; + } + + /* verify terminator is in the list */ + if ((msg_data->id != FM10K_TLV_ERROR) || !msg_data->func) + return FM10K_ERR_PARAM; + + return FM10K_SUCCESS; +} + +/** + * fm10k_mbx_register_handlers - Register a set of handler ops for mailbox + * @mbx: pointer to mailbox + * @msg_data: handlers for mailbox events + * + * This function associates a set of message handling ops with a mailbox. + **/ +STATIC s32 fm10k_mbx_register_handlers(struct fm10k_mbx_info *mbx, + const struct fm10k_msg_data *msg_data) +{ + DEBUGFUNC("fm10k_mbx_register_handlers"); + + /* validate layout of handlers before assigning them */ + if (fm10k_mbx_validate_handlers(msg_data)) + return FM10K_ERR_PARAM; + + /* initialize the message handlers */ + mbx->msg_data = msg_data; + + return FM10K_SUCCESS; +} + +/** + * fm10k_pfvf_mbx_init - Initialize mailbox memory for PF/VF mailbox + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @msg_data: handlers for mailbox events + * @id: ID reference for PF as it supports up to 64 PF/VF mailboxes + * + * This function initializes the mailbox for use. 
It will split the + * buffer provided and use that to populate both the Tx and Rx FIFO by + * evenly splitting it. In order to allow for easy masking of head/tail + * the value reported in size must be a power of 2 and is reported in + * DWORDs, not bytes. Any invalid values will cause the mailbox to return + * error. + **/ +s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, + const struct fm10k_msg_data *msg_data, u8 id) +{ + DEBUGFUNC("fm10k_pfvf_mbx_init"); + + /* initialize registers */ + switch (hw->mac.type) { + case fm10k_mac_vf: + mbx->mbx_reg = FM10K_VFMBX; + mbx->mbmem_reg = FM10K_VFMBMEM(FM10K_VFMBMEM_VF_XOR); + break; + case fm10k_mac_pf: + /* there are only 64 VF <-> PF mailboxes */ + if (id < 64) { + mbx->mbx_reg = FM10K_MBX(id); + mbx->mbmem_reg = FM10K_MBMEM_VF(id, 0); + break; + } + /* fallthough */ + default: + return FM10K_MBX_ERR_NO_MBX; + } + + /* start out in closed state */ + mbx->state = FM10K_STATE_CLOSED; + + /* validate layout of handlers before assigning them */ + if (fm10k_mbx_validate_handlers(msg_data)) + return FM10K_ERR_PARAM; + + /* initialize the message handlers */ + mbx->msg_data = msg_data; + + /* start mailbox as timed out and let the reset_hw call + * set the timeout value to begin communications + */ + mbx->timeout = 0; + mbx->usec_delay = FM10K_MBX_INIT_DELAY; + + /* initialize tail and head */ + mbx->tail = 1; + mbx->head = 1; + + /* initialize CRC seeds */ + mbx->local = FM10K_MBX_CRC_SEED; + mbx->remote = FM10K_MBX_CRC_SEED; + + /* Split buffer for use by Tx/Rx FIFOs */ + mbx->max_size = FM10K_MBX_MSG_MAX_SIZE; + mbx->mbmem_len = FM10K_VFMBMEM_VF_XOR; + + /* initialize the FIFOs, sizes are in 4 byte increments */ + fm10k_fifo_init(&mbx->tx, mbx->buffer, FM10K_MBX_TX_BUFFER_SIZE); + fm10k_fifo_init(&mbx->rx, &mbx->buffer[FM10K_MBX_TX_BUFFER_SIZE], + FM10K_MBX_RX_BUFFER_SIZE); + + /* initialize function pointers */ + mbx->ops.connect = fm10k_mbx_connect; + mbx->ops.disconnect = fm10k_mbx_disconnect; + mbx->ops.rx_ready = fm10k_mbx_rx_ready; + mbx->ops.tx_ready = fm10k_mbx_tx_ready; + mbx->ops.tx_complete = fm10k_mbx_tx_complete; + mbx->ops.enqueue_tx = fm10k_mbx_enqueue_tx; + mbx->ops.process = fm10k_mbx_process; + mbx->ops.register_handlers = fm10k_mbx_register_handlers; + + return FM10K_SUCCESS; +} + +/** + * fm10k_sm_mbx_create_data_hdr - Generate a mailbox header for local FIFO + * @mbx: pointer to mailbox + * + * This function returns a data mailbox header + **/ +STATIC void fm10k_sm_mbx_create_data_hdr(struct fm10k_mbx_info *mbx) +{ + if (mbx->tail_len) + mbx->mbx_lock |= FM10K_MBX_REQ; + + mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(mbx->tail, SM_TAIL) | + FM10K_MSG_HDR_FIELD_SET(mbx->remote, SM_VER) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, SM_HEAD); +} + +/** + * fm10k_sm_mbx_create_connect_hdr - Generate a mailbox header for local FIFO + * @mbx: pointer to mailbox + * @err: error flags to report if any + * + * This function returns a connection mailbox header + **/ +STATIC void fm10k_sm_mbx_create_connect_hdr(struct fm10k_mbx_info *mbx, u8 err) +{ + if (mbx->local) + mbx->mbx_lock |= FM10K_MBX_REQ; + + mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(mbx->tail, SM_TAIL) | + FM10K_MSG_HDR_FIELD_SET(mbx->remote, SM_VER) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, SM_HEAD) | + FM10K_MSG_HDR_FIELD_SET(err, SM_ERR); +} + +/** + * fm10k_sm_mbx_connect_reset - Reset following request for reset + * @mbx: pointer to mailbox + * + * This function resets the mailbox to a just connected state + **/ +STATIC void fm10k_sm_mbx_connect_reset(struct 
fm10k_mbx_info *mbx) +{ + /* flush any uncompleted work */ + fm10k_mbx_reset_work(mbx); + + /* set local version to max and remote version to 0 */ + mbx->local = FM10K_SM_MBX_VERSION; + mbx->remote = 0; + + /* initialize tail and head */ + mbx->tail = 1; + mbx->head = 1; + + /* reset state back to connect */ + mbx->state = FM10K_STATE_CONNECT; +} + +/** + * fm10k_sm_mbx_connect - Start switch manager mailbox connection + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will initiate a mailbox connection with the switch + * manager. To do this it will first disconnect the mailbox, and then + * reconnect it in order to complete a reset of the mailbox. + * + * This function will return an error if the mailbox has not been initiated + * or is currently in use. + **/ +STATIC s32 fm10k_sm_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) +{ + DEBUGFUNC("fm10k_sm_mbx_connect"); + + /* we cannot connect an uninitialized mailbox */ + if (!mbx->rx.buffer) + return FM10K_MBX_ERR_NO_SPACE; + + /* we cannot connect an already connected mailbox */ + if (mbx->state != FM10K_STATE_CLOSED) + return FM10K_MBX_ERR_BUSY; + + /* mailbox timeout can now become active */ + mbx->timeout = FM10K_MBX_INIT_TIMEOUT; + + /* Place mbx in ready to connect state */ + mbx->state = FM10K_STATE_CONNECT; + mbx->max_size = FM10K_MBX_MSG_MAX_SIZE; + + /* reset interface back to connect */ + fm10k_sm_mbx_connect_reset(mbx); + + /* enable interrupt and notify other party of new message */ + mbx->mbx_lock = FM10K_MBX_REQ_INTERRUPT | FM10K_MBX_ACK_INTERRUPT | + FM10K_MBX_INTERRUPT_ENABLE; + + /* generate and load connect header into mailbox */ + fm10k_sm_mbx_create_connect_hdr(mbx, 0); + fm10k_mbx_write(hw, mbx); + + return FM10K_SUCCESS; +} + +/** + * fm10k_sm_mbx_disconnect - Shutdown mailbox connection + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will shut down the mailbox. It places the mailbox first + * in the disconnect state, it then allows up to a predefined timeout for + * the mailbox to transition to close on its own. If this does not occur + * then the mailbox will be forced into the closed state. + * + * Any mailbox transactions not completed before calling this function + * are not guaranteed to complete and may be dropped. + **/ +STATIC void fm10k_sm_mbx_disconnect(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + int timeout = mbx->timeout ? FM10K_MBX_DISCONNECT_TIMEOUT : 0; + + DEBUGFUNC("fm10k_sm_mbx_disconnect"); + + /* Place mbx in ready to disconnect state */ + mbx->state = FM10K_STATE_DISCONNECT; + + /* trigger interrupt to start shutdown process */ + FM10K_WRITE_REG(hw, mbx->mbx_reg, FM10K_MBX_REQ | + FM10K_MBX_INTERRUPT_DISABLE); + do { + usec_delay(FM10K_MBX_POLL_DELAY); + mbx->ops.process(hw, mbx); + timeout -= FM10K_MBX_POLL_DELAY; + } while ((timeout > 0) && (mbx->state != FM10K_STATE_CLOSED)); + + /* in case we didn't close just force the mailbox into shutdown */ + mbx->state = FM10K_STATE_CLOSED; + mbx->remote = 0; + fm10k_mbx_reset_work(mbx); + fm10k_fifo_drop_all(&mbx->tx); + + FM10K_WRITE_REG(hw, mbx->mbmem_reg, 0); +} + +/** + * fm10k_sm_mbx_validate_fifo_hdr - Validate fields in the remote FIFO header + * @mbx: pointer to mailbox + * + * This function will parse up the fields in the mailbox header and return + * an error if the header contains any of a number of invalid configurations + * including unrecognized offsets or version numbers. 
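+ *
+ * For illustration: an unrecognized version field is rejected as
+ * FM10K_MBX_ERR_SRC, while a version 1 header whose SM_HEAD or SM_TAIL is
+ * zero or beyond FM10K_SM_MBX_FIFO_LEN is rejected as FM10K_MBX_ERR_HEAD or
+ * FM10K_MBX_ERR_TAIL before the payload is processed.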
+ **/ +STATIC s32 fm10k_sm_mbx_validate_fifo_hdr(struct fm10k_mbx_info *mbx) +{ + const u32 *hdr = &mbx->mbx_hdr; + u16 tail, head, ver; + + DEBUGFUNC("fm10k_sm_mbx_validate_fifo_hdr"); + + tail = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_TAIL); + ver = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_VER); + head = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_HEAD); + + switch (ver) { + case 0: + break; + case FM10K_SM_MBX_VERSION: + if (!head || head > FM10K_SM_MBX_FIFO_LEN) + return FM10K_MBX_ERR_HEAD; + if (!tail || tail > FM10K_SM_MBX_FIFO_LEN) + return FM10K_MBX_ERR_TAIL; + if (mbx->tail < head) + head += mbx->mbmem_len - 1; + if (tail < mbx->head) + tail += mbx->mbmem_len - 1; + if (fm10k_mbx_index_len(mbx, head, mbx->tail) > mbx->tail_len) + return FM10K_MBX_ERR_HEAD; + if (fm10k_mbx_index_len(mbx, mbx->head, tail) < mbx->mbmem_len) + break; + return FM10K_MBX_ERR_TAIL; + default: + return FM10K_MBX_ERR_SRC; + } + + return FM10K_SUCCESS; +} + +/** + * fm10k_sm_mbx_process_error - Process header with error flag set + * @mbx: pointer to mailbox + * + * This function is meant to respond to a request where the error flag + * is set. As a result we will terminate a connection if one is present + * and fall back into the reset state with a connection header of version + * 0 (RESET). + **/ +STATIC void fm10k_sm_mbx_process_error(struct fm10k_mbx_info *mbx) +{ + const enum fm10k_mbx_state state = mbx->state; + + switch (state) { + case FM10K_STATE_DISCONNECT: + /* if there is an error just disconnect */ + mbx->remote = 0; + break; + case FM10K_STATE_OPEN: + /* flush any uncompleted work */ + fm10k_sm_mbx_connect_reset(mbx); + break; + case FM10K_STATE_CONNECT: + /* try connnecting at lower version */ + if (mbx->remote) { + while (mbx->local > 1) + mbx->local--; + mbx->remote = 0; + } + break; + default: + break; + } + + fm10k_sm_mbx_create_connect_hdr(mbx, 0); +} + +/** + * fm10k_sm_mbx_create_error_msg - Process an error in FIFO header + * @mbx: pointer to mailbox + * @err: local error encountered + * + * This function will interpret the error provided by err, and based on + * that it may set the error bit in the local message header + **/ +STATIC void fm10k_sm_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err) +{ + /* only generate an error message for these types */ + switch (err) { + case FM10K_MBX_ERR_TAIL: + case FM10K_MBX_ERR_HEAD: + case FM10K_MBX_ERR_SRC: + case FM10K_MBX_ERR_SIZE: + case FM10K_MBX_ERR_RSVD0: + break; + default: + return; + } + + /* process it as though we received an error, and send error reply */ + fm10k_sm_mbx_process_error(mbx); + fm10k_sm_mbx_create_connect_hdr(mbx, 1); +} + +/** + * fm10k_sm_mbx_receive - Take message from Rx mailbox FIFO and put it in Rx + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @tail: tail index of message + * + * This function will dequeue one message from the Rx switch manager mailbox + * FIFO and place it in the Rx mailbox FIFO for processing by software. 
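+ *
+ * Worked detail, for illustration: valid indices run from 1 to
+ * mbmem_len - 1, so a tail numerically below the current head means the
+ * remote FIFO has wrapped; the index mask (mbmem_len - 1) is added to tail
+ * before the copy, and head is then re-aligned to the end of the last
+ * complete message that was pushed.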
+ **/ +STATIC s32 fm10k_sm_mbx_receive(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, + u16 tail) +{ + /* reduce length by 1 to convert to a mask */ + u16 mbmem_len = mbx->mbmem_len - 1; + s32 err; + + DEBUGFUNC("fm10k_sm_mbx_receive"); + + /* push tail in front of head */ + if (tail < mbx->head) + tail += mbmem_len; + + /* copy data to the Rx FIFO */ + err = fm10k_mbx_push_tail(hw, mbx, tail); + if (err < 0) + return err; + + /* process messages if we have received any */ + fm10k_mbx_dequeue_rx(hw, mbx); + + /* guarantee head aligns with the end of the last message */ + mbx->head = fm10k_mbx_head_sub(mbx, mbx->pushed); + mbx->pushed = 0; + + /* clear any extra bits left over since index adds 1 extra bit */ + if (mbx->head > mbmem_len) + mbx->head -= mbmem_len; + + return err; +} + +/** + * fm10k_sm_mbx_transmit - Take message from Tx and put it in Tx mailbox FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @head: head index of message + * + * This function will dequeue one message from the Tx mailbox FIFO and place + * it in the Tx switch manager mailbox FIFO for processing by hardware. + **/ +STATIC void fm10k_sm_mbx_transmit(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, u16 head) +{ + struct fm10k_mbx_fifo *fifo = &mbx->tx; + /* reduce length by 1 to convert to a mask */ + u16 mbmem_len = mbx->mbmem_len - 1; + u16 tail_len, len = 0; + u32 *msg; + + DEBUGFUNC("fm10k_sm_mbx_transmit"); + + /* push head behind tail */ + if (mbx->tail < head) + head += mbmem_len; + + fm10k_mbx_pull_head(hw, mbx, head); + + /* determine msg aligned offset for end of buffer */ + do { + msg = fifo->buffer + fm10k_fifo_head_offset(fifo, len); + tail_len = len; + len += FM10K_TLV_DWORD_LEN(*msg); + } while ((len <= mbx->tail_len) && (len < mbmem_len)); + + /* guarantee we stop on a message boundary */ + if (mbx->tail_len > tail_len) { + mbx->tail = fm10k_mbx_tail_sub(mbx, mbx->tail_len - tail_len); + mbx->tail_len = tail_len; + } + + /* clear any extra bits left over since index adds 1 extra bit */ + if (mbx->tail > mbmem_len) + mbx->tail -= mbmem_len; +} + +/** + * fm10k_sm_mbx_create_reply - Generate reply based on state and remote head + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @head: acknowledgement number + * + * This function will generate an outgoing message based on the current + * mailbox state and the remote FIFO head. It will return the length + * of the outgoing message excluding header on success, and a negative value + * on error. + **/ +STATIC void fm10k_sm_mbx_create_reply(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, u16 head) +{ + switch (mbx->state) { + case FM10K_STATE_OPEN: + case FM10K_STATE_DISCONNECT: + /* flush out Tx data */ + fm10k_sm_mbx_transmit(hw, mbx, head); + + /* generate new header based on data */ + if (mbx->tail_len || (mbx->state == FM10K_STATE_OPEN)) { + fm10k_sm_mbx_create_data_hdr(mbx); + } else { + mbx->remote = 0; + fm10k_sm_mbx_create_connect_hdr(mbx, 0); + } + break; + case FM10K_STATE_CONNECT: + case FM10K_STATE_CLOSED: + fm10k_sm_mbx_create_connect_hdr(mbx, 0); + break; + default: + break; + } +} + +/** + * fm10k_sm_mbx_process_reset - Process header with version == 0 (RESET) + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function is meant to respond to a request where the version data + * is set to 0. As such we will either terminate the connection or go + * into the connect state in order to re-establish the connection. 
This + * function can also be used to respond to an error as the connection + * resetting would also be a means of dealing with errors. + **/ +STATIC void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + const enum fm10k_mbx_state state = mbx->state; + + switch (state) { + case FM10K_STATE_DISCONNECT: + /* drop remote connections and disconnect */ + mbx->state = FM10K_STATE_CLOSED; + mbx->remote = 0; + mbx->local = 0; + break; + case FM10K_STATE_OPEN: + /* flush any incomplete work */ + fm10k_sm_mbx_connect_reset(mbx); + break; + case FM10K_STATE_CONNECT: + /* Update remote value to match local value */ + mbx->remote = mbx->local; + default: + break; + } + + fm10k_sm_mbx_create_reply(hw, mbx, mbx->tail); +} + +/** + * fm10k_sm_mbx_process_version_1 - Process header with version == 1 + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function is meant to process messages received when the remote + * mailbox is active. + **/ +STATIC s32 fm10k_sm_mbx_process_version_1(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + const u32 *hdr = &mbx->mbx_hdr; + u16 head, tail; + s32 len; + + /* pull all fields needed for verification */ + tail = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_TAIL); + head = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_HEAD); + + /* if we are in connect and wanting version 1 then start up and go */ + if (mbx->state == FM10K_STATE_CONNECT) { + if (!mbx->remote) + goto send_reply; + if (mbx->remote != 1) + return FM10K_MBX_ERR_SRC; + + mbx->state = FM10K_STATE_OPEN; + } + + do { + /* abort on message size errors */ + len = fm10k_sm_mbx_receive(hw, mbx, tail); + if (len < 0) + return len; + + /* continue until we have flushed the Rx FIFO */ + } while (len); + +send_reply: + fm10k_sm_mbx_create_reply(hw, mbx, head); + + return FM10K_SUCCESS; +} + +/** + * fm10k_sm_mbx_process - Process switch manager mailbox interrupt + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will process incoming mailbox events and generate mailbox + * replies. It will return a value indicating the number of DWORDs + * transmitted excluding header on success or a negative value on error. + **/ +STATIC s32 fm10k_sm_mbx_process(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + s32 err; + + DEBUGFUNC("fm10k_sm_mbx_process"); + + /* we do not read mailbox if closed */ + if (mbx->state == FM10K_STATE_CLOSED) + return FM10K_SUCCESS; + + /* retrieve data from switch manager */ + err = fm10k_mbx_read(hw, mbx); + if (err) + return err; + + err = fm10k_sm_mbx_validate_fifo_hdr(mbx); + if (err < 0) + goto fifo_err; + + if (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_ERR)) { + fm10k_sm_mbx_process_error(mbx); + goto fifo_err; + } + + switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_VER)) { + case 0: + fm10k_sm_mbx_process_reset(hw, mbx); + break; + case FM10K_SM_MBX_VERSION: + err = fm10k_sm_mbx_process_version_1(hw, mbx); + break; + } + +fifo_err: + if (err < 0) + fm10k_sm_mbx_create_error_msg(mbx, err); + + /* report data to switch manager */ + fm10k_mbx_write(hw, mbx); + + return err; +} + +/** + * fm10k_sm_mbx_init - Initialize mailbox memory for PF/SM mailbox + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @msg_data: handlers for mailbox events + * + * This function initializes the PF/SM mailbox for use. It will split the + * buffer provided and use that to populate both the Tx and Rx FIFO by + * evenly splitting it. 
In order to allow for easy masking of head/tail + * the value reported in size must be a power of 2 and is reported in + * DWORDs, not bytes. Any invalid values will cause the mailbox to return + * error. + **/ +s32 fm10k_sm_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, + const struct fm10k_msg_data *msg_data) +{ + DEBUGFUNC("fm10k_sm_mbx_init"); + UNREFERENCED_1PARAMETER(hw); + + mbx->mbx_reg = FM10K_GMBX; + mbx->mbmem_reg = FM10K_MBMEM_PF(0); + + /* start out in closed state */ + mbx->state = FM10K_STATE_CLOSED; + + /* validate layout of handlers before assigning them */ + if (fm10k_mbx_validate_handlers(msg_data)) + return FM10K_ERR_PARAM; + + /* initialize the message handlers */ + mbx->msg_data = msg_data; + + /* start mailbox as timed out and let the reset_hw call + * set the timeout value to begin communications + */ + mbx->timeout = 0; + mbx->usec_delay = FM10K_MBX_INIT_DELAY; + + /* Split buffer for use by Tx/Rx FIFOs */ + mbx->max_size = FM10K_MBX_MSG_MAX_SIZE; + mbx->mbmem_len = FM10K_MBMEM_PF_XOR; + + /* initialize the FIFOs, sizes are in 4 byte increments */ + fm10k_fifo_init(&mbx->tx, mbx->buffer, FM10K_MBX_TX_BUFFER_SIZE); + fm10k_fifo_init(&mbx->rx, &mbx->buffer[FM10K_MBX_TX_BUFFER_SIZE], + FM10K_MBX_RX_BUFFER_SIZE); + + /* initialize function pointers */ + mbx->ops.connect = fm10k_sm_mbx_connect; + mbx->ops.disconnect = fm10k_sm_mbx_disconnect; + mbx->ops.rx_ready = fm10k_mbx_rx_ready; + mbx->ops.tx_ready = fm10k_mbx_tx_ready; + mbx->ops.tx_complete = fm10k_mbx_tx_complete; + mbx->ops.enqueue_tx = fm10k_mbx_enqueue_tx; + mbx->ops.process = fm10k_sm_mbx_process; + mbx->ops.register_handlers = fm10k_mbx_register_handlers; + + return FM10K_SUCCESS; +} diff --git a/drivers/net/fm10k/base/fm10k_mbx.h b/drivers/net/fm10k/base/fm10k_mbx.h new file mode 100644 index 00000000..edc57dff --- /dev/null +++ b/drivers/net/fm10k/base/fm10k_mbx.h @@ -0,0 +1,324 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#ifndef _FM10K_MBX_H_ +#define _FM10K_MBX_H_ + +/* forward declaration */ +struct fm10k_mbx_info; + +#include "fm10k_type.h" +#include "fm10k_tlv.h" + +/* PF Mailbox Registers */ +#define FM10K_MBMEM(_n) ((_n) + 0x18000) +#define FM10K_MBMEM_VF(_n, _m) (((_n) * 0x10) + (_m) + 0x18000) +#define FM10K_MBMEM_SM(_n) ((_n) + 0x18400) +#define FM10K_MBMEM_PF(_n) ((_n) + 0x18600) +/* XOR provides means of switching from Tx to Rx FIFO */ +#define FM10K_MBMEM_PF_XOR (FM10K_MBMEM_SM(0) ^ FM10K_MBMEM_PF(0)) +#define FM10K_MBX(_n) ((_n) + 0x18800) +#define FM10K_MBX_REQ 0x00000002 +#define FM10K_MBX_ACK 0x00000004 +#define FM10K_MBX_REQ_INTERRUPT 0x00000008 +#define FM10K_MBX_ACK_INTERRUPT 0x00000010 +#define FM10K_MBX_INTERRUPT_ENABLE 0x00000020 +#define FM10K_MBX_INTERRUPT_DISABLE 0x00000040 +#define FM10K_MBICR(_n) ((_n) + 0x18840) +#define FM10K_GMBX 0x18842 + +/* VF Mailbox Registers */ +#define FM10K_VFMBX 0x00010 +#define FM10K_VFMBMEM(_n) ((_n) + 0x00020) +#define FM10K_VFMBMEM_LEN 16 +#define FM10K_VFMBMEM_VF_XOR (FM10K_VFMBMEM_LEN / 2) + +/* Delays/timeouts */ +#define FM10K_MBX_DISCONNECT_TIMEOUT 500 +#define FM10K_MBX_POLL_DELAY 19 +#define FM10K_MBX_INT_DELAY 20 + +#define FM10K_WRITE_MBX(hw, reg, value) FM10K_WRITE_REG(hw, reg, value) + +/* PF/VF Mailbox state machine + * + * +----------+ connect() +----------+ + * | CLOSED | --------------> | CONNECT | + * +----------+ +----------+ + * ^ ^ | + * | rcv: rcv: | | rcv: + * | Connect Disconnect | | Connect + * | Disconnect Error | | Data + * | | | + * | | V + * +----------+ disconnect() +----------+ + * |DISCONNECT| <-------------- | OPEN | + * +----------+ +----------+ + * + * The diagram above describes the PF/VF mailbox state machine. There + * are four main states to this machine. + * Closed: This state represents a mailbox that is in a standby state + * with interrupts disabled. In this state the mailbox should not + * read the mailbox or write any data. The only means of exiting + * this state is for the system to make the connect() call for the + * mailbox, it will then transition to the connect state. + * Connect: In this state the mailbox is seeking a connection. It will + * post a connect message with no specified destination and will + * wait for a reply from the other side of the mailbox. This state + * is exited when either a connect with the local mailbox as the + * destination is received or when a data message is received with + * a valid sequence number. + * Open: In this state the mailbox is able to transfer data between the local + * entity and the remote. It will fall back to connect in the event of + * receiving either an error message, or a disconnect message. It will + * transition to disconnect on a call to disconnect(); + * Disconnect: In this state the mailbox is attempting to gracefully terminate + * the connection. It will do so at the first point where it knows + * that the remote endpoint is either done sending, or when the + * remote endpoint has fallen back into connect. 
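+ *
+ * A typical session, read against the diagram above (illustrative only):
+ *
+ *   CLOSED --connect()--> CONNECT --rcv Connect/Data--> OPEN
+ *   OPEN --disconnect()--> DISCONNECT --Tx drained, rcv Disconnect--> CLOSED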
+ */ +enum fm10k_mbx_state { + FM10K_STATE_CLOSED, + FM10K_STATE_CONNECT, + FM10K_STATE_OPEN, + FM10K_STATE_DISCONNECT, +}; + +/* PF/VF Mailbox header format + * 3 2 1 0 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Size/Err_no/CRC | Rsvd0 | Head | Tail | Type | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * The layout above describes the format for the header used in the PF/VF + * mailbox. The header is broken out into the following fields: + * Type: There are 4 supported message types + * 0x8: Data header - used to transport message data + * 0xC: Connect header - used to establish connection + * 0xD: Disconnect header - used to tear down a connection + * 0xE: Error header - used to address message exceptions + * Tail: Tail index for local FIFO + * Tail index actually consists of two parts. The MSB of + * the head is a loop tracker, it is 0 on an even numbered + * loop through the FIFO, and 1 on the odd numbered loops. + * To get the actual mailbox offset based on the tail it + * is necessary to add bit 3 to bit 0 and clear bit 3. This + * gives us a valid range of 0x1 - 0xE. + * Head: Head index for remote FIFO + * Head index follows the same format as the tail index. + * Rsvd0: Reserved 0 portion of the mailbox header + * CRC: Running CRC for all data since connect plus current message header + * Size: Maximum message size - Applies only to connect headers + * The maximum message size is provided during connect to avoid + * jamming the mailbox with messages that do not fit. + * Err_no: Error number - Applies only to error headers + * The error number provides an indication of the type of error + * experienced. + */ + +/* macros for retrieving and setting header values */ +#define FM10K_MSG_HDR_MASK(name) \ + ((0x1u << FM10K_MSG_##name##_SIZE) - 1) +#define FM10K_MSG_HDR_FIELD_SET(value, name) \ + (((u32)(value) & FM10K_MSG_HDR_MASK(name)) << FM10K_MSG_##name##_SHIFT) +#define FM10K_MSG_HDR_FIELD_GET(value, name) \ + ((u16)((value) >> FM10K_MSG_##name##_SHIFT) & FM10K_MSG_HDR_MASK(name)) + +/* offsets shared between all headers */ +#define FM10K_MSG_TYPE_SHIFT 0 +#define FM10K_MSG_TYPE_SIZE 4 +#define FM10K_MSG_TAIL_SHIFT 4 +#define FM10K_MSG_TAIL_SIZE 4 +#define FM10K_MSG_HEAD_SHIFT 8 +#define FM10K_MSG_HEAD_SIZE 4 +#define FM10K_MSG_RSVD0_SHIFT 12 +#define FM10K_MSG_RSVD0_SIZE 4 + +/* offsets for data/disconnect headers */ +#define FM10K_MSG_CRC_SHIFT 16 +#define FM10K_MSG_CRC_SIZE 16 + +/* offsets for connect headers */ +#define FM10K_MSG_CONNECT_SIZE_SHIFT 16 +#define FM10K_MSG_CONNECT_SIZE_SIZE 16 + +/* offsets for error headers */ +#define FM10K_MSG_ERR_NO_SHIFT 16 +#define FM10K_MSG_ERR_NO_SIZE 16 + +enum fm10k_msg_type { + FM10K_MSG_DATA = 0x8, + FM10K_MSG_CONNECT = 0xC, + FM10K_MSG_DISCONNECT = 0xD, + FM10K_MSG_ERROR = 0xE, +}; + +/* HNI/SM Mailbox FIFO format + * 3 2 1 0 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-------+-----------------------+-------+-----------------------+ + * | Error | Remote Head |Version| Local Tail | + * +-------+-----------------------+-------+-----------------------+ + * | | + * . Local FIFO Data . + * . . + * +-------+-----------------------+-------+-----------------------+ + * + * The layout above describes the format for the FIFOs used by the host + * network interface and the switch manager to communicate messages back + * and forth. Both the HNI and the switch maintain one such FIFO. 
The + * layout in memory has the switch manager FIFO followed immediately by + * the HNI FIFO. For this reason I am using just the pointer to the + * HNI FIFO in the mailbox ops as the offset between the two is fixed. + * + * The header for the FIFO is broken out into the following fields: + * Local Tail: Offset into FIFO region for next DWORD to write. + * Version: Version info for mailbox, only values of 0/1 are supported. + * Remote Head: Offset into remote FIFO to indicate how much we have read. + * Error: Error indication, values TBD. + */ + +/* version number for switch manager mailboxes */ +#define FM10K_SM_MBX_VERSION 1 +#define FM10K_SM_MBX_FIFO_LEN (FM10K_MBMEM_PF_XOR - 1) + +/* offsets shared between all SM FIFO headers */ +#define FM10K_MSG_SM_TAIL_SHIFT 0 +#define FM10K_MSG_SM_TAIL_SIZE 12 +#define FM10K_MSG_SM_VER_SHIFT 12 +#define FM10K_MSG_SM_VER_SIZE 4 +#define FM10K_MSG_SM_HEAD_SHIFT 16 +#define FM10K_MSG_SM_HEAD_SIZE 12 +#define FM10K_MSG_SM_ERR_SHIFT 28 +#define FM10K_MSG_SM_ERR_SIZE 4 + +/* All error messages returned by mailbox functions + * The value -511 is 0xFE01 in hex. The idea is to order the errors + * from 0xFE01 - 0xFEFF so error codes are easily visible in the mailbox + * messages. This also helps to avoid error number collisions as Linux + * doesn't appear to use error numbers 256 - 511. + */ +#define FM10K_MBX_ERR(_n) ((_n) - 512) +#define FM10K_MBX_ERR_NO_MBX FM10K_MBX_ERR(0x01) +#define FM10K_MBX_ERR_NO_SPACE FM10K_MBX_ERR(0x03) +#define FM10K_MBX_ERR_TAIL FM10K_MBX_ERR(0x05) +#define FM10K_MBX_ERR_HEAD FM10K_MBX_ERR(0x06) +#define FM10K_MBX_ERR_SRC FM10K_MBX_ERR(0x08) +#define FM10K_MBX_ERR_TYPE FM10K_MBX_ERR(0x09) +#define FM10K_MBX_ERR_SIZE FM10K_MBX_ERR(0x0B) +#define FM10K_MBX_ERR_BUSY FM10K_MBX_ERR(0x0C) +#define FM10K_MBX_ERR_RSVD0 FM10K_MBX_ERR(0x0E) +#define FM10K_MBX_ERR_CRC FM10K_MBX_ERR(0x0F) + +#define FM10K_MBX_CRC_SEED 0xFFFF + +struct fm10k_mbx_ops { + s32 (*connect)(struct fm10k_hw *, struct fm10k_mbx_info *); + void (*disconnect)(struct fm10k_hw *, struct fm10k_mbx_info *); + bool (*rx_ready)(struct fm10k_mbx_info *); + bool (*tx_ready)(struct fm10k_mbx_info *, u16); + bool (*tx_complete)(struct fm10k_mbx_info *); + s32 (*enqueue_tx)(struct fm10k_hw *, struct fm10k_mbx_info *, + const u32 *); + s32 (*process)(struct fm10k_hw *, struct fm10k_mbx_info *); + s32 (*register_handlers)(struct fm10k_mbx_info *, + const struct fm10k_msg_data *); +}; + +struct fm10k_mbx_fifo { + u32 *buffer; + u16 head; + u16 tail; + u16 size; +}; + +/* size of buffer to be stored in mailbox for FIFOs */ +#define FM10K_MBX_TX_BUFFER_SIZE 512 +#define FM10K_MBX_RX_BUFFER_SIZE 128 +#define FM10K_MBX_BUFFER_SIZE \ + (FM10K_MBX_TX_BUFFER_SIZE + FM10K_MBX_RX_BUFFER_SIZE) + +/* minimum and maximum message size in dwords */ +#define FM10K_MBX_MSG_MAX_SIZE \ + ((FM10K_MBX_TX_BUFFER_SIZE - 1) & (FM10K_MBX_RX_BUFFER_SIZE - 1)) +#define FM10K_VFMBX_MSG_MTU ((FM10K_VFMBMEM_LEN / 2) - 1) + +#define FM10K_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define FM10K_MBX_INIT_DELAY 500 /* microseconds between retries */ + +struct fm10k_mbx_info { + /* function pointers for mailbox operations */ + struct fm10k_mbx_ops ops; + const struct fm10k_msg_data *msg_data; + + /* message FIFOs */ + struct fm10k_mbx_fifo rx; + struct fm10k_mbx_fifo tx; + + /* delay for handling timeouts */ + u32 timeout; + u32 usec_delay; + + /* mailbox state info */ + u32 mbx_reg, mbmem_reg, mbx_lock, mbx_hdr; + u16 max_size, mbmem_len; + u16 tail, tail_len, pulled; + u16 head, head_len, 
pushed; + u16 local, remote; + enum fm10k_mbx_state state; + + /* result of last mailbox test */ + s32 test_result; + + /* statistics */ + u64 tx_busy; + u64 tx_dropped; + u64 tx_messages; + u64 tx_dwords; + u64 tx_mbmem_pulled; + u64 rx_messages; + u64 rx_dwords; + u64 rx_mbmem_pushed; + u64 rx_parse_err; + + /* Buffer to store messages */ + u32 buffer[FM10K_MBX_BUFFER_SIZE]; +}; + +s32 fm10k_pfvf_mbx_init(struct fm10k_hw *, struct fm10k_mbx_info *, + const struct fm10k_msg_data *, u8); +s32 fm10k_sm_mbx_init(struct fm10k_hw *, struct fm10k_mbx_info *, + const struct fm10k_msg_data *); + +#endif /* _FM10K_MBX_H_ */ diff --git a/drivers/net/fm10k/base/fm10k_osdep.h b/drivers/net/fm10k/base/fm10k_osdep.h new file mode 100644 index 00000000..a21daa2a --- /dev/null +++ b/drivers/net/fm10k/base/fm10k_osdep.h @@ -0,0 +1,190 @@ +/******************************************************************************* + +Copyright (c) 2013-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _FM10K_OSDEP_H_ +#define _FM10K_OSDEP_H_ + +#include <stdint.h> +#include <string.h> +#include <rte_atomic.h> +#include <rte_byteorder.h> +#include <rte_cycles.h> +#include "../fm10k_logs.h" + +/* TODO: this does not look like it should be used... */ +#define ERROR_REPORT2(v1, v2, v3) do { } while (0) + +#ifndef BOULDER_RAPIDS_HW +#define BOULDER_RAPIDS_HW +#endif + +#define STATIC static +#define DEBUGFUNC(F) DEBUGOUT(F "\n"); +#define DEBUGOUT(S, args...) PMD_DRV_LOG_RAW(DEBUG, S, ##args) +#define DEBUGOUT1(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT2(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT3(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT6(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT7(S, args...) 
DEBUGOUT(S, ##args) + +#define FALSE 0 +#define TRUE 1 +#ifndef false +#define false FALSE +#endif +#ifndef true +#define true TRUE +#endif + +typedef uint8_t u8; +typedef int8_t s8; +typedef uint16_t u16; +typedef int16_t s16; +typedef uint32_t u32; +typedef int32_t s32; +typedef int64_t s64; +typedef uint64_t u64; +typedef int bool; + +#ifndef __le16 +#define __le16 u16 +#define __le32 u32 +#define __le64 u64 +#endif +#ifndef __be16 +#define __be16 u16 +#define __be32 u32 +#define __be64 u64 +#endif + +/* offsets are WORD offsets, not BYTE offsets */ +#define FM10K_WRITE_REG(hw, reg, val) \ + ((((volatile uint32_t *)(hw)->hw_addr)[(reg)]) = ((uint32_t)(val))) +#define FM10K_READ_REG(hw, reg) \ + (((volatile uint32_t *)(hw)->hw_addr)[(reg)]) +#define FM10K_WRITE_FLUSH(a) FM10K_READ_REG(a, FM10K_CTRL) + +#define FM10K_PCI_REG(reg) (*((volatile uint32_t *)(reg))) + +#define FM10K_PCI_REG_WRITE(reg, value) do { \ + FM10K_PCI_REG((reg)) = (value); \ +} while (0) + +/* not implemented */ +#define FM10K_READ_PCI_WORD(hw, reg) 0 + +#define FM10K_WRITE_MBX(hw, reg, value) FM10K_WRITE_REG(hw, reg, value) +#define FM10K_READ_MBX(hw, reg) FM10K_READ_REG(hw, reg) + +#define FM10K_LE16_TO_CPU rte_le_to_cpu_16 +#define FM10K_LE32_TO_CPU rte_le_to_cpu_32 +#define FM10K_CPU_TO_LE32 rte_cpu_to_le_32 +#define FM10K_CPU_TO_LE16 rte_cpu_to_le_16 +#define le16_to_cpu rte_le_to_cpu_16 + +#define FM10K_RMB rte_rmb +#define FM10K_WMB rte_wmb + +#define usec_delay rte_delay_us + +#define FM10K_REMOVED(hw_addr) (!(hw_addr)) + +#ifndef FM10K_IS_ZERO_ETHER_ADDR +/* make certain address is not 0 */ +#define FM10K_IS_ZERO_ETHER_ADDR(addr) \ +(!((addr)[0] | (addr)[1] | (addr)[2] | (addr)[3] | (addr)[4] | (addr)[5])) +#endif + +#ifndef FM10K_IS_MULTICAST_ETHER_ADDR +#define FM10K_IS_MULTICAST_ETHER_ADDR(addr) ((addr)[0] & 0x1) +#endif + +#ifndef FM10K_IS_VALID_ETHER_ADDR +/* make certain address is not multicast or 0 */ +#define FM10K_IS_VALID_ETHER_ADDR(addr) \ +(!FM10K_IS_MULTICAST_ETHER_ADDR(addr) && !FM10K_IS_ZERO_ETHER_ADDR(addr)) +#endif + +#ifndef do_div +#define do_div(n, base) ({\ + (n) = (n) / (base);\ +}) +#endif /* do_div */ + +/* DPDK can't access IOMEM directly */ +#ifndef FM10K_WRITE_SW_REG +#define FM10K_WRITE_SW_REG(v1, v2, v3) do { } while (0) +#endif + +#ifndef fm10k_read_reg +#define fm10k_read_reg FM10K_READ_REG +#endif + +#define FM10K_INTEL_VENDOR_ID 0x8086 +#define FM10K_DMA_CTRL_MINMSS_SHIFT 9 +#define FM10K_EICR_PCA_FAULT 0x00000001 +#define FM10K_EICR_THI_FAULT 0x00000004 +#define FM10K_EICR_FUM_FAULT 0x00000020 +#define FM10K_EICR_SRAMERROR 0x00000400 +#define FM10K_SRAM_IP 0x13003 +#define FM10K_RXINT_TIMER_SHIFT 8 +#define FM10K_TXINT_TIMER_SHIFT 8 +#define FM10K_RXD_PKTTYPE_MASK 0x03F0 +#define FM10K_RXD_PKTTYPE_SHIFT 4 + +enum fm10k_rdesc_pkt_type { + /* L3 type */ + FM10K_PKTTYPE_OTHER = 0x00, + FM10K_PKTTYPE_IPV4 = 0x01, + FM10K_PKTTYPE_IPV4_EX = 0x02, + FM10K_PKTTYPE_IPV6 = 0x03, + FM10K_PKTTYPE_IPV6_EX = 0x04, + + /* L4 type */ + FM10K_PKTTYPE_TCP = 0x08, + FM10K_PKTTYPE_UDP = 0x10, + FM10K_PKTTYPE_GRE = 0x18, + FM10K_PKTTYPE_VXLAN = 0x20, + FM10K_PKTTYPE_NVGRE = 0x28, + FM10K_PKTTYPE_GENEVE = 0x30 +}; + +#define FM10K_RXD_STATUS_IPCS 0x0008 /* Indicates IPv4 csum */ +#define FM10K_RXD_STATUS_HBO 0x0400 /* header buffer overrun */ + +#define FM10K_TSO_MINMSS \ + (FM10K_DMA_CTRL_MINMSS_64 >> FM10K_DMA_CTRL_MINMSS_SHIFT) +#define FM10K_TSO_MIN_HEADERLEN 54 +#define FM10K_TSO_MAX_HEADERLEN 192 + +#endif /* _FM10K_OSDEP_H_ */ diff --git a/drivers/net/fm10k/base/fm10k_pf.c 
b/drivers/net/fm10k/base/fm10k_pf.c new file mode 100644 index 00000000..105babf4 --- /dev/null +++ b/drivers/net/fm10k/base/fm10k_pf.c @@ -0,0 +1,2102 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#include "fm10k_pf.h" +#include "fm10k_vf.h" + +/** + * fm10k_reset_hw_pf - PF hardware reset + * @hw: pointer to hardware structure + * + * This function should return the hardware to a state similar to the + * one it is in after being powered on. 
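+ *
+ * Expected outcomes, sketched for illustration (the mac.ops.reset_hw hookup
+ * that normally reaches this function is assumed here): FM10K_ERR_DMA_PENDING
+ * if Tx/Rx DMA is still active, FM10K_ERR_RESET_FAILED if the device does not
+ * report itself out of reset afterwards, otherwise FM10K_SUCCESS; when the
+ * switch is not yet ready the data path reset is skipped entirely.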
+ **/ +STATIC s32 fm10k_reset_hw_pf(struct fm10k_hw *hw) +{ + s32 err; + u32 reg; + u16 i; + + DEBUGFUNC("fm10k_reset_hw_pf"); + + /* Disable interrupts */ + FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(ALL)); + + /* Lock ITR2 reg 0 into itself and disable interrupt moderation */ + FM10K_WRITE_REG(hw, FM10K_ITR2(0), 0); + FM10K_WRITE_REG(hw, FM10K_INT_CTRL, 0); + + /* We assume here Tx and Rx queue 0 are owned by the PF */ + + /* Shut off VF access to their queues forcing them to queue 0 */ + for (i = 0; i < FM10K_TQMAP_TABLE_SIZE; i++) { + FM10K_WRITE_REG(hw, FM10K_TQMAP(i), 0); + FM10K_WRITE_REG(hw, FM10K_RQMAP(i), 0); + } + + /* shut down all rings */ + err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES); + if (err) + return err; + + /* Verify that DMA is no longer active */ + reg = FM10K_READ_REG(hw, FM10K_DMA_CTRL); + if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE)) + return FM10K_ERR_DMA_PENDING; + + /* verify the switch is ready for reset */ + reg = FM10K_READ_REG(hw, FM10K_DMA_CTRL2); + if (!(reg & FM10K_DMA_CTRL2_SWITCH_READY)) + goto out; + + /* Inititate data path reset */ + reg |= FM10K_DMA_CTRL_DATAPATH_RESET; + FM10K_WRITE_REG(hw, FM10K_DMA_CTRL, reg); + + /* Flush write and allow 100us for reset to complete */ + FM10K_WRITE_FLUSH(hw); + usec_delay(FM10K_RESET_TIMEOUT); + + /* Verify we made it out of reset */ + reg = FM10K_READ_REG(hw, FM10K_IP); + if (!(reg & FM10K_IP_NOTINRESET)) + err = FM10K_ERR_RESET_FAILED; + +out: + return err; +} + +/** + * fm10k_is_ari_hierarchy_pf - Indicate ARI hierarchy support + * @hw: pointer to hardware structure + * + * Looks at the ARI hierarchy bit to determine whether ARI is supported or not. + **/ +STATIC bool fm10k_is_ari_hierarchy_pf(struct fm10k_hw *hw) +{ + u16 sriov_ctrl = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_SRIOV_CTRL); + + DEBUGFUNC("fm10k_is_ari_hierarchy_pf"); + + return !!(sriov_ctrl & FM10K_PCIE_SRIOV_CTRL_VFARI); +} + +/** + * fm10k_init_hw_pf - PF hardware initialization + * @hw: pointer to hardware structure + * + **/ +STATIC s32 fm10k_init_hw_pf(struct fm10k_hw *hw) +{ + u32 dma_ctrl, txqctl; + u16 i; + + DEBUGFUNC("fm10k_init_hw_pf"); + + /* Establish default VSI as valid */ + FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(fm10k_dglort_default), 0); + FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(fm10k_dglort_default), + FM10K_DGLORTMAP_ANY); + + /* Invalidate all other GLORT entries */ + for (i = 1; i < FM10K_DGLORT_COUNT; i++) + FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i), FM10K_DGLORTMAP_NONE); + + /* reset ITR2(0) to point to itself */ + FM10K_WRITE_REG(hw, FM10K_ITR2(0), 0); + + /* reset VF ITR2(0) to point to 0 avoid PF registers */ + FM10K_WRITE_REG(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), 0); + + /* loop through all PF ITR2 registers pointing them to the previous */ + for (i = 1; i < FM10K_ITR_REG_COUNT_PF; i++) + FM10K_WRITE_REG(hw, FM10K_ITR2(i), i - 1); + + /* Enable interrupt moderator if not already enabled */ + FM10K_WRITE_REG(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR); + + /* compute the default txqctl configuration */ + txqctl = FM10K_TXQCTL_PF | FM10K_TXQCTL_UNLIMITED_BW | + (hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT); + + for (i = 0; i < FM10K_MAX_QUEUES; i++) { + /* configure rings for 256 Queue / 32 Descriptor cache mode */ + FM10K_WRITE_REG(hw, FM10K_TQDLOC(i), + (i * FM10K_TQDLOC_BASE_32_DESC) | + FM10K_TQDLOC_SIZE_32_DESC); + FM10K_WRITE_REG(hw, FM10K_TXQCTL(i), txqctl); + + /* configure rings to provide TPH processing hints */ + FM10K_WRITE_REG(hw, FM10K_TPH_TXCTRL(i), + 
FM10K_TPH_TXCTRL_DESC_TPHEN | + FM10K_TPH_TXCTRL_DESC_RROEN | + FM10K_TPH_TXCTRL_DESC_WROEN | + FM10K_TPH_TXCTRL_DATA_RROEN); + FM10K_WRITE_REG(hw, FM10K_TPH_RXCTRL(i), + FM10K_TPH_RXCTRL_DESC_TPHEN | + FM10K_TPH_RXCTRL_DESC_RROEN | + FM10K_TPH_RXCTRL_DATA_WROEN | + FM10K_TPH_RXCTRL_HDR_WROEN); + } + + /* set max hold interval to align with 1.024 usec in all modes and + * store ITR scale + */ + switch (hw->bus.speed) { + case fm10k_bus_speed_2500: + dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1; + hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN1; + break; + case fm10k_bus_speed_5000: + dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2; + hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN2; + break; + case fm10k_bus_speed_8000: + dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3; + hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3; + break; + default: + dma_ctrl = 0; + /* just in case, assume Gen3 ITR scale */ + hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3; + break; + } + + /* Configure TSO flags */ + FM10K_WRITE_REG(hw, FM10K_DTXTCPFLGL, FM10K_TSO_FLAGS_LOW); + FM10K_WRITE_REG(hw, FM10K_DTXTCPFLGH, FM10K_TSO_FLAGS_HI); + + /* Enable DMA engine + * Set Rx Descriptor size to 32 + * Set Minimum MSS to 64 + * Set Maximum number of Rx queues to 256 / 32 Descriptor + */ + dma_ctrl |= FM10K_DMA_CTRL_TX_ENABLE | FM10K_DMA_CTRL_RX_ENABLE | + FM10K_DMA_CTRL_RX_DESC_SIZE | FM10K_DMA_CTRL_MINMSS_64 | + FM10K_DMA_CTRL_32_DESC; + + FM10K_WRITE_REG(hw, FM10K_DMA_CTRL, dma_ctrl); + + /* record maximum queue count, we limit ourselves to 128 */ + hw->mac.max_queues = FM10K_MAX_QUEUES_PF; + + /* We support either 64 VFs or 7 VFs depending on if we have ARI */ + hw->iov.total_vfs = fm10k_is_ari_hierarchy_pf(hw) ? 64 : 7; + + return FM10K_SUCCESS; +} + +#ifndef NO_IS_SLOT_APPROPRIATE_CHECK +/** + * fm10k_is_slot_appropriate_pf - Indicate appropriate slot for this SKU + * @hw: pointer to hardware structure + * + * Looks at the PCIe bus info to confirm whether or not this slot can support + * the necessary bandwidth for this device. + **/ +STATIC bool fm10k_is_slot_appropriate_pf(struct fm10k_hw *hw) +{ + DEBUGFUNC("fm10k_is_slot_appropriate_pf"); + + return (hw->bus.speed == hw->bus_caps.speed) && + (hw->bus.width == hw->bus_caps.width); +} + +#endif +/** + * fm10k_update_vlan_pf - Update status of VLAN ID in VLAN filter table + * @hw: pointer to hardware structure + * @vid: VLAN ID to add to table + * @vsi: Index indicating VF ID or PF ID in table + * @set: Indicates if this is a set or clear operation + * + * This function adds or removes the corresponding VLAN ID from the VLAN + * filter table for the corresponding function. In addition to the + * standard set/clear that supports one bit a multi-bit write is + * supported to set 64 bits at a time. + **/ +STATIC s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set) +{ + u32 vlan_table, reg, mask, bit, len; + + /* verify the VSI index is valid */ + if (vsi > FM10K_VLAN_TABLE_VSI_MAX) + return FM10K_ERR_PARAM; + + /* VLAN multi-bit write: + * The multi-bit write has several parts to it. 
+ * 3 2 1 0 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | RSVD0 | Length |C|RSVD0| VLAN ID | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * VLAN ID: Vlan Starting value + * RSVD0: Reserved section, must be 0 + * C: Flag field, 0 is set, 1 is clear (Used in VF VLAN message) + * Length: Number of times to repeat the bit being set + */ + len = vid >> 16; + vid = (vid << 17) >> 17; + + /* verify the reserved 0 fields are 0 */ + if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX) + return FM10K_ERR_PARAM; + + /* Loop through the table updating all required VLANs */ + for (reg = FM10K_VLAN_TABLE(vsi, vid / 32), bit = vid % 32; + len < FM10K_VLAN_TABLE_VID_MAX; + len -= 32 - bit, reg++, bit = 0) { + /* record the initial state of the register */ + vlan_table = FM10K_READ_REG(hw, reg); + + /* truncate mask if we are at the start or end of the run */ + mask = (~(u32)0 >> ((len < 31) ? 31 - len : 0)) << bit; + + /* make necessary modifications to the register */ + mask &= set ? ~vlan_table : vlan_table; + if (mask) + FM10K_WRITE_REG(hw, reg, vlan_table ^ mask); + } + + return FM10K_SUCCESS; +} + +/** + * fm10k_read_mac_addr_pf - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the SM_AREA and stores the value. + **/ +STATIC s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw) +{ + u8 perm_addr[ETH_ALEN]; + u32 serial_num; + + DEBUGFUNC("fm10k_read_mac_addr_pf"); + + serial_num = FM10K_READ_REG(hw, FM10K_SM_AREA(1)); + + /* last byte should be all 1's */ + if ((~serial_num) << 24) + return FM10K_ERR_INVALID_MAC_ADDR; + + perm_addr[0] = (u8)(serial_num >> 24); + perm_addr[1] = (u8)(serial_num >> 16); + perm_addr[2] = (u8)(serial_num >> 8); + + serial_num = FM10K_READ_REG(hw, FM10K_SM_AREA(0)); + + /* first byte should be all 1's */ + if ((~serial_num) >> 24) + return FM10K_ERR_INVALID_MAC_ADDR; + + perm_addr[3] = (u8)(serial_num >> 16); + perm_addr[4] = (u8)(serial_num >> 8); + perm_addr[5] = (u8)(serial_num); + + memcpy(hw->mac.perm_addr, perm_addr, ETH_ALEN); + memcpy(hw->mac.addr, perm_addr, ETH_ALEN); + + return FM10K_SUCCESS; +} + +/** + * fm10k_glort_valid_pf - Validate that the provided glort is valid + * @hw: pointer to the HW structure + * @glort: base glort to be validated + * + * This function will return an error if the provided glort is invalid + **/ +bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort) +{ + glort &= hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT; + + return glort == (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE); +} + +/** + * fm10k_update_xc_addr_pf - Update device addresses + * @hw: pointer to the HW structure + * @glort: base resource tag for this request + * @mac: MAC address to add/remove from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * @flags: flags field to indicate add and secure + * + * This function generates a message to the Switch API requesting + * that the given logical port add/remove the given L2 MAC/VLAN address. 
+ **/ +STATIC s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort, + const u8 *mac, u16 vid, bool add, u8 flags) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + struct fm10k_mac_update mac_update; + u32 msg[5]; + + DEBUGFUNC("fm10k_update_xc_addr_pf"); + + /* clear set bit from VLAN ID */ + vid &= ~FM10K_VLAN_CLEAR; + + /* if glort or VLAN are not valid return error */ + if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX) + return FM10K_ERR_PARAM; + + /* record fields */ + mac_update.mac_lower = FM10K_CPU_TO_LE32(((u32)mac[2] << 24) | + ((u32)mac[3] << 16) | + ((u32)mac[4] << 8) | + ((u32)mac[5])); + mac_update.mac_upper = FM10K_CPU_TO_LE16(((u16)mac[0] << 8) | + ((u16)mac[1])); + mac_update.vlan = FM10K_CPU_TO_LE16(vid); + mac_update.glort = FM10K_CPU_TO_LE16(glort); + mac_update.action = add ? 0 : 1; + mac_update.flags = flags; + + /* populate mac_update fields */ + fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE); + fm10k_tlv_attr_put_le_struct(msg, FM10K_PF_ATTR_ID_MAC_UPDATE, + &mac_update, sizeof(mac_update)); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_update_uc_addr_pf - Update device unicast addresses + * @hw: pointer to the HW structure + * @glort: base resource tag for this request + * @mac: MAC address to add/remove from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * @flags: flags field to indicate add and secure + * + * This function is used to add or remove unicast addresses for + * the PF. + **/ +STATIC s32 fm10k_update_uc_addr_pf(struct fm10k_hw *hw, u16 glort, + const u8 *mac, u16 vid, bool add, u8 flags) +{ + DEBUGFUNC("fm10k_update_uc_addr_pf"); + + /* verify MAC address is valid */ + if (!FM10K_IS_VALID_ETHER_ADDR(mac)) + return FM10K_ERR_PARAM; + + return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, flags); +} + +/** + * fm10k_update_mc_addr_pf - Update device multicast addresses + * @hw: pointer to the HW structure + * @glort: base resource tag for this request + * @mac: MAC address to add/remove from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * + * This function is used to add or remove multicast MAC addresses for + * the PF. + **/ +STATIC s32 fm10k_update_mc_addr_pf(struct fm10k_hw *hw, u16 glort, + const u8 *mac, u16 vid, bool add) +{ + DEBUGFUNC("fm10k_update_mc_addr_pf"); + + /* verify multicast address is valid */ + if (!FM10K_IS_MULTICAST_ETHER_ADDR(mac)) + return FM10K_ERR_PARAM; + + return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, 0); +} + +/** + * fm10k_update_xcast_mode_pf - Request update of multicast mode + * @hw: pointer to hardware structure + * @glort: base resource tag for this request + * @mode: integer value indicating mode being requested + * + * This function will attempt to request a higher mode for the port + * so that it can enable either multicast, multicast promiscuous, or + * promiscuous mode of operation. 
+ **/ +STATIC s32 fm10k_update_xcast_mode_pf(struct fm10k_hw *hw, u16 glort, u8 mode) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[3], xcast_mode; + + DEBUGFUNC("fm10k_update_xcast_mode_pf"); + + if (mode > FM10K_XCAST_MODE_NONE) + return FM10K_ERR_PARAM; + + /* if glort is not valid return error */ + if (!fm10k_glort_valid_pf(hw, glort)) + return FM10K_ERR_PARAM; + + /* write xcast mode as a single u32 value, + * lower 16 bits: glort + * upper 16 bits: mode + */ + xcast_mode = ((u32)mode << 16) | glort; + + /* generate message requesting to change xcast mode */ + fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_XCAST_MODES); + fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_XCAST_MODE, xcast_mode); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_update_int_moderator_pf - Update interrupt moderator linked list + * @hw: pointer to hardware structure + * + * This function walks through the MSI-X vector table to determine the + * number of active interrupts and based on that information updates the + * interrupt moderator linked list. + **/ +STATIC void fm10k_update_int_moderator_pf(struct fm10k_hw *hw) +{ + u32 i; + + /* Disable interrupt moderator */ + FM10K_WRITE_REG(hw, FM10K_INT_CTRL, 0); + + /* loop through PF from last to first looking enabled vectors */ + for (i = FM10K_ITR_REG_COUNT_PF - 1; i; i--) { + if (!FM10K_READ_REG(hw, FM10K_MSIX_VECTOR_MASK(i))) + break; + } + + /* always reset VFITR2[0] to point to last enabled PF vector */ + FM10K_WRITE_REG(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i); + + /* reset ITR2[0] to point to last enabled PF vector */ + if (!hw->iov.num_vfs) + FM10K_WRITE_REG(hw, FM10K_ITR2(0), i); + + /* Enable interrupt moderator */ + FM10K_WRITE_REG(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR); +} + +/** + * fm10k_update_lport_state_pf - Notify the switch of a change in port state + * @hw: pointer to the HW structure + * @glort: base resource tag for this request + * @count: number of logical ports being updated + * @enable: boolean value indicating enable or disable + * + * This function is used to add/remove a logical port from the switch. + **/ +STATIC s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort, + u16 count, bool enable) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[3], lport_msg; + + DEBUGFUNC("fm10k_lport_state_pf"); + + /* do nothing if we are being asked to create or destroy 0 ports */ + if (!count) + return FM10K_SUCCESS; + + /* if glort is not valid return error */ + if (!fm10k_glort_valid_pf(hw, glort)) + return FM10K_ERR_PARAM; + + /* construct the lport message from the 2 pieces of data we have */ + lport_msg = ((u32)count << 16) | glort; + + /* generate lport create/delete message */ + fm10k_tlv_msg_init(msg, enable ? FM10K_PF_MSG_ID_LPORT_CREATE : + FM10K_PF_MSG_ID_LPORT_DELETE); + fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_PORT, lport_msg); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_configure_dglort_map_pf - Configures GLORT entry and queues + * @hw: pointer to hardware structure + * @dglort: pointer to dglort configuration structure + * + * Reads the configuration structure contained in dglort_cfg and uses + * that information to then populate a DGLORTMAP/DEC entry and the queues + * to which it has been assigned. 
+ **/ +STATIC s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw, + struct fm10k_dglort_cfg *dglort) +{ + u16 glort, queue_count, vsi_count, pc_count; + u16 vsi, queue, pc, q_idx; + u32 txqctl, dglortdec, dglortmap; + + /* verify the dglort pointer */ + if (!dglort) + return FM10K_ERR_PARAM; + + /* verify the dglort values */ + if ((dglort->idx > 7) || (dglort->rss_l > 7) || (dglort->pc_l > 3) || + (dglort->vsi_l > 6) || (dglort->vsi_b > 64) || + (dglort->queue_l > 8) || (dglort->queue_b >= 256)) + return FM10K_ERR_PARAM; + + /* determine count of VSIs and queues */ + queue_count = BIT(dglort->rss_l + dglort->pc_l); + vsi_count = BIT(dglort->vsi_l + dglort->queue_l); + glort = dglort->glort; + q_idx = dglort->queue_b; + + /* configure SGLORT for queues */ + for (vsi = 0; vsi < vsi_count; vsi++, glort++) { + for (queue = 0; queue < queue_count; queue++, q_idx++) { + if (q_idx >= FM10K_MAX_QUEUES) + break; + + FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(q_idx), glort); + FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(q_idx), glort); + } + } + + /* determine count of PCs and queues */ + queue_count = BIT(dglort->queue_l + dglort->rss_l + dglort->vsi_l); + pc_count = BIT(dglort->pc_l); + + /* configure PC for Tx queues */ + for (pc = 0; pc < pc_count; pc++) { + q_idx = pc + dglort->queue_b; + for (queue = 0; queue < queue_count; queue++) { + if (q_idx >= FM10K_MAX_QUEUES) + break; + + txqctl = FM10K_READ_REG(hw, FM10K_TXQCTL(q_idx)); + txqctl &= ~FM10K_TXQCTL_PC_MASK; + txqctl |= pc << FM10K_TXQCTL_PC_SHIFT; + FM10K_WRITE_REG(hw, FM10K_TXQCTL(q_idx), txqctl); + + q_idx += pc_count; + } + } + + /* configure DGLORTDEC */ + dglortdec = ((u32)(dglort->rss_l) << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | + ((u32)(dglort->queue_b) << FM10K_DGLORTDEC_QBASE_SHIFT) | + ((u32)(dglort->pc_l) << FM10K_DGLORTDEC_PCLENGTH_SHIFT) | + ((u32)(dglort->vsi_b) << FM10K_DGLORTDEC_VSIBASE_SHIFT) | + ((u32)(dglort->vsi_l) << FM10K_DGLORTDEC_VSILENGTH_SHIFT) | + ((u32)(dglort->queue_l)); + if (dglort->inner_rss) + dglortdec |= FM10K_DGLORTDEC_INNERRSS_ENABLE; + + /* configure DGLORTMAP */ + dglortmap = (dglort->idx == fm10k_dglort_default) ? + FM10K_DGLORTMAP_ANY : FM10K_DGLORTMAP_ZERO; + dglortmap <<= dglort->vsi_l + dglort->queue_l + dglort->shared_l; + dglortmap |= dglort->glort; + + /* write values to hardware */ + FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(dglort->idx), dglortdec); + FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(dglort->idx), dglortmap); + + return FM10K_SUCCESS; +} + +u16 fm10k_queues_per_pool(struct fm10k_hw *hw) +{ + u16 num_pools = hw->iov.num_pools; + + return (num_pools > 32) ? 2 : (num_pools > 16) ? 4 : (num_pools > 8) ? + 8 : FM10K_MAX_QUEUES_POOL; +} + +u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx) +{ + u16 num_vfs = hw->iov.num_vfs; + u16 vf_q_idx = FM10K_MAX_QUEUES; + + vf_q_idx -= fm10k_queues_per_pool(hw) * (num_vfs - vf_idx); + + return vf_q_idx; +} + +STATIC u16 fm10k_vectors_per_pool(struct fm10k_hw *hw) +{ + u16 num_pools = hw->iov.num_pools; + + return (num_pools > 32) ? 8 : (num_pools > 16) ? 
16 : + FM10K_MAX_VECTORS_POOL; +} + +STATIC u16 fm10k_vf_vector_index(struct fm10k_hw *hw, u16 vf_idx) +{ + u16 vf_v_idx = FM10K_MAX_VECTORS_PF; + + vf_v_idx += fm10k_vectors_per_pool(hw) * vf_idx; + + return vf_v_idx; +} + +/** + * fm10k_iov_assign_resources_pf - Assign pool resources for virtualization + * @hw: pointer to the HW structure + * @num_vfs: number of VFs to be allocated + * @num_pools: number of virtualization pools to be allocated + * + * Allocates queues and traffic classes to virtualization entities to prepare + * the PF for SR-IOV and VMDq + **/ +STATIC s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs, + u16 num_pools) +{ + u16 qmap_stride, qpp, vpp, vf_q_idx, vf_q_idx0, qmap_idx; + u32 vid = hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT; + int i, j; + + /* hardware only supports up to 64 pools */ + if (num_pools > 64) + return FM10K_ERR_PARAM; + + /* the number of VFs cannot exceed the number of pools */ + if ((num_vfs > num_pools) || (num_vfs > hw->iov.total_vfs)) + return FM10K_ERR_PARAM; + + /* record number of virtualization entities */ + hw->iov.num_vfs = num_vfs; + hw->iov.num_pools = num_pools; + + /* determine qmap offsets and counts */ + qmap_stride = (num_vfs > 8) ? 32 : 256; + qpp = fm10k_queues_per_pool(hw); + vpp = fm10k_vectors_per_pool(hw); + + /* calculate starting index for queues */ + vf_q_idx = fm10k_vf_queue_index(hw, 0); + qmap_idx = 0; + + /* establish TCs with -1 credits and no quanta to prevent transmit */ + for (i = 0; i < num_vfs; i++) { + FM10K_WRITE_REG(hw, FM10K_TC_MAXCREDIT(i), 0); + FM10K_WRITE_REG(hw, FM10K_TC_RATE(i), 0); + FM10K_WRITE_REG(hw, FM10K_TC_CREDIT(i), + FM10K_TC_CREDIT_CREDIT_MASK); + } + + /* zero out all mbmem registers */ + for (i = FM10K_VFMBMEM_LEN * num_vfs; i--;) + FM10K_WRITE_REG(hw, FM10K_MBMEM(i), 0); + + /* clear event notification of VF FLR */ + FM10K_WRITE_REG(hw, FM10K_PFVFLREC(0), ~0); + FM10K_WRITE_REG(hw, FM10K_PFVFLREC(1), ~0); + + /* loop through unallocated rings assigning them back to PF */ + for (i = FM10K_MAX_QUEUES_PF; i < vf_q_idx; i++) { + FM10K_WRITE_REG(hw, FM10K_TXDCTL(i), 0); + FM10K_WRITE_REG(hw, FM10K_TXQCTL(i), FM10K_TXQCTL_PF | + FM10K_TXQCTL_UNLIMITED_BW | vid); + FM10K_WRITE_REG(hw, FM10K_RXQCTL(i), FM10K_RXQCTL_PF); + } + + /* PF should have already updated VFITR2[0] */ + + /* update all ITR registers to flow to VFITR2[0] */ + for (i = FM10K_ITR_REG_COUNT_PF + 1; i < FM10K_ITR_REG_COUNT; i++) { + if (!(i & (vpp - 1))) + FM10K_WRITE_REG(hw, FM10K_ITR2(i), i - vpp); + else + FM10K_WRITE_REG(hw, FM10K_ITR2(i), i - 1); + } + + /* update PF ITR2[0] to reference the last vector */ + FM10K_WRITE_REG(hw, FM10K_ITR2(0), + fm10k_vf_vector_index(hw, num_vfs - 1)); + + /* loop through rings populating rings and TCs */ + for (i = 0; i < num_vfs; i++) { + /* record index for VF queue 0 for use in end of loop */ + vf_q_idx0 = vf_q_idx; + + for (j = 0; j < qpp; j++, qmap_idx++, vf_q_idx++) { + /* assign VF and locked TC to queues */ + FM10K_WRITE_REG(hw, FM10K_TXDCTL(vf_q_idx), 0); + FM10K_WRITE_REG(hw, FM10K_TXQCTL(vf_q_idx), + (i << FM10K_TXQCTL_TC_SHIFT) | i | + FM10K_TXQCTL_VF | vid); + FM10K_WRITE_REG(hw, FM10K_RXDCTL(vf_q_idx), + FM10K_RXDCTL_WRITE_BACK_MIN_DELAY | + FM10K_RXDCTL_DROP_ON_EMPTY); + FM10K_WRITE_REG(hw, FM10K_RXQCTL(vf_q_idx), + (i << FM10K_RXQCTL_VF_SHIFT) | + FM10K_RXQCTL_VF); + + /* map queue pair to VF */ + FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), vf_q_idx); + FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx), vf_q_idx); + } + + /* repeat the first ring for all of the 
remaining VF rings */
+		for (; j < qmap_stride; j++, qmap_idx++) {
+			FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), vf_q_idx0);
+			FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx), vf_q_idx0);
+		}
+	}
+
+	/* loop through remaining indexes assigning all to queue 0 */
+	while (qmap_idx < FM10K_TQMAP_TABLE_SIZE) {
+		FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), 0);
+		FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx), 0);
+		qmap_idx++;
+	}
+
+	return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_iov_configure_tc_pf - Configure the shaping group for VF
+ * @hw: pointer to the HW structure
+ * @vf_idx: index of VF receiving GLORT
+ * @rate: Rate indicated in Mb/s
+ *
+ * Configures the TC for a given VF to allow only up to a given number
+ * of Mb/s of outgoing Tx throughput.
+ **/
+STATIC s32 fm10k_iov_configure_tc_pf(struct fm10k_hw *hw, u16 vf_idx, int rate)
+{
+	/* configure defaults */
+	u32 interval = FM10K_TC_RATE_INTERVAL_4US_GEN3;
+	u32 tc_rate = FM10K_TC_RATE_QUANTA_MASK;
+
+	/* verify vf is in range */
+	if (vf_idx >= hw->iov.num_vfs)
+		return FM10K_ERR_PARAM;
+
+	/* set interval to align with 4.096 usec in all modes */
+	switch (hw->bus.speed) {
+	case fm10k_bus_speed_2500:
+		interval = FM10K_TC_RATE_INTERVAL_4US_GEN1;
+		break;
+	case fm10k_bus_speed_5000:
+		interval = FM10K_TC_RATE_INTERVAL_4US_GEN2;
+		break;
+	default:
+		break;
+	}
+
+	if (rate) {
+		if (rate > FM10K_VF_TC_MAX || rate < FM10K_VF_TC_MIN)
+			return FM10K_ERR_PARAM;
+
+		/* The quanta is measured in Bytes per 4.096 or 8.192 usec
+		 * The rate is provided in Mbits per second
+		 * To translate from rate to quanta we need to multiply the
+		 * rate by 8.192 usec and divide by 8 bits/byte. To avoid
+		 * dealing with floating point we can round the values up
+		 * to the nearest whole number ratio which gives us 128 / 125.
+		 */
+		tc_rate = (rate * 128) / 125;
+
+		/* try to keep the rate limiting accurate by increasing
+		 * the number of credits and interval for rates less than 4Gb/s
+		 */
+		if (rate < 4000)
+			interval <<= 1;
+		else
+			tc_rate >>= 1;
+	}
+
+	/* update rate limiter with new values */
+	FM10K_WRITE_REG(hw, FM10K_TC_RATE(vf_idx), tc_rate | interval);
+	FM10K_WRITE_REG(hw, FM10K_TC_MAXCREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
+	FM10K_WRITE_REG(hw, FM10K_TC_CREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
+
+	return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_iov_assign_int_moderator_pf - Add VF interrupts to moderator list
+ * @hw: pointer to the HW structure
+ * @vf_idx: index of VF receiving GLORT
+ *
+ * Update the interrupt moderator linked list to include any MSI-X
+ * interrupts which the VF has enabled in the MSI-X vector table.
+ **/ +STATIC s32 fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx) +{ + u16 vf_v_idx, vf_v_limit, i; + + /* verify vf is in range */ + if (vf_idx >= hw->iov.num_vfs) + return FM10K_ERR_PARAM; + + /* determine vector offset and count */ + vf_v_idx = fm10k_vf_vector_index(hw, vf_idx); + vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw); + + /* search for first vector that is not masked */ + for (i = vf_v_limit - 1; i > vf_v_idx; i--) { + if (!FM10K_READ_REG(hw, FM10K_MSIX_VECTOR_MASK(i))) + break; + } + + /* reset linked list so it now includes our active vectors */ + if (vf_idx == (hw->iov.num_vfs - 1)) + FM10K_WRITE_REG(hw, FM10K_ITR2(0), i); + else + FM10K_WRITE_REG(hw, FM10K_ITR2(vf_v_limit), i); + + return FM10K_SUCCESS; +} + +/** + * fm10k_iov_assign_default_mac_vlan_pf - Assign a MAC and VLAN to VF + * @hw: pointer to the HW structure + * @vf_info: pointer to VF information structure + * + * Assign a MAC address and default VLAN to a VF and notify it of the update + **/ +STATIC s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw, + struct fm10k_vf_info *vf_info) +{ + u16 qmap_stride, queues_per_pool, vf_q_idx, timeout, qmap_idx, i; + u32 msg[4], txdctl, txqctl, tdbal = 0, tdbah = 0; + s32 err = FM10K_SUCCESS; + u16 vf_idx, vf_vid; + + /* verify vf is in range */ + if (!vf_info || vf_info->vf_idx >= hw->iov.num_vfs) + return FM10K_ERR_PARAM; + + /* determine qmap offsets and counts */ + qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256; + queues_per_pool = fm10k_queues_per_pool(hw); + + /* calculate starting index for queues */ + vf_idx = vf_info->vf_idx; + vf_q_idx = fm10k_vf_queue_index(hw, vf_idx); + qmap_idx = qmap_stride * vf_idx; + + /* MAP Tx queue back to 0 temporarily, and disable it */ + FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), 0); + FM10K_WRITE_REG(hw, FM10K_TXDCTL(vf_q_idx), 0); + + /* determine correct default VLAN ID */ + if (vf_info->pf_vid) + vf_vid = vf_info->pf_vid | FM10K_VLAN_CLEAR; + else + vf_vid = vf_info->sw_vid; + + /* generate MAC_ADDR request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN); + fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_DEFAULT_MAC, + vf_info->mac, vf_vid); + + /* load onto outgoing mailbox, ignore any errors on enqueue */ + if (vf_info->mbx.ops.enqueue_tx) + vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg); + + /* verify ring has disabled before modifying base address registers */ + txdctl = FM10K_READ_REG(hw, FM10K_TXDCTL(vf_q_idx)); + for (timeout = 0; txdctl & FM10K_TXDCTL_ENABLE; timeout++) { + /* limit ourselves to a 1ms timeout */ + if (timeout == 10) { + err = FM10K_ERR_DMA_PENDING; + goto err_out; + } + + usec_delay(100); + txdctl = FM10K_READ_REG(hw, FM10K_TXDCTL(vf_q_idx)); + } + + /* Update base address registers to contain MAC address */ + if (FM10K_IS_VALID_ETHER_ADDR(vf_info->mac)) { + tdbal = (((u32)vf_info->mac[3]) << 24) | + (((u32)vf_info->mac[4]) << 16) | + (((u32)vf_info->mac[5]) << 8); + + tdbah = (((u32)0xFF) << 24) | + (((u32)vf_info->mac[0]) << 16) | + (((u32)vf_info->mac[1]) << 8) | + ((u32)vf_info->mac[2]); + } + + /* Record the base address into queue 0 */ + FM10K_WRITE_REG(hw, FM10K_TDBAL(vf_q_idx), tdbal); + FM10K_WRITE_REG(hw, FM10K_TDBAH(vf_q_idx), tdbah); + + /* Provide the VF the ITR scale, using software-defined fields in TDLEN + * to pass the information during VF initialization. See definition of + * FM10K_TDLEN_ITR_SCALE_SHIFT for more details. 
+ */ + FM10K_WRITE_REG(hw, FM10K_TDLEN(vf_q_idx), hw->mac.itr_scale << + FM10K_TDLEN_ITR_SCALE_SHIFT); + +err_out: + /* configure Queue control register */ + txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) & + FM10K_TXQCTL_VID_MASK; + txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) | + FM10K_TXQCTL_VF | vf_idx; + + /* assign VLAN ID */ + for (i = 0; i < queues_per_pool; i++) + FM10K_WRITE_REG(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl); + + /* restore the queue back to VF ownership */ + FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), vf_q_idx); + return err; +} + +/** + * fm10k_iov_reset_resources_pf - Reassign queues and interrupts to a VF + * @hw: pointer to the HW structure + * @vf_info: pointer to VF information structure + * + * Reassign the interrupts and queues to a VF following an FLR + **/ +STATIC s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw, + struct fm10k_vf_info *vf_info) +{ + u16 qmap_stride, queues_per_pool, vf_q_idx, qmap_idx; + u32 tdbal = 0, tdbah = 0, txqctl, rxqctl; + u16 vf_v_idx, vf_v_limit, vf_vid; + u8 vf_idx = vf_info->vf_idx; + int i; + + /* verify vf is in range */ + if (vf_idx >= hw->iov.num_vfs) + return FM10K_ERR_PARAM; + + /* clear event notification of VF FLR */ + FM10K_WRITE_REG(hw, FM10K_PFVFLREC(vf_idx / 32), BIT(vf_idx % 32)); + + /* force timeout and then disconnect the mailbox */ + vf_info->mbx.timeout = 0; + if (vf_info->mbx.ops.disconnect) + vf_info->mbx.ops.disconnect(hw, &vf_info->mbx); + + /* determine vector offset and count */ + vf_v_idx = fm10k_vf_vector_index(hw, vf_idx); + vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw); + + /* determine qmap offsets and counts */ + qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256; + queues_per_pool = fm10k_queues_per_pool(hw); + qmap_idx = qmap_stride * vf_idx; + + /* make all the queues inaccessible to the VF */ + for (i = qmap_idx; i < (qmap_idx + qmap_stride); i++) { + FM10K_WRITE_REG(hw, FM10K_TQMAP(i), 0); + FM10K_WRITE_REG(hw, FM10K_RQMAP(i), 0); + } + + /* calculate starting index for queues */ + vf_q_idx = fm10k_vf_queue_index(hw, vf_idx); + + /* determine correct default VLAN ID */ + if (vf_info->pf_vid) + vf_vid = vf_info->pf_vid; + else + vf_vid = vf_info->sw_vid; + + /* configure Queue control register */ + txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) | + (vf_idx << FM10K_TXQCTL_TC_SHIFT) | + FM10K_TXQCTL_VF | vf_idx; + rxqctl = (vf_idx << FM10K_RXQCTL_VF_SHIFT) | FM10K_RXQCTL_VF; + + /* stop further DMA and reset queue ownership back to VF */ + for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) { + FM10K_WRITE_REG(hw, FM10K_TXDCTL(i), 0); + FM10K_WRITE_REG(hw, FM10K_TXQCTL(i), txqctl); + FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), + FM10K_RXDCTL_WRITE_BACK_MIN_DELAY | + FM10K_RXDCTL_DROP_ON_EMPTY); + FM10K_WRITE_REG(hw, FM10K_RXQCTL(i), rxqctl); + } + + /* reset TC with -1 credits and no quanta to prevent transmit */ + FM10K_WRITE_REG(hw, FM10K_TC_MAXCREDIT(vf_idx), 0); + FM10K_WRITE_REG(hw, FM10K_TC_RATE(vf_idx), 0); + FM10K_WRITE_REG(hw, FM10K_TC_CREDIT(vf_idx), + FM10K_TC_CREDIT_CREDIT_MASK); + + /* update our first entry in the table based on previous VF */ + if (!vf_idx) + hw->mac.ops.update_int_moderator(hw); + else + hw->iov.ops.assign_int_moderator(hw, vf_idx - 1); + + /* reset linked list so it now includes our active vectors */ + if (vf_idx == (hw->iov.num_vfs - 1)) + FM10K_WRITE_REG(hw, FM10K_ITR2(0), vf_v_idx); + else + FM10K_WRITE_REG(hw, FM10K_ITR2(vf_v_limit), vf_v_idx); + + /* link remaining vectors so that next points to previous */ + for (vf_v_idx++; vf_v_idx < vf_v_limit; 
vf_v_idx++) + FM10K_WRITE_REG(hw, FM10K_ITR2(vf_v_idx), vf_v_idx - 1); + + /* zero out MBMEM, VLAN_TABLE, RETA, RSSRK, and MRQC registers */ + for (i = FM10K_VFMBMEM_LEN; i--;) + FM10K_WRITE_REG(hw, FM10K_MBMEM_VF(vf_idx, i), 0); + for (i = FM10K_VLAN_TABLE_SIZE; i--;) + FM10K_WRITE_REG(hw, FM10K_VLAN_TABLE(vf_info->vsi, i), 0); + for (i = FM10K_RETA_SIZE; i--;) + FM10K_WRITE_REG(hw, FM10K_RETA(vf_info->vsi, i), 0); + for (i = FM10K_RSSRK_SIZE; i--;) + FM10K_WRITE_REG(hw, FM10K_RSSRK(vf_info->vsi, i), 0); + FM10K_WRITE_REG(hw, FM10K_MRQC(vf_info->vsi), 0); + + /* Update base address registers to contain MAC address */ + if (FM10K_IS_VALID_ETHER_ADDR(vf_info->mac)) { + tdbal = (((u32)vf_info->mac[3]) << 24) | + (((u32)vf_info->mac[4]) << 16) | + (((u32)vf_info->mac[5]) << 8); + tdbah = (((u32)0xFF) << 24) | + (((u32)vf_info->mac[0]) << 16) | + (((u32)vf_info->mac[1]) << 8) | + ((u32)vf_info->mac[2]); + } + + /* map queue pairs back to VF from last to first */ + for (i = queues_per_pool; i--;) { + FM10K_WRITE_REG(hw, FM10K_TDBAL(vf_q_idx + i), tdbal); + FM10K_WRITE_REG(hw, FM10K_TDBAH(vf_q_idx + i), tdbah); + /* See definition of FM10K_TDLEN_ITR_SCALE_SHIFT for an + * explanation of how TDLEN is used. + */ + FM10K_WRITE_REG(hw, FM10K_TDLEN(vf_q_idx + i), + hw->mac.itr_scale << + FM10K_TDLEN_ITR_SCALE_SHIFT); + FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx + i); + FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i); + } + + /* repeat the first ring for all the remaining VF rings */ + for (i = queues_per_pool; i < qmap_stride; i++) { + FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx); + FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx); + } + + return FM10K_SUCCESS; +} + +/** + * fm10k_iov_set_lport_pf - Assign and enable a logical port for a given VF + * @hw: pointer to hardware structure + * @vf_info: pointer to VF information structure + * @lport_idx: Logical port offset from the hardware glort + * @flags: Set of capability flags to extend port beyond basic functionality + * + * This function allows enabling a VF port by assigning it a GLORT and + * setting the flags so that it can enable an Rx mode. + **/ +STATIC s32 fm10k_iov_set_lport_pf(struct fm10k_hw *hw, + struct fm10k_vf_info *vf_info, + u16 lport_idx, u8 flags) +{ + u16 glort = (hw->mac.dglort_map + lport_idx) & FM10K_DGLORTMAP_NONE; + + DEBUGFUNC("fm10k_iov_set_lport_state_pf"); + + /* if glort is not valid return error */ + if (!fm10k_glort_valid_pf(hw, glort)) + return FM10K_ERR_PARAM; + + vf_info->vf_flags = flags | FM10K_VF_FLAG_NONE_CAPABLE; + vf_info->glort = glort; + + return FM10K_SUCCESS; +} + +/** + * fm10k_iov_reset_lport_pf - Disable a logical port for a given VF + * @hw: pointer to hardware structure + * @vf_info: pointer to VF information structure + * + * This function disables a VF port by stripping it of a GLORT and + * setting the flags so that it cannot enable any Rx mode. 
+ **/ +STATIC void fm10k_iov_reset_lport_pf(struct fm10k_hw *hw, + struct fm10k_vf_info *vf_info) +{ + u32 msg[1]; + + DEBUGFUNC("fm10k_iov_reset_lport_state_pf"); + + /* need to disable the port if it is already enabled */ + if (FM10K_VF_FLAG_ENABLED(vf_info)) { + /* notify switch that this port has been disabled */ + fm10k_update_lport_state_pf(hw, vf_info->glort, 1, false); + + /* generate port state response to notify VF it is not ready */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE); + vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg); + } + + /* clear flags and glort if it exists */ + vf_info->vf_flags = 0; + vf_info->glort = 0; +} + +/** + * fm10k_iov_update_stats_pf - Updates hardware related statistics for VFs + * @hw: pointer to hardware structure + * @q: stats for all queues of a VF + * @vf_idx: index of VF + * + * This function collects queue stats for VFs. + **/ +STATIC void fm10k_iov_update_stats_pf(struct fm10k_hw *hw, + struct fm10k_hw_stats_q *q, + u16 vf_idx) +{ + u32 idx, qpp; + + /* get stats for all of the queues */ + qpp = fm10k_queues_per_pool(hw); + idx = fm10k_vf_queue_index(hw, vf_idx); + fm10k_update_hw_stats_q(hw, q, idx, qpp); +} + +/** + * fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF + * @hw: Pointer to hardware structure + * @results: Pointer array to message, results[0] is pointer to message + * @mbx: Pointer to mailbox information structure + * + * This function is a default handler for MSI-X requests from the VF. The + * assumption is that in this case it is acceptable to just directly + * hand off the message from the VF to the underlying shared code. + **/ +s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; + u8 vf_idx = vf_info->vf_idx; + + UNREFERENCED_1PARAMETER(results); + DEBUGFUNC("fm10k_iov_msg_msix_pf"); + + return hw->iov.ops.assign_int_moderator(hw, vf_idx); +} + +/** + * fm10k_iov_select_vid - Select correct default VLAN ID + * @hw: Pointer to hardware structure + * @vid: VLAN ID to correct + * + * Will report an error if the VLAN ID is out of range. For VID = 0, it will + * return either the pf_vid or sw_vid depending on which one is set. + */ +STATIC s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid) +{ + if (!vid) + return vf_info->pf_vid ? vf_info->pf_vid : vf_info->sw_vid; + else if (vf_info->pf_vid && vid != vf_info->pf_vid) + return FM10K_ERR_PARAM; + else + return vid; +} + +/** + * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF + * @hw: Pointer to hardware structure + * @results: Pointer array to message, results[0] is pointer to message + * @mbx: Pointer to mailbox information structure + * + * This function is a default handler for MAC/VLAN requests from the VF. + * The assumption is that in this case it is acceptable to just directly + * hand off the message from the VF to the underlying shared code. 
+ **/ +s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; + u8 mac[ETH_ALEN]; + u32 *result; + int err = FM10K_SUCCESS; + bool set; + u16 vlan; + u32 vid; + + DEBUGFUNC("fm10k_iov_msg_mac_vlan_pf"); + + /* we shouldn't be updating rules on a disabled interface */ + if (!FM10K_VF_FLAG_ENABLED(vf_info)) + err = FM10K_ERR_PARAM; + + if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) { + result = results[FM10K_MAC_VLAN_MSG_VLAN]; + + /* record VLAN id requested */ + err = fm10k_tlv_attr_get_u32(result, &vid); + if (err) + return err; + + /* verify upper 16 bits are zero */ + if (vid >> 16) + return FM10K_ERR_PARAM; + + set = !(vid & FM10K_VLAN_CLEAR); + vid &= ~FM10K_VLAN_CLEAR; + + err = fm10k_iov_select_vid(vf_info, (u16)vid); + if (err < 0) + return err; + + vid = err; + + /* update VSI info for VF in regards to VLAN table */ + err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set); + } + + if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) { + result = results[FM10K_MAC_VLAN_MSG_MAC]; + + /* record unicast MAC address requested */ + err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan); + if (err) + return err; + + /* block attempts to set MAC for a locked device */ + if (FM10K_IS_VALID_ETHER_ADDR(vf_info->mac) && + memcmp(mac, vf_info->mac, ETH_ALEN)) + return FM10K_ERR_PARAM; + + set = !(vlan & FM10K_VLAN_CLEAR); + vlan &= ~FM10K_VLAN_CLEAR; + + err = fm10k_iov_select_vid(vf_info, vlan); + if (err < 0) + return err; + + vlan = (u16)err; + + /* notify switch of request for new unicast address */ + err = hw->mac.ops.update_uc_addr(hw, vf_info->glort, + mac, vlan, set, 0); + } + + if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) { + result = results[FM10K_MAC_VLAN_MSG_MULTICAST]; + + /* record multicast MAC address requested */ + err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan); + if (err) + return err; + + /* verify that the VF is allowed to request multicast */ + if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED)) + return FM10K_ERR_PARAM; + + set = !(vlan & FM10K_VLAN_CLEAR); + vlan &= ~FM10K_VLAN_CLEAR; + + err = fm10k_iov_select_vid(vf_info, vlan); + if (err < 0) + return err; + + vlan = (u16)err; + + /* notify switch of request for new multicast address */ + err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, + mac, vlan, set); + } + + return err; +} + +/** + * fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode + * @vf_info: VF info structure containing capability flags + * @mode: Requested xcast mode + * + * This function outputs the mode that most closely matches the requested + * mode. 
If no modes match it will request we disable the port
+ **/
+STATIC u8 fm10k_iov_supported_xcast_mode_pf(struct fm10k_vf_info *vf_info,
+					     u8 mode)
+{
+	u8 vf_flags = vf_info->vf_flags;
+
+	/* match up mode to capabilities as best as possible */
+	switch (mode) {
+	case FM10K_XCAST_MODE_PROMISC:
+		if (vf_flags & FM10K_VF_FLAG_PROMISC_CAPABLE)
+			return FM10K_XCAST_MODE_PROMISC;
+		/* fallthrough */
+	case FM10K_XCAST_MODE_ALLMULTI:
+		if (vf_flags & FM10K_VF_FLAG_ALLMULTI_CAPABLE)
+			return FM10K_XCAST_MODE_ALLMULTI;
+		/* fallthrough */
+	case FM10K_XCAST_MODE_MULTI:
+		if (vf_flags & FM10K_VF_FLAG_MULTI_CAPABLE)
+			return FM10K_XCAST_MODE_MULTI;
+		/* fallthrough */
+	case FM10K_XCAST_MODE_NONE:
+		if (vf_flags & FM10K_VF_FLAG_NONE_CAPABLE)
+			return FM10K_XCAST_MODE_NONE;
+		/* fallthrough */
+	default:
+		break;
+	}
+
+	/* disable interface as it should not be able to request any */
+	return FM10K_XCAST_MODE_DISABLE;
+}
+
+/**
+ * fm10k_iov_msg_lport_state_pf - Message handler for port state requests
+ * @hw: Pointer to hardware structure
+ * @results: Pointer array to message, results[0] is pointer to message
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This function is a default handler for port state requests. The port
+ * state requests for now are basic and consist of enabling or disabling
+ * the port.
+ **/
+s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
+				 struct fm10k_mbx_info *mbx)
+{
+	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
+	u32 *result;
+	s32 err = FM10K_SUCCESS;
+	u32 msg[2];
+	u8 mode = 0;
+
+	DEBUGFUNC("fm10k_iov_msg_lport_state_pf");
+
+	/* verify VF is allowed to enable even minimal mode */
+	if (!(vf_info->vf_flags & FM10K_VF_FLAG_NONE_CAPABLE))
+		return FM10K_ERR_PARAM;
+
+	if (!!results[FM10K_LPORT_STATE_MSG_XCAST_MODE]) {
+		result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE];
+
+		/* XCAST mode update requested */
+		err = fm10k_tlv_attr_get_u8(result, &mode);
+		if (err)
+			return FM10K_ERR_PARAM;
+
+		/* prep for possible demotion depending on capabilities */
+		mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode);
+
+		/* if mode is not currently enabled, enable it */
+		if (!(FM10K_VF_FLAG_ENABLED(vf_info) & BIT(mode)))
+			fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode);
+
+		/* swap mode back to a bit flag */
+		mode = FM10K_VF_FLAG_SET_MODE(mode);
+	} else if (!results[FM10K_LPORT_STATE_MSG_DISABLE]) {
+		/* need to disable the port if it is already enabled */
+		if (FM10K_VF_FLAG_ENABLED(vf_info))
+			err = fm10k_update_lport_state_pf(hw, vf_info->glort,
+							  1, false);
+
+		/* we need to clear VF_FLAG_ENABLED flags in order to ensure
+		 * that we actually re-enable the LPORT state below. Note that
+		 * this has no impact if the VF is already disabled, as the
+		 * flags are already cleared.
+ */ + if (!err) + vf_info->vf_flags = FM10K_VF_FLAG_CAPABLE(vf_info); + + /* when enabling the port we should reset the rate limiters */ + hw->iov.ops.configure_tc(hw, vf_info->vf_idx, vf_info->rate); + + /* set mode for minimal functionality */ + mode = FM10K_VF_FLAG_SET_MODE_NONE; + + /* generate port state response to notify VF it is ready */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE); + fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_READY); + mbx->ops.enqueue_tx(hw, mbx, msg); + } + + /* if enable state toggled note the update */ + if (!err && (!FM10K_VF_FLAG_ENABLED(vf_info) != !mode)) + err = fm10k_update_lport_state_pf(hw, vf_info->glort, 1, + !!mode); + + /* if state change succeeded, then update our stored state */ + mode |= FM10K_VF_FLAG_CAPABLE(vf_info); + if (!err) + vf_info->vf_flags = mode; + + return err; +} + +#ifndef NO_DEFAULT_SRIOV_MSG_HANDLERS +const struct fm10k_msg_data fm10k_iov_msg_data_pf[] = { + FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test), + FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf), + FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf), + FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf), + FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), +}; + +#endif +/** + * fm10k_update_stats_hw_pf - Updates hardware related statistics of PF + * @hw: pointer to hardware structure + * @stats: pointer to the stats structure to update + * + * This function collects and aggregates global and per queue hardware + * statistics. + **/ +STATIC void fm10k_update_hw_stats_pf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats) +{ + u32 timeout, ur, ca, um, xec, vlan_drop, loopback_drop, nodesc_drop; + u32 id, id_prev; + + DEBUGFUNC("fm10k_update_hw_stats_pf"); + + /* Use Tx queue 0 as a canary to detect a reset */ + id = FM10K_READ_REG(hw, FM10K_TXQCTL(0)); + + /* Read Global Statistics */ + do { + timeout = fm10k_read_hw_stats_32b(hw, FM10K_STATS_TIMEOUT, + &stats->timeout); + ur = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UR, &stats->ur); + ca = fm10k_read_hw_stats_32b(hw, FM10K_STATS_CA, &stats->ca); + um = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UM, &stats->um); + xec = fm10k_read_hw_stats_32b(hw, FM10K_STATS_XEC, &stats->xec); + vlan_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_VLAN_DROP, + &stats->vlan_drop); + loopback_drop = + fm10k_read_hw_stats_32b(hw, + FM10K_STATS_LOOPBACK_DROP, + &stats->loopback_drop); + nodesc_drop = fm10k_read_hw_stats_32b(hw, + FM10K_STATS_NODESC_DROP, + &stats->nodesc_drop); + + /* if value has not changed then we have consistent data */ + id_prev = id; + id = FM10K_READ_REG(hw, FM10K_TXQCTL(0)); + } while ((id ^ id_prev) & FM10K_TXQCTL_ID_MASK); + + /* drop non-ID bits and set VALID ID bit */ + id &= FM10K_TXQCTL_ID_MASK; + id |= FM10K_STAT_VALID; + + /* Update Global Statistics */ + if (stats->stats_idx == id) { + stats->timeout.count += timeout; + stats->ur.count += ur; + stats->ca.count += ca; + stats->um.count += um; + stats->xec.count += xec; + stats->vlan_drop.count += vlan_drop; + stats->loopback_drop.count += loopback_drop; + stats->nodesc_drop.count += nodesc_drop; + } + + /* Update bases and record current PF id */ + fm10k_update_hw_base_32b(&stats->timeout, timeout); + fm10k_update_hw_base_32b(&stats->ur, ur); + fm10k_update_hw_base_32b(&stats->ca, ca); + fm10k_update_hw_base_32b(&stats->um, um); + fm10k_update_hw_base_32b(&stats->xec, xec); + fm10k_update_hw_base_32b(&stats->vlan_drop, vlan_drop); + fm10k_update_hw_base_32b(&stats->loopback_drop, loopback_drop); + 
fm10k_update_hw_base_32b(&stats->nodesc_drop, nodesc_drop); + stats->stats_idx = id; + + /* Update Queue Statistics */ + fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues); +} + +/** + * fm10k_rebind_hw_stats_pf - Resets base for hardware statistics of PF + * @hw: pointer to hardware structure + * @stats: pointer to the stats structure to update + * + * This function resets the base for global and per queue hardware + * statistics. + **/ +STATIC void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats) +{ + DEBUGFUNC("fm10k_rebind_hw_stats_pf"); + + /* Unbind Global Statistics */ + fm10k_unbind_hw_stats_32b(&stats->timeout); + fm10k_unbind_hw_stats_32b(&stats->ur); + fm10k_unbind_hw_stats_32b(&stats->ca); + fm10k_unbind_hw_stats_32b(&stats->um); + fm10k_unbind_hw_stats_32b(&stats->xec); + fm10k_unbind_hw_stats_32b(&stats->vlan_drop); + fm10k_unbind_hw_stats_32b(&stats->loopback_drop); + fm10k_unbind_hw_stats_32b(&stats->nodesc_drop); + + /* Unbind Queue Statistics */ + fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues); + + /* Reinitialize bases for all stats */ + fm10k_update_hw_stats_pf(hw, stats); +} + +/** + * fm10k_set_dma_mask_pf - Configures PhyAddrSpace to limit DMA to system + * @hw: pointer to hardware structure + * @dma_mask: 64 bit DMA mask required for platform + * + * This function sets the PHYADDR.PhyAddrSpace bits for the endpoint in order + * to limit the access to memory beyond what is physically in the system. + **/ +STATIC void fm10k_set_dma_mask_pf(struct fm10k_hw *hw, u64 dma_mask) +{ + /* we need to write the upper 32 bits of DMA mask to PhyAddrSpace */ + u32 phyaddr = (u32)(dma_mask >> 32); + + DEBUGFUNC("fm10k_set_dma_mask_pf"); + + FM10K_WRITE_REG(hw, FM10K_PHYADDR, phyaddr); +} + +/** + * fm10k_get_fault_pf - Record a fault in one of the interface units + * @hw: pointer to hardware structure + * @type: pointer to fault type register offset + * @fault: pointer to memory location to record the fault + * + * Record the fault register contents to the fault data structure and + * clear the entry from the register. + * + * Returns ERR_PARAM if invalid register is specified or no error is present. 
+ **/
+STATIC s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type,
+			      struct fm10k_fault *fault)
+{
+	u32 func;
+
+	DEBUGFUNC("fm10k_get_fault_pf");
+
+	/* verify the fault register is in range and is aligned */
+	switch (type) {
+	case FM10K_PCA_FAULT:
+	case FM10K_THI_FAULT:
+	case FM10K_FUM_FAULT:
+		break;
+	default:
+		return FM10K_ERR_PARAM;
+	}
+
+	/* only service faults that are valid */
+	func = FM10K_READ_REG(hw, type + FM10K_FAULT_FUNC);
+	if (!(func & FM10K_FAULT_FUNC_VALID))
+		return FM10K_ERR_PARAM;
+
+	/* read remaining fields */
+	fault->address = FM10K_READ_REG(hw, type + FM10K_FAULT_ADDR_HI);
+	fault->address <<= 32;
+	fault->address = FM10K_READ_REG(hw, type + FM10K_FAULT_ADDR_LO);
+	fault->specinfo = FM10K_READ_REG(hw, type + FM10K_FAULT_SPECINFO);
+
+	/* clear valid bit to allow for next error */
+	FM10K_WRITE_REG(hw, type + FM10K_FAULT_FUNC, FM10K_FAULT_FUNC_VALID);
+
+	/* Record which function triggered the error */
+	if (func & FM10K_FAULT_FUNC_PF)
+		fault->func = 0;
+	else
+		fault->func = 1 + ((func & FM10K_FAULT_FUNC_VF_MASK) >>
+				   FM10K_FAULT_FUNC_VF_SHIFT);
+
+	/* record fault type */
+	fault->type = func & FM10K_FAULT_FUNC_TYPE_MASK;
+
+	return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_request_lport_map_pf - Request LPORT map from the switch API
+ * @hw: pointer to hardware structure
+ *
+ **/
+STATIC s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw)
+{
+	struct fm10k_mbx_info *mbx = &hw->mbx;
+	u32 msg[1];
+
+	DEBUGFUNC("fm10k_request_lport_pf");
+
+	/* issue request asking for LPORT map */
+	fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_LPORT_MAP);
+
+	/* load onto outgoing mailbox */
+	return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_get_host_state_pf - Returns the state of the switch and mailbox
+ * @hw: pointer to hardware structure
+ * @switch_ready: pointer to boolean value that will record switch state
+ *
+ * This function will check the DMA_CTRL2 register and mailbox in order
+ * to determine if the switch is ready for the PF to begin requesting
+ * addresses and mapping traffic to the local interface.
+ **/
+STATIC s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
+{
+	s32 ret_val = FM10K_SUCCESS;
+	u32 dma_ctrl2;
+
+	DEBUGFUNC("fm10k_get_host_state_pf");
+
+	/* verify the switch is ready for interaction */
+	dma_ctrl2 = FM10K_READ_REG(hw, FM10K_DMA_CTRL2);
+	if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY))
+		goto out;
+
+	/* retrieve generic host state info */
+	ret_val = fm10k_get_host_state_generic(hw, switch_ready);
+	if (ret_val)
+		goto out;
+
+	/* interface cannot receive traffic without logical ports */
+	if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE)
+		ret_val = fm10k_request_lport_map_pf(hw);
+
+out:
+	return ret_val;
+}
+
+/* This structure defines the attributes to be parsed below */
+const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
+	FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP),
+	FM10K_TLV_ATTR_LAST
+};
+
+/**
+ * fm10k_msg_lport_map_pf - Message handler for lport_map message from SM
+ * @hw: Pointer to hardware structure
+ * @results: pointer array containing parsed data
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This handler configures the lport mapping based on the reply from the
+ * switch API.
+ **/ +s32 fm10k_msg_lport_map_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + u16 glort, mask; + u32 dglort_map; + s32 err; + + UNREFERENCED_1PARAMETER(mbx); + DEBUGFUNC("fm10k_msg_lport_map_pf"); + + err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_LPORT_MAP], + &dglort_map); + if (err) + return err; + + /* extract values out of the header */ + glort = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_GLORT); + mask = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_MASK); + + /* verify mask is set and none of the masked bits in glort are set */ + if (!mask || (glort & ~mask)) + return FM10K_ERR_PARAM; + + /* verify the mask is contiguous, and that it is 1's followed by 0's */ + if (((~(mask - 1) & mask) + mask) & FM10K_DGLORTMAP_NONE) + return FM10K_ERR_PARAM; + + /* record the glort, mask, and port count */ + hw->mac.dglort_map = dglort_map; + + return FM10K_SUCCESS; +} + +const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = { + FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_UPDATE_PVID), + FM10K_TLV_ATTR_LAST +}; + +/** + * fm10k_msg_update_pvid_pf - Message handler for port VLAN message from SM + * @hw: Pointer to hardware structure + * @results: pointer array containing parsed data + * @mbx: Pointer to mailbox information structure + * + * This handler configures the default VLAN for the PF + **/ +static s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + u16 glort, pvid; + u32 pvid_update; + s32 err; + + UNREFERENCED_1PARAMETER(mbx); + DEBUGFUNC("fm10k_msg_update_pvid_pf"); + + err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID], + &pvid_update); + if (err) + return err; + + /* extract values from the pvid update */ + glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT); + pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID); + + /* if glort is not valid return error */ + if (!fm10k_glort_valid_pf(hw, glort)) + return FM10K_ERR_PARAM; + + /* verify VLAN ID is valid */ + if (pvid >= FM10K_VLAN_TABLE_VID_MAX) + return FM10K_ERR_PARAM; + + /* record the port VLAN ID value */ + hw->mac.default_vid = pvid; + + return FM10K_SUCCESS; +} + +/** + * fm10k_record_global_table_data - Move global table data to swapi table info + * @from: pointer to source table data structure + * @to: pointer to destination table info structure + * + * This function is will copy table_data to the table_info contained in + * the hw struct. + **/ +static void fm10k_record_global_table_data(struct fm10k_global_table_data *from, + struct fm10k_swapi_table_info *to) +{ + /* convert from le32 struct to CPU byte ordered values */ + to->used = FM10K_LE32_TO_CPU(from->used); + to->avail = FM10K_LE32_TO_CPU(from->avail); +} + +const struct fm10k_tlv_attr fm10k_err_msg_attr[] = { + FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR, + sizeof(struct fm10k_swapi_error)), + FM10K_TLV_ATTR_LAST +}; + +/** + * fm10k_msg_err_pf - Message handler for error reply + * @hw: Pointer to hardware structure + * @results: pointer array containing parsed data + * @mbx: Pointer to mailbox information structure + * + * This handler will capture the data for any error replies to previous + * messages that the PF has sent. 
+ **/ +s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_swapi_error err_msg; + s32 err; + + UNREFERENCED_1PARAMETER(mbx); + DEBUGFUNC("fm10k_msg_err_pf"); + + /* extract structure from message */ + err = fm10k_tlv_attr_get_le_struct(results[FM10K_PF_ATTR_ID_ERR], + &err_msg, sizeof(err_msg)); + if (err) + return err; + + /* record table status */ + fm10k_record_global_table_data(&err_msg.mac, &hw->swapi.mac); + fm10k_record_global_table_data(&err_msg.nexthop, &hw->swapi.nexthop); + fm10k_record_global_table_data(&err_msg.ffu, &hw->swapi.ffu); + + /* record SW API status value */ + hw->swapi.status = FM10K_LE32_TO_CPU(err_msg.status); + + return FM10K_SUCCESS; +} + +/* currently there is no shared 1588 timestamp handler */ + +const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[] = { + FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_TIMESTAMP, + sizeof(struct fm10k_swapi_1588_timestamp)), + FM10K_TLV_ATTR_LAST +}; + +const struct fm10k_tlv_attr fm10k_1588_clock_owner_attr[] = { + FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_CLOCK_OWNER, + sizeof(struct fm10k_swapi_1588_clock_owner)), + FM10K_TLV_ATTR_LAST +}; + +const struct fm10k_tlv_attr fm10k_master_clk_offset_attr[] = { + FM10K_TLV_ATTR_U64(FM10K_PF_ATTR_ID_MASTER_CLK_OFFSET), + FM10K_TLV_ATTR_LAST +}; + +/** + * fm10k_iov_notify_offset_pf - Notify VF of change in PTP offset + * @hw: pointer to hardware structure + * @vf_info: pointer to the vf info structure + * @offset: 64bit unsigned offset from hardware SYSTIME + * + * This function sends a message to a given VF to notify it of PTP offset + * changes. + **/ +STATIC void fm10k_iov_notify_offset_pf(struct fm10k_hw *hw, + struct fm10k_vf_info *vf_info, + u64 offset) +{ + u32 msg[4]; + + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_1588); + fm10k_tlv_attr_put_u64(msg, FM10K_1588_MSG_CLK_OFFSET, offset); + + if (vf_info->mbx.ops.enqueue_tx) + vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg); +} + +/** + * fm10k_msg_1588_clock_owner_pf - Message handler for clock ownership from SM + * @hw: pointer to hardware structure + * @results: pointer to array containing parsed data, + * @mbx: Pointer to mailbox information structure + * + * This handler configures the FM10K_HW_FLAG_CLOCK_OWNER field for the PF + */ +s32 fm10k_msg_1588_clock_owner_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_swapi_1588_clock_owner msg; + u16 glort; + s32 err; + + UNREFERENCED_1PARAMETER(mbx); + DEBUGFUNC("fm10k_msg_1588_clock_owner"); + + err = fm10k_tlv_attr_get_le_struct( + results[FM10K_PF_ATTR_ID_1588_CLOCK_OWNER], + &msg, sizeof(msg)); + if (err) + return err; + + /* We own the clock iff the glort matches us and the enabled field is + * true. Otherwise, the clock must belong to some other port. + */ + glort = le16_to_cpu(msg.glort); + if (fm10k_glort_valid_pf(hw, glort) && msg.enabled) + hw->flags |= FM10K_HW_FLAG_CLOCK_OWNER; + else + hw->flags &= ~FM10K_HW_FLAG_CLOCK_OWNER; + + return FM10K_SUCCESS; +} + +/** + * fm10k_adjust_systime_pf - Adjust systime frequency + * @hw: pointer to hardware structure + * @ppb: adjustment rate in parts per billion + * + * This function will adjust the SYSTIME_CFG register contained in BAR 4 + * if this function is supported for BAR 4 access. The adjustment amount + * is based on the parts per billion value provided and adjusted to a + * value based on parts per 2^48 clock cycles. 
+ *
+ * If adjustment is not supported or the requested value is too large
+ * we will return an error.
+ **/
+STATIC s32 fm10k_adjust_systime_pf(struct fm10k_hw *hw, s32 ppb)
+{
+	u64 systime_adjust;
+
+	DEBUGFUNC("fm10k_adjust_systime_pf");
+
+	/* ensure that we control the clock */
+	if (!(hw->flags & FM10K_HW_FLAG_CLOCK_OWNER))
+		return FM10K_ERR_DEVICE_NOT_SUPPORTED;
+
+	/* if sw_addr is not set we don't have switch register access */
+	if (!hw->sw_addr)
+		return ppb ? FM10K_ERR_PARAM : FM10K_SUCCESS;
+
+	/* we must convert the value from parts per billion to parts per
+	 * 2^48 cycles. In addition I have opted to only use the 30 most
+	 * significant bits of the adjustment value as the 8 least
+	 * significant bits are located in another register and represent
+	 * a value significantly less than a part per billion, the result
+	 * of dropping the 8 least significant bits is that the adjustment
+	 * value is effectively multiplied by 2^8 when we write it.
+	 *
+	 * As a result of all this the math for this breaks down as follows:
+	 * ppb / 10^9 == adjust * 2^8 / 2^48
+	 * If we solve this for adjust, and simplify it comes out as:
+	 * ppb * 2^31 / 5^9 == adjust
+	 */
+	systime_adjust = (ppb < 0) ? -ppb : ppb;
+	systime_adjust <<= 31;
+	do_div(systime_adjust, 1953125);
+
+	/* verify the requested adjustment value is in range */
+	if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK)
+		return FM10K_ERR_PARAM;
+
+	if (ppb > 0)
+		systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE;
+
+	FM10K_WRITE_SW_REG(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust);
+
+	return FM10K_SUCCESS;
+}
+
+/**
+ * fm10k_notify_offset_pf - Notify switch of change in PTP offset
+ * @hw: pointer to hardware structure
+ * @offset: 64bit unsigned offset of SYSTIME
+ *
+ * This function sends a message to the switch to indicate a change in the
+ * offset of the hardware SYSTIME registers. The switch manager is
+ * responsible for transmitting this message to other hosts.
+ */
+STATIC s32 fm10k_notify_offset_pf(struct fm10k_hw *hw, u64 offset)
+{
+	struct fm10k_mbx_info *mbx = &hw->mbx;
+	u32 msg[4];
+
+	DEBUGFUNC("fm10k_notify_offset_pf");
+
+	/* ensure that we control the clock */
+	if (!(hw->flags & FM10K_HW_FLAG_CLOCK_OWNER))
+		return FM10K_ERR_DEVICE_NOT_SUPPORTED;
+
+	fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_MASTER_CLK_OFFSET);
+	fm10k_tlv_attr_put_u64(msg, FM10K_PF_ATTR_ID_MASTER_CLK_OFFSET, offset);
+
+	/* load onto outgoing mailbox */
+	return mbx->ops.enqueue_tx(hw, mbx, msg);
+}
+
+/**
+ * fm10k_read_systime_pf - Reads value of systime registers
+ * @hw: pointer to the hardware structure
+ *
+ * Function reads the content of 2 registers, combined to represent a 64 bit
+ * value measured in nanoseconds. In order to guarantee the value is accurate
+ * we check the 32 most significant bits both before and after reading the
+ * 32 least significant bits to verify they didn't change as we were reading
+ * the registers.
+ **/ +static u64 fm10k_read_systime_pf(struct fm10k_hw *hw) +{ + u32 systime_l, systime_h, systime_tmp; + + systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1); + + do { + systime_tmp = systime_h; + systime_l = fm10k_read_reg(hw, FM10K_SYSTIME); + systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1); + } while (systime_tmp != systime_h); + + return ((u64)systime_h << 32) | systime_l; +} + +static const struct fm10k_msg_data fm10k_msg_data_pf[] = { + FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf), + FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf), + FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf), + FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf), + FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf), + FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf), + FM10K_PF_MSG_1588_CLOCK_OWNER_HANDLER(fm10k_msg_1588_clock_owner_pf), + FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), +}; + +/** + * fm10k_init_ops_pf - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for PF. + * Does not touch the hardware. + **/ +s32 fm10k_init_ops_pf(struct fm10k_hw *hw) +{ + struct fm10k_mac_info *mac = &hw->mac; + struct fm10k_iov_info *iov = &hw->iov; + + DEBUGFUNC("fm10k_init_ops_pf"); + + fm10k_init_ops_generic(hw); + + mac->ops.reset_hw = &fm10k_reset_hw_pf; + mac->ops.init_hw = &fm10k_init_hw_pf; + mac->ops.start_hw = &fm10k_start_hw_generic; + mac->ops.stop_hw = &fm10k_stop_hw_generic; +#ifndef NO_IS_SLOT_APPROPRIATE_CHECK + mac->ops.is_slot_appropriate = &fm10k_is_slot_appropriate_pf; +#endif + mac->ops.update_vlan = &fm10k_update_vlan_pf; + mac->ops.read_mac_addr = &fm10k_read_mac_addr_pf; + mac->ops.update_uc_addr = &fm10k_update_uc_addr_pf; + mac->ops.update_mc_addr = &fm10k_update_mc_addr_pf; + mac->ops.update_xcast_mode = &fm10k_update_xcast_mode_pf; + mac->ops.update_int_moderator = &fm10k_update_int_moderator_pf; + mac->ops.update_lport_state = &fm10k_update_lport_state_pf; + mac->ops.update_hw_stats = &fm10k_update_hw_stats_pf; + mac->ops.rebind_hw_stats = &fm10k_rebind_hw_stats_pf; + mac->ops.configure_dglort_map = &fm10k_configure_dglort_map_pf; + mac->ops.set_dma_mask = &fm10k_set_dma_mask_pf; + mac->ops.get_fault = &fm10k_get_fault_pf; + mac->ops.get_host_state = &fm10k_get_host_state_pf; + mac->ops.adjust_systime = &fm10k_adjust_systime_pf; + mac->ops.notify_offset = &fm10k_notify_offset_pf; + mac->ops.read_systime = &fm10k_read_systime_pf; + + mac->max_msix_vectors = fm10k_get_pcie_msix_count_generic(hw); + + iov->ops.assign_resources = &fm10k_iov_assign_resources_pf; + iov->ops.configure_tc = &fm10k_iov_configure_tc_pf; + iov->ops.assign_int_moderator = &fm10k_iov_assign_int_moderator_pf; + iov->ops.assign_default_mac_vlan = fm10k_iov_assign_default_mac_vlan_pf; + iov->ops.reset_resources = &fm10k_iov_reset_resources_pf; + iov->ops.set_lport = &fm10k_iov_set_lport_pf; + iov->ops.reset_lport = &fm10k_iov_reset_lport_pf; + iov->ops.update_stats = &fm10k_iov_update_stats_pf; + iov->ops.notify_offset = &fm10k_iov_notify_offset_pf; + + return fm10k_sm_mbx_init(hw, &hw->mbx, fm10k_msg_data_pf); +} diff --git a/drivers/net/fm10k/base/fm10k_pf.h b/drivers/net/fm10k/base/fm10k_pf.h new file mode 100644 index 00000000..c84b1bc5 --- /dev/null +++ b/drivers/net/fm10k/base/fm10k_pf.h @@ -0,0 +1,185 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _FM10K_PF_H_ +#define _FM10K_PF_H_ + +#include "fm10k_type.h" +#include "fm10k_common.h" + +bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort); +u16 fm10k_queues_per_pool(struct fm10k_hw *hw); +u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx); + +enum fm10k_pf_tlv_msg_id_v1 { + FM10K_PF_MSG_ID_TEST = 0x000, /* msg ID reserved */ + FM10K_PF_MSG_ID_XCAST_MODES = 0x001, + FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE = 0x002, + FM10K_PF_MSG_ID_LPORT_MAP = 0x100, + FM10K_PF_MSG_ID_LPORT_CREATE = 0x200, + FM10K_PF_MSG_ID_LPORT_DELETE = 0x201, + FM10K_PF_MSG_ID_CONFIG = 0x300, + FM10K_PF_MSG_ID_UPDATE_PVID = 0x400, + FM10K_PF_MSG_ID_CREATE_FLOW_TABLE = 0x501, + FM10K_PF_MSG_ID_DELETE_FLOW_TABLE = 0x502, + FM10K_PF_MSG_ID_UPDATE_FLOW = 0x503, + FM10K_PF_MSG_ID_DELETE_FLOW = 0x504, + FM10K_PF_MSG_ID_SET_FLOW_STATE = 0x505, + FM10K_PF_MSG_ID_GET_1588_INFO = 0x506, + FM10K_PF_MSG_ID_1588_TIMESTAMP = 0x701, + FM10K_PF_MSG_ID_1588_CLOCK_OWNER = 0x702, + FM10K_PF_MSG_ID_MASTER_CLK_OFFSET = 0x703, +}; + +enum fm10k_pf_tlv_attr_id_v1 { + FM10K_PF_ATTR_ID_ERR = 0x00, + FM10K_PF_ATTR_ID_LPORT_MAP = 0x01, + FM10K_PF_ATTR_ID_XCAST_MODE = 0x02, + FM10K_PF_ATTR_ID_MAC_UPDATE = 0x03, + FM10K_PF_ATTR_ID_VLAN_UPDATE = 0x04, + FM10K_PF_ATTR_ID_CONFIG = 0x05, + FM10K_PF_ATTR_ID_CREATE_FLOW_TABLE = 0x06, + FM10K_PF_ATTR_ID_DELETE_FLOW_TABLE = 0x07, + FM10K_PF_ATTR_ID_UPDATE_FLOW = 0x08, + FM10K_PF_ATTR_ID_FLOW_STATE = 0x09, + FM10K_PF_ATTR_ID_FLOW_HANDLE = 0x0A, + FM10K_PF_ATTR_ID_DELETE_FLOW = 0x0B, + FM10K_PF_ATTR_ID_PORT = 0x0C, + FM10K_PF_ATTR_ID_UPDATE_PVID = 0x0D, + FM10K_PF_ATTR_ID_1588_TIMESTAMP = 0x10, + FM10K_PF_ATTR_ID_1588_CLOCK_OWNER = 0x12, + FM10K_PF_ATTR_ID_MASTER_CLK_OFFSET = 0x14, +}; + +#define FM10K_MSG_LPORT_MAP_GLORT_SHIFT 0 +#define FM10K_MSG_LPORT_MAP_GLORT_SIZE 16 +#define FM10K_MSG_LPORT_MAP_MASK_SHIFT 16 +#define FM10K_MSG_LPORT_MAP_MASK_SIZE 16 + +#define FM10K_MSG_UPDATE_PVID_GLORT_SHIFT 0 +#define FM10K_MSG_UPDATE_PVID_GLORT_SIZE 16 +#define 
FM10K_MSG_UPDATE_PVID_PVID_SHIFT 16 +#define FM10K_MSG_UPDATE_PVID_PVID_SIZE 16 + +/* The following data structures are overlayed directly onto TLV mailbox + * messages, and must not break 4 byte alignment. Ensure the structures line + * up correctly as per their TLV definition. + */ +#ifdef C99 +#pragma pack(push, 4) +#else +#pragma pack(4) +#endif /* C99 */ + +struct fm10k_mac_update { + __le32 mac_lower; + __le16 mac_upper; + __le16 vlan; + __le16 glort; + u8 flags; + u8 action; +}; + +struct fm10k_global_table_data { + __le32 used; + __le32 avail; +}; + +struct fm10k_swapi_error { + __le32 status; + struct fm10k_global_table_data mac; + struct fm10k_global_table_data nexthop; + struct fm10k_global_table_data ffu; +}; + +struct fm10k_swapi_1588_timestamp { + __le64 egress; + __le64 ingress; + __le16 dglort; + __le16 sglort; +}; + +struct fm10k_swapi_1588_clock_owner { + __le16 glort; + __le16 enabled; +}; + +#ifdef C99 +#pragma pack(pop) +#else +#pragma pack() +#endif /* C99 */ + +s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); +extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[]; +#define FM10K_PF_MSG_LPORT_MAP_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_LPORT_MAP, \ + fm10k_lport_map_msg_attr, func) +extern const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[]; +#define FM10K_PF_MSG_UPDATE_PVID_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_UPDATE_PVID, \ + fm10k_update_pvid_msg_attr, func) + +s32 fm10k_msg_err_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); +extern const struct fm10k_tlv_attr fm10k_err_msg_attr[]; +#define FM10K_PF_MSG_ERR_HANDLER(msg, func) \ + FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_##msg, fm10k_err_msg_attr, func) + +extern const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[]; +#define FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_1588_TIMESTAMP, \ + fm10k_1588_timestamp_msg_attr, func) + +s32 fm10k_msg_1588_clock_owner_pf(struct fm10k_hw *, u32 **, + struct fm10k_mbx_info *); +extern const struct fm10k_tlv_attr fm10k_1588_clock_owner_attr[]; +#define FM10K_PF_MSG_1588_CLOCK_OWNER_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_1588_CLOCK_OWNER, \ + fm10k_1588_clock_owner_attr, func) + +extern const struct fm10k_tlv_attr fm10k_master_clk_offset_attr[]; +#define FM10K_PF_MSG_MASTER_CLK_OFFSET_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_MASTER_CLK_OFFSET, \ + fm10k_master_clk_offset_attr, func) + +s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); +s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **, + struct fm10k_mbx_info *); +s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *, u32 **, + struct fm10k_mbx_info *); +#ifndef NO_DEFAULT_SRIOV_MSG_HANDLERS +extern const struct fm10k_msg_data fm10k_iov_msg_data_pf[]; +#endif + +s32 fm10k_init_ops_pf(struct fm10k_hw *hw); +#endif /* _FM10K_PF_H */ diff --git a/drivers/net/fm10k/base/fm10k_tlv.c b/drivers/net/fm10k/base/fm10k_tlv.c new file mode 100644 index 00000000..e6150c1d --- /dev/null +++ b/drivers/net/fm10k/base/fm10k_tlv.c @@ -0,0 +1,914 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#include "fm10k_tlv.h" + +/** + * fm10k_tlv_msg_init - Initialize message block for TLV data storage + * @msg: Pointer to message block + * @msg_id: Message ID indicating message type + * + * This function return success if provided with a valid message pointer + **/ +s32 fm10k_tlv_msg_init(u32 *msg, u16 msg_id) +{ + DEBUGFUNC("fm10k_tlv_msg_init"); + + /* verify pointer is not NULL */ + if (!msg) + return FM10K_ERR_PARAM; + + *msg = (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT) | msg_id; + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_put_null_string - Place null terminated string on message + * @msg: Pointer to message block + * @attr_id: Attribute ID + * @string: Pointer to string to be stored in attribute + * + * This function will reorder a string to be CPU endian and store it in + * the attribute buffer. It will return success if provided with a valid + * pointers. + **/ +static s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id, + const unsigned char *string) +{ + u32 attr_data = 0, len = 0; + u32 *attr; + + DEBUGFUNC("fm10k_tlv_attr_put_null_string"); + + /* verify pointers are not NULL */ + if (!string || !msg) + return FM10K_ERR_PARAM; + + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + + /* copy string into local variable and then write to msg */ + do { + /* write data to message */ + if (len && !(len % 4)) { + attr[len / 4] = attr_data; + attr_data = 0; + } + + /* record character to offset location */ + attr_data |= (u32)(*string) << (8 * (len % 4)); + len++; + + /* test for NULL and then increment */ + } while (*(string++)); + + /* write last piece of data to message */ + attr[(len + 3) / 4] = attr_data; + + /* record attribute header, update message length */ + len <<= FM10K_TLV_LEN_SHIFT; + attr[0] = len | attr_id; + + /* add header length to length */ + len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + *msg += FM10K_TLV_LEN_ALIGN(len); + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_get_null_string - Get null terminated string from attribute + * @attr: Pointer to attribute + * @string: Pointer to location of destination string + * + * This function pulls the string back out of the attribute and will place + * it in the array pointed by by string. It will return success if provided + * with a valid pointers. 
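+ *
+ * For example, the string "fm10k" is stored with a length of 6 (the
+ * terminating NUL included) and its characters packed four per 32-bit
+ * word starting at the least significant byte, i.e. payload words
+ * 0x30316d66 ('f', 'm', '1', '0') and 0x0000006b ('k', NUL); this
+ * helper simply reverses that packing.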
+ **/ +static s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string) +{ + u32 len; + + DEBUGFUNC("fm10k_tlv_attr_get_null_string"); + + /* verify pointers are not NULL */ + if (!string || !attr) + return FM10K_ERR_PARAM; + + len = *attr >> FM10K_TLV_LEN_SHIFT; + attr++; + + while (len--) + string[len] = (u8)(attr[len / 4] >> (8 * (len % 4))); + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_put_mac_vlan - Store MAC/VLAN attribute in message + * @msg: Pointer to message block + * @attr_id: Attribute ID + * @mac_addr: MAC address to be stored + * + * This function will reorder a MAC address to be CPU endian and store it + * in the attribute buffer. It will return success if provided with a + * valid pointers. + **/ +s32 fm10k_tlv_attr_put_mac_vlan(u32 *msg, u16 attr_id, + const u8 *mac_addr, u16 vlan) +{ + u32 len = ETH_ALEN << FM10K_TLV_LEN_SHIFT; + u32 *attr; + + DEBUGFUNC("fm10k_tlv_attr_put_mac_vlan"); + + /* verify pointers are not NULL */ + if (!msg || !mac_addr) + return FM10K_ERR_PARAM; + + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + + /* record attribute header, update message length */ + attr[0] = len | attr_id; + + /* copy value into local variable and then write to msg */ + attr[1] = FM10K_LE32_TO_CPU(*(const __le32 *)&mac_addr[0]); + attr[2] = FM10K_LE16_TO_CPU(*(const __le16 *)&mac_addr[4]); + attr[2] |= (u32)vlan << 16; + + /* add header length to length */ + len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + *msg += FM10K_TLV_LEN_ALIGN(len); + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_get_mac_vlan - Get MAC/VLAN stored in attribute + * @attr: Pointer to attribute + * @attr_id: Attribute ID + * @mac_addr: location of buffer to store MAC address + * + * This function pulls the MAC address back out of the attribute and will + * place it in the array pointed by by mac_addr. It will return success + * if provided with a valid pointers. + **/ +s32 fm10k_tlv_attr_get_mac_vlan(u32 *attr, u8 *mac_addr, u16 *vlan) +{ + DEBUGFUNC("fm10k_tlv_attr_get_mac_vlan"); + + /* verify pointers are not NULL */ + if (!mac_addr || !attr) + return FM10K_ERR_PARAM; + + *(__le32 *)&mac_addr[0] = FM10K_CPU_TO_LE32(attr[1]); + *(__le16 *)&mac_addr[4] = FM10K_CPU_TO_LE16((u16)(attr[2])); + *vlan = (u16)(attr[2] >> 16); + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_put_bool - Add header indicating value "true" + * @msg: Pointer to message block + * @attr_id: Attribute ID + * + * This function will simply add an attribute header, the fact + * that the header is here means the attribute value is true, else + * it is false. The function will return success if provided with a + * valid pointers. + **/ +s32 fm10k_tlv_attr_put_bool(u32 *msg, u16 attr_id) +{ + DEBUGFUNC("fm10k_tlv_attr_put_bool"); + + /* verify pointers are not NULL */ + if (!msg) + return FM10K_ERR_PARAM; + + /* record attribute header */ + msg[FM10K_TLV_DWORD_LEN(*msg)] = attr_id; + + /* add header length to length */ + *msg += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_put_value - Store integer value attribute in message + * @msg: Pointer to message block + * @attr_id: Attribute ID + * @value: Value to be written + * @len: Size of value + * + * This function will place an integer value of up to 8 bytes in size + * in a message attribute. The function will return success provided + * that msg is a valid pointer, and len is 1, 2, 4, or 8. 
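+ *
+ * The fixed-width wrappers in fm10k_tlv.h expand to this call, e.g.
+ * fm10k_tlv_attr_put_u16(msg, attr_id, val) becomes
+ * fm10k_tlv_attr_put_value(msg, attr_id, val, 2), which masks the value
+ * to its low 16 bits and records a length of 2 in the attribute header.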
+ **/ +s32 fm10k_tlv_attr_put_value(u32 *msg, u16 attr_id, s64 value, u32 len) +{ + u32 *attr; + + DEBUGFUNC("fm10k_tlv_attr_put_value"); + + /* verify non-null msg and len is 1, 2, 4, or 8 */ + if (!msg || !len || len > 8 || (len & (len - 1))) + return FM10K_ERR_PARAM; + + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + + if (len < 4) { + attr[1] = (u32)value & (BIT(8 * len) - 1); + } else { + attr[1] = (u32)value; + if (len > 4) + attr[2] = (u32)(value >> 32); + } + + /* record attribute header, update message length */ + len <<= FM10K_TLV_LEN_SHIFT; + attr[0] = len | attr_id; + + /* add header length to length */ + len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + *msg += FM10K_TLV_LEN_ALIGN(len); + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_get_value - Get integer value stored in attribute + * @attr: Pointer to attribute + * @value: Pointer to destination buffer + * @len: Size of value + * + * This function will place an integer value of up to 8 bytes in size + * in the offset pointed to by value. The function will return success + * provided that pointers are valid and the len value matches the + * attribute length. + **/ +s32 fm10k_tlv_attr_get_value(u32 *attr, void *value, u32 len) +{ + DEBUGFUNC("fm10k_tlv_attr_get_value"); + + /* verify pointers are not NULL */ + if (!attr || !value) + return FM10K_ERR_PARAM; + + if ((*attr >> FM10K_TLV_LEN_SHIFT) != len) + return FM10K_ERR_PARAM; + + if (len == 8) + *(u64 *)value = ((u64)attr[2] << 32) | attr[1]; + else if (len == 4) + *(u32 *)value = attr[1]; + else if (len == 2) + *(u16 *)value = (u16)attr[1]; + else + *(u8 *)value = (u8)attr[1]; + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_put_le_struct - Store little endian structure in message + * @msg: Pointer to message block + * @attr_id: Attribute ID + * @le_struct: Pointer to structure to be written + * @len: Size of le_struct + * + * This function will place a little endian structure value in a message + * attribute. The function will return success provided that all pointers + * are valid and length is a non-zero multiple of 4. + **/ +s32 fm10k_tlv_attr_put_le_struct(u32 *msg, u16 attr_id, + const void *le_struct, u32 len) +{ + const __le32 *le32_ptr = (const __le32 *)le_struct; + u32 *attr; + u32 i; + + DEBUGFUNC("fm10k_tlv_attr_put_le_struct"); + + /* verify non-null msg and len is in 32 bit words */ + if (!msg || !len || (len % 4)) + return FM10K_ERR_PARAM; + + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + + /* copy le32 structure into host byte order at 32b boundaries */ + for (i = 0; i < (len / 4); i++) + attr[i + 1] = FM10K_LE32_TO_CPU(le32_ptr[i]); + + /* record attribute header, update message length */ + len <<= FM10K_TLV_LEN_SHIFT; + attr[0] = len | attr_id; + + /* add header length to length */ + len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + *msg += FM10K_TLV_LEN_ALIGN(len); + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_get_le_struct - Get little endian struct form attribute + * @attr: Pointer to attribute + * @le_struct: Pointer to structure to be written + * @len: Size of structure + * + * This function will place a little endian structure in the buffer + * pointed to by le_struct. The function will return success + * provided that pointers are valid and the len value matches the + * attribute length. 
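+ *
+ * The little endian structures exchanged this way are typically the
+ * 4-byte-packed mailbox structures declared in fm10k_pf.h, such as
+ * struct fm10k_swapi_error, whose sizes are multiples of 4 bytes and so
+ * match the word-at-a-time copy performed here.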
+ **/ +s32 fm10k_tlv_attr_get_le_struct(u32 *attr, void *le_struct, u32 len) +{ + __le32 *le32_ptr = (__le32 *)le_struct; + u32 i; + + DEBUGFUNC("fm10k_tlv_attr_get_le_struct"); + + /* verify pointers are not NULL */ + if (!le_struct || !attr) + return FM10K_ERR_PARAM; + + if ((*attr >> FM10K_TLV_LEN_SHIFT) != len) + return FM10K_ERR_PARAM; + + attr++; + + for (i = 0; len; i++, len -= 4) + le32_ptr[i] = FM10K_CPU_TO_LE32(attr[i]); + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_nest_start - Start a set of nested attributes + * @msg: Pointer to message block + * @attr_id: Attribute ID + * + * This function will mark off a new nested region for encapsulating + * a given set of attributes. The idea is if you wish to place a secondary + * structure within the message this mechanism allows for that. The + * function will return NULL on failure, and a pointer to the start + * of the nested attributes on success. + **/ +static u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id) +{ + u32 *attr; + + DEBUGFUNC("fm10k_tlv_attr_nest_start"); + + /* verify pointer is not NULL */ + if (!msg) + return NULL; + + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + + attr[0] = attr_id; + + /* return pointer to nest header */ + return attr; +} + +/** + * fm10k_tlv_attr_nest_stop - Stop a set of nested attributes + * @msg: Pointer to message block + * + * This function closes off an existing set of nested attributes. The + * message pointer should be pointing to the parent of the nest. So in + * the case of a nest within the nest this would be the outer nest pointer. + * This function will return success provided all pointers are valid. + **/ +static s32 fm10k_tlv_attr_nest_stop(u32 *msg) +{ + u32 *attr; + u32 len; + + DEBUGFUNC("fm10k_tlv_attr_nest_stop"); + + /* verify pointer is not NULL */ + if (!msg) + return FM10K_ERR_PARAM; + + /* locate the nested header and retrieve its length */ + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + len = (attr[0] >> FM10K_TLV_LEN_SHIFT) << FM10K_TLV_LEN_SHIFT; + + /* only include nest if data was added to it */ + if (len) { + len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + *msg += len; + } + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_validate - Validate attribute metadata + * @attr: Pointer to attribute + * @tlv_attr: Type and length info for attribute + * + * This function does some basic validation of the input TLV. It + * verifies the length, and in the case of null terminated strings + * it verifies that the last byte is null. The function will + * return FM10K_ERR_PARAM if any attribute is malformed, otherwise + * it returns 0. 
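+ *
+ * For example, a FM10K_TLV_MAC_ADDR attribute whose header encodes any
+ * length other than 6, or a FM10K_TLV_BOOL attribute that carries a
+ * payload, fails with FM10K_ERR_PARAM, while an attribute ID with no
+ * entry in tlv_attr[] is reported as FM10K_NOT_IMPLEMENTED.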
+ **/ +STATIC s32 fm10k_tlv_attr_validate(u32 *attr, + const struct fm10k_tlv_attr *tlv_attr) +{ + u32 attr_id = *attr & FM10K_TLV_ID_MASK; + u16 len = *attr >> FM10K_TLV_LEN_SHIFT; + + DEBUGFUNC("fm10k_tlv_attr_validate"); + + /* verify this is an attribute and not a message */ + if (*attr & (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT)) + return FM10K_ERR_PARAM; + + /* search through the list of attributes to find a matching ID */ + while (tlv_attr->id < attr_id) + tlv_attr++; + + /* if didn't find a match then we should exit */ + if (tlv_attr->id != attr_id) + return FM10K_NOT_IMPLEMENTED; + + /* move to start of attribute data */ + attr++; + + switch (tlv_attr->type) { + case FM10K_TLV_NULL_STRING: + if (!len || + (attr[(len - 1) / 4] & (0xFF << (8 * ((len - 1) % 4))))) + return FM10K_ERR_PARAM; + if (len > tlv_attr->len) + return FM10K_ERR_PARAM; + break; + case FM10K_TLV_MAC_ADDR: + if (len != ETH_ALEN) + return FM10K_ERR_PARAM; + break; + case FM10K_TLV_BOOL: + if (len) + return FM10K_ERR_PARAM; + break; + case FM10K_TLV_UNSIGNED: + case FM10K_TLV_SIGNED: + if (len != tlv_attr->len) + return FM10K_ERR_PARAM; + break; + case FM10K_TLV_LE_STRUCT: + /* struct must be 4 byte aligned */ + if ((len % 4) || len != tlv_attr->len) + return FM10K_ERR_PARAM; + break; + case FM10K_TLV_NESTED: + /* nested attributes must be 4 byte aligned */ + if (len % 4) + return FM10K_ERR_PARAM; + break; + default: + /* attribute id is mapped to bad value */ + return FM10K_ERR_PARAM; + } + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_attr_parse - Parses stream of attribute data + * @attr: Pointer to attribute list + * @results: Pointer array to store pointers to attributes + * @tlv_attr: Type and length info for attributes + * + * This function validates a stream of attributes and parses them + * up into an array of pointers stored in results. The function will + * return FM10K_ERR_PARAM on any input or message error, + * FM10K_NOT_IMPLEMENTED for any attribute that is outside of the array + * and 0 on success. 
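+ *
+ * On success each results[attr_id] entry points at the header word of
+ * the matching attribute, ready for the typed getters, e.g.
+ * fm10k_tlv_attr_get_u32(results[FM10K_TEST_MSG_U32], &value); IDs at
+ * or above FM10K_TLV_RESULTS_MAX cannot be stored in the results array.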
+ **/ +static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results, + const struct fm10k_tlv_attr *tlv_attr) +{ + u32 i, attr_id, offset = 0; + s32 err = 0; + u16 len; + + DEBUGFUNC("fm10k_tlv_attr_parse"); + + /* verify pointers are not NULL */ + if (!attr || !results) + return FM10K_ERR_PARAM; + + /* initialize results to NULL */ + for (i = 0; i < FM10K_TLV_RESULTS_MAX; i++) + results[i] = NULL; + + /* pull length from the message header */ + len = *attr >> FM10K_TLV_LEN_SHIFT; + + /* no attributes to parse if there is no length */ + if (!len) + return FM10K_SUCCESS; + + /* no attributes to parse, just raw data, message becomes attribute */ + if (!tlv_attr) { + results[0] = attr; + return FM10K_SUCCESS; + } + + /* move to start of attribute data */ + attr++; + + /* run through list parsing all attributes */ + while (offset < len) { + attr_id = *attr & FM10K_TLV_ID_MASK; + + if (attr_id < FM10K_TLV_RESULTS_MAX) + err = fm10k_tlv_attr_validate(attr, tlv_attr); + else + err = FM10K_NOT_IMPLEMENTED; + + if (err < 0) + return err; + if (!err) + results[attr_id] = attr; + + /* update offset */ + offset += FM10K_TLV_DWORD_LEN(*attr) * 4; + + /* move to next attribute */ + attr = &attr[FM10K_TLV_DWORD_LEN(*attr)]; + } + + /* we should find ourselves at the end of the list */ + if (offset != len) + return FM10K_ERR_PARAM; + + return FM10K_SUCCESS; +} + +/** + * fm10k_tlv_msg_parse - Parses message header and calls function handler + * @hw: Pointer to hardware structure + * @msg: Pointer to message + * @mbx: Pointer to mailbox information structure + * @func: Function array containing list of message handling functions + * + * This function should be the first function called upon receiving a + * message. The handler will identify the message type and call the correct + * handler for the given message. It will return the value from the function + * call on a recognized message type, otherwise it will return + * FM10K_NOT_IMPLEMENTED on an unrecognized type. + **/ +s32 fm10k_tlv_msg_parse(struct fm10k_hw *hw, u32 *msg, + struct fm10k_mbx_info *mbx, + const struct fm10k_msg_data *data) +{ + u32 *results[FM10K_TLV_RESULTS_MAX]; + u32 msg_id; + s32 err; + + DEBUGFUNC("fm10k_tlv_msg_parse"); + + /* verify pointer is not NULL */ + if (!msg || !data) + return FM10K_ERR_PARAM; + + /* verify this is a message and not an attribute */ + if (!(*msg & (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT))) + return FM10K_ERR_PARAM; + + /* grab message ID */ + msg_id = *msg & FM10K_TLV_ID_MASK; + + while (data->id < msg_id) + data++; + + /* if we didn't find it then pass it up as an error */ + if (data->id != msg_id) { + while (data->id != FM10K_TLV_ERROR) + data++; + } + + /* parse the attributes into the results list */ + err = fm10k_tlv_attr_parse(msg, results, data->attr); + if (err < 0) + return err; + + return data->func(hw, results, mbx); +} + +/** + * fm10k_tlv_msg_error - Default handler for unrecognized TLV message IDs + * @hw: Pointer to hardware structure + * @results: Pointer array to message, results[0] is pointer to message + * @mbx: Unused mailbox pointer + * + * This function is a default handler for unrecognized messages. At a + * a minimum it just indicates that the message requested was + * unimplemented. 
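+ *
+ * fm10k_tlv_msg_parse() falls back to the entry registered with
+ * FM10K_TLV_MSG_ERROR_HANDLER() whenever a message ID is missing from
+ * the handler table, which is how this handler is normally reached; the
+ * PF table fm10k_msg_data_pf[] in fm10k_pf.c registers it that way.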
+ **/ +s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + UNREFERENCED_3PARAMETER(hw, results, mbx); + DEBUGOUT1("Unknown message ID %u\n", **results & FM10K_TLV_ID_MASK); + return FM10K_NOT_IMPLEMENTED; +} + +STATIC const unsigned char test_str[] = "fm10k"; +STATIC const unsigned char test_mac[ETH_ALEN] = { 0x12, 0x34, 0x56, + 0x78, 0x9a, 0xbc }; +STATIC const u16 test_vlan = 0x0FED; +STATIC const u64 test_u64 = 0xfedcba9876543210ull; +STATIC const u32 test_u32 = 0x87654321; +STATIC const u16 test_u16 = 0x8765; +STATIC const u8 test_u8 = 0x87; +STATIC const s64 test_s64 = -0x123456789abcdef0ll; +STATIC const s32 test_s32 = -0x1235678; +STATIC const s16 test_s16 = -0x1234; +STATIC const s8 test_s8 = -0x12; +STATIC const __le32 test_le[2] = { FM10K_CPU_TO_LE32(0x12345678), + FM10K_CPU_TO_LE32(0x9abcdef0)}; + +/* The message below is meant to be used as a test message to demonstrate + * how to use the TLV interface and to test the types. Normally this code + * be compiled out by stripping the code wrapped in FM10K_TLV_TEST_MSG + */ +const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[] = { + FM10K_TLV_ATTR_NULL_STRING(FM10K_TEST_MSG_STRING, 80), + FM10K_TLV_ATTR_MAC_ADDR(FM10K_TEST_MSG_MAC_ADDR), + FM10K_TLV_ATTR_U8(FM10K_TEST_MSG_U8), + FM10K_TLV_ATTR_U16(FM10K_TEST_MSG_U16), + FM10K_TLV_ATTR_U32(FM10K_TEST_MSG_U32), + FM10K_TLV_ATTR_U64(FM10K_TEST_MSG_U64), + FM10K_TLV_ATTR_S8(FM10K_TEST_MSG_S8), + FM10K_TLV_ATTR_S16(FM10K_TEST_MSG_S16), + FM10K_TLV_ATTR_S32(FM10K_TEST_MSG_S32), + FM10K_TLV_ATTR_S64(FM10K_TEST_MSG_S64), + FM10K_TLV_ATTR_LE_STRUCT(FM10K_TEST_MSG_LE_STRUCT, 8), + FM10K_TLV_ATTR_NESTED(FM10K_TEST_MSG_NESTED), + FM10K_TLV_ATTR_S32(FM10K_TEST_MSG_RESULT), + FM10K_TLV_ATTR_LAST +}; + +/** + * fm10k_tlv_msg_test_generate_data - Stuff message with data + * @msg: Pointer to message + * @attr_flags: List of flags indicating what attributes to add + * + * This function is meant to load a message buffer with attribute data + **/ +STATIC void fm10k_tlv_msg_test_generate_data(u32 *msg, u32 attr_flags) +{ + DEBUGFUNC("fm10k_tlv_msg_test_generate_data"); + + if (attr_flags & BIT(FM10K_TEST_MSG_STRING)) + fm10k_tlv_attr_put_null_string(msg, FM10K_TEST_MSG_STRING, + test_str); + if (attr_flags & BIT(FM10K_TEST_MSG_MAC_ADDR)) + fm10k_tlv_attr_put_mac_vlan(msg, FM10K_TEST_MSG_MAC_ADDR, + test_mac, test_vlan); + if (attr_flags & BIT(FM10K_TEST_MSG_U8)) + fm10k_tlv_attr_put_u8(msg, FM10K_TEST_MSG_U8, test_u8); + if (attr_flags & BIT(FM10K_TEST_MSG_U16)) + fm10k_tlv_attr_put_u16(msg, FM10K_TEST_MSG_U16, test_u16); + if (attr_flags & BIT(FM10K_TEST_MSG_U32)) + fm10k_tlv_attr_put_u32(msg, FM10K_TEST_MSG_U32, test_u32); + if (attr_flags & BIT(FM10K_TEST_MSG_U64)) + fm10k_tlv_attr_put_u64(msg, FM10K_TEST_MSG_U64, test_u64); + if (attr_flags & BIT(FM10K_TEST_MSG_S8)) + fm10k_tlv_attr_put_s8(msg, FM10K_TEST_MSG_S8, test_s8); + if (attr_flags & BIT(FM10K_TEST_MSG_S16)) + fm10k_tlv_attr_put_s16(msg, FM10K_TEST_MSG_S16, test_s16); + if (attr_flags & BIT(FM10K_TEST_MSG_S32)) + fm10k_tlv_attr_put_s32(msg, FM10K_TEST_MSG_S32, test_s32); + if (attr_flags & BIT(FM10K_TEST_MSG_S64)) + fm10k_tlv_attr_put_s64(msg, FM10K_TEST_MSG_S64, test_s64); + if (attr_flags & BIT(FM10K_TEST_MSG_LE_STRUCT)) + fm10k_tlv_attr_put_le_struct(msg, FM10K_TEST_MSG_LE_STRUCT, + test_le, 8); +} + +/** + * fm10k_tlv_msg_test_create - Create a test message testing all attributes + * @msg: Pointer to message + * @attr_flags: List of flags indicating what attributes to add + * + * This function 
is meant to load a message buffer with all attribute types + * including a nested attribute. + **/ +void fm10k_tlv_msg_test_create(u32 *msg, u32 attr_flags) +{ + u32 *nest = NULL; + + DEBUGFUNC("fm10k_tlv_msg_test_create"); + + fm10k_tlv_msg_init(msg, FM10K_TLV_MSG_ID_TEST); + + fm10k_tlv_msg_test_generate_data(msg, attr_flags); + + /* check for nested attributes */ + attr_flags >>= FM10K_TEST_MSG_NESTED; + + if (attr_flags) { + nest = fm10k_tlv_attr_nest_start(msg, FM10K_TEST_MSG_NESTED); + + fm10k_tlv_msg_test_generate_data(nest, attr_flags); + + fm10k_tlv_attr_nest_stop(msg); + } +} + +/** + * fm10k_tlv_msg_test - Validate all results on test message receive + * @hw: Pointer to hardware structure + * @results: Pointer array to attributes in the message + * @mbx: Pointer to mailbox information structure + * + * This function does a check to verify all attributes match what the test + * message placed in the message buffer. It is the default handler + * for TLV test messages. + **/ +s32 fm10k_tlv_msg_test(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + u32 *nest_results[FM10K_TLV_RESULTS_MAX]; + unsigned char result_str[80]; + unsigned char result_mac[ETH_ALEN]; + s32 err = FM10K_SUCCESS; + __le32 result_le[2]; + u16 result_vlan; + u64 result_u64; + u32 result_u32; + u16 result_u16; + u8 result_u8; + s64 result_s64; + s32 result_s32; + s16 result_s16; + s8 result_s8; + u32 reply[3]; + + DEBUGFUNC("fm10k_tlv_msg_test"); + + /* retrieve results of a previous test */ + if (!!results[FM10K_TEST_MSG_RESULT]) + return fm10k_tlv_attr_get_s32(results[FM10K_TEST_MSG_RESULT], + &mbx->test_result); + +parse_nested: + if (!!results[FM10K_TEST_MSG_STRING]) { + err = fm10k_tlv_attr_get_null_string( + results[FM10K_TEST_MSG_STRING], + result_str); + if (!err && memcmp(test_str, result_str, sizeof(test_str))) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_MAC_ADDR]) { + err = fm10k_tlv_attr_get_mac_vlan( + results[FM10K_TEST_MSG_MAC_ADDR], + result_mac, &result_vlan); + if (!err && memcmp(test_mac, result_mac, ETH_ALEN)) + err = FM10K_ERR_INVALID_VALUE; + if (!err && test_vlan != result_vlan) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_U8]) { + err = fm10k_tlv_attr_get_u8(results[FM10K_TEST_MSG_U8], + &result_u8); + if (!err && test_u8 != result_u8) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_U16]) { + err = fm10k_tlv_attr_get_u16(results[FM10K_TEST_MSG_U16], + &result_u16); + if (!err && test_u16 != result_u16) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_U32]) { + err = fm10k_tlv_attr_get_u32(results[FM10K_TEST_MSG_U32], + &result_u32); + if (!err && test_u32 != result_u32) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_U64]) { + err = fm10k_tlv_attr_get_u64(results[FM10K_TEST_MSG_U64], + &result_u64); + if (!err && test_u64 != result_u64) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_S8]) { + err = fm10k_tlv_attr_get_s8(results[FM10K_TEST_MSG_S8], + &result_s8); + if (!err && test_s8 != result_s8) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_S16]) { + err = fm10k_tlv_attr_get_s16(results[FM10K_TEST_MSG_S16], + &result_s16); + if (!err && test_s16 != result_s16) + err = FM10K_ERR_INVALID_VALUE; + 
if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_S32]) { + err = fm10k_tlv_attr_get_s32(results[FM10K_TEST_MSG_S32], + &result_s32); + if (!err && test_s32 != result_s32) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_S64]) { + err = fm10k_tlv_attr_get_s64(results[FM10K_TEST_MSG_S64], + &result_s64); + if (!err && test_s64 != result_s64) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_LE_STRUCT]) { + err = fm10k_tlv_attr_get_le_struct( + results[FM10K_TEST_MSG_LE_STRUCT], + result_le, + sizeof(result_le)); + if (!err && memcmp(test_le, result_le, sizeof(test_le))) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + + if (!!results[FM10K_TEST_MSG_NESTED]) { + /* clear any pointers */ + memset(nest_results, 0, sizeof(nest_results)); + + /* parse the nested attributes into the nest results list */ + err = fm10k_tlv_attr_parse(results[FM10K_TEST_MSG_NESTED], + nest_results, + fm10k_tlv_msg_test_attr); + if (err) + goto report_result; + + /* loop back through to the start */ + results = nest_results; + goto parse_nested; + } + +report_result: + /* generate reply with test result */ + fm10k_tlv_msg_init(reply, FM10K_TLV_MSG_ID_TEST); + fm10k_tlv_attr_put_s32(reply, FM10K_TEST_MSG_RESULT, err); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, reply); +} diff --git a/drivers/net/fm10k/base/fm10k_tlv.h b/drivers/net/fm10k/base/fm10k_tlv.h new file mode 100644 index 00000000..8f85fce3 --- /dev/null +++ b/drivers/net/fm10k/base/fm10k_tlv.h @@ -0,0 +1,194 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#ifndef _FM10K_TLV_H_ +#define _FM10K_TLV_H_ + +/* forward declaration */ +struct fm10k_msg_data; + +#include "fm10k_type.h" + +/* Message / Argument header format + * 3 2 1 0 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Length | Flags | Type / ID | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * The message header format described here is used for messages that are + * passed between the PF and the VF. To allow for messages larger then + * mailbox size we will provide a message with the above header and it + * will be segmented and transported to the mailbox to the other side where + * it is reassembled. It contains the following fields: + * Length: Length of the message in bytes excluding the message header + * Flags: TBD + * Type/ID: These will be the message/argument types we pass + */ +/* message data header */ +#define FM10K_TLV_ID_SHIFT 0 +#define FM10K_TLV_ID_SIZE 16 +#define FM10K_TLV_ID_MASK ((1u << FM10K_TLV_ID_SIZE) - 1) +#define FM10K_TLV_FLAGS_SHIFT 16 +#define FM10K_TLV_FLAGS_MSG 0x1 +#define FM10K_TLV_FLAGS_SIZE 4 +#define FM10K_TLV_LEN_SHIFT 20 +#define FM10K_TLV_LEN_SIZE 12 + +#define FM10K_TLV_HDR_LEN 4ul +#define FM10K_TLV_LEN_ALIGN_MASK \ + ((FM10K_TLV_HDR_LEN - 1) << FM10K_TLV_LEN_SHIFT) +#define FM10K_TLV_LEN_ALIGN(tlv) \ + (((tlv) + FM10K_TLV_LEN_ALIGN_MASK) & ~FM10K_TLV_LEN_ALIGN_MASK) +#define FM10K_TLV_DWORD_LEN(tlv) \ + ((u16)((FM10K_TLV_LEN_ALIGN(tlv)) >> (FM10K_TLV_LEN_SHIFT + 2)) + 1) + +#define FM10K_TLV_RESULTS_MAX 32 + +enum fm10k_tlv_type { + FM10K_TLV_NULL_STRING, + FM10K_TLV_MAC_ADDR, + FM10K_TLV_BOOL, + FM10K_TLV_UNSIGNED, + FM10K_TLV_SIGNED, + FM10K_TLV_LE_STRUCT, + FM10K_TLV_NESTED, + FM10K_TLV_MAX_TYPE +}; + +#define FM10K_TLV_ERROR (~0u) + +struct fm10k_tlv_attr { + unsigned int id; + enum fm10k_tlv_type type; + u16 len; +}; + +#define FM10K_TLV_ATTR_NULL_STRING(id, len) { id, FM10K_TLV_NULL_STRING, len } +#define FM10K_TLV_ATTR_MAC_ADDR(id) { id, FM10K_TLV_MAC_ADDR, 6 } +#define FM10K_TLV_ATTR_BOOL(id) { id, FM10K_TLV_BOOL, 0 } +#define FM10K_TLV_ATTR_U8(id) { id, FM10K_TLV_UNSIGNED, 1 } +#define FM10K_TLV_ATTR_U16(id) { id, FM10K_TLV_UNSIGNED, 2 } +#define FM10K_TLV_ATTR_U32(id) { id, FM10K_TLV_UNSIGNED, 4 } +#define FM10K_TLV_ATTR_U64(id) { id, FM10K_TLV_UNSIGNED, 8 } +#define FM10K_TLV_ATTR_S8(id) { id, FM10K_TLV_SIGNED, 1 } +#define FM10K_TLV_ATTR_S16(id) { id, FM10K_TLV_SIGNED, 2 } +#define FM10K_TLV_ATTR_S32(id) { id, FM10K_TLV_SIGNED, 4 } +#define FM10K_TLV_ATTR_S64(id) { id, FM10K_TLV_SIGNED, 8 } +#define FM10K_TLV_ATTR_LE_STRUCT(id, len) { id, FM10K_TLV_LE_STRUCT, len } +#define FM10K_TLV_ATTR_NESTED(id) { id, FM10K_TLV_NESTED } +#define FM10K_TLV_ATTR_LAST { FM10K_TLV_ERROR } + +struct fm10k_msg_data { + unsigned int id; + const struct fm10k_tlv_attr *attr; + s32 (*func)(struct fm10k_hw *, u32 **, + struct fm10k_mbx_info *); +}; + +#define FM10K_MSG_HANDLER(id, attr, func) { id, attr, func } + +s32 fm10k_tlv_msg_init(u32 *, u16); +s32 fm10k_tlv_attr_put_mac_vlan(u32 *, u16, const u8 *, u16); +s32 fm10k_tlv_attr_get_mac_vlan(u32 *, u8 *, u16 *); +s32 fm10k_tlv_attr_put_bool(u32 *, u16); +s32 fm10k_tlv_attr_put_value(u32 *, u16, s64, u32); +#define fm10k_tlv_attr_put_u8(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 1) +#define fm10k_tlv_attr_put_u16(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, 
val, 2) +#define fm10k_tlv_attr_put_u32(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 4) +#define fm10k_tlv_attr_put_u64(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 8) +#define fm10k_tlv_attr_put_s8(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 1) +#define fm10k_tlv_attr_put_s16(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 2) +#define fm10k_tlv_attr_put_s32(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 4) +#define fm10k_tlv_attr_put_s64(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 8) +s32 fm10k_tlv_attr_get_value(u32 *, void *, u32); +#define fm10k_tlv_attr_get_u8(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(u8)) +#define fm10k_tlv_attr_get_u16(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(u16)) +#define fm10k_tlv_attr_get_u32(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(u32)) +#define fm10k_tlv_attr_get_u64(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(u64)) +#define fm10k_tlv_attr_get_s8(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(s8)) +#define fm10k_tlv_attr_get_s16(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(s16)) +#define fm10k_tlv_attr_get_s32(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(s32)) +#define fm10k_tlv_attr_get_s64(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(s64)) +s32 fm10k_tlv_attr_put_le_struct(u32 *, u16, const void *, u32); +s32 fm10k_tlv_attr_get_le_struct(u32 *, void *, u32); +s32 fm10k_tlv_msg_parse(struct fm10k_hw *, u32 *, struct fm10k_mbx_info *, + const struct fm10k_msg_data *); +s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *); + +#define FM10K_TLV_MSG_ID_TEST 0 + +enum fm10k_tlv_test_attr_id { + FM10K_TEST_MSG_UNSET, + FM10K_TEST_MSG_STRING, + FM10K_TEST_MSG_MAC_ADDR, + FM10K_TEST_MSG_U8, + FM10K_TEST_MSG_U16, + FM10K_TEST_MSG_U32, + FM10K_TEST_MSG_U64, + FM10K_TEST_MSG_S8, + FM10K_TEST_MSG_S16, + FM10K_TEST_MSG_S32, + FM10K_TEST_MSG_S64, + FM10K_TEST_MSG_LE_STRUCT, + FM10K_TEST_MSG_NESTED, + FM10K_TEST_MSG_RESULT, + FM10K_TEST_MSG_MAX +}; + +extern const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[]; +void fm10k_tlv_msg_test_create(u32 *, u32); +s32 fm10k_tlv_msg_test(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); + +#define FM10K_TLV_MSG_TEST_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_TLV_MSG_ID_TEST, fm10k_tlv_msg_test_attr, func) +#define FM10K_TLV_MSG_ERROR_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_TLV_ERROR, NULL, func) +#endif /* _FM10K_MSG_H_ */ diff --git a/drivers/net/fm10k/base/fm10k_type.h b/drivers/net/fm10k/base/fm10k_type.h new file mode 100644 index 00000000..3fc8f136 --- /dev/null +++ b/drivers/net/fm10k/base/fm10k_type.h @@ -0,0 +1,848 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. 
Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _FM10K_TYPE_H_ +#define _FM10K_TYPE_H_ + +/* forward declaration */ +struct fm10k_hw; + +#include "fm10k_osdep.h" +#include "fm10k_mbx.h" + +#define FM10K_DEV_ID_PF 0x15A4 +#define FM10K_DEV_ID_VF 0x15A5 +#ifdef BOULDER_RAPIDS_HW +#define FM10K_DEV_ID_SDI_FM10420_QDA2 0x15D0 +#endif /* BOULDER_RAPIDS_HW */ +#ifdef ATWOOD_CHANNEL_HW +#define FM10K_DEV_ID_SDI_FM10420_DA2 0x15D5 +#endif /* ATWOOD_CHANNEL_HW */ + +#ifndef LINUX_MACROS +#ifndef BIT +#define BIT(a) (1UL << (a)) +#endif +#endif /* LINUX_MACROS */ + +#define FM10K_MAX_QUEUES 256 +#define FM10K_MAX_QUEUES_PF 128 +#define FM10K_MAX_QUEUES_POOL 16 + +#define FM10K_48_BIT_MASK 0x0000FFFFFFFFFFFFull +#define FM10K_STAT_VALID 0x80000000 + +/* PCI Bus Info */ +#define FM10K_PCIE_LINK_CAP 0x7C +#define FM10K_PCIE_LINK_STATUS 0x82 +#define FM10K_PCIE_LINK_WIDTH 0x3F0 +#define FM10K_PCIE_LINK_WIDTH_1 0x10 +#define FM10K_PCIE_LINK_WIDTH_2 0x20 +#define FM10K_PCIE_LINK_WIDTH_4 0x40 +#define FM10K_PCIE_LINK_WIDTH_8 0x80 +#define FM10K_PCIE_LINK_SPEED 0xF +#define FM10K_PCIE_LINK_SPEED_2500 0x1 +#define FM10K_PCIE_LINK_SPEED_5000 0x2 +#define FM10K_PCIE_LINK_SPEED_8000 0x3 + +/* PCIe payload size */ +#define FM10K_PCIE_DEV_CAP 0x74 +#define FM10K_PCIE_DEV_CAP_PAYLOAD 0x07 +#define FM10K_PCIE_DEV_CAP_PAYLOAD_128 0x00 +#define FM10K_PCIE_DEV_CAP_PAYLOAD_256 0x01 +#define FM10K_PCIE_DEV_CAP_PAYLOAD_512 0x02 +#define FM10K_PCIE_DEV_CTRL 0x78 +#define FM10K_PCIE_DEV_CTRL_PAYLOAD 0xE0 +#define FM10K_PCIE_DEV_CTRL_PAYLOAD_128 0x00 +#define FM10K_PCIE_DEV_CTRL_PAYLOAD_256 0x20 +#define FM10K_PCIE_DEV_CTRL_PAYLOAD_512 0x40 + +/* PCIe MSI-X Capability info */ +#define FM10K_PCI_MSIX_MSG_CTRL 0xB2 +#define FM10K_PCI_MSIX_MSG_CTRL_TBL_SZ_MASK 0x7FF +#define FM10K_MAX_MSIX_VECTORS 256 +#define FM10K_MAX_VECTORS_PF 256 +#define FM10K_MAX_VECTORS_POOL 32 + +/* PCIe SR-IOV Info */ +#define FM10K_PCIE_SRIOV_CTRL 0x190 +#define FM10K_PCIE_SRIOV_CTRL_VFARI 0x10 + +#define FM10K_SUCCESS 0 +#define FM10K_ERR_DEVICE_NOT_SUPPORTED -1 +#define FM10K_ERR_PARAM -2 +#define FM10K_ERR_NO_RESOURCES -3 +#define FM10K_ERR_REQUESTS_PENDING -4 +#define FM10K_ERR_RESET_REQUESTED -5 +#define FM10K_ERR_DMA_PENDING -6 +#define FM10K_ERR_RESET_FAILED -7 +#define FM10K_ERR_INVALID_MAC_ADDR -8 +#define FM10K_ERR_INVALID_VALUE -9 +#define FM10K_NOT_IMPLEMENTED 0x7FFFFFFF + +#define UNREFERENCED_XPARAMETER +#define UNREFERENCED_1PARAMETER(_p) (_p) +#define UNREFERENCED_2PARAMETER(_p, _q) do { (_p); (_q); } while (0) +#define UNREFERENCED_3PARAMETER(_p, 
_q, _r) do { (_p); (_q); (_r); } while (0) + +/* Start of PF registers */ +#define FM10K_CTRL 0x0000 +#define FM10K_CTRL_BAR4_ALLOWED 0x00000004 + +#define FM10K_CTRL_EXT 0x0001 +#define FM10K_GCR 0x0003 +#define FM10K_GCR_EXT 0x0005 + +/* Interrupt control registers */ +#define FM10K_EICR 0x0006 +#define FM10K_EICR_FAULT_MASK 0x0000003F +#define FM10K_EICR_MAILBOX 0x00000040 +#define FM10K_EICR_SWITCHREADY 0x00000080 +#define FM10K_EICR_SWITCHNOTREADY 0x00000100 +#define FM10K_EICR_SWITCHINTERRUPT 0x00000200 +#define FM10K_EICR_VFLR 0x00000800 +#define FM10K_EICR_MAXHOLDTIME 0x00001000 +#define FM10K_EIMR 0x0007 +#define FM10K_EIMR_PCA_FAULT 0x00000001 +#define FM10K_EIMR_THI_FAULT 0x00000010 +#define FM10K_EIMR_FUM_FAULT 0x00000400 +#define FM10K_EIMR_MAILBOX 0x00001000 +#define FM10K_EIMR_SWITCHREADY 0x00004000 +#define FM10K_EIMR_SWITCHNOTREADY 0x00010000 +#define FM10K_EIMR_SWITCHINTERRUPT 0x00040000 +#define FM10K_EIMR_SRAMERROR 0x00100000 +#define FM10K_EIMR_VFLR 0x00400000 +#define FM10K_EIMR_MAXHOLDTIME 0x01000000 +#define FM10K_EIMR_ALL 0x55555555 +#define FM10K_EIMR_DISABLE(NAME) ((FM10K_EIMR_ ## NAME) << 0) +#define FM10K_EIMR_ENABLE(NAME) ((FM10K_EIMR_ ## NAME) << 1) +#define FM10K_FAULT_ADDR_LO 0x0 +#define FM10K_FAULT_ADDR_HI 0x1 +#define FM10K_FAULT_SPECINFO 0x2 +#define FM10K_FAULT_FUNC 0x3 +#define FM10K_FAULT_SIZE 0x4 +#define FM10K_FAULT_FUNC_VALID 0x00008000 +#define FM10K_FAULT_FUNC_PF 0x00004000 +#define FM10K_FAULT_FUNC_VF_MASK 0x00003F00 +#define FM10K_FAULT_FUNC_VF_SHIFT 8 +#define FM10K_FAULT_FUNC_TYPE_MASK 0x000000FF + +#define FM10K_PCA_FAULT 0x0008 +#define FM10K_THI_FAULT 0x0010 +#define FM10K_FUM_FAULT 0x001C + +/* Rx queue timeout indicator */ +#define FM10K_MAXHOLDQ(_n) ((_n) + 0x0020) + +/* Switch Manager info */ +#define FM10K_SM_AREA(_n) ((_n) + 0x0028) + +/* GLORT mapping registers */ +#define FM10K_DGLORTMAP(_n) ((_n) + 0x0030) +#define FM10K_DGLORT_COUNT 8 +#define FM10K_DGLORTMAP_MASK_SHIFT 16 +#define FM10K_DGLORTMAP_ANY 0x00000000 +#define FM10K_DGLORTMAP_NONE 0x0000FFFF +#define FM10K_DGLORTMAP_ZERO 0xFFFF0000 +#define FM10K_DGLORTDEC(_n) ((_n) + 0x0038) +#define FM10K_DGLORTDEC_VSILENGTH_SHIFT 4 +#define FM10K_DGLORTDEC_VSIBASE_SHIFT 7 +#define FM10K_DGLORTDEC_PCLENGTH_SHIFT 14 +#define FM10K_DGLORTDEC_QBASE_SHIFT 16 +#define FM10K_DGLORTDEC_RSSLENGTH_SHIFT 24 +#define FM10K_DGLORTDEC_INNERRSS_ENABLE 0x08000000 +#define FM10K_TUNNEL_CFG 0x0040 +#define FM10K_TUNNEL_CFG_NVGRE_SHIFT 16 +#define FM10K_SWPRI_MAP(_n) ((_n) + 0x0050) +#define FM10K_SWPRI_MAX 16 +#define FM10K_RSSRK(_n, _m) (((_n) * 0x10) + (_m) + 0x0800) +#define FM10K_RSSRK_SIZE 10 +#define FM10K_RSSRK_ENTRIES_PER_REG 4 +#define FM10K_RETA(_n, _m) (((_n) * 0x20) + (_m) + 0x1000) +#define FM10K_RETA_SIZE 32 +#define FM10K_RETA_ENTRIES_PER_REG 4 +#define FM10K_MAX_RSS_INDICES 128 + +/* Rate limiting registers */ +#define FM10K_TC_CREDIT(_n) ((_n) + 0x2000) +#define FM10K_TC_CREDIT_CREDIT_MASK 0x001FFFFF +#define FM10K_TC_MAXCREDIT(_n) ((_n) + 0x2040) +#define FM10K_TC_MAXCREDIT_64K 0x00010000 +#define FM10K_TC_RATE(_n) ((_n) + 0x2080) +#define FM10K_TC_RATE_QUANTA_MASK 0x0000FFFF +#define FM10K_TC_RATE_INTERVAL_4US_GEN1 0x00020000 +#define FM10K_TC_RATE_INTERVAL_4US_GEN2 0x00040000 +#define FM10K_TC_RATE_INTERVAL_4US_GEN3 0x00080000 + +/* DMA control registers */ +#define FM10K_DMA_CTRL 0x20C3 +#define FM10K_DMA_CTRL_TX_ENABLE 0x00000001 +#define FM10K_DMA_CTRL_TX_ACTIVE 0x00000008 +#define FM10K_DMA_CTRL_RX_ENABLE 0x00000010 +#define FM10K_DMA_CTRL_RX_ACTIVE 0x00000080 +#define 
FM10K_DMA_CTRL_RX_DESC_SIZE 0x00000100 +#define FM10K_DMA_CTRL_MINMSS_64 0x00008000 +#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3 0x04800000 +#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2 0x04000000 +#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1 0x03800000 +#define FM10K_DMA_CTRL_DATAPATH_RESET 0x20000000 +#define FM10K_DMA_CTRL_32_DESC 0x00000000 + +#define FM10K_DMA_CTRL2 0x20C4 +#define FM10K_DMA_CTRL2_SWITCH_READY 0x00002000 + +/* TSO flags configuration + * First packet contains all flags except for fin and psh + * Middle packet contains only urg and ack + * Last packet contains urg, ack, fin, and psh + */ +#define FM10K_TSO_FLAGS_LOW 0x00300FF6 +#define FM10K_TSO_FLAGS_HI 0x00000039 +#define FM10K_DTXTCPFLGL 0x20C5 +#define FM10K_DTXTCPFLGH 0x20C6 + +#define FM10K_TPH_CTRL 0x20C7 +#define FM10K_MRQC(_n) ((_n) + 0x2100) +#define FM10K_MRQC_TCP_IPV4 0x00000001 +#define FM10K_MRQC_IPV4 0x00000002 +#define FM10K_MRQC_IPV6 0x00000010 +#define FM10K_MRQC_TCP_IPV6 0x00000020 +#define FM10K_MRQC_UDP_IPV4 0x00000040 +#define FM10K_MRQC_UDP_IPV6 0x00000080 + +#define FM10K_TQMAP(_n) ((_n) + 0x2800) +#define FM10K_TQMAP_TABLE_SIZE 2048 +#define FM10K_RQMAP(_n) ((_n) + 0x3000) + +/* Hardware Statistics */ +#define FM10K_STATS_TIMEOUT 0x3800 +#define FM10K_STATS_UR 0x3801 +#define FM10K_STATS_CA 0x3802 +#define FM10K_STATS_UM 0x3803 +#define FM10K_STATS_XEC 0x3804 +#define FM10K_STATS_VLAN_DROP 0x3805 +#define FM10K_STATS_LOOPBACK_DROP 0x3806 +#define FM10K_STATS_NODESC_DROP 0x3807 + +/* Timesync registers */ +#define FM10K_SYSTIME 0x3814 +#define FM10K_SYSTIME_CFG 0x3818 +#define FM10K_SYSTIME_CFG_STEP_MASK 0x0000000F + +/* PCIe state registers */ +#define FM10K_PHYADDR 0x381C + +/* Rx ring registers */ +#define FM10K_RDBAL(_n) ((0x40 * (_n)) + 0x4000) +#define FM10K_RDBAH(_n) ((0x40 * (_n)) + 0x4001) +#define FM10K_RDLEN(_n) ((0x40 * (_n)) + 0x4002) +#define FM10K_TPH_RXCTRL(_n) ((0x40 * (_n)) + 0x4003) +#define FM10K_TPH_RXCTRL_DESC_TPHEN 0x00000020 +#define FM10K_TPH_RXCTRL_DESC_RROEN 0x00000200 +#define FM10K_TPH_RXCTRL_DATA_WROEN 0x00002000 +#define FM10K_TPH_RXCTRL_HDR_WROEN 0x00008000 +#define FM10K_RDH(_n) ((0x40 * (_n)) + 0x4004) +#define FM10K_RDT(_n) ((0x40 * (_n)) + 0x4005) +#define FM10K_RXQCTL(_n) ((0x40 * (_n)) + 0x4006) +#define FM10K_RXQCTL_ENABLE 0x00000001 +#define FM10K_RXQCTL_PF 0x000000FC +#define FM10K_RXQCTL_VF_SHIFT 2 +#define FM10K_RXQCTL_VF 0x00000100 +#define FM10K_RXQCTL_ID_MASK (FM10K_RXQCTL_PF | FM10K_RXQCTL_VF) +#define FM10K_RXDCTL(_n) ((0x40 * (_n)) + 0x4007) +#define FM10K_RXDCTL_WRITE_BACK_MIN_DELAY 0x00000001 +#define FM10K_RXDCTL_DROP_ON_EMPTY 0x00000200 +#define FM10K_RXINT(_n) ((0x40 * (_n)) + 0x4008) +#define FM10K_SRRCTL(_n) ((0x40 * (_n)) + 0x4009) +#define FM10K_SRRCTL_BSIZEPKT_SHIFT 8 /* shift _right_ */ +#define FM10K_SRRCTL_LOOPBACK_SUPPRESS 0x40000000 +#define FM10K_SRRCTL_BUFFER_CHAINING_EN 0x80000000 + +/* Rx Statistics */ +#define FM10K_QPRC(_n) ((0x40 * (_n)) + 0x400A) +#define FM10K_QPRDC(_n) ((0x40 * (_n)) + 0x400B) +#define FM10K_QBRC_L(_n) ((0x40 * (_n)) + 0x400C) +#define FM10K_QBRC_H(_n) ((0x40 * (_n)) + 0x400D) + +/* Rx GLORT register */ +#define FM10K_RX_SGLORT(_n) ((0x40 * (_n)) + 0x400E) + +/* Tx ring registers */ +#define FM10K_TDBAL(_n) ((0x40 * (_n)) + 0x8000) +#define FM10K_TDBAH(_n) ((0x40 * (_n)) + 0x8001) +#define FM10K_TDLEN(_n) ((0x40 * (_n)) + 0x8002) +/* When fist initialized, VFs need to know the Interrupt Throttle Rate (ITR) + * scale which is based on the PCIe speed but the speed information in the PCI + * configuration space may not 
be accurate. The PF already knows the ITR scale + * but there is no defined method to pass that information from the PF to the + * VF. This is accomplished during VF initialization by temporarily co-opting + * the yet-to-be-used TDLEN register to have the PF store the ITR shift for + * the VF to retrieve before the VF needs to use the TDLEN register for its + * intended purpose, i.e. before the Tx resources are allocated. + */ +#define FM10K_TDLEN_ITR_SCALE_SHIFT 9 +#define FM10K_TDLEN_ITR_SCALE_MASK 0x00000E00 +#define FM10K_TDLEN_ITR_SCALE_GEN1 2 +#define FM10K_TDLEN_ITR_SCALE_GEN2 1 +#define FM10K_TDLEN_ITR_SCALE_GEN3 0 +#define FM10K_TPH_TXCTRL(_n) ((0x40 * (_n)) + 0x8003) +#define FM10K_TPH_TXCTRL_DESC_TPHEN 0x00000020 +#define FM10K_TPH_TXCTRL_DESC_RROEN 0x00000200 +#define FM10K_TPH_TXCTRL_DESC_WROEN 0x00000800 +#define FM10K_TPH_TXCTRL_DATA_RROEN 0x00002000 +#define FM10K_TDH(_n) ((0x40 * (_n)) + 0x8004) +#define FM10K_TDT(_n) ((0x40 * (_n)) + 0x8005) +#define FM10K_TXDCTL(_n) ((0x40 * (_n)) + 0x8006) +#define FM10K_TXDCTL_ENABLE 0x00004000 +#define FM10K_TXDCTL_MAX_TIME_SHIFT 16 +#define FM10K_TXQCTL(_n) ((0x40 * (_n)) + 0x8007) +#define FM10K_TXQCTL_PF 0x0000003F +#define FM10K_TXQCTL_VF 0x00000040 +#define FM10K_TXQCTL_ID_MASK (FM10K_TXQCTL_PF | FM10K_TXQCTL_VF) +#define FM10K_TXQCTL_PC_SHIFT 7 +#define FM10K_TXQCTL_PC_MASK 0x00000380 +#define FM10K_TXQCTL_TC_SHIFT 10 +#define FM10K_TXQCTL_VID_SHIFT 16 +#define FM10K_TXQCTL_VID_MASK 0x0FFF0000 +#define FM10K_TXQCTL_UNLIMITED_BW 0x10000000 +#define FM10K_TXINT(_n) ((0x40 * (_n)) + 0x8008) + +/* Tx Statistics */ +#define FM10K_QPTC(_n) ((0x40 * (_n)) + 0x8009) +#define FM10K_QBTC_L(_n) ((0x40 * (_n)) + 0x800A) +#define FM10K_QBTC_H(_n) ((0x40 * (_n)) + 0x800B) + +/* Tx Push registers */ +#define FM10K_TQDLOC(_n) ((0x40 * (_n)) + 0x800C) +#define FM10K_TQDLOC_BASE_32_DESC 0x08 +#define FM10K_TQDLOC_SIZE_32_DESC 0x00050000 + +/* Tx GLORT registers */ +#define FM10K_TX_SGLORT(_n) ((0x40 * (_n)) + 0x800D) +#define FM10K_PFVTCTL(_n) ((0x40 * (_n)) + 0x800E) +#define FM10K_PFVTCTL_FTAG_DESC_ENABLE 0x00000001 + +/* Interrupt moderation and control registers */ +#define FM10K_INT_MAP(_n) ((_n) + 0x10080) +#define FM10K_INT_MAP_TIMER0 0x00000000 +#define FM10K_INT_MAP_TIMER1 0x00000100 +#define FM10K_INT_MAP_IMMEDIATE 0x00000200 +#define FM10K_INT_MAP_DISABLE 0x00000300 +#define FM10K_MSIX_VECTOR_MASK(_n) ((0x4 * (_n)) + 0x11003) +#define FM10K_INT_CTRL 0x12000 +#define FM10K_INT_CTRL_ENABLEMODERATOR 0x00000400 +#define FM10K_ITR(_n) ((_n) + 0x12400) +#define FM10K_ITR_INTERVAL1_SHIFT 12 +#define FM10K_ITR_PENDING2 0x10000000 +#define FM10K_ITR_AUTOMASK 0x20000000 +#define FM10K_ITR_MASK_SET 0x40000000 +#define FM10K_ITR_MASK_CLEAR 0x80000000 +#define FM10K_ITR2(_n) ((0x2 * (_n)) + 0x12800) +#define FM10K_ITR_REG_COUNT 768 +#define FM10K_ITR_REG_COUNT_PF 256 + +/* Switch manager interrupt registers */ +#define FM10K_IP 0x13000 +#define FM10K_IP_NOTINRESET 0x00000100 + +/* VLAN registers */ +#define FM10K_VLAN_TABLE(_n, _m) ((0x80 * (_n)) + (_m) + 0x14000) +#define FM10K_VLAN_TABLE_SIZE 128 + +/* VLAN specific message offsets */ +#define FM10K_VLAN_TABLE_VID_MAX 4096 +#define FM10K_VLAN_TABLE_VSI_MAX 64 +#define FM10K_VLAN_LENGTH_SHIFT 16 +#define FM10K_VLAN_CLEAR BIT(15) +#define FM10K_VLAN_ALL \ + ((FM10K_VLAN_TABLE_VID_MAX - 1) << FM10K_VLAN_LENGTH_SHIFT) + +/* VF FLR event notification registers */ +#define FM10K_PFVFLRE(_n) ((0x1 * (_n)) + 0x18844) +#define FM10K_PFVFLREC(_n) ((0x1 * (_n)) + 0x18846) + +/* Defines for size of 
uncacheable and write-combining memories */ +#define FM10K_UC_ADDR_START 0x000000 /* start of standard regs */ +#define FM10K_WC_ADDR_START 0x100000 /* start of Tx Desc Cache */ +#define FM10K_DBI_ADDR_START 0x200000 /* start of debug registers */ +#define FM10K_UC_ADDR_SIZE (FM10K_WC_ADDR_START - FM10K_UC_ADDR_START) +#define FM10K_WC_ADDR_SIZE (FM10K_DBI_ADDR_START - FM10K_WC_ADDR_START) + +/* Define timeouts for resets and disables */ +#define FM10K_QUEUE_DISABLE_TIMEOUT 100 +#define FM10K_RESET_TIMEOUT 150 + +/* Maximum supported combined inner and outer header length for encapsulation */ +#define FM10K_TUNNEL_HEADER_LENGTH 184 + +/* VF registers */ +#define FM10K_VFCTRL 0x00000 +#define FM10K_VFCTRL_RST 0x00000008 +#define FM10K_VFINT_MAP 0x00030 +#define FM10K_VFSYSTIME 0x00040 +#define FM10K_VFITR(_n) ((_n) + 0x00060) + +/* Registers contained in BAR 4 for Switch management */ +#define FM10K_SW_SYSTIME_ADJUST 0x0224D +#define FM10K_SW_SYSTIME_ADJUST_MASK 0x3FFFFFFF +#define FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE 0x80000000 +#define FM10K_SW_SYSTIME_PULSE(_n) ((_n) + 0x02252) + +#ifndef ETH_ALEN +#define ETH_ALEN 6 +#endif /* ETH_ALEN */ + +#ifndef FM10K_IS_ZERO_ETHER_ADDR +/* make certain address is not 0 */ +#define FM10K_IS_ZERO_ETHER_ADDR(addr) \ +(!((addr)[0] | (addr)[1] | (addr)[2] | (addr)[3] | (addr)[4] | (addr)[5])) +#endif + +#ifndef FM10K_IS_MULTICAST_ETHER_ADDR +#define FM10K_IS_MULTICAST_ETHER_ADDR(addr) ((addr)[0] & 0x1) +#endif + +#ifndef FM10K_IS_VALID_ETHER_ADDR +/* make certain address is not multicast or 0 */ +#define FM10K_IS_VALID_ETHER_ADDR(addr) \ +(!FM10K_IS_MULTICAST_ETHER_ADDR(addr) && !FM10K_IS_ZERO_ETHER_ADDR(addr)) +#endif + +enum fm10k_int_source { + fm10k_int_mailbox = 0, + fm10k_int_pcie_fault = 1, + fm10k_int_switch_up_down = 2, + fm10k_int_switch_event = 3, + fm10k_int_sram = 4, + fm10k_int_vflr = 5, + fm10k_int_max_hold_time = 6, + fm10k_int_sources_max_pf +}; + +/* PCIe bus speeds */ +enum fm10k_bus_speed { + fm10k_bus_speed_unknown = 0, + fm10k_bus_speed_2500 = 2500, + fm10k_bus_speed_5000 = 5000, + fm10k_bus_speed_8000 = 8000, + fm10k_bus_speed_reserved +}; + +/* PCIe bus widths */ +enum fm10k_bus_width { + fm10k_bus_width_unknown = 0, + fm10k_bus_width_pcie_x1 = 1, + fm10k_bus_width_pcie_x2 = 2, + fm10k_bus_width_pcie_x4 = 4, + fm10k_bus_width_pcie_x8 = 8, + fm10k_bus_width_reserved +}; + +/* PCIe payload sizes */ +enum fm10k_bus_payload { + fm10k_bus_payload_unknown = 0, + fm10k_bus_payload_128 = 1, + fm10k_bus_payload_256 = 2, + fm10k_bus_payload_512 = 3, + fm10k_bus_payload_reserved +}; + +/* Bus parameters */ +struct fm10k_bus_info { + enum fm10k_bus_speed speed; + enum fm10k_bus_width width; + enum fm10k_bus_payload payload; +}; + +/* Statistics related declarations */ +struct fm10k_hw_stat { + u64 count; + u32 base_l; + u32 base_h; +}; + +struct fm10k_hw_stats_q { + struct fm10k_hw_stat tx_bytes; + struct fm10k_hw_stat tx_packets; +#define tx_stats_idx tx_packets.base_h + struct fm10k_hw_stat rx_bytes; + struct fm10k_hw_stat rx_packets; +#define rx_stats_idx rx_packets.base_h + struct fm10k_hw_stat rx_drops; +}; + +struct fm10k_hw_stats { + struct fm10k_hw_stat timeout; +#define stats_idx timeout.base_h + struct fm10k_hw_stat ur; + struct fm10k_hw_stat ca; + struct fm10k_hw_stat um; + struct fm10k_hw_stat xec; + struct fm10k_hw_stat vlan_drop; + struct fm10k_hw_stat loopback_drop; + struct fm10k_hw_stat nodesc_drop; + struct fm10k_hw_stats_q q[FM10K_MAX_QUEUES_PF]; +}; + +/* Establish DGLORT feature priority */ +enum fm10k_dglortdec_idx { + 
fm10k_dglort_default = 0, + fm10k_dglort_vf_rsvd0 = 1, + fm10k_dglort_vf_rss = 2, + fm10k_dglort_pf_rsvd0 = 3, + fm10k_dglort_pf_queue = 4, + fm10k_dglort_pf_vsi = 5, + fm10k_dglort_pf_rsvd1 = 6, + fm10k_dglort_pf_rss = 7 +}; + +struct fm10k_dglort_cfg { + u16 glort; /* GLORT base */ + u16 queue_b; /* Base value for queue */ + u8 vsi_b; /* Base value for VSI */ + u8 idx; /* index of DGLORTDEC entry */ + u8 rss_l; /* RSS indices */ + u8 pc_l; /* Priority Class indices */ + u8 vsi_l; /* Number of bits from GLORT used to determine VSI */ + u8 queue_l; /* Number of bits from GLORT used to determine queue */ + u8 shared_l; /* Ignored bits from GLORT resulting in shared VSI */ + u8 inner_rss; /* Boolean value if inner header is used for RSS */ +}; + +enum fm10k_pca_fault { + PCA_NO_FAULT, + PCA_UNMAPPED_ADDR, + PCA_BAD_QACCESS_PF, + PCA_BAD_QACCESS_VF, + PCA_MALICIOUS_REQ, + PCA_POISONED_TLP, + PCA_TLP_ABORT, + __PCA_MAX +}; + +enum fm10k_thi_fault { + THI_NO_FAULT, + THI_MAL_DIS_Q_FAULT, + __THI_MAX +}; + +enum fm10k_fum_fault { + FUM_NO_FAULT, + FUM_UNMAPPED_ADDR, + FUM_POISONED_TLP, + FUM_BAD_VF_QACCESS, + FUM_ADD_DECODE_ERR, + FUM_RO_ERROR, + FUM_QPRC_CRC_ERROR, + FUM_CSR_TIMEOUT, + FUM_INVALID_TYPE, + FUM_INVALID_LENGTH, + FUM_INVALID_BE, + FUM_INVALID_ALIGN, + __FUM_MAX +}; + +struct fm10k_fault { + u64 address; /* Address at the time fault was detected */ + u32 specinfo; /* Extra info on this fault (fault dependent) */ + u8 type; /* Fault value dependent on subunit */ + u8 func; /* Function number of the fault */ +}; + +struct fm10k_mac_ops { + /* basic bring-up and tear-down */ + s32 (*reset_hw)(struct fm10k_hw *); + s32 (*init_hw)(struct fm10k_hw *); + s32 (*start_hw)(struct fm10k_hw *); + s32 (*stop_hw)(struct fm10k_hw *); + s32 (*get_bus_info)(struct fm10k_hw *); + s32 (*get_host_state)(struct fm10k_hw *, bool *); +#ifndef NO_IS_SLOT_APPROPRIATE_CHECK + bool (*is_slot_appropriate)(struct fm10k_hw *); +#endif + s32 (*update_vlan)(struct fm10k_hw *, u32, u8, bool); + s32 (*read_mac_addr)(struct fm10k_hw *); + s32 (*update_uc_addr)(struct fm10k_hw *, u16, const u8 *, + u16, bool, u8); + s32 (*update_mc_addr)(struct fm10k_hw *, u16, const u8 *, u16, bool); + s32 (*update_xcast_mode)(struct fm10k_hw *, u16, u8); + void (*update_int_moderator)(struct fm10k_hw *); + s32 (*update_lport_state)(struct fm10k_hw *, u16, u16, bool); + void (*update_hw_stats)(struct fm10k_hw *, struct fm10k_hw_stats *); + void (*rebind_hw_stats)(struct fm10k_hw *, struct fm10k_hw_stats *); + s32 (*configure_dglort_map)(struct fm10k_hw *, + struct fm10k_dglort_cfg *); + void (*set_dma_mask)(struct fm10k_hw *, u64); + s32 (*get_fault)(struct fm10k_hw *, int, struct fm10k_fault *); + s32 (*adjust_systime)(struct fm10k_hw *, s32 ppb); + s32 (*notify_offset)(struct fm10k_hw *, u64 offset); + u64 (*read_systime)(struct fm10k_hw *); +}; + +enum fm10k_mac_type { + fm10k_mac_unknown = 0, + fm10k_mac_pf, + fm10k_mac_vf, + fm10k_num_macs +}; + +struct fm10k_mac_info { + struct fm10k_mac_ops ops; + enum fm10k_mac_type type; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + u16 default_vid; + u16 max_msix_vectors; + u16 max_queues; + bool vlan_override; + bool get_host_state; + bool tx_ready; + u32 dglort_map; + u8 itr_scale; +}; + +struct fm10k_swapi_table_info { + u32 used; + u32 avail; +}; + +struct fm10k_swapi_info { + u32 status; + struct fm10k_swapi_table_info mac; + struct fm10k_swapi_table_info nexthop; + struct fm10k_swapi_table_info ffu; +}; + +enum fm10k_xcast_modes { + FM10K_XCAST_MODE_ALLMULTI = 0, + 
FM10K_XCAST_MODE_MULTI = 1, + FM10K_XCAST_MODE_PROMISC = 2, + FM10K_XCAST_MODE_NONE = 3, + FM10K_XCAST_MODE_DISABLE = 4 +}; + +enum fm10k_timestamp_modes { + FM10K_TIMESTAMP_MODE_NONE = 0, + FM10K_TIMESTAMP_MODE_PEP_TO_PEP = 1, + FM10K_TIMESTAMP_MODE_PEP_TO_ANY = 2, +}; + +#define FM10K_VF_TC_MAX 100000 /* 100,000 Mb/s aka 100Gb/s */ +#define FM10K_VF_TC_MIN 1 /* 1 Mb/s is the slowest rate */ + +struct fm10k_vf_info { + /* mbx must be first field in struct unless all default IOV message + * handlers are redone as the assumption is that vf_info starts + * at the same offset as the mailbox + */ + struct fm10k_mbx_info mbx; /* PF side of VF mailbox */ + int rate; /* Tx BW cap as defined by OS */ + u16 glort; /* resource tag for this VF */ + u16 sw_vid; /* Switch API assigned VLAN */ + u16 pf_vid; /* PF assigned Default VLAN */ + u8 mac[ETH_ALEN]; /* PF Default MAC address */ + u8 vsi; /* VSI identifier */ + u8 vf_idx; /* which VF this is */ + u8 vf_flags; /* flags indicating what modes + * are supported for the port + */ +}; + +#define FM10K_VF_FLAG_ALLMULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_ALLMULTI)) +#define FM10K_VF_FLAG_MULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_MULTI)) +#define FM10K_VF_FLAG_PROMISC_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_PROMISC)) +#define FM10K_VF_FLAG_NONE_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_NONE)) +#define FM10K_VF_FLAG_CAPABLE(vf_info) ((vf_info)->vf_flags & (u8)0xF) +#define FM10K_VF_FLAG_ENABLED(vf_info) ((vf_info)->vf_flags >> 4) +#define FM10K_VF_FLAG_SET_MODE(mode) ((u8)0x10 << (mode)) +#define FM10K_VF_FLAG_SET_MODE_NONE \ + FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_NONE) +#define FM10K_VF_FLAG_MULTI_ENABLED \ + (FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_ALLMULTI) | \ + FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_MULTI) | \ + FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_PROMISC)) + +struct fm10k_iov_ops { + /* IOV related bring-up and tear-down */ + s32 (*assign_resources)(struct fm10k_hw *, u16, u16); + s32 (*configure_tc)(struct fm10k_hw *, u16, int); + s32 (*assign_int_moderator)(struct fm10k_hw *, u16); + s32 (*assign_default_mac_vlan)(struct fm10k_hw *, + struct fm10k_vf_info *); + s32 (*reset_resources)(struct fm10k_hw *, + struct fm10k_vf_info *); + s32 (*set_lport)(struct fm10k_hw *, struct fm10k_vf_info *, u16, u8); + void (*reset_lport)(struct fm10k_hw *, struct fm10k_vf_info *); + void (*update_stats)(struct fm10k_hw *, struct fm10k_hw_stats_q *, u16); + void (*notify_offset)(struct fm10k_hw *, struct fm10k_vf_info*, u64); +}; + +struct fm10k_iov_info { + struct fm10k_iov_ops ops; + u16 total_vfs; + u16 num_vfs; + u16 num_pools; +}; + +struct fm10k_hw { + u32 *hw_addr; + u32 *sw_addr; + void *back; + struct fm10k_mac_info mac; + struct fm10k_bus_info bus; + struct fm10k_bus_info bus_caps; + struct fm10k_iov_info iov; + struct fm10k_mbx_info mbx; + struct fm10k_swapi_info swapi; + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + u32 flags; +#define FM10K_HW_FLAG_CLOCK_OWNER BIT(0) +}; + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define FM10K_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define FM10K_REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Transmit Descriptor */ +struct fm10k_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 buflen; /* Length of data to be DMAed */ + __le16 vlan; /* VLAN_ID and VPRI to be inserted in FTAG */ + __le16 mss; /* MSS for segmentation offload */ + u8 hdrlen; /* Header size for segmentation offload */ + u8 flags; /* Status and offload request 
flags */ +}; + +/* Transmit Descriptor Cache Structure */ +struct fm10k_tx_desc_cache { + struct fm10k_tx_desc tx_desc[256]; +}; + +#define FM10K_TXD_FLAG_INT 0x01 +#define FM10K_TXD_FLAG_TIME 0x02 +#define FM10K_TXD_FLAG_CSUM 0x04 +#define FM10K_TXD_FLAG_FTAG 0x10 +#define FM10K_TXD_FLAG_RS 0x20 +#define FM10K_TXD_FLAG_LAST 0x40 +#define FM10K_TXD_FLAG_DONE 0x80 + + +/* These macros are meant to enable optimal placement of the RS and INT + * bits. It will point us to the last descriptor in the cache for either the + * start of the packet, or the end of the packet. If the index is actually + * at the start of the FIFO it will point to the offset for the last index + * in the FIFO to prevent an unnecessary write. + */ +#define FM10K_TXD_WB_FIFO_SIZE 4 + +/* Receive Descriptor - 32B */ +union fm10k_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + __le64 reserved; /* Empty space, RSS hash */ + __le64 timestamp; + } q; /* Read, Writeback, 64b quad-words */ + struct { + __le32 data; /* RSS and header data */ + __le32 rss; /* RSS Hash */ + __le32 staterr; + __le32 vlan_len; + __le32 glort; /* sglort/dglort */ + } d; /* Writeback, 32b double-words */ + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen, xC */ + __le16 rss_lower; + __le16 rss_upper; + __le16 status; /* status/error */ + __le16 csum_err; /* checksum or extended error value */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + __le16 dglort; + __le16 sglort; + } w; /* Writeback, 16b words */ +}; + +#define FM10K_RXD_RSSTYPE_MASK 0x000F +enum fm10k_rdesc_rss_type { + FM10K_RSSTYPE_NONE = 0x0, + FM10K_RSSTYPE_IPV4_TCP = 0x1, + FM10K_RSSTYPE_IPV4 = 0x2, + FM10K_RSSTYPE_IPV6_TCP = 0x3, + /* Reserved 0x4 */ + FM10K_RSSTYPE_IPV6 = 0x5, + /* Reserved 0x6 */ + FM10K_RSSTYPE_IPV4_UDP = 0x7, + FM10K_RSSTYPE_IPV6_UDP = 0x8 + /* Reserved 0x9 - 0xF */ +}; + + +#define FM10K_RXD_HDR_INFO_XC_MASK 0x0006 +enum fm10k_rxdesc_xc { + FM10K_XC_UNICAST = 0x0, + FM10K_XC_MULTICAST = 0x4, + FM10K_XC_BROADCAST = 0x6 +}; + + +#define FM10K_RXD_STATUS_DD 0x0001 /* Descriptor done */ +#define FM10K_RXD_STATUS_EOP 0x0002 /* End of packet */ +#define FM10K_RXD_STATUS_L4CS 0x0010 /* Indicates an L4 csum */ +#define FM10K_RXD_STATUS_L4CS2 0x0040 /* Inner header L4 csum */ +#define FM10K_RXD_STATUS_L4E2 0x0800 /* Inner header L4 csum err */ +#define FM10K_RXD_STATUS_IPE2 0x1000 /* Inner header IPv4 csum err */ +#define FM10K_RXD_STATUS_RXE 0x2000 /* Generic Rx error */ +#define FM10K_RXD_STATUS_L4E 0x4000 /* L4 csum error */ +#define FM10K_RXD_STATUS_IPE 0x8000 /* IPv4 csum error */ + +#define FM10K_RXD_ERR_SWITCH_ERROR 0x0001 /* Switch found bad packet */ +#define FM10K_RXD_ERR_NO_DESCRIPTOR 0x0002 /* No descriptor available */ +#define FM10K_RXD_ERR_PP_ERROR 0x0004 /* RAM error during processing */ +#define FM10K_RXD_ERR_SWITCH_READY 0x0008 /* Link transition mid-packet */ +#define FM10K_RXD_ERR_TOO_BIG 0x0010 /* Pkt too big for single buf */ + + +struct fm10k_ftag { + __be16 swpri_type_user; + __be16 vlan; + __be16 sglort; + __be16 dglort; +}; + +#endif /* _FM10K_TYPE_H */ diff --git a/drivers/net/fm10k/base/fm10k_vf.c b/drivers/net/fm10k/base/fm10k_vf.c new file mode 100644 index 00000000..efbdbd1e --- /dev/null +++ b/drivers/net/fm10k/base/fm10k_vf.c @@ -0,0 +1,673 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#include "fm10k_vf.h" + +/** + * fm10k_stop_hw_vf - Stop Tx/Rx units + * @hw: pointer to hardware structure + * + **/ +STATIC s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) +{ + u8 *perm_addr = hw->mac.perm_addr; + u32 bal = 0, bah = 0, tdlen; + s32 err; + u16 i; + + DEBUGFUNC("fm10k_stop_hw_vf"); + + /* we need to disable the queues before taking further steps */ + err = fm10k_stop_hw_generic(hw); + if (err) + return err; + + /* If permanent address is set then we need to restore it */ + if (FM10K_IS_VALID_ETHER_ADDR(perm_addr)) { + bal = (((u32)perm_addr[3]) << 24) | + (((u32)perm_addr[4]) << 16) | + (((u32)perm_addr[5]) << 8); + bah = (((u32)0xFF) << 24) | + (((u32)perm_addr[0]) << 16) | + (((u32)perm_addr[1]) << 8) | + ((u32)perm_addr[2]); + } + + /* restore default itr_scale for next VF initialization */ + tdlen = hw->mac.itr_scale << FM10K_TDLEN_ITR_SCALE_SHIFT; + + /* The queues have already been disabled so we just need to + * update their base address registers + */ + for (i = 0; i < hw->mac.max_queues; i++) { + FM10K_WRITE_REG(hw, FM10K_TDBAL(i), bal); + FM10K_WRITE_REG(hw, FM10K_TDBAH(i), bah); + FM10K_WRITE_REG(hw, FM10K_RDBAL(i), bal); + FM10K_WRITE_REG(hw, FM10K_RDBAH(i), bah); + /* Restore ITR scale in software-defined mechanism in TDLEN + * for next VF initialization. See definition of + * FM10K_TDLEN_ITR_SCALE_SHIFT for more details on the use of + * TDLEN here. + */ + FM10K_WRITE_REG(hw, FM10K_TDLEN(i), tdlen); + } + + return FM10K_SUCCESS; +} + +/** + * fm10k_reset_hw_vf - VF hardware reset + * @hw: pointer to hardware structure + * + * This function should return the hardware to a state similar to the + * one it is in after just being initialized. 
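/*
 * Illustrative aside, not part of the patch above: fm10k_stop_hw_vf() (and
 * fm10k_read_mac_addr_vf() further down) keep the VF's permanent MAC address
 * packed into the queue base-address registers TDBAL/TDBAH. A minimal,
 * self-contained sketch of that packing and of the validity checks, with the
 * registers replaced by plain variables.
 */
#include <stdint.h>

static void demo_pack_mac(const uint8_t mac[6], uint32_t *bal, uint32_t *bah)
{
        /* low word: MAC bytes 3..5 in the upper three octets, low octet 0 */
        *bal = ((uint32_t)mac[3] << 24) | ((uint32_t)mac[4] << 16) |
               ((uint32_t)mac[5] << 8);
        /* high word: 0xFF marker octet, then MAC bytes 0..2 */
        *bah = ((uint32_t)0xFF << 24) | ((uint32_t)mac[0] << 16) |
               ((uint32_t)mac[1] << 8) | (uint32_t)mac[2];
}

static int demo_unpack_mac(uint32_t bal, uint32_t bah, uint8_t mac[6])
{
        if (bal << 24)          /* lowest octet of the low word must be zero */
                return -1;
        if ((~bah) >> 24)       /* highest octet of the high word must be all ones */
                return -1;
        mac[3] = (uint8_t)(bal >> 24);
        mac[4] = (uint8_t)(bal >> 16);
        mac[5] = (uint8_t)(bal >> 8);
        mac[0] = (uint8_t)(bah >> 16);
        mac[1] = (uint8_t)(bah >> 8);
        mac[2] = (uint8_t)bah;
        return 0;
}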
+ **/ +STATIC s32 fm10k_reset_hw_vf(struct fm10k_hw *hw) +{ + s32 err; + + DEBUGFUNC("fm10k_reset_hw_vf"); + + /* shut down queues we own and reset DMA configuration */ + err = fm10k_stop_hw_vf(hw); + if (err) + return err; + + /* Inititate VF reset */ + FM10K_WRITE_REG(hw, FM10K_VFCTRL, FM10K_VFCTRL_RST); + + /* Flush write and allow 100us for reset to complete */ + FM10K_WRITE_FLUSH(hw); + usec_delay(FM10K_RESET_TIMEOUT); + + /* Clear reset bit and verify it was cleared */ + FM10K_WRITE_REG(hw, FM10K_VFCTRL, 0); + if (FM10K_READ_REG(hw, FM10K_VFCTRL) & FM10K_VFCTRL_RST) + err = FM10K_ERR_RESET_FAILED; + + return err; +} + +/** + * fm10k_init_hw_vf - VF hardware initialization + * @hw: pointer to hardware structure + * + **/ +STATIC s32 fm10k_init_hw_vf(struct fm10k_hw *hw) +{ + u32 tqdloc, tqdloc0 = ~FM10K_READ_REG(hw, FM10K_TQDLOC(0)); + s32 err; + u16 i; + + DEBUGFUNC("fm10k_init_hw_vf"); + + /* verify we have at least 1 queue */ + if (!~FM10K_READ_REG(hw, FM10K_TXQCTL(0)) || + !~FM10K_READ_REG(hw, FM10K_RXQCTL(0))) { + err = FM10K_ERR_NO_RESOURCES; + goto reset_max_queues; + } + + /* determine how many queues we have */ + for (i = 1; tqdloc0 && (i < FM10K_MAX_QUEUES_POOL); i++) { + /* verify the Descriptor cache offsets are increasing */ + tqdloc = ~FM10K_READ_REG(hw, FM10K_TQDLOC(i)); + if (!tqdloc || (tqdloc == tqdloc0)) + break; + + /* check to verify the PF doesn't own any of our queues */ + if (!~FM10K_READ_REG(hw, FM10K_TXQCTL(i)) || + !~FM10K_READ_REG(hw, FM10K_RXQCTL(i))) + break; + } + + /* shut down queues we own and reset DMA configuration */ + err = fm10k_disable_queues_generic(hw, i); + if (err) + goto reset_max_queues; + + /* record maximum queue count */ + hw->mac.max_queues = i; + + /* fetch default VLAN and ITR scale */ + hw->mac.default_vid = (FM10K_READ_REG(hw, FM10K_TXQCTL(0)) & + FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT; + /* Read the ITR scale from TDLEN. See the definition of + * FM10K_TDLEN_ITR_SCALE_SHIFT for more information about how TDLEN is + * used here. + */ + hw->mac.itr_scale = (FM10K_READ_REG(hw, FM10K_TDLEN(0)) & + FM10K_TDLEN_ITR_SCALE_MASK) >> + FM10K_TDLEN_ITR_SCALE_SHIFT; + + return FM10K_SUCCESS; + +reset_max_queues: + hw->mac.max_queues = 0; + + return err; +} + +#ifndef NO_IS_SLOT_APPROPRIATE_CHECK +/** + * fm10k_is_slot_appropriate_vf - Indicate appropriate slot for this SKU + * @hw: pointer to hardware structure + * + * Looks at the PCIe bus info to confirm whether or not this slot can support + * the necessary bandwidth for this device. Since the VF has no control over + * the "slot" it is in, always indicate that the slot is appropriate. 
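/*
 * Illustrative aside, not part of the patch above: as the comments in
 * fm10k_init_hw_vf() note, the VF's interrupt-moderation scale is carried in
 * a software-defined bit field of the TDLEN register. A generic sketch of the
 * read-modify-write pattern involved; DEMO_ITR_SCALE_SHIFT/MASK below are
 * placeholders for illustration, not the real FM10K_TDLEN_* layout.
 */
#include <stdint.h>

#define DEMO_ITR_SCALE_SHIFT    9                               /* placeholder */
#define DEMO_ITR_SCALE_MASK     (0x7u << DEMO_ITR_SCALE_SHIFT)  /* placeholder */

static uint32_t demo_tdlen_set_itr_scale(uint32_t tdlen, uint32_t scale)
{
        tdlen &= ~DEMO_ITR_SCALE_MASK;          /* clear the field */
        tdlen |= (scale << DEMO_ITR_SCALE_SHIFT) & DEMO_ITR_SCALE_MASK;
        return tdlen;
}

static uint32_t demo_tdlen_get_itr_scale(uint32_t tdlen)
{
        return (tdlen & DEMO_ITR_SCALE_MASK) >> DEMO_ITR_SCALE_SHIFT;
}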
+ **/ +STATIC bool fm10k_is_slot_appropriate_vf(struct fm10k_hw *hw) +{ + UNREFERENCED_1PARAMETER(hw); + DEBUGFUNC("fm10k_is_slot_appropriate_vf"); + + return TRUE; +} + +#endif +/* This structure defines the attibutes to be parsed below */ +const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[] = { + FM10K_TLV_ATTR_U32(FM10K_MAC_VLAN_MSG_VLAN), + FM10K_TLV_ATTR_BOOL(FM10K_MAC_VLAN_MSG_SET), + FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_MAC), + FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_DEFAULT_MAC), + FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_MULTICAST), + FM10K_TLV_ATTR_LAST +}; + +/** + * fm10k_update_vlan_vf - Update status of VLAN ID in VLAN filter table + * @hw: pointer to hardware structure + * @vid: VLAN ID to add to table + * @vsi: Reserved, should always be 0 + * @set: Indicates if this is a set or clear operation + * + * This function adds or removes the corresponding VLAN ID from the VLAN + * filter table for this VF. + **/ +STATIC s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[4]; + + /* verify the index is not set */ + if (vsi) + return FM10K_ERR_PARAM; + + /* verify upper 4 bits of vid and length are 0 */ + if ((vid << 16 | vid) >> 28) + return FM10K_ERR_PARAM; + + /* encode set bit into the VLAN ID */ + if (!set) + vid |= FM10K_VLAN_CLEAR; + + /* generate VLAN request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN); + fm10k_tlv_attr_put_u32(msg, FM10K_MAC_VLAN_MSG_VLAN, vid); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_msg_mac_vlan_vf - Read device MAC address from mailbox message + * @hw: pointer to the HW structure + * @results: Attributes for message + * @mbx: unused mailbox data + * + * This function should determine the MAC address for the VF + **/ +s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + u8 perm_addr[ETH_ALEN]; + u16 vid; + s32 err; + + UNREFERENCED_1PARAMETER(mbx); + DEBUGFUNC("fm10k_msg_mac_vlan_vf"); + + /* record MAC address requested */ + err = fm10k_tlv_attr_get_mac_vlan( + results[FM10K_MAC_VLAN_MSG_DEFAULT_MAC], + perm_addr, &vid); + if (err) + return err; + + memcpy(hw->mac.perm_addr, perm_addr, ETH_ALEN); + hw->mac.default_vid = vid & (FM10K_VLAN_TABLE_VID_MAX - 1); + hw->mac.vlan_override = !!(vid & FM10K_VLAN_CLEAR); + + return FM10K_SUCCESS; +} + +/** + * fm10k_read_mac_addr_vf - Read device MAC address + * @hw: pointer to the HW structure + * + * This function should determine the MAC address for the VF + **/ +STATIC s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw) +{ + u8 perm_addr[ETH_ALEN]; + u32 base_addr; + + DEBUGFUNC("fm10k_read_mac_addr_vf"); + + base_addr = FM10K_READ_REG(hw, FM10K_TDBAL(0)); + + /* last byte should be 0 */ + if (base_addr << 24) + return FM10K_ERR_INVALID_MAC_ADDR; + + perm_addr[3] = (u8)(base_addr >> 24); + perm_addr[4] = (u8)(base_addr >> 16); + perm_addr[5] = (u8)(base_addr >> 8); + + base_addr = FM10K_READ_REG(hw, FM10K_TDBAH(0)); + + /* first byte should be all 1's */ + if ((~base_addr) >> 24) + return FM10K_ERR_INVALID_MAC_ADDR; + + perm_addr[0] = (u8)(base_addr >> 16); + perm_addr[1] = (u8)(base_addr >> 8); + perm_addr[2] = (u8)(base_addr); + + memcpy(hw->mac.perm_addr, perm_addr, ETH_ALEN); + memcpy(hw->mac.addr, perm_addr, ETH_ALEN); + + return FM10K_SUCCESS; +} + +/** + * fm10k_update_uc_addr_vf - Update device unicast addresses + * @hw: pointer to the HW structure + * @glort: unused + * @mac: MAC address to add/remove 
from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * @flags: flags field to indicate add and secure - unused + * + * This function is used to add or remove unicast MAC addresses for + * the VF. + **/ +STATIC s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort, + const u8 *mac, u16 vid, bool add, u8 flags) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[7]; + + DEBUGFUNC("fm10k_update_uc_addr_vf"); + + UNREFERENCED_2PARAMETER(glort, flags); + + /* verify VLAN ID is valid */ + if (vid >= FM10K_VLAN_TABLE_VID_MAX) + return FM10K_ERR_PARAM; + + /* verify MAC address is valid */ + if (!FM10K_IS_VALID_ETHER_ADDR(mac)) + return FM10K_ERR_PARAM; + + /* verify we are not locked down on the MAC address */ + if (FM10K_IS_VALID_ETHER_ADDR(hw->mac.perm_addr) && + memcmp(hw->mac.perm_addr, mac, ETH_ALEN)) + return FM10K_ERR_PARAM; + + /* add bit to notify us if this is a set or clear operation */ + if (!add) + vid |= FM10K_VLAN_CLEAR; + + /* generate VLAN request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN); + fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_MAC, mac, vid); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_update_mc_addr_vf - Update device multicast addresses + * @hw: pointer to the HW structure + * @glort: unused + * @mac: MAC address to add/remove from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * + * This function is used to add or remove multicast MAC addresses for + * the VF. + **/ +STATIC s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort, + const u8 *mac, u16 vid, bool add) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[7]; + + DEBUGFUNC("fm10k_update_uc_addr_vf"); + + UNREFERENCED_1PARAMETER(glort); + + /* verify VLAN ID is valid */ + if (vid >= FM10K_VLAN_TABLE_VID_MAX) + return FM10K_ERR_PARAM; + + /* verify multicast address is valid */ + if (!FM10K_IS_MULTICAST_ETHER_ADDR(mac)) + return FM10K_ERR_PARAM; + + /* add bit to notify us if this is a set or clear operation */ + if (!add) + vid |= FM10K_VLAN_CLEAR; + + /* generate VLAN request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN); + fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_MULTICAST, + mac, vid); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_update_int_moderator_vf - Request update of interrupt moderator list + * @hw: pointer to hardware structure + * + * This function will issue a request to the PF to rescan our MSI-X table + * and to update the interrupt moderator linked list. 
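/*
 * Illustrative aside, not part of the patch above: fm10k_update_vlan_vf()
 * validates its 32-bit VLAN word with the single test
 * ((vid << 16 | vid) >> 28), which rejects the word if bits 12..15 of either
 * 16-bit half are set (per its comment, both the length and the VLAN ID must
 * fit in 12 bits). A standalone sketch showing the equivalent spelled-out
 * check.
 */
#include <assert.h>
#include <stdint.h>

static int demo_vid_word_ok_compact(uint32_t v)
{
        return ((v << 16 | v) >> 28) == 0;
}

static int demo_vid_word_ok_verbose(uint32_t v)
{
        uint32_t upper = v >> 16;       /* length half  */
        uint32_t lower = v & 0xFFFF;    /* VLAN ID half */

        return upper < 4096 && lower < 4096;
}

static void demo_vid_word_check(void)
{
        assert(demo_vid_word_ok_compact(0x00010FFEu));  /* both halves < 4096 */
        assert(!demo_vid_word_ok_compact(0x00001000u)); /* VLAN ID too large  */
        assert(!demo_vid_word_ok_compact(0x10000001u)); /* length too large   */
        assert(demo_vid_word_ok_compact(0x0FFF0FFFu) ==
               demo_vid_word_ok_verbose(0x0FFF0FFFu));
}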
+ **/ +STATIC void fm10k_update_int_moderator_vf(struct fm10k_hw *hw) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[1]; + + /* generate MSI-X request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MSIX); + + /* load onto outgoing mailbox */ + mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/* This structure defines the attibutes to be parsed below */ +const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[] = { + FM10K_TLV_ATTR_BOOL(FM10K_LPORT_STATE_MSG_DISABLE), + FM10K_TLV_ATTR_U8(FM10K_LPORT_STATE_MSG_XCAST_MODE), + FM10K_TLV_ATTR_BOOL(FM10K_LPORT_STATE_MSG_READY), + FM10K_TLV_ATTR_LAST +}; + +/** + * fm10k_msg_lport_state_vf - Message handler for lport_state message from PF + * @hw: Pointer to hardware structure + * @results: pointer array containing parsed data + * @mbx: Pointer to mailbox information structure + * + * This handler is meant to capture the indication from the PF that we + * are ready to bring up the interface. + **/ +s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + UNREFERENCED_1PARAMETER(mbx); + DEBUGFUNC("fm10k_msg_lport_state_vf"); + + hw->mac.dglort_map = !results[FM10K_LPORT_STATE_MSG_READY] ? + FM10K_DGLORTMAP_NONE : FM10K_DGLORTMAP_ZERO; + + return FM10K_SUCCESS; +} + +/** + * fm10k_update_lport_state_vf - Update device state in lower device + * @hw: pointer to the HW structure + * @glort: unused + * @count: number of logical ports to enable - unused (always 1) + * @enable: boolean value indicating if this is an enable or disable request + * + * Notify the lower device of a state change. If the lower device is + * enabled we can add filters, if it is disabled all filters for this + * logical port are flushed. + **/ +STATIC s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort, + u16 count, bool enable) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[2]; + + UNREFERENCED_2PARAMETER(glort, count); + DEBUGFUNC("fm10k_update_lport_state_vf"); + + /* reset glort mask 0 as we have to wait to be enabled */ + hw->mac.dglort_map = FM10K_DGLORTMAP_NONE; + + /* generate port state request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE); + if (!enable) + fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_DISABLE); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_update_xcast_mode_vf - Request update of multicast mode + * @hw: pointer to hardware structure + * @glort: unused + * @mode: integer value indicating mode being requested + * + * This function will attempt to request a higher mode for the port + * so that it can enable either multicast, multicast promiscuous, or + * promiscuous mode of operation. 
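/*
 * Illustrative aside, not part of the patch above: the FM10K_VF_FLAG_* macros
 * defined earlier in fm10k_type.h pack two pieces of state into the single
 * vf_flags byte - the low nibble holds one capability bit per xcast mode and
 * the high nibble records the mode(s) currently enabled. A small sketch of
 * that convention using local copies of the macros.
 */
#include <assert.h>
#include <stdint.h>

#define DEMO_BIT(n)                     (1u << (n))
#define DEMO_XCAST_MODE_MULTI           1
#define DEMO_XCAST_MODE_NONE            3
#define DEMO_FLAG_CAPABLE(flags)        ((flags) & 0x0F)
#define DEMO_FLAG_ENABLED(flags)        ((flags) >> 4)
#define DEMO_FLAG_SET_MODE(mode)        ((uint8_t)(0x10 << (mode)))

static void demo_vf_flags(void)
{
        /* VF is capable of "multi" and "none"; "none" is currently enabled */
        uint8_t flags = (uint8_t)(DEMO_BIT(DEMO_XCAST_MODE_MULTI) |
                                  DEMO_BIT(DEMO_XCAST_MODE_NONE));

        flags |= DEMO_FLAG_SET_MODE(DEMO_XCAST_MODE_NONE);

        assert(DEMO_FLAG_CAPABLE(flags) == 0x0A);       /* bits 1 and 3 */
        assert(DEMO_FLAG_ENABLED(flags) == DEMO_BIT(DEMO_XCAST_MODE_NONE));
}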
+ **/ +STATIC s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[3]; + + UNREFERENCED_1PARAMETER(glort); + DEBUGFUNC("fm10k_update_xcast_mode_vf"); + + if (mode > FM10K_XCAST_MODE_NONE) + return FM10K_ERR_PARAM; + + /* generate message requesting to change xcast mode */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE); + fm10k_tlv_attr_put_u8(msg, FM10K_LPORT_STATE_MSG_XCAST_MODE, mode); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +const struct fm10k_tlv_attr fm10k_1588_msg_attr[] = { + FM10K_TLV_ATTR_U64(FM10K_1588_MSG_CLK_OFFSET), + FM10K_TLV_ATTR_LAST +}; + +/* currently there is no shared 1588 message handler */ + +/** + * fm10k_update_hw_stats_vf - Updates hardware related statistics of VF + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * + * This function collects and aggregates per queue hardware statistics. + **/ +STATIC void fm10k_update_hw_stats_vf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats) +{ + DEBUGFUNC("fm10k_update_hw_stats_vf"); + + fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues); +} + +/** + * fm10k_rebind_hw_stats_vf - Resets base for hardware statistics of VF + * @hw: pointer to hardware structure + * @stats: pointer to the stats structure to update + * + * This function resets the base for queue hardware statistics. + **/ +STATIC void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats) +{ + DEBUGFUNC("fm10k_rebind_hw_stats_vf"); + + /* Unbind Queue Statistics */ + fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues); + + /* Reinitialize bases for all stats */ + fm10k_update_hw_stats_vf(hw, stats); +} + +/** + * fm10k_configure_dglort_map_vf - Configures GLORT entry and queues + * @hw: pointer to hardware structure + * @dglort: pointer to dglort configuration structure + * + * Reads the configuration structure contained in dglort_cfg and uses + * that information to then populate a DGLORTMAP/DEC entry and the queues + * to which it has been assigned. + **/ +STATIC s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw, + struct fm10k_dglort_cfg *dglort) +{ + UNREFERENCED_1PARAMETER(hw); + DEBUGFUNC("fm10k_configure_dglort_map_vf"); + + /* verify the dglort pointer */ + if (!dglort) + return FM10K_ERR_PARAM; + + /* stub for now until we determine correct message for this */ + + return FM10K_SUCCESS; +} + +/** + * fm10k_adjust_systime_vf - Adjust systime frequency + * @hw: pointer to hardware structure + * @ppb: adjustment rate in parts per billion + * + * This function takes an adjustment rate in parts per billion and will + * verify that this value is 0 as the VF cannot support adjusting the + * systime clock. + * + * If the ppb value is non-zero the return is ERR_PARAM else success + **/ +STATIC s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb) +{ + UNREFERENCED_1PARAMETER(hw); + DEBUGFUNC("fm10k_adjust_systime_vf"); + + /* The VF cannot adjust the clock frequency, however it should + * already have a syntonic clock with whichever host interface is + * running as the master for the host interface clock domain so + * there should be not frequency adjustment necessary. + */ + return ppb ? FM10K_ERR_PARAM : FM10K_SUCCESS; +} + +/** + * fm10k_read_systime_vf - Reads value of systime registers + * @hw: pointer to the hardware structure + * + * Function reads the content of 2 registers, combined to represent a 64 bit + * value measured in nanoseconds. 
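/*
 * Illustrative aside, not part of the patch above: fm10k_read_systime_vf()
 * (just below) builds a 64-bit nanosecond value from two 32-bit registers and
 * re-reads the high half until it is stable, so a carry between the two reads
 * cannot produce a torn value. The same pattern on plain variables, with the
 * "registers" modelled as volatile words that a timer might update.
 */
#include <stdint.h>

static volatile uint32_t demo_time_lo, demo_time_hi;

static uint64_t demo_read_time64(void)
{
        uint32_t lo, hi, hi_prev;

        hi = demo_time_hi;
        do {
                hi_prev = hi;
                lo = demo_time_lo;      /* low half between two high reads */
                hi = demo_time_hi;
        } while (hi_prev != hi);        /* retry if the high half rolled over */

        return ((uint64_t)hi << 32) | lo;
}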
In order to guarantee the value is accurate + * we check the 32 most significant bits both before and after reading the + * 32 least significant bits to verify they didn't change as we were reading + * the registers. + **/ +static u64 fm10k_read_systime_vf(struct fm10k_hw *hw) +{ + u32 systime_l, systime_h, systime_tmp; + + systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1); + + do { + systime_tmp = systime_h; + systime_l = fm10k_read_reg(hw, FM10K_VFSYSTIME); + systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1); + } while (systime_tmp != systime_h); + + return ((u64)systime_h << 32) | systime_l; +} + +static const struct fm10k_msg_data fm10k_msg_data_vf[] = { + FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test), + FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf), + FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf), + FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), +}; + +/** + * fm10k_init_ops_vf - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for VF. + * Does not touch the hardware. + **/ +s32 fm10k_init_ops_vf(struct fm10k_hw *hw) +{ + struct fm10k_mac_info *mac = &hw->mac; + + DEBUGFUNC("fm10k_init_ops_vf"); + + fm10k_init_ops_generic(hw); + + mac->ops.reset_hw = &fm10k_reset_hw_vf; + mac->ops.init_hw = &fm10k_init_hw_vf; + mac->ops.start_hw = &fm10k_start_hw_generic; + mac->ops.stop_hw = &fm10k_stop_hw_vf; +#ifndef NO_IS_SLOT_APPROPRIATE_CHECK + mac->ops.is_slot_appropriate = &fm10k_is_slot_appropriate_vf; +#endif + mac->ops.update_vlan = &fm10k_update_vlan_vf; + mac->ops.read_mac_addr = &fm10k_read_mac_addr_vf; + mac->ops.update_uc_addr = &fm10k_update_uc_addr_vf; + mac->ops.update_mc_addr = &fm10k_update_mc_addr_vf; + mac->ops.update_xcast_mode = &fm10k_update_xcast_mode_vf; + mac->ops.update_int_moderator = &fm10k_update_int_moderator_vf; + mac->ops.update_lport_state = &fm10k_update_lport_state_vf; + mac->ops.update_hw_stats = &fm10k_update_hw_stats_vf; + mac->ops.rebind_hw_stats = &fm10k_rebind_hw_stats_vf; + mac->ops.configure_dglort_map = &fm10k_configure_dglort_map_vf; + mac->ops.get_host_state = &fm10k_get_host_state_generic; + mac->ops.adjust_systime = &fm10k_adjust_systime_vf; + mac->ops.read_systime = &fm10k_read_systime_vf; + + mac->max_msix_vectors = fm10k_get_pcie_msix_count_generic(hw); + + return fm10k_pfvf_mbx_init(hw, &hw->mbx, fm10k_msg_data_vf, 0); +} diff --git a/drivers/net/fm10k/base/fm10k_vf.h b/drivers/net/fm10k/base/fm10k_vf.h new file mode 100644 index 00000000..116c56fc --- /dev/null +++ b/drivers/net/fm10k/base/fm10k_vf.h @@ -0,0 +1,92 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _FM10K_VF_H_ +#define _FM10K_VF_H_ + +#include "fm10k_type.h" +#include "fm10k_common.h" + +enum fm10k_vf_tlv_msg_id { + FM10K_VF_MSG_ID_TEST = 0, /* msg ID reserved for testing */ + FM10K_VF_MSG_ID_MSIX, + FM10K_VF_MSG_ID_MAC_VLAN, + FM10K_VF_MSG_ID_LPORT_STATE, + FM10K_VF_MSG_ID_1588, + FM10K_VF_MSG_ID_MAX, +}; + +enum fm10k_tlv_mac_vlan_attr_id { + FM10K_MAC_VLAN_MSG_VLAN, + FM10K_MAC_VLAN_MSG_SET, + FM10K_MAC_VLAN_MSG_MAC, + FM10K_MAC_VLAN_MSG_DEFAULT_MAC, + FM10K_MAC_VLAN_MSG_MULTICAST, + FM10K_MAC_VLAN_MSG_ID_MAX +}; + +enum fm10k_tlv_lport_state_attr_id { + FM10K_LPORT_STATE_MSG_DISABLE, + FM10K_LPORT_STATE_MSG_XCAST_MODE, + FM10K_LPORT_STATE_MSG_READY, + FM10K_LPORT_STATE_MSG_MAX +}; + +enum fm10k_tlv_1588_attr_id { + FM10K_1588_MSG_TIMESTAMP = 0, /* deprecated */ + FM10K_1588_MSG_CLK_OFFSET, + FM10K_1588_MSG_MAX +}; + +#define FM10K_VF_MSG_MSIX_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MSIX, NULL, func) + +s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); +extern const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[]; +#define FM10K_VF_MSG_MAC_VLAN_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MAC_VLAN, \ + fm10k_mac_vlan_msg_attr, func) + +s32 fm10k_msg_lport_state_vf(struct fm10k_hw *, u32 **, + struct fm10k_mbx_info *); +extern const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[]; +#define FM10K_VF_MSG_LPORT_STATE_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_LPORT_STATE, \ + fm10k_lport_state_msg_attr, func) + +extern const struct fm10k_tlv_attr fm10k_1588_msg_attr[]; +#define FM10K_VF_MSG_1588_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_1588, fm10k_1588_msg_attr, func) + +s32 fm10k_init_ops_vf(struct fm10k_hw *hw); +#endif /* _FM10K_VF_H */ diff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h new file mode 100644 index 00000000..05aa1a25 --- /dev/null +++ b/drivers/net/fm10k/fm10k.h @@ -0,0 +1,370 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _FM10K_H_ +#define _FM10K_H_ + +#include <stdint.h> +#include <rte_mbuf.h> +#include <rte_mempool.h> +#include <rte_malloc.h> +#include <rte_spinlock.h> +#include "fm10k_logs.h" +#include "base/fm10k_type.h" + +/* descriptor ring base addresses must be aligned to the following */ +#define FM10K_ALIGN_RX_DESC 128 +#define FM10K_ALIGN_TX_DESC 128 + +/* The maximum packet size that FM10K supports */ +#define FM10K_MAX_PKT_SIZE (15 * 1024) + +/* Minimum size of RX buffer FM10K supported */ +#define FM10K_MIN_RX_BUF_SIZE 256 + +/* The maximum of SRIOV VFs per port supported */ +#define FM10K_MAX_VF_NUM 64 + +/* number of descriptors must be a multiple of the following */ +#define FM10K_MULT_RX_DESC FM10K_REQ_RX_DESCRIPTOR_MULTIPLE +#define FM10K_MULT_TX_DESC FM10K_REQ_TX_DESCRIPTOR_MULTIPLE + +/* maximum size of descriptor rings */ +#define FM10K_MAX_RX_RING_SZ (512 * 1024) +#define FM10K_MAX_TX_RING_SZ (512 * 1024) + +/* minimum and maximum number of descriptors in a ring */ +#define FM10K_MIN_RX_DESC 32 +#define FM10K_MIN_TX_DESC 32 +#define FM10K_MAX_RX_DESC (FM10K_MAX_RX_RING_SZ / sizeof(union fm10k_rx_desc)) +#define FM10K_MAX_TX_DESC (FM10K_MAX_TX_RING_SZ / sizeof(struct fm10k_tx_desc)) + +/* + * byte aligment for HW RX data buffer + * Datasheet requires RX buffer addresses shall either be 512-byte aligned or + * be 8-byte aligned but without crossing host memory pages (4KB alignment + * boundaries). Satisfy first option. 
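/*
 * Illustrative aside, not part of the patch above: the comment above gives the
 * two acceptable Rx buffer placements (512-byte aligned, or 8-byte aligned
 * without crossing a 4 KB boundary); fm10k_addr_alignment_valid() later in
 * this header enforces the same rule for a maximum-size Ethernet frame. A
 * standalone version of the rule that takes the buffer length explicitly.
 */
#include <stdint.h>

static int demo_rx_buf_addr_ok(uint64_t addr, uint64_t len)
{
        if (len == 0)                           /* assume a non-empty buffer */
                return 0;

        if ((addr & 511) == 0)                  /* 512-byte aligned */
                return 1;

        /* 8-byte aligned and the whole buffer stays inside one 4 KB page */
        if ((addr & 7) == 0 &&
            (addr / 4096) == ((addr + len - 1) / 4096))
                return 1;

        return 0;
}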
+ */ +#define FM10K_RX_DATABUF_ALIGN 512 + +/* + * threshold default, min, max, and divisor constraints + * the configured values must satisfy the following: + * MIN <= value <= MAX + * DIV % value == 0 + */ +#define FM10K_RX_FREE_THRESH_DEFAULT(rxq) 32 +#define FM10K_RX_FREE_THRESH_MIN(rxq) 1 +#define FM10K_RX_FREE_THRESH_MAX(rxq) ((rxq)->nb_desc - 1) +#define FM10K_RX_FREE_THRESH_DIV(rxq) ((rxq)->nb_desc) + +#define FM10K_TX_FREE_THRESH_DEFAULT(txq) 32 +#define FM10K_TX_FREE_THRESH_MIN(txq) 1 +#define FM10K_TX_FREE_THRESH_MAX(txq) ((txq)->nb_desc - 3) +#define FM10K_TX_FREE_THRESH_DIV(txq) 0 + +#define FM10K_DEFAULT_RX_PTHRESH 8 +#define FM10K_DEFAULT_RX_HTHRESH 8 +#define FM10K_DEFAULT_RX_WTHRESH 0 + +#define FM10K_DEFAULT_TX_PTHRESH 32 +#define FM10K_DEFAULT_TX_HTHRESH 0 +#define FM10K_DEFAULT_TX_WTHRESH 0 + +#define FM10K_TX_RS_THRESH_DEFAULT(txq) 32 +#define FM10K_TX_RS_THRESH_MIN(txq) 1 +#define FM10K_TX_RS_THRESH_MAX(txq) \ + RTE_MIN(((txq)->nb_desc - 2), (txq)->free_thresh) +#define FM10K_TX_RS_THRESH_DIV(txq) ((txq)->nb_desc) + +#define FM10K_VLAN_TAG_SIZE 4 + +/* Maximum number of MAC addresses per PF/VF */ +#define FM10K_MAX_MACADDR_NUM 64 + +#define FM10K_UINT32_BIT_SIZE (CHAR_BIT * sizeof(uint32_t)) +#define FM10K_VFTA_SIZE (4096 / FM10K_UINT32_BIT_SIZE) + +/* vlan_id is a 12 bit number. + * The VFTA array is actually a 4096 bit array, 128 of 32bit elements. + * 2^5 = 32. The val of lower 5 bits specifies the bit in the 32bit element. + * The higher 7 bit val specifies VFTA array index. + */ +#define FM10K_VFTA_BIT(vlan_id) (1 << ((vlan_id) & 0x1F)) +#define FM10K_VFTA_IDX(vlan_id) ((vlan_id) >> 5) + +#define RTE_FM10K_RXQ_REARM_THRESH 32 +#define RTE_FM10K_VPMD_TX_BURST 32 +#define RTE_FM10K_MAX_RX_BURST RTE_FM10K_RXQ_REARM_THRESH +#define RTE_FM10K_TX_MAX_FREE_BUF_SZ 64 +#define RTE_FM10K_DESCS_PER_LOOP 4 + +#define FM10K_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET +#define FM10K_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET + +#define FM10K_SIMPLE_TX_FLAG ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \ + ETH_TXQ_FLAGS_NOOFFLOADS) + +struct fm10k_macvlan_filter_info { + uint16_t vlan_num; /* Total VLAN number */ + uint16_t mac_num; /* Total mac number */ + uint16_t nb_queue_pools; /* Active queue pools number */ + /* VMDQ ID for each MAC address */ + uint8_t mac_vmdq_id[FM10K_MAX_MACADDR_NUM]; + uint32_t vfta[FM10K_VFTA_SIZE]; /* VLAN bitmap */ +}; + +struct fm10k_dev_info { + volatile uint32_t enable; + volatile uint32_t glort; + /* Protect the mailbox to avoid race condition */ + rte_spinlock_t mbx_lock; + struct fm10k_macvlan_filter_info macvlan; + /* Flag to indicate if RX vector conditions satisfied */ + bool rx_vec_allowed; +}; + +/* + * Structure to store private data for each driver instance. + */ +struct fm10k_adapter { + struct fm10k_hw hw; + struct fm10k_hw_stats stats; + struct fm10k_dev_info info; +}; + +#define FM10K_DEV_PRIVATE_TO_HW(adapter) \ + (&((struct fm10k_adapter *)adapter)->hw) + +#define FM10K_DEV_PRIVATE_TO_STATS(adapter) \ + (&((struct fm10k_adapter *)adapter)->stats) + +#define FM10K_DEV_PRIVATE_TO_INFO(adapter) \ + (&((struct fm10k_adapter *)adapter)->info) + +#define FM10K_DEV_PRIVATE_TO_MBXLOCK(adapter) \ + (&(((struct fm10k_adapter *)adapter)->info.mbx_lock)) + +#define FM10K_DEV_PRIVATE_TO_MACVLAN(adapter) \ + (&(((struct fm10k_adapter *)adapter)->info.macvlan)) + +struct fm10k_rx_queue { + struct rte_mempool *mp; + struct rte_mbuf **sw_ring; + volatile union fm10k_rx_desc *hw_ring; + struct rte_mbuf *pkt_first_seg; /* First segment of current packet. 
*/ + struct rte_mbuf *pkt_last_seg; /* Last segment of current packet. */ + uint64_t hw_ring_phys_addr; + uint64_t mbuf_initializer; /* value to init mbufs */ + /* need to alloc dummy mbuf, for wraparound when scanning hw ring */ + struct rte_mbuf fake_mbuf; + uint16_t next_dd; + uint16_t next_alloc; + uint16_t next_trigger; + uint16_t alloc_thresh; + volatile uint32_t *tail_ptr; + uint16_t nb_desc; + /* Number of faked desc added at the tail for Vector RX function */ + uint16_t nb_fake_desc; + uint16_t queue_id; + /* Below 2 fields only valid in case vPMD is applied. */ + uint16_t rxrearm_nb; /* number of remaining to be re-armed */ + uint16_t rxrearm_start; /* the idx we start the re-arming from */ + uint16_t rx_using_sse; /* indicates that vector RX is in use */ + uint8_t port_id; + uint8_t drop_en; + uint8_t rx_deferred_start; /* don't start this queue in dev start. */ + uint16_t rx_ftag_en; /* indicates FTAG RX supported */ +}; + +/* + * a FIFO is used to track which descriptors have their RS bit set for Tx + * queues which are configured to allow multiple descriptors per packet + */ +struct fifo { + uint16_t *list; + uint16_t *head; + uint16_t *tail; + uint16_t *endp; +}; + +struct fm10k_txq_ops; + +struct fm10k_tx_queue { + struct rte_mbuf **sw_ring; + struct fm10k_tx_desc *hw_ring; + uint64_t hw_ring_phys_addr; + struct fifo rs_tracker; + const struct fm10k_txq_ops *ops; /* txq ops */ + uint16_t last_free; + uint16_t next_free; + uint16_t nb_free; + uint16_t nb_used; + uint16_t free_thresh; + uint16_t rs_thresh; + /* Below 2 fields only valid in case vPMD is applied. */ + uint16_t next_rs; /* Next pos to set RS flag */ + uint16_t next_dd; /* Next pos to check DD flag */ + volatile uint32_t *tail_ptr; + uint32_t txq_flags; /* Holds flags for this TXq */ + uint16_t nb_desc; + uint8_t port_id; + uint8_t tx_deferred_start; /** don't start this queue in dev start. */ + uint16_t queue_id; + uint16_t tx_ftag_en; /* indicates FTAG TX supported */ +}; + +struct fm10k_txq_ops { + void (*reset)(struct fm10k_tx_queue *txq); +}; + +#define MBUF_DMA_ADDR(mb) \ + ((uint64_t) ((mb)->buf_physaddr + (mb)->data_off)) + +/* enforce 512B alignment on default Rx DMA addresses */ +#define MBUF_DMA_ADDR_DEFAULT(mb) \ + ((uint64_t) RTE_ALIGN(((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM),\ + FM10K_RX_DATABUF_ALIGN)) + +static inline void fifo_reset(struct fifo *fifo, uint32_t len) +{ + fifo->head = fifo->tail = fifo->list; + fifo->endp = fifo->list + len; +} + +static inline void fifo_insert(struct fifo *fifo, uint16_t val) +{ + *fifo->head = val; + if (++fifo->head == fifo->endp) + fifo->head = fifo->list; +} + +/* do not worry about list being empty since we only check it once we know + * we have used enough descriptors to set the RS bit at least once */ +static inline uint16_t fifo_peek(struct fifo *fifo) +{ + return *fifo->tail; +} + +static inline uint16_t fifo_remove(struct fifo *fifo) +{ + uint16_t val; + val = *fifo->tail; + if (++fifo->tail == fifo->endp) + fifo->tail = fifo->list; + return val; +} + +static inline void +fm10k_pktmbuf_reset(struct rte_mbuf *mb, uint8_t in_port) +{ + rte_mbuf_refcnt_set(mb, 1); + mb->next = NULL; + mb->nb_segs = 1; + + /* enforce 512B alignment on default Rx virtual addresses */ + mb->data_off = (uint16_t)(RTE_PTR_ALIGN((char *)mb->buf_addr + + RTE_PKTMBUF_HEADROOM, FM10K_RX_DATABUF_ALIGN) + - (char *)mb->buf_addr); + mb->port = in_port; +} + +/* + * Verify Rx packet buffer alignment is valid. + * + * Hardware requires specific alignment for Rx packet buffers. 
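/*
 * Illustrative aside, not part of the patch above: the fifo helpers defined in
 * this header track which Tx descriptors had their RS bit set on queues that
 * allow multiple descriptors per packet. A minimal standalone use of the same
 * structure and helpers with a locally allocated index list.
 */
#include <assert.h>
#include <stdint.h>

struct demo_fifo {
        uint16_t *list;
        uint16_t *head;
        uint16_t *tail;
        uint16_t *endp;
};

static void demo_fifo_reset(struct demo_fifo *f, uint16_t *storage, uint32_t len)
{
        f->list = storage;
        f->head = f->tail = f->list;
        f->endp = f->list + len;
}

static void demo_fifo_insert(struct demo_fifo *f, uint16_t val)
{
        *f->head = val;
        if (++f->head == f->endp)
                f->head = f->list;      /* wrap around */
}

static uint16_t demo_fifo_remove(struct demo_fifo *f)
{
        uint16_t val = *f->tail;

        if (++f->tail == f->endp)
                f->tail = f->list;      /* wrap around */
        return val;
}

static void demo_fifo_usage(void)
{
        uint16_t storage[4];
        struct demo_fifo f;

        demo_fifo_reset(&f, storage, 4);
        demo_fifo_insert(&f, 31);       /* descriptor index that got RS set */
        demo_fifo_insert(&f, 63);
        assert(demo_fifo_remove(&f) == 31);
        assert(demo_fifo_remove(&f) == 63);
}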
At + * least one of the following two conditions must be satisfied. + * 1. Address is 512B aligned + * 2. Address is 8B aligned and buffer does not cross 4K boundary. + * + * Return 1 if buffer alignment satisfies at least one condition, + * otherwise return 0. + * + * Note: Alignment is checked by the driver when the Rx queue is reset. It + * is assumed that if an entire descriptor ring can be filled with + * buffers containing valid alignment, then all buffers in that mempool + * have valid address alignment. It is the responsibility of the user + * to ensure all buffers have valid alignment, as it is the user who + * creates the mempool. + * Note: It is assumed the buffer needs only to store a maximum size Ethernet + * frame. + */ +static inline int +fm10k_addr_alignment_valid(struct rte_mbuf *mb) +{ + uint64_t addr = MBUF_DMA_ADDR_DEFAULT(mb); + uint64_t boundary1, boundary2; + + /* 512B aligned? */ + if (RTE_ALIGN(addr, FM10K_RX_DATABUF_ALIGN) == addr) + return 1; + + /* 8B aligned, and max Ethernet frame would not cross a 4KB boundary? */ + if (RTE_ALIGN(addr, 8) == addr) { + boundary1 = RTE_ALIGN_FLOOR(addr, 4096); + boundary2 = RTE_ALIGN_FLOOR(addr + ETHER_MAX_VLAN_FRAME_LEN, + 4096); + if (boundary1 == boundary2) + return 1; + } + + PMD_INIT_LOG(ERR, "Error: Invalid buffer alignment!"); + + return 0; +} + +/* Rx and Tx prototypes */ +uint16_t fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +uint16_t fm10k_recv_scattered_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts); + +int +fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset); + +uint16_t fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +int fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq); +int fm10k_rx_vec_condition_check(struct rte_eth_dev *); +void fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq); +uint16_t fm10k_recv_pkts_vec(void *, struct rte_mbuf **, uint16_t); +uint16_t fm10k_recv_scattered_pkts_vec(void *, struct rte_mbuf **, + uint16_t); +uint16_t fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +void fm10k_txq_vec_setup(struct fm10k_tx_queue *txq); +int fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq); + +#endif diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c new file mode 100644 index 00000000..c2d377f1 --- /dev/null +++ b/drivers/net/fm10k/fm10k_ethdev.c @@ -0,0 +1,3058 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <rte_ethdev.h> +#include <rte_malloc.h> +#include <rte_memzone.h> +#include <rte_string_fns.h> +#include <rte_dev.h> +#include <rte_spinlock.h> +#include <rte_kvargs.h> + +#include "fm10k.h" +#include "base/fm10k_api.h" + +/* Default delay to acquire mailbox lock */ +#define FM10K_MBXLOCK_DELAY_US 20 +#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL + +#define MAIN_VSI_POOL_NUMBER 0 + +/* Max try times to acquire switch status */ +#define MAX_QUERY_SWITCH_STATE_TIMES 10 +/* Wait interval to get switch status */ +#define WAIT_SWITCH_MSG_US 100000 +/* Number of chars per uint32 type */ +#define CHARS_PER_UINT32 (sizeof(uint32_t)) +#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1) + +/* default 1:1 map from queue ID to interrupt vector ID */ +#define Q2V(dev, queue_id) (dev->pci_dev->intr_handle.intr_vec[queue_id]) + +/* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */ +#define MAX_LPORT_NUM 128 +#define GLORT_FD_Q_BASE 0x40 +#define GLORT_PF_MASK 0xFFC0 +#define GLORT_FD_MASK GLORT_PF_MASK +#define GLORT_FD_INDEX GLORT_FD_Q_BASE + +static void fm10k_close_mbx_service(struct fm10k_hw *hw); +static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev); +static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev); +static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev); +static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev); +static inline int fm10k_glort_valid(struct fm10k_hw *hw); +static int +fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); +static void fm10k_MAC_filter_set(struct rte_eth_dev *dev, + const u8 *mac, bool add, uint32_t pool); +static void fm10k_tx_queue_release(void *queue); +static void fm10k_rx_queue_release(void *queue); +static void fm10k_set_rx_function(struct rte_eth_dev *dev); +static void fm10k_set_tx_function(struct rte_eth_dev *dev); +static int fm10k_check_ftag(struct rte_devargs *devargs); + +struct fm10k_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + unsigned offset; +}; + +struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = { + {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)}, + {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)}, + {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)}, + {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)}, + {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)}, + {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)}, + {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)}, + {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats, + nodesc_drop)}, +}; + +#define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \ + 
sizeof(fm10k_hw_stats_strings[0])) + +struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = { + {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)}, + {"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)}, + {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)}, +}; + +#define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \ + sizeof(fm10k_hw_stats_rx_q_strings[0])) + +struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = { + {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)}, + {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)}, +}; + +#define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \ + sizeof(fm10k_hw_stats_tx_q_strings[0])) + +#define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \ + (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS)) +static int +fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev); + +static void +fm10k_mbx_initlock(struct fm10k_hw *hw) +{ + rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)); +} + +static void +fm10k_mbx_lock(struct fm10k_hw *hw) +{ + while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back))) + rte_delay_us(FM10K_MBXLOCK_DELAY_US); +} + +static void +fm10k_mbx_unlock(struct fm10k_hw *hw) +{ + rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)); +} + +/* Stubs needed for linkage when vPMD is disabled */ +int __attribute__((weak)) +fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev) +{ + return -1; +} + +uint16_t __attribute__((weak)) +fm10k_recv_pkts_vec( + __rte_unused void *rx_queue, + __rte_unused struct rte_mbuf **rx_pkts, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +uint16_t __attribute__((weak)) +fm10k_recv_scattered_pkts_vec( + __rte_unused void *rx_queue, + __rte_unused struct rte_mbuf **rx_pkts, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +int __attribute__((weak)) +fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq) + +{ + return -1; +} + +void __attribute__((weak)) +fm10k_rx_queue_release_mbufs_vec( + __rte_unused struct fm10k_rx_queue *rxq) +{ + return; +} + +void __attribute__((weak)) +fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq) +{ + return; +} + +int __attribute__((weak)) +fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq) +{ + return -1; +} + +uint16_t __attribute__((weak)) +fm10k_xmit_pkts_vec(__rte_unused void *tx_queue, + __rte_unused struct rte_mbuf **tx_pkts, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +/* + * reset queue to initial state, allocate software buffers used when starting + * device. + * return 0 on success + * return -ENOMEM if buffers cannot be allocated + * return -EINVAL if buffers do not satisfy alignment condition + */ +static inline int +rx_queue_reset(struct fm10k_rx_queue *q) +{ + static const union fm10k_rx_desc zero = {{0} }; + uint64_t dma_addr; + int i, diag; + PMD_INIT_FUNC_TRACE(); + + diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc); + if (diag != 0) + return -ENOMEM; + + for (i = 0; i < q->nb_desc; ++i) { + fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id); + if (!fm10k_addr_alignment_valid(q->sw_ring[i])) { + rte_mempool_put_bulk(q->mp, (void **)q->sw_ring, + q->nb_desc); + return -EINVAL; + } + dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]); + q->hw_ring[i].q.pkt_addr = dma_addr; + q->hw_ring[i].q.hdr_addr = dma_addr; + } + + /* initialize extra software ring entries. Space for these extra + * entries is always allocated. 
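/*
 * Illustrative aside, not part of the patch above: the fm10k_xstats_name_off
 * tables pair a human-readable statistic name with an offsetof() into the
 * hardware-stats structure, so the xstats code can walk one table instead of
 * naming every field. A generic sketch of that lookup pattern on a toy
 * structure (not the driver's real stats layout).
 */
#include <inttypes.h>
#include <stddef.h>
#include <stdio.h>

struct demo_stats {
        uint64_t rx_errors;
        uint64_t tx_errors;
};

struct demo_name_off {
        const char *name;
        unsigned int offset;
};

static const struct demo_name_off demo_strings[] = {
        {"rx_errors", offsetof(struct demo_stats, rx_errors)},
        {"tx_errors", offsetof(struct demo_stats, tx_errors)},
};

static void demo_dump_stats(const struct demo_stats *stats)
{
        size_t i;

        for (i = 0; i < sizeof(demo_strings) / sizeof(demo_strings[0]); i++) {
                const uint64_t *val = (const uint64_t *)
                        ((const char *)stats + demo_strings[i].offset);
                printf("%s: %" PRIu64 "\n", demo_strings[i].name, *val);
        }
}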
+ */ + memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf)); + for (i = 0; i < q->nb_fake_desc; ++i) { + q->sw_ring[q->nb_desc + i] = &q->fake_mbuf; + q->hw_ring[q->nb_desc + i] = zero; + } + + q->next_dd = 0; + q->next_alloc = 0; + q->next_trigger = q->alloc_thresh - 1; + FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1); + q->rxrearm_start = 0; + q->rxrearm_nb = 0; + + return 0; +} + +/* + * clean queue, descriptor rings, free software buffers used when stopping + * device. + */ +static inline void +rx_queue_clean(struct fm10k_rx_queue *q) +{ + union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} }; + uint32_t i; + PMD_INIT_FUNC_TRACE(); + + /* zero descriptor rings */ + for (i = 0; i < q->nb_desc; ++i) + q->hw_ring[i] = zero; + + /* zero faked descriptors */ + for (i = 0; i < q->nb_fake_desc; ++i) + q->hw_ring[q->nb_desc + i] = zero; + + /* vPMD driver has a different way of releasing mbufs. */ + if (q->rx_using_sse) { + fm10k_rx_queue_release_mbufs_vec(q); + return; + } + + /* free software buffers */ + for (i = 0; i < q->nb_desc; ++i) { + if (q->sw_ring[i]) { + rte_pktmbuf_free_seg(q->sw_ring[i]); + q->sw_ring[i] = NULL; + } + } +} + +/* + * free all queue memory used when releasing the queue (i.e. configure) + */ +static inline void +rx_queue_free(struct fm10k_rx_queue *q) +{ + PMD_INIT_FUNC_TRACE(); + if (q) { + PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q); + rx_queue_clean(q); + if (q->sw_ring) { + rte_free(q->sw_ring); + q->sw_ring = NULL; + } + rte_free(q); + q = NULL; + } +} + +/* + * disable RX queue, wait unitl HW finished necessary flush operation + */ +static inline int +rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum) +{ + uint32_t reg, i; + + reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum)); + FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum), + reg & ~FM10K_RXQCTL_ENABLE); + + /* Wait 100us at most */ + for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) { + rte_delay_us(1); + reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum)); + if (!(reg & FM10K_RXQCTL_ENABLE)) + break; + } + + if (i == FM10K_QUEUE_DISABLE_TIMEOUT) + return -1; + + return 0; +} + +/* + * reset queue to initial state, allocate software buffers used when starting + * device + */ +static inline void +tx_queue_reset(struct fm10k_tx_queue *q) +{ + PMD_INIT_FUNC_TRACE(); + q->last_free = 0; + q->next_free = 0; + q->nb_used = 0; + q->nb_free = q->nb_desc - 1; + fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh); + FM10K_PCI_REG_WRITE(q->tail_ptr, 0); +} + +/* + * clean queue, descriptor rings, free software buffers used when stopping + * device + */ +static inline void +tx_queue_clean(struct fm10k_tx_queue *q) +{ + struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0}; + uint32_t i; + PMD_INIT_FUNC_TRACE(); + + /* zero descriptor rings */ + for (i = 0; i < q->nb_desc; ++i) + q->hw_ring[i] = zero; + + /* free software buffers */ + for (i = 0; i < q->nb_desc; ++i) { + if (q->sw_ring[i]) { + rte_pktmbuf_free_seg(q->sw_ring[i]); + q->sw_ring[i] = NULL; + } + } +} + +/* + * free all queue memory used when releasing the queue (i.e. 
configure) + */ +static inline void +tx_queue_free(struct fm10k_tx_queue *q) +{ + PMD_INIT_FUNC_TRACE(); + if (q) { + PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q); + tx_queue_clean(q); + if (q->rs_tracker.list) { + rte_free(q->rs_tracker.list); + q->rs_tracker.list = NULL; + } + if (q->sw_ring) { + rte_free(q->sw_ring); + q->sw_ring = NULL; + } + rte_free(q); + q = NULL; + } +} + +/* + * disable TX queue, wait unitl HW finished necessary flush operation + */ +static inline int +tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum) +{ + uint32_t reg, i; + + reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum)); + FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum), + reg & ~FM10K_TXDCTL_ENABLE); + + /* Wait 100us at most */ + for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) { + rte_delay_us(1); + reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum)); + if (!(reg & FM10K_TXDCTL_ENABLE)) + break; + } + + if (i == FM10K_QUEUE_DISABLE_TIMEOUT) + return -1; + + return 0; +} + +static int +fm10k_check_mq_mode(struct rte_eth_dev *dev) +{ + enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode; + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_vmdq_rx_conf *vmdq_conf; + uint16_t nb_rx_q = dev->data->nb_rx_queues; + + vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf; + + if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) { + PMD_INIT_LOG(ERR, "DCB mode is not supported."); + return -EINVAL; + } + + if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG)) + return 0; + + if (hw->mac.type == fm10k_mac_vf) { + PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF."); + return -EINVAL; + } + + /* Check VMDQ queue pool number */ + if (vmdq_conf->nb_queue_pools > + sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT || + vmdq_conf->nb_queue_pools > nb_rx_q) { + PMD_INIT_LOG(ERR, "Too many of queue pools: %d", + vmdq_conf->nb_queue_pools); + return -EINVAL; + } + + return 0; +} + +static const struct fm10k_txq_ops def_txq_ops = { + .reset = tx_queue_reset, +}; + +static int +fm10k_dev_configure(struct rte_eth_dev *dev) +{ + int ret; + + PMD_INIT_FUNC_TRACE(); + + if (dev->data->dev_conf.rxmode.hw_strip_crc == 0) + PMD_INIT_LOG(WARNING, "fm10k always strip CRC"); + /* multipe queue mode checking */ + ret = fm10k_check_mq_mode(dev); + if (ret != 0) { + PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.", + ret); + return ret; + } + + return 0; +} + +/* fls = find last set bit = 32 minus the number of leading zeros */ +#ifndef fls +#define fls(x) (((x) == 0) ? 
0 : (32 - __builtin_clz((x)))) +#endif + +static void +fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_vmdq_rx_conf *vmdq_conf; + uint32_t i; + + vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf; + + for (i = 0; i < vmdq_conf->nb_pool_maps; i++) { + if (!vmdq_conf->pool_map[i].pools) + continue; + fm10k_mbx_lock(hw); + fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true); + fm10k_mbx_unlock(hw); + } +} + +static void +fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Add default mac address */ + fm10k_MAC_filter_set(dev, hw->mac.addr, true, + MAIN_VSI_POOL_NUMBER); +} + +static void +fm10k_dev_rss_configure(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + uint32_t mrqc, *key, i, reta, j; + uint64_t hf; + +#define RSS_KEY_SIZE 40 + static uint8_t rss_intel_key[RSS_KEY_SIZE] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, + 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, + 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA, + }; + + if (dev->data->nb_rx_queues == 1 || + dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS || + dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) { + FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0); + return; + } + + /* random key is rss_intel_key (default) or user provided (rss_key) */ + if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL) + key = (uint32_t *)rss_intel_key; + else + key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key; + + /* Now fill our hash function seeds, 4 bytes at a time */ + for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i) + FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]); + + /* + * Fill in redirection table + * The byte-swap is needed because NIC registers are in + * little-endian order. + */ + reta = 0; + for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) { + if (j == dev->data->nb_rx_queues) + j = 0; + reta = (reta << CHAR_BIT) | j; + if ((i & 3) == 3) + FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), + rte_bswap32(reta)); + } + + /* + * Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses + */ + hf = dev_conf->rx_adv_conf.rss_conf.rss_hf; + mrqc = 0; + mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0; + mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0; + mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0; + mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0; + mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0; + mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0; + mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0; + mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0; + mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0; + + if (mrqc == 0) { + PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64"is not" + "supported", hf); + return; + } + + FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc); +} + +static void +fm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t i; + + for (i = 0; i < nb_lport_new; i++) { + /* Set unicast mode by default. App can change + * to other mode in other API func. 
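 *
 * (Referring back to fm10k_dev_rss_configure() above: the RETA fill packs
 * four 8-bit queue indices into each 32-bit register before the byte swap.
 * With 4 RX queues, for example, the loop accumulates reta = 0x00010203
 * every fourth iteration and writes rte_bswap32(0x00010203) == 0x03020100
 * into the corresponding FM10K_RETA register, spreading the 128 RSS buckets
 * round-robin across queues 0..3.)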
+ */ + fm10k_mbx_lock(hw); + hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i, + FM10K_XCAST_MODE_NONE); + fm10k_mbx_unlock(hw); + } +} + +static void +fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_vmdq_rx_conf *vmdq_conf; + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + struct fm10k_macvlan_filter_info *macvlan; + uint16_t nb_queue_pools = 0; /* pool number in configuration */ + uint16_t nb_lport_new; + + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf; + + fm10k_dev_rss_configure(dev); + + /* only PF supports VMDQ */ + if (hw->mac.type != fm10k_mac_pf) + return; + + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) + nb_queue_pools = vmdq_conf->nb_queue_pools; + + /* no pool number change, no need to update logic port and VLAN/MAC */ + if (macvlan->nb_queue_pools == nb_queue_pools) + return; + + nb_lport_new = nb_queue_pools ? nb_queue_pools : 1; + fm10k_dev_logic_port_update(dev, nb_lport_new); + + /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */ + memset(dev->data->mac_addrs, 0, + ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM); + ether_addr_copy((const struct ether_addr *)hw->mac.addr, + &dev->data->mac_addrs[0]); + memset(macvlan, 0, sizeof(*macvlan)); + macvlan->nb_queue_pools = nb_queue_pools; + + if (nb_queue_pools) + fm10k_dev_vmdq_rx_configure(dev); + else + fm10k_dev_pf_main_vsi_reset(dev); +} + +static int +fm10k_dev_tx_init(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int i, ret; + struct fm10k_tx_queue *txq; + uint64_t base_addr; + uint32_t size; + + /* Disable TXINT to avoid possible interrupt */ + for (i = 0; i < hw->mac.max_queues; i++) + FM10K_WRITE_REG(hw, FM10K_TXINT(i), + 3 << FM10K_TXINT_TIMER_SHIFT); + + /* Setup TX queue */ + for (i = 0; i < dev->data->nb_tx_queues; ++i) { + txq = dev->data->tx_queues[i]; + base_addr = txq->hw_ring_phys_addr; + size = txq->nb_desc * sizeof(struct fm10k_tx_desc); + + /* disable queue to avoid issues while updating state */ + ret = tx_queue_disable(hw, i); + if (ret) { + PMD_INIT_LOG(ERR, "failed to disable queue %d", i); + return -1; + } + /* Enable use of FTAG bit in TX descriptor, PFVTCTL + * register is read-only for VF. 
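 *
 * (In the TDBAL/TDBAH writes a few lines below, the 64-bit ring base address
 * is split into two 32-bit registers: a ring at physical address
 * 0x123456000, for instance, would be programmed as TDBAL = 0x23456000 and
 * TDBAH = 0x1, the shift amount being CHAR_BIT * sizeof(uint32_t) == 32.)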
+ */ + if (fm10k_check_ftag(dev->pci_dev->devargs)) { + if (hw->mac.type == fm10k_mac_pf) { + FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i), + FM10K_PFVTCTL_FTAG_DESC_ENABLE); + PMD_INIT_LOG(DEBUG, "FTAG mode is enabled"); + } else { + PMD_INIT_LOG(ERR, "VF FTAG is not supported."); + return -ENOTSUP; + } + } + + /* set location and size for descriptor ring */ + FM10K_WRITE_REG(hw, FM10K_TDBAL(i), + base_addr & UINT64_LOWER_32BITS_MASK); + FM10K_WRITE_REG(hw, FM10K_TDBAH(i), + base_addr >> (CHAR_BIT * sizeof(uint32_t))); + FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size); + + /* assign default SGLORT for each TX queue */ + FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map); + } + + /* set up vector or scalar TX function as appropriate */ + fm10k_set_tx_function(dev); + + return 0; +} + +static int +fm10k_dev_rx_init(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct fm10k_macvlan_filter_info *macvlan; + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + int i, ret; + struct fm10k_rx_queue *rxq; + uint64_t base_addr; + uint32_t size; + uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY; + uint32_t logic_port = hw->mac.dglort_map; + uint16_t buf_size; + uint16_t queue_stride = 0; + + /* enable RXINT for interrupt mode */ + i = 0; + if (rte_intr_dp_is_en(intr_handle)) { + for (; i < dev->data->nb_rx_queues; i++) { + FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(dev, i)); + if (hw->mac.type == fm10k_mac_pf) + FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, i)), + FM10K_ITR_AUTOMASK | + FM10K_ITR_MASK_CLEAR); + else + FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, i)), + FM10K_ITR_AUTOMASK | + FM10K_ITR_MASK_CLEAR); + } + } + /* Disable other RXINT to avoid possible interrupt */ + for (; i < hw->mac.max_queues; i++) + FM10K_WRITE_REG(hw, FM10K_RXINT(i), + 3 << FM10K_RXINT_TIMER_SHIFT); + + /* Setup RX queues */ + for (i = 0; i < dev->data->nb_rx_queues; ++i) { + rxq = dev->data->rx_queues[i]; + base_addr = rxq->hw_ring_phys_addr; + size = rxq->nb_desc * sizeof(union fm10k_rx_desc); + + /* disable queue to avoid issues while updating state */ + ret = rx_queue_disable(hw, i); + if (ret) { + PMD_INIT_LOG(ERR, "failed to disable queue %d", i); + return -1; + } + + /* Setup the Base and Length of the Rx Descriptor Ring */ + FM10K_WRITE_REG(hw, FM10K_RDBAL(i), + base_addr & UINT64_LOWER_32BITS_MASK); + FM10K_WRITE_REG(hw, FM10K_RDBAH(i), + base_addr >> (CHAR_BIT * sizeof(uint32_t))); + FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size); + + /* Configure the Rx buffer size for one buff without split */ + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) - + RTE_PKTMBUF_HEADROOM); + /* As RX buffer is aligned to 512B within mbuf, some bytes are + * reserved for this purpose, and the worst case could be 511B. + * But SRR reg assumes all buffers have the same size. In order + * to fill the gap, we'll have to consider the worst case and + * assume 512B is reserved. If we don't do so, it's possible + * for HW to overwrite data to next mbuf. 
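 *
 * (Worked example: for a pool created with the common default data room of
 * RTE_MBUF_DEFAULT_BUF_SIZE = 2176 bytes, buf_size starts at
 * 2176 - RTE_PKTMBUF_HEADROOM (128) = 2048; after the 512-byte
 * FM10K_RX_DATABUF_ALIGN reserve is subtracted below, 1536 bytes per mbuf
 * are advertised to the hardware through FM10K_SRRCTL.)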
+ */ + buf_size -= FM10K_RX_DATABUF_ALIGN; + + FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), + (buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) | + FM10K_SRRCTL_LOOPBACK_SUPPRESS); + + /* It adds dual VLAN length for supporting dual VLAN */ + if ((dev->data->dev_conf.rxmode.max_rx_pkt_len + + 2 * FM10K_VLAN_TAG_SIZE) > buf_size || + dev->data->dev_conf.rxmode.enable_scatter) { + uint32_t reg; + dev->data->scattered_rx = 1; + reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i)); + reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN; + FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg); + } + + /* Enable drop on empty, it's RO for VF */ + if (hw->mac.type == fm10k_mac_pf && rxq->drop_en) + rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY; + + FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl); + FM10K_WRITE_FLUSH(hw); + } + + /* Configure VMDQ/RSS if applicable */ + fm10k_dev_mq_rx_configure(dev); + + /* Decide the best RX function */ + fm10k_set_rx_function(dev); + + /* update RX_SGLORT for loopback suppress*/ + if (hw->mac.type != fm10k_mac_pf) + return 0; + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + if (macvlan->nb_queue_pools) + queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools; + for (i = 0; i < dev->data->nb_rx_queues; ++i) { + if (i && queue_stride && !(i % queue_stride)) + logic_port++; + FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port); + } + + return 0; +} + +static int +fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int err = -1; + uint32_t reg; + struct fm10k_rx_queue *rxq; + + PMD_INIT_FUNC_TRACE(); + + if (rx_queue_id < dev->data->nb_rx_queues) { + rxq = dev->data->rx_queues[rx_queue_id]; + err = rx_queue_reset(rxq); + if (err == -ENOMEM) { + PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err); + return err; + } else if (err == -EINVAL) { + PMD_INIT_LOG(ERR, "Invalid buffer address alignment :" + " %d", err); + return err; + } + + /* Setup the HW Rx Head and Tail Descriptor Pointers + * Note: this must be done AFTER the queue is enabled on real + * hardware, but BEFORE the queue is enabled when using the + * emulation platform. Do it in both places for now and remove + * this comment and the following two register writes when the + * emulation platform is no longer being used. 
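 *
 * (Referring back to the RX_SGLORT loop in fm10k_dev_rx_init() above: with,
 * say, 8 RX queues spread over 4 VMDQ pools, queue_stride is 2, so queues
 * 0-1 are tagged with hw->mac.dglort_map, queues 2-3 with dglort_map + 1,
 * and so on -- one logical port per pool, presumably so the switch can
 * suppress looping a pool's own traffic back to it.)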
+ */ + FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0); + FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1); + + /* Set PF ownership flag for PF devices */ + reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id)); + if (hw->mac.type == fm10k_mac_pf) + reg |= FM10K_RXQCTL_PF; + reg |= FM10K_RXQCTL_ENABLE; + /* enable RX queue */ + FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg); + FM10K_WRITE_FLUSH(hw); + + /* Setup the HW Rx Head and Tail Descriptor Pointers + * Note: this must be done AFTER the queue is enabled + */ + FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0); + FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + } + + return err; +} + +static int +fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + if (rx_queue_id < dev->data->nb_rx_queues) { + /* Disable RX queue */ + rx_queue_disable(hw, rx_queue_id); + + /* Free mbuf and clean HW ring */ + rx_queue_clean(dev->data->rx_queues[rx_queue_id]); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + } + + return 0; +} + +static int +fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + /** @todo - this should be defined in the shared code */ +#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000 + uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY; + int err = 0; + + PMD_INIT_FUNC_TRACE(); + + if (tx_queue_id < dev->data->nb_tx_queues) { + struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id]; + + q->ops->reset(q); + + /* reset head and tail pointers */ + FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0); + FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0); + + /* enable TX queue */ + FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id), + FM10K_TXDCTL_ENABLE | txdctl); + FM10K_WRITE_FLUSH(hw); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + } else + err = -1; + + return err; +} + +static int +fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + if (tx_queue_id < dev->data->nb_tx_queues) { + tx_queue_disable(hw, tx_queue_id); + tx_queue_clean(dev->data->tx_queues[tx_queue_id]); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + } + + return 0; +} + +static inline int fm10k_glort_valid(struct fm10k_hw *hw) +{ + return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE) + != FM10K_DGLORTMAP_NONE); +} + +static void +fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int status; + + PMD_INIT_FUNC_TRACE(); + + /* Return if it didn't acquire valid glort range */ + if (!fm10k_glort_valid(hw)) + return; + + fm10k_mbx_lock(hw); + status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map, + FM10K_XCAST_MODE_PROMISC); + fm10k_mbx_unlock(hw); + + if (status != FM10K_SUCCESS) + PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode"); +} + +static void +fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint8_t mode; + int status; + + PMD_INIT_FUNC_TRACE(); + + /* Return if it didn't acquire valid glort range */ + if (!fm10k_glort_valid(hw)) + return; + + if (dev->data->all_multicast == 1) + mode 
= FM10K_XCAST_MODE_ALLMULTI; + else + mode = FM10K_XCAST_MODE_NONE; + + fm10k_mbx_lock(hw); + status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map, + mode); + fm10k_mbx_unlock(hw); + + if (status != FM10K_SUCCESS) + PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode"); +} + +static void +fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int status; + + PMD_INIT_FUNC_TRACE(); + + /* Return if it didn't acquire valid glort range */ + if (!fm10k_glort_valid(hw)) + return; + + /* If promiscuous mode is enabled, it doesn't make sense to enable + * allmulticast and disable promiscuous since fm10k only can select + * one of the modes. + */ + if (dev->data->promiscuous) { + PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\ + "needn't enable allmulticast"); + return; + } + + fm10k_mbx_lock(hw); + status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map, + FM10K_XCAST_MODE_ALLMULTI); + fm10k_mbx_unlock(hw); + + if (status != FM10K_SUCCESS) + PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode"); +} + +static void +fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int status; + + PMD_INIT_FUNC_TRACE(); + + /* Return if it didn't acquire valid glort range */ + if (!fm10k_glort_valid(hw)) + return; + + if (dev->data->promiscuous) { + PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\ + "since promisc mode is enabled"); + return; + } + + fm10k_mbx_lock(hw); + /* Change mode to unicast mode */ + status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map, + FM10K_XCAST_MODE_NONE); + fm10k_mbx_unlock(hw); + + if (status != FM10K_SUCCESS) + PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode"); +} + +static void +fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t dglortdec, pool_len, rss_len, i, dglortmask; + uint16_t nb_queue_pools; + struct fm10k_macvlan_filter_info *macvlan; + + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + nb_queue_pools = macvlan->nb_queue_pools; + pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0; + rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len; + + /* GLORT 0x0-0x3F are used by PF and VMDQ, 0x40-0x7F used by FD */ + dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len; + dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) | + hw->mac.dglort_map; + FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask); + /* Configure VMDQ/RSS DGlort Decoder */ + FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec); + + /* Flow Director configurations, only queue number is valid. 
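 *
 * (Worked example for the decoder fields above: fls() returns the 1-based
 * index of the highest set bit, so fls(1) = 1, fls(7) = 3 and fls(8) = 4.
 * With 4 VMDQ pools and 8 RX queues, pool_len = fls(3) = 2 and
 * rss_len = fls(7) - 2 = 1, i.e. 2^2 pools of 2^1 RSS queues each, which
 * together decode all 8 queues.)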
*/ + dglortdec = fls(dev->data->nb_rx_queues - 1); + dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) | + (hw->mac.dglort_map + GLORT_FD_Q_BASE); + FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask); + FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec); + + /* Invalidate all other GLORT entries */ + for (i = 2; i < FM10K_DGLORT_COUNT; i++) + FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i), + FM10K_DGLORTMAP_NONE); +} + +#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1) +static int +fm10k_dev_start(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int i, diag; + + PMD_INIT_FUNC_TRACE(); + + /* stop, init, then start the hw */ + diag = fm10k_stop_hw(hw); + if (diag != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag); + return -EIO; + } + + diag = fm10k_init_hw(hw); + if (diag != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag); + return -EIO; + } + + diag = fm10k_start_hw(hw); + if (diag != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag); + return -EIO; + } + + diag = fm10k_dev_tx_init(dev); + if (diag) { + PMD_INIT_LOG(ERR, "TX init failed: %d", diag); + return diag; + } + + if (fm10k_dev_rxq_interrupt_setup(dev)) + return -EIO; + + diag = fm10k_dev_rx_init(dev); + if (diag) { + PMD_INIT_LOG(ERR, "RX init failed: %d", diag); + return diag; + } + + if (hw->mac.type == fm10k_mac_pf) + fm10k_dev_dglort_map_configure(dev); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct fm10k_rx_queue *rxq; + rxq = dev->data->rx_queues[i]; + + if (rxq->rx_deferred_start) + continue; + diag = fm10k_dev_rx_queue_start(dev, i); + if (diag != 0) { + int j; + for (j = 0; j < i; ++j) + rx_queue_clean(dev->data->rx_queues[j]); + return diag; + } + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct fm10k_tx_queue *txq; + txq = dev->data->tx_queues[i]; + + if (txq->tx_deferred_start) + continue; + diag = fm10k_dev_tx_queue_start(dev, i); + if (diag != 0) { + int j; + for (j = 0; j < i; ++j) + tx_queue_clean(dev->data->tx_queues[j]); + for (j = 0; j < dev->data->nb_rx_queues; ++j) + rx_queue_clean(dev->data->rx_queues[j]); + return diag; + } + } + + /* Update default vlan when not in VMDQ mode */ + if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)) + fm10k_vlan_filter_set(dev, hw->mac.default_vid, true); + + return 0; +} + +static void +fm10k_dev_stop(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + int i; + + PMD_INIT_FUNC_TRACE(); + + if (dev->data->tx_queues) + for (i = 0; i < dev->data->nb_tx_queues; i++) + fm10k_dev_tx_queue_stop(dev, i); + + if (dev->data->rx_queues) + for (i = 0; i < dev->data->nb_rx_queues; i++) + fm10k_dev_rx_queue_stop(dev, i); + + /* Disable datapath event */ + if (rte_intr_dp_is_en(intr_handle)) { + for (i = 0; i < dev->data->nb_rx_queues; i++) { + FM10K_WRITE_REG(hw, FM10K_RXINT(i), + 3 << FM10K_RXINT_TIMER_SHIFT); + if (hw->mac.type == fm10k_mac_pf) + FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, i)), + FM10K_ITR_MASK_SET); + else + FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, i)), + FM10K_ITR_MASK_SET); + } + } + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; +} + +static void +fm10k_dev_queue_release(struct rte_eth_dev *dev) +{ + int i; + + PMD_INIT_FUNC_TRACE(); + + if (dev->data->tx_queues) { + for 
(i = 0; i < dev->data->nb_tx_queues; i++) { + struct fm10k_tx_queue *txq = dev->data->tx_queues[i]; + + tx_queue_free(txq); + } + } + + if (dev->data->rx_queues) { + for (i = 0; i < dev->data->nb_rx_queues; i++) + fm10k_rx_queue_release(dev->data->rx_queues[i]); + } +} + +static void +fm10k_dev_close(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + fm10k_mbx_lock(hw); + hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, + MAX_LPORT_NUM, false); + fm10k_mbx_unlock(hw); + + /* Stop mailbox service first */ + fm10k_close_mbx_service(hw); + fm10k_dev_stop(dev); + fm10k_dev_queue_release(dev); + fm10k_stop_hw(hw); +} + +static int +fm10k_link_update(struct rte_eth_dev *dev, + __rte_unused int wait_to_complete) +{ + PMD_INIT_FUNC_TRACE(); + + /* The host-interface link is always up. The speed is ~50Gbps per Gen3 + * x8 PCIe interface. For now, we leave the speed undefined since there + * is no 50Gbps Ethernet. */ + dev->data->dev_link.link_speed = 0; + dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; + dev->data->dev_link.link_status = ETH_LINK_UP; + + return 0; +} + +static int +fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, + unsigned n) +{ + struct fm10k_hw_stats *hw_stats = + FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + unsigned i, q, count = 0; + + if (n < FM10K_NB_XSTATS) + return FM10K_NB_XSTATS; + + /* Global stats */ + for (i = 0; i < FM10K_NB_HW_XSTATS; i++) { + snprintf(xstats[count].name, sizeof(xstats[count].name), + "%s", fm10k_hw_stats_strings[count].name); + xstats[count].value = *(uint64_t *)(((char *)hw_stats) + + fm10k_hw_stats_strings[count].offset); + count++; + } + + /* PF queue stats */ + for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) { + for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) { + snprintf(xstats[count].name, sizeof(xstats[count].name), + "rx_q%u_%s", q, + fm10k_hw_stats_rx_q_strings[i].name); + xstats[count].value = + *(uint64_t *)(((char *)&hw_stats->q[q]) + + fm10k_hw_stats_rx_q_strings[i].offset); + count++; + } + for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) { + snprintf(xstats[count].name, sizeof(xstats[count].name), + "tx_q%u_%s", q, + fm10k_hw_stats_tx_q_strings[i].name); + xstats[count].value = + *(uint64_t *)(((char *)&hw_stats->q[q]) + + fm10k_hw_stats_tx_q_strings[i].offset); + count++; + } + } + + return FM10K_NB_XSTATS; +} + +static void +fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + uint64_t ipackets, opackets, ibytes, obytes; + struct fm10k_hw *hw = + FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct fm10k_hw_stats *hw_stats = + FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + int i; + + PMD_INIT_FUNC_TRACE(); + + fm10k_update_hw_stats(hw, hw_stats); + + ipackets = opackets = ibytes = obytes = 0; + for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) && + (i < hw->mac.max_queues); ++i) { + stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count; + stats->q_opackets[i] = hw_stats->q[i].tx_packets.count; + stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count; + stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count; + ipackets += stats->q_ipackets[i]; + opackets += stats->q_opackets[i]; + ibytes += stats->q_ibytes[i]; + obytes += stats->q_obytes[i]; + } + stats->ipackets = ipackets; + stats->opackets = opackets; + stats->ibytes = ibytes; + stats->obytes = obytes; +} + +static void +fm10k_stats_reset(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct 
fm10k_hw_stats *hw_stats = + FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + memset(hw_stats, 0, sizeof(*hw_stats)); + fm10k_rebind_hw_stats(hw, hw_stats); +} + +static void +fm10k_dev_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE; + dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE; + dev_info->max_rx_queues = hw->mac.max_queues; + dev_info->max_tx_queues = hw->mac.max_queues; + dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM; + dev_info->max_hash_mac_addrs = 0; + dev_info->max_vfs = dev->pci_dev->max_vfs; + dev_info->vmdq_pool_base = 0; + dev_info->vmdq_queue_base = 0; + dev_info->max_vmdq_pools = ETH_32_POOLS; + dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF; + dev_info->rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + dev_info->tx_offload_capa = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO; + + dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t); + dev_info->reta_size = FM10K_MAX_RSS_INDICES; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = FM10K_DEFAULT_RX_PTHRESH, + .hthresh = FM10K_DEFAULT_RX_HTHRESH, + .wthresh = FM10K_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0), + .rx_drop_en = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = FM10K_DEFAULT_TX_PTHRESH, + .hthresh = FM10K_DEFAULT_TX_HTHRESH, + .wthresh = FM10K_DEFAULT_TX_WTHRESH, + }, + .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0), + .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0), + .txq_flags = FM10K_SIMPLE_TX_FLAG, + }; + + dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = FM10K_MAX_RX_DESC, + .nb_min = FM10K_MIN_RX_DESC, + .nb_align = FM10K_MULT_RX_DESC, + }; + + dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = FM10K_MAX_TX_DESC, + .nb_min = FM10K_MIN_TX_DESC, + .nb_align = FM10K_MULT_TX_DESC, + }; + + dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | + ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G | + ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G; +} + +#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE +static const uint32_t * +fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + if (dev->rx_pkt_burst == fm10k_recv_pkts || + dev->rx_pkt_burst == fm10k_recv_scattered_pkts) { + static uint32_t ptypes[] = { + /* refers to rx_desc_to_ol_flags() */ + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + + return ptypes; + } else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec || + dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) { + static uint32_t ptypes_vec[] = { + /* refers to fm10k_desc_to_pktype_v() */ + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_TUNNEL_GENEVE, + RTE_PTYPE_TUNNEL_NVGRE, + RTE_PTYPE_TUNNEL_VXLAN, + RTE_PTYPE_TUNNEL_GRE, + RTE_PTYPE_UNKNOWN + }; + + return ptypes_vec; + } + + return NULL; +} +#else +static const uint32_t * +fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) +{ + return NULL; +} +#endif + +static int 
+fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + s32 result; + uint16_t mac_num = 0; + uint32_t vid_idx, vid_bit, mac_index; + struct fm10k_hw *hw; + struct fm10k_macvlan_filter_info *macvlan; + struct rte_eth_dev_data *data = dev->data; + + hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + + if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */ + PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode"); + return -EINVAL; + } + + if (vlan_id > ETH_VLAN_ID_MAX) { + PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096"); + return -EINVAL; + } + + vid_idx = FM10K_VFTA_IDX(vlan_id); + vid_bit = FM10K_VFTA_BIT(vlan_id); + /* this VLAN ID is already in the VLAN filter table, return SUCCESS */ + if (on && (macvlan->vfta[vid_idx] & vid_bit)) + return 0; + /* this VLAN ID is NOT in the VLAN filter table, cannot remove */ + if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) { + PMD_INIT_LOG(ERR, "Invalid vlan_id: not existing " + "in the VLAN filter table"); + return -EINVAL; + } + + fm10k_mbx_lock(hw); + result = fm10k_update_vlan(hw, vlan_id, 0, on); + fm10k_mbx_unlock(hw); + if (result != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "VLAN update failed: %d", result); + return -EIO; + } + + for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) && + (result == FM10K_SUCCESS); mac_index++) { + if (is_zero_ether_addr(&data->mac_addrs[mac_index])) + continue; + if (mac_num > macvlan->mac_num - 1) { + PMD_INIT_LOG(ERR, "MAC address number " + "not match"); + break; + } + fm10k_mbx_lock(hw); + result = fm10k_update_uc_addr(hw, hw->mac.dglort_map, + data->mac_addrs[mac_index].addr_bytes, + vlan_id, on, 0); + fm10k_mbx_unlock(hw); + mac_num++; + } + if (result != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "MAC address update failed: %d", result); + return -EIO; + } + + if (on) { + macvlan->vlan_num++; + macvlan->vfta[vid_idx] |= vid_bit; + } else { + macvlan->vlan_num--; + macvlan->vfta[vid_idx] &= ~vid_bit; + } + return 0; +} + +static void +fm10k_vlan_offload_set(__rte_unused struct rte_eth_dev *dev, int mask) +{ + if (mask & ETH_VLAN_STRIP_MASK) { + if (!dev->data->dev_conf.rxmode.hw_vlan_strip) + PMD_INIT_LOG(ERR, "VLAN stripping is " + "always on in fm10k"); + } + + if (mask & ETH_VLAN_EXTEND_MASK) { + if (dev->data->dev_conf.rxmode.hw_vlan_extend) + PMD_INIT_LOG(ERR, "VLAN QinQ is not " + "supported in fm10k"); + } + + if (mask & ETH_VLAN_FILTER_MASK) { + if (!dev->data->dev_conf.rxmode.hw_vlan_filter) + PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k"); + } +} + +/* Add/Remove a MAC address, and update filters to main VSI */ +static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev, + const u8 *mac, bool add, uint32_t pool) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct fm10k_macvlan_filter_info *macvlan; + uint32_t i, j, k; + + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + + if (pool != MAIN_VSI_POOL_NUMBER) { + PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set " + "mac to pool %u", pool); + return; + } + for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) { + if (!macvlan->vfta[j]) + continue; + for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) { + if (!(macvlan->vfta[j] & (1 << k))) + continue; + if (i + 1 > macvlan->vlan_num) { + PMD_INIT_LOG(ERR, "vlan number not match"); + return; + } + fm10k_mbx_lock(hw); + fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac, + j * FM10K_UINT32_BIT_SIZE + k, add, 0); + fm10k_mbx_unlock(hw); + i++; + } + } +} + +/* 
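 * (Referring back to fm10k_vlan_filter_set() above: assuming the usual VFTA
 * layout of 32 VLAN bits per 32-bit word -- consistent with the
 * j * FM10K_UINT32_BIT_SIZE + k reconstruction in
 * fm10k_MAC_filter_set_main_vsi() -- FM10K_VFTA_IDX(1234) would select
 * vfta[1234 >> 5] = vfta[38] and FM10K_VFTA_BIT(1234) would be
 * 1 << (1234 & 0x1f), i.e. bit 18 of that word.)
 *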
Add/Remove a MAC address, and update filters to VMDQ */ +static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev, + const u8 *mac, bool add, uint32_t pool) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct fm10k_macvlan_filter_info *macvlan; + struct rte_eth_vmdq_rx_conf *vmdq_conf; + uint32_t i; + + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf; + + if (pool > macvlan->nb_queue_pools) { + PMD_DRV_LOG(ERR, "Pool number %u invalid." + " Max pool is %u", + pool, macvlan->nb_queue_pools); + return; + } + for (i = 0; i < vmdq_conf->nb_pool_maps; i++) { + if (!(vmdq_conf->pool_map[i].pools & (1UL << pool))) + continue; + fm10k_mbx_lock(hw); + fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac, + vmdq_conf->pool_map[i].vlan_id, add, 0); + fm10k_mbx_unlock(hw); + } +} + +/* Add/Remove a MAC address, and update filters */ +static void fm10k_MAC_filter_set(struct rte_eth_dev *dev, + const u8 *mac, bool add, uint32_t pool) +{ + struct fm10k_macvlan_filter_info *macvlan; + + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + + if (macvlan->nb_queue_pools > 0) /* VMDQ mode */ + fm10k_MAC_filter_set_vmdq(dev, mac, add, pool); + else + fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool); + + if (add) + macvlan->mac_num++; + else + macvlan->mac_num--; +} + +/* Add a MAC address, and update filters */ +static void +fm10k_macaddr_add(struct rte_eth_dev *dev, + struct ether_addr *mac_addr, + uint32_t index, + uint32_t pool) +{ + struct fm10k_macvlan_filter_info *macvlan; + + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool); + macvlan->mac_vmdq_id[index] = pool; +} + +/* Remove a MAC address, and update filters */ +static void +fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + struct rte_eth_dev_data *data = dev->data; + struct fm10k_macvlan_filter_info *macvlan; + + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes, + FALSE, macvlan->mac_vmdq_id[index]); + macvlan->mac_vmdq_id[index] = 0; +} + +static inline int +check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request) +{ + if ((request < min) || (request > max) || ((request % mult) != 0)) + return -1; + else + return 0; +} + + +static inline int +check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request) +{ + if ((request < min) || (request > max) || ((div % request) != 0)) + return -1; + else + return 0; +} + +static inline int +handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf) +{ + uint16_t rx_free_thresh; + + if (conf->rx_free_thresh == 0) + rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q); + else + rx_free_thresh = conf->rx_free_thresh; + + /* make sure the requested threshold satisfies the constraints */ + if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q), + FM10K_RX_FREE_THRESH_MAX(q), + FM10K_RX_FREE_THRESH_DIV(q), + rx_free_thresh)) { + PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be " + "less than or equal to %u, " + "greater than or equal to %u, " + "and a divisor of %u", + rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q), + FM10K_RX_FREE_THRESH_MIN(q), + FM10K_RX_FREE_THRESH_DIV(q)); + return -EINVAL; + } + + q->alloc_thresh = rx_free_thresh; + q->drop_en = conf->rx_drop_en; + q->rx_deferred_start = conf->rx_deferred_start; + + return 0; +} + +/* + * Hardware requires specific alignment for Rx 
packet buffers. At + * least one of the following two conditions must be satisfied. + * 1. Address is 512B aligned + * 2. Address is 8B aligned and buffer does not cross 4K boundary. + * + * As such, the driver may need to adjust the DMA address within the + * buffer by up to 512B. + * + * return 1 if the element size is valid, otherwise return 0. + */ +static int +mempool_element_size_valid(struct rte_mempool *mp) +{ + uint32_t min_size; + + /* elt_size includes mbuf header and headroom */ + min_size = mp->elt_size - sizeof(struct rte_mbuf) - + RTE_PKTMBUF_HEADROOM; + + /* account for up to 512B of alignment */ + min_size -= FM10K_RX_DATABUF_ALIGN; + + /* sanity check for overflow */ + if (min_size > mp->elt_size) + return 0; + + /* size is valid */ + return 1; +} + +static int +fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_rxconf *conf, struct rte_mempool *mp) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev); + struct fm10k_rx_queue *q; + const struct rte_memzone *mz; + + PMD_INIT_FUNC_TRACE(); + + /* make sure the mempool element size can account for alignment. */ + if (!mempool_element_size_valid(mp)) { + PMD_INIT_LOG(ERR, "Error : Mempool element size is too small"); + return -EINVAL; + } + + /* make sure a valid number of descriptors have been requested */ + if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC, + FM10K_MULT_RX_DESC, nb_desc)) { + PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be " + "less than or equal to %"PRIu32", " + "greater than or equal to %u, " + "and a multiple of %u", + nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC, + FM10K_MULT_RX_DESC); + return -EINVAL; + } + + /* + * if this queue existed already, free the associated memory. The + * queue cannot be reused in case we need to allocate memory on + * different socket than was previously used. + */ + if (dev->data->rx_queues[queue_id] != NULL) { + rx_queue_free(dev->data->rx_queues[queue_id]); + dev->data->rx_queues[queue_id] = NULL; + } + + /* allocate memory for the queue structure */ + q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE, + socket_id); + if (q == NULL) { + PMD_INIT_LOG(ERR, "Cannot allocate queue structure"); + return -ENOMEM; + } + + /* setup queue */ + q->mp = mp; + q->nb_desc = nb_desc; + q->nb_fake_desc = FM10K_MULT_RX_DESC; + q->port_id = dev->data->port_id; + q->queue_id = queue_id; + q->tail_ptr = (volatile uint32_t *) + &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)]; + if (handle_rxconf(q, conf)) + return -EINVAL; + + /* allocate memory for the software ring */ + q->sw_ring = rte_zmalloc_socket("fm10k sw ring", + (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *), + RTE_CACHE_LINE_SIZE, socket_id); + if (q->sw_ring == NULL) { + PMD_INIT_LOG(ERR, "Cannot allocate software ring"); + rte_free(q); + return -ENOMEM; + } + + /* + * allocate memory for the hardware descriptor ring. A memzone large + * enough to hold the maximum ring size is requested to allow for + * resizing in later calls to the queue setup function. 
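 *
 * (Referring back to mempool_element_size_valid() above: on a typical
 * 64-byte-cache-line build, a default pktmbuf pool has
 * elt_size = sizeof(struct rte_mbuf) (128) + 2176 = 2304, so min_size works
 * out to 2304 - 128 - 128 - 512 = 1536 and the pool is accepted. The
 * "min_size > mp->elt_size" test relies on unsigned wrap-around: a too-small
 * element makes the subtraction underflow to a huge value, which fails that
 * comparison and rejects the pool.)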
+ */ + mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id, + FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC, + socket_id); + if (mz == NULL) { + PMD_INIT_LOG(ERR, "Cannot allocate hardware ring"); + rte_free(q->sw_ring); + rte_free(q); + return -ENOMEM; + } + q->hw_ring = mz->addr; + q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr); + + /* Check if number of descs satisfied Vector requirement */ + if (!rte_is_power_of_2(nb_desc)) { + PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx " + "preconditions - canceling the feature for " + "the whole port[%d]", + q->queue_id, q->port_id); + dev_info->rx_vec_allowed = false; + } else + fm10k_rxq_vec_setup(q); + + dev->data->rx_queues[queue_id] = q; + return 0; +} + +static void +fm10k_rx_queue_release(void *queue) +{ + PMD_INIT_FUNC_TRACE(); + + rx_queue_free(queue); +} + +static inline int +handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf) +{ + uint16_t tx_free_thresh; + uint16_t tx_rs_thresh; + + /* constraint MACROs require that tx_free_thresh is configured + * before tx_rs_thresh */ + if (conf->tx_free_thresh == 0) + tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q); + else + tx_free_thresh = conf->tx_free_thresh; + + /* make sure the requested threshold satisfies the constraints */ + if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q), + FM10K_TX_FREE_THRESH_MAX(q), + FM10K_TX_FREE_THRESH_DIV(q), + tx_free_thresh)) { + PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be " + "less than or equal to %u, " + "greater than or equal to %u, " + "and a divisor of %u", + tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q), + FM10K_TX_FREE_THRESH_MIN(q), + FM10K_TX_FREE_THRESH_DIV(q)); + return -EINVAL; + } + + q->free_thresh = tx_free_thresh; + + if (conf->tx_rs_thresh == 0) + tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q); + else + tx_rs_thresh = conf->tx_rs_thresh; + + q->tx_deferred_start = conf->tx_deferred_start; + + /* make sure the requested threshold satisfies the constraints */ + if (check_thresh(FM10K_TX_RS_THRESH_MIN(q), + FM10K_TX_RS_THRESH_MAX(q), + FM10K_TX_RS_THRESH_DIV(q), + tx_rs_thresh)) { + PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be " + "less than or equal to %u, " + "greater than or equal to %u, " + "and a divisor of %u", + tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q), + FM10K_TX_RS_THRESH_MIN(q), + FM10K_TX_RS_THRESH_DIV(q)); + return -EINVAL; + } + + q->rs_thresh = tx_rs_thresh; + + return 0; +} + +static int +fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_txconf *conf) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct fm10k_tx_queue *q; + const struct rte_memzone *mz; + + PMD_INIT_FUNC_TRACE(); + + /* make sure a valid number of descriptors have been requested */ + if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC, + FM10K_MULT_TX_DESC, nb_desc)) { + PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be " + "less than or equal to %"PRIu32", " + "greater than or equal to %u, " + "and a multiple of %u", + nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC, + FM10K_MULT_TX_DESC); + return -EINVAL; + } + + /* + * if this queue existed already, free the associated memory. The + * queue cannot be reused in case we need to allocate memory on + * different socket than was previously used. 
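 *
 * (Referring back to the power-of-two check in fm10k_rx_queue_setup() above:
 * a single RX ring whose size is not a power of two clears
 * dev_info->rx_vec_allowed and thereby disables the vector RX path for every
 * queue on the port, not just the offending one, even if that size passes
 * the min/max/multiple-of check in check_nb_desc().)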
+ */ + if (dev->data->tx_queues[queue_id] != NULL) { + struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id]; + + tx_queue_free(txq); + dev->data->tx_queues[queue_id] = NULL; + } + + /* allocate memory for the queue structure */ + q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE, + socket_id); + if (q == NULL) { + PMD_INIT_LOG(ERR, "Cannot allocate queue structure"); + return -ENOMEM; + } + + /* setup queue */ + q->nb_desc = nb_desc; + q->port_id = dev->data->port_id; + q->queue_id = queue_id; + q->txq_flags = conf->txq_flags; + q->ops = &def_txq_ops; + q->tail_ptr = (volatile uint32_t *) + &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)]; + if (handle_txconf(q, conf)) + return -EINVAL; + + /* allocate memory for the software ring */ + q->sw_ring = rte_zmalloc_socket("fm10k sw ring", + nb_desc * sizeof(struct rte_mbuf *), + RTE_CACHE_LINE_SIZE, socket_id); + if (q->sw_ring == NULL) { + PMD_INIT_LOG(ERR, "Cannot allocate software ring"); + rte_free(q); + return -ENOMEM; + } + + /* + * allocate memory for the hardware descriptor ring. A memzone large + * enough to hold the maximum ring size is requested to allow for + * resizing in later calls to the queue setup function. + */ + mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id, + FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC, + socket_id); + if (mz == NULL) { + PMD_INIT_LOG(ERR, "Cannot allocate hardware ring"); + rte_free(q->sw_ring); + rte_free(q); + return -ENOMEM; + } + q->hw_ring = mz->addr; + q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr); + + /* + * allocate memory for the RS bit tracker. Enough slots to hold the + * descriptor index for each RS bit needing to be set are required. + */ + q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker", + ((nb_desc + 1) / q->rs_thresh) * + sizeof(uint16_t), + RTE_CACHE_LINE_SIZE, socket_id); + if (q->rs_tracker.list == NULL) { + PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker"); + rte_free(q->sw_ring); + rte_free(q); + return -ENOMEM; + } + + dev->data->tx_queues[queue_id] = q; + return 0; +} + +static void +fm10k_tx_queue_release(void *queue) +{ + struct fm10k_tx_queue *q = queue; + PMD_INIT_FUNC_TRACE(); + + tx_queue_free(q); +} + +static int +fm10k_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t i, j, idx, shift; + uint8_t mask; + uint32_t reta; + + PMD_INIT_FUNC_TRACE(); + + if (reta_size > FM10K_MAX_RSS_INDICES) { + PMD_INIT_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)", reta_size, FM10K_MAX_RSS_INDICES); + return -EINVAL; + } + + /* + * Update Redirection Table RETA[n], n=0..31. 
The redirection table has + * 128-entries in 32 registers + */ + for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & + BIT_MASK_PER_UINT32); + if (mask == 0) + continue; + + reta = 0; + if (mask != BIT_MASK_PER_UINT32) + reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2)); + + for (j = 0; j < CHARS_PER_UINT32; j++) { + if (mask & (0x1 << j)) { + if (mask != 0xF) + reta &= ~(UINT8_MAX << CHAR_BIT * j); + reta |= reta_conf[idx].reta[shift + j] << + (CHAR_BIT * j); + } + } + FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta); + } + + return 0; +} + +static int +fm10k_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t i, j, idx, shift; + uint8_t mask; + uint32_t reta; + + PMD_INIT_FUNC_TRACE(); + + if (reta_size < FM10K_MAX_RSS_INDICES) { + PMD_INIT_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)", reta_size, FM10K_MAX_RSS_INDICES); + return -EINVAL; + } + + /* + * Read Redirection Table RETA[n], n=0..31. The redirection table has + * 128-entries in 32 registers + */ + for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & + BIT_MASK_PER_UINT32); + if (mask == 0) + continue; + + reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2)); + for (j = 0; j < CHARS_PER_UINT32; j++) { + if (mask & (0x1 << j)) + reta_conf[idx].reta[shift + j] = ((reta >> + CHAR_BIT * j) & UINT8_MAX); + } + } + + return 0; +} + +static int +fm10k_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t *key = (uint32_t *)rss_conf->rss_key; + uint32_t mrqc; + uint64_t hf = rss_conf->rss_hf; + int i; + + PMD_INIT_FUNC_TRACE(); + + if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE * + FM10K_RSSRK_ENTRIES_PER_REG) + return -EINVAL; + + if (hf == 0) + return -EINVAL; + + mrqc = 0; + mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0; + mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0; + mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0; + mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0; + mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0; + mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0; + mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0; + mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0; + mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? 
FM10K_MRQC_UDP_IPV6 : 0; + + /* If the mapping doesn't fit any supported, return */ + if (mrqc == 0) + return -EINVAL; + + if (key != NULL) + for (i = 0; i < FM10K_RSSRK_SIZE; ++i) + FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]); + + FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc); + + return 0; +} + +static int +fm10k_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t *key = (uint32_t *)rss_conf->rss_key; + uint32_t mrqc; + uint64_t hf; + int i; + + PMD_INIT_FUNC_TRACE(); + + if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE * + FM10K_RSSRK_ENTRIES_PER_REG) + return -EINVAL; + + if (key != NULL) + for (i = 0; i < FM10K_RSSRK_SIZE; ++i) + key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i)); + + mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0)); + hf = 0; + hf |= (mrqc & FM10K_MRQC_IPV4) ? ETH_RSS_IPV4 : 0; + hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6 : 0; + hf |= (mrqc & FM10K_MRQC_IPV6) ? ETH_RSS_IPV6_EX : 0; + hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0; + hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0; + hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX : 0; + hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0; + hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0; + hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX : 0; + + rss_conf->rss_hf = hf; + + return 0; +} + +static void +fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t int_map = FM10K_INT_MAP_IMMEDIATE; + + /* Bind all local non-queue interrupt to vector 0 */ + int_map |= FM10K_MISC_VEC_ID; + + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map); + + /* Enable misc causes */ + FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) | + FM10K_EIMR_ENABLE(THI_FAULT) | + FM10K_EIMR_ENABLE(FUM_FAULT) | + FM10K_EIMR_ENABLE(MAILBOX) | + FM10K_EIMR_ENABLE(SWITCHREADY) | + FM10K_EIMR_ENABLE(SWITCHNOTREADY) | + FM10K_EIMR_ENABLE(SRAMERROR) | + FM10K_EIMR_ENABLE(VFLR)); + + /* Enable ITR 0 */ + FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK | + FM10K_ITR_MASK_CLEAR); + FM10K_WRITE_FLUSH(hw); +} + +static void +fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t int_map = FM10K_INT_MAP_DISABLE; + + int_map |= FM10K_MISC_VEC_ID; + + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map); + + /* Disable misc causes */ + FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) | + FM10K_EIMR_DISABLE(THI_FAULT) | + FM10K_EIMR_DISABLE(FUM_FAULT) | + FM10K_EIMR_DISABLE(MAILBOX) | + FM10K_EIMR_DISABLE(SWITCHREADY) | + FM10K_EIMR_DISABLE(SWITCHNOTREADY) | + FM10K_EIMR_DISABLE(SRAMERROR) | + 
FM10K_EIMR_DISABLE(VFLR)); + + /* Disable ITR 0 */ + FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET); + FM10K_WRITE_FLUSH(hw); +} + +static void +fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t int_map = FM10K_INT_MAP_IMMEDIATE; + + /* Bind all local non-queue interrupt to vector 0 */ + int_map |= FM10K_MISC_VEC_ID; + + /* Only INT 0 available, other 15 are reserved. */ + FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map); + + /* Enable ITR 0 */ + FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK | + FM10K_ITR_MASK_CLEAR); + FM10K_WRITE_FLUSH(hw); +} + +static void +fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t int_map = FM10K_INT_MAP_DISABLE; + + int_map |= FM10K_MISC_VEC_ID; + + /* Only INT 0 available, other 15 are reserved. */ + FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map); + + /* Disable ITR 0 */ + FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET); + FM10K_WRITE_FLUSH(hw); +} + +static int +fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Enable ITR */ + if (hw->mac.type == fm10k_mac_pf) + FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, queue_id)), + FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR); + else + FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, queue_id)), + FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR); + rte_intr_enable(&dev->pci_dev->intr_handle); + return 0; +} + +static int +fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Disable ITR */ + if (hw->mac.type == fm10k_mac_pf) + FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, queue_id)), + FM10K_ITR_MASK_SET); + else + FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, queue_id)), + FM10K_ITR_MASK_SET); + return 0; +} + +static int +fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + uint32_t intr_vector, vec; + uint16_t queue_id; + int result = 0; + + /* fm10k needs one separate interrupt for mailbox, + * so only drivers which support multiple interrupt vectors + * e.g. 
vfio-pci can work for fm10k interrupt mode + */ + if (!rte_intr_cap_multiple(intr_handle) || + dev->data->dev_conf.intr_conf.rxq == 0) + return result; + + intr_vector = dev->data->nb_rx_queues; + + /* disable interrupt first */ + rte_intr_disable(&dev->pci_dev->intr_handle); + if (hw->mac.type == fm10k_mac_pf) + fm10k_dev_disable_intr_pf(dev); + else + fm10k_dev_disable_intr_vf(dev); + + if (rte_intr_efd_enable(intr_handle, intr_vector)) { + PMD_INIT_LOG(ERR, "Failed to init event fd"); + result = -EIO; + } + + if (rte_intr_dp_is_en(intr_handle) && !result) { + intr_handle->intr_vec = rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), 0); + if (intr_handle->intr_vec) { + for (queue_id = 0, vec = FM10K_RX_VEC_START; + queue_id < dev->data->nb_rx_queues; + queue_id++) { + intr_handle->intr_vec[queue_id] = vec; + if (vec < intr_handle->nb_efd - 1 + + FM10K_RX_VEC_START) + vec++; + } + } else { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec", dev->data->nb_rx_queues); + rte_intr_efd_disable(intr_handle); + result = -ENOMEM; + } + } + + if (hw->mac.type == fm10k_mac_pf) + fm10k_dev_enable_intr_pf(dev); + else + fm10k_dev_enable_intr_vf(dev); + rte_intr_enable(&dev->pci_dev->intr_handle); + hw->mac.ops.update_int_moderator(hw); + return result; +} + +static int +fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr) +{ + struct fm10k_fault fault; + int err; + const char *estr = "Unknown error"; + + /* Process PCA fault */ + if (eicr & FM10K_EICR_PCA_FAULT) { + err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault); + if (err) + goto error; + switch (fault.type) { + case PCA_NO_FAULT: + estr = "PCA_NO_FAULT"; break; + case PCA_UNMAPPED_ADDR: + estr = "PCA_UNMAPPED_ADDR"; break; + case PCA_BAD_QACCESS_PF: + estr = "PCA_BAD_QACCESS_PF"; break; + case PCA_BAD_QACCESS_VF: + estr = "PCA_BAD_QACCESS_VF"; break; + case PCA_MALICIOUS_REQ: + estr = "PCA_MALICIOUS_REQ"; break; + case PCA_POISONED_TLP: + estr = "PCA_POISONED_TLP"; break; + case PCA_TLP_ABORT: + estr = "PCA_TLP_ABORT"; break; + default: + goto error; + } + PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x", + estr, fault.func ? "VF" : "PF", fault.func, + fault.address, fault.specinfo); + } + + /* Process THI fault */ + if (eicr & FM10K_EICR_THI_FAULT) { + err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault); + if (err) + goto error; + switch (fault.type) { + case THI_NO_FAULT: + estr = "THI_NO_FAULT"; break; + case THI_MAL_DIS_Q_FAULT: + estr = "THI_MAL_DIS_Q_FAULT"; break; + default: + goto error; + } + PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x", + estr, fault.func ? 
"VF" : "PF", fault.func, + fault.address, fault.specinfo); + } + + /* Process FUM fault */ + if (eicr & FM10K_EICR_FUM_FAULT) { + err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault); + if (err) + goto error; + switch (fault.type) { + case FUM_NO_FAULT: + estr = "FUM_NO_FAULT"; break; + case FUM_UNMAPPED_ADDR: + estr = "FUM_UNMAPPED_ADDR"; break; + case FUM_POISONED_TLP: + estr = "FUM_POISONED_TLP"; break; + case FUM_BAD_VF_QACCESS: + estr = "FUM_BAD_VF_QACCESS"; break; + case FUM_ADD_DECODE_ERR: + estr = "FUM_ADD_DECODE_ERR"; break; + case FUM_RO_ERROR: + estr = "FUM_RO_ERROR"; break; + case FUM_QPRC_CRC_ERROR: + estr = "FUM_QPRC_CRC_ERROR"; break; + case FUM_CSR_TIMEOUT: + estr = "FUM_CSR_TIMEOUT"; break; + case FUM_INVALID_TYPE: + estr = "FUM_INVALID_TYPE"; break; + case FUM_INVALID_LENGTH: + estr = "FUM_INVALID_LENGTH"; break; + case FUM_INVALID_BE: + estr = "FUM_INVALID_BE"; break; + case FUM_INVALID_ALIGN: + estr = "FUM_INVALID_ALIGN"; break; + default: + goto error; + } + PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x", + estr, fault.func ? "VF" : "PF", fault.func, + fault.address, fault.specinfo); + } + + return 0; +error: + PMD_INIT_LOG(ERR, "Failed to handle fault event."); + return err; +} + +/** + * PF interrupt handler triggered by NIC for handling specific interrupt. + * + * @param handle + * Pointer to interrupt handle. + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. + * + * @return + * void + */ +static void +fm10k_dev_interrupt_handler_pf( + __rte_unused struct rte_intr_handle *handle, + void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t cause, status; + + if (hw->mac.type != fm10k_mac_pf) + return; + + cause = FM10K_READ_REG(hw, FM10K_EICR); + + /* Handle PCI fault cases */ + if (cause & FM10K_EICR_FAULT_MASK) { + PMD_INIT_LOG(ERR, "INT: find fault!"); + fm10k_dev_handle_fault(hw, cause); + } + + /* Handle switch up/down */ + if (cause & FM10K_EICR_SWITCHNOTREADY) + PMD_INIT_LOG(ERR, "INT: Switch is not ready"); + + if (cause & FM10K_EICR_SWITCHREADY) + PMD_INIT_LOG(INFO, "INT: Switch is ready"); + + /* Handle mailbox message */ + fm10k_mbx_lock(hw); + hw->mbx.ops.process(hw, &hw->mbx); + fm10k_mbx_unlock(hw); + + /* Handle SRAM error */ + if (cause & FM10K_EICR_SRAMERROR) { + PMD_INIT_LOG(ERR, "INT: SRAM error on PEP"); + + status = FM10K_READ_REG(hw, FM10K_SRAM_IP); + /* Write to clear pending bits */ + FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status); + + /* Todo: print out error message after shared code updates */ + } + + /* Clear these 3 events if having any */ + cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX | + FM10K_EICR_SWITCHREADY; + if (cause) + FM10K_WRITE_REG(hw, FM10K_EICR, cause); + + /* Re-enable interrupt from device side */ + FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK | + FM10K_ITR_MASK_CLEAR); + /* Re-enable interrupt from host side */ + rte_intr_enable(&(dev->pci_dev->intr_handle)); +} + +/** + * VF interrupt handler triggered by NIC for handling specific interrupt. + * + * @param handle + * Pointer to interrupt handle. + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. 
+ * + * @return + * void + */ +static void +fm10k_dev_interrupt_handler_vf( + __rte_unused struct rte_intr_handle *handle, + void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->mac.type != fm10k_mac_vf) + return; + + /* Handle mailbox message if lock is acquired */ + fm10k_mbx_lock(hw); + hw->mbx.ops.process(hw, &hw->mbx); + fm10k_mbx_unlock(hw); + + /* Re-enable interrupt from device side */ + FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK | + FM10K_ITR_MASK_CLEAR); + /* Re-enable interrupt from host side */ + rte_intr_enable(&(dev->pci_dev->intr_handle)); +} + +/* Mailbox message handler in VF */ +static const struct fm10k_msg_data fm10k_msgdata_vf[] = { + FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test), + FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf), + FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf), + FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), +}; + +static int +fm10k_setup_mbx_service(struct fm10k_hw *hw) +{ + int err = 0; + + /* Initialize mailbox lock */ + fm10k_mbx_initlock(hw); + + /* Replace default message handler with new ones */ + if (hw->mac.type == fm10k_mac_vf) + err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf); + + if (err) { + PMD_INIT_LOG(ERR, "Failed to register mailbox handler.err:%d", + err); + return err; + } + /* Connect to SM for PF device or PF for VF device */ + return hw->mbx.ops.connect(hw, &hw->mbx); +} + +static void +fm10k_close_mbx_service(struct fm10k_hw *hw) +{ + /* Disconnect from SM for PF device or PF for VF device */ + hw->mbx.ops.disconnect(hw, &hw->mbx); +} + +static const struct eth_dev_ops fm10k_eth_dev_ops = { + .dev_configure = fm10k_dev_configure, + .dev_start = fm10k_dev_start, + .dev_stop = fm10k_dev_stop, + .dev_close = fm10k_dev_close, + .promiscuous_enable = fm10k_dev_promiscuous_enable, + .promiscuous_disable = fm10k_dev_promiscuous_disable, + .allmulticast_enable = fm10k_dev_allmulticast_enable, + .allmulticast_disable = fm10k_dev_allmulticast_disable, + .stats_get = fm10k_stats_get, + .xstats_get = fm10k_xstats_get, + .stats_reset = fm10k_stats_reset, + .xstats_reset = fm10k_stats_reset, + .link_update = fm10k_link_update, + .dev_infos_get = fm10k_dev_infos_get, + .dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get, + .vlan_filter_set = fm10k_vlan_filter_set, + .vlan_offload_set = fm10k_vlan_offload_set, + .mac_addr_add = fm10k_macaddr_add, + .mac_addr_remove = fm10k_macaddr_remove, + .rx_queue_start = fm10k_dev_rx_queue_start, + .rx_queue_stop = fm10k_dev_rx_queue_stop, + .tx_queue_start = fm10k_dev_tx_queue_start, + .tx_queue_stop = fm10k_dev_tx_queue_stop, + .rx_queue_setup = fm10k_rx_queue_setup, + .rx_queue_release = fm10k_rx_queue_release, + .tx_queue_setup = fm10k_tx_queue_setup, + .tx_queue_release = fm10k_tx_queue_release, + .rx_descriptor_done = fm10k_dev_rx_descriptor_done, + .rx_queue_intr_enable = fm10k_dev_rx_queue_intr_enable, + .rx_queue_intr_disable = fm10k_dev_rx_queue_intr_disable, + .reta_update = fm10k_reta_update, + .reta_query = fm10k_reta_query, + .rss_hash_update = fm10k_rss_hash_update, + .rss_hash_conf_get = fm10k_rss_hash_conf_get, +}; + +static int ftag_check_handler(__rte_unused const char *key, + const char *value, __rte_unused void *opaque) +{ + if (strcmp(value, "1")) + return -1; + + return 0; +} + +static int +fm10k_check_ftag(struct rte_devargs *devargs) +{ + struct rte_kvargs *kvlist; + const char *ftag_key = "enable_ftag"; + + if (devargs == 
NULL) + return 0; + + kvlist = rte_kvargs_parse(devargs->args, NULL); + if (kvlist == NULL) + return 0; + + if (!rte_kvargs_count(kvlist, ftag_key)) { + rte_kvargs_free(kvlist); + return 0; + } + /* FTAG is enabled when there's key-value pair: enable_ftag=1 */ + if (rte_kvargs_process(kvlist, ftag_key, + ftag_check_handler, NULL) < 0) { + rte_kvargs_free(kvlist); + return 0; + } + rte_kvargs_free(kvlist); + + return 1; +} + +static void __attribute__((cold)) +fm10k_set_tx_function(struct rte_eth_dev *dev) +{ + struct fm10k_tx_queue *txq; + int i; + int use_sse = 1; + uint16_t tx_ftag_en = 0; + + if (fm10k_check_ftag(dev->pci_dev->devargs)) + tx_ftag_en = 1; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + txq->tx_ftag_en = tx_ftag_en; + /* Check if Vector Tx is satisfied */ + if (fm10k_tx_vec_condition_check(txq)) { + use_sse = 0; + break; + } + } + + if (use_sse) { + PMD_INIT_LOG(DEBUG, "Use vector Tx func"); + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + fm10k_txq_vec_setup(txq); + } + dev->tx_pkt_burst = fm10k_xmit_pkts_vec; + } else { + dev->tx_pkt_burst = fm10k_xmit_pkts; + PMD_INIT_LOG(DEBUG, "Use regular Tx func"); + } +} + +static void __attribute__((cold)) +fm10k_set_rx_function(struct rte_eth_dev *dev) +{ + struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev); + uint16_t i, rx_using_sse; + uint16_t rx_ftag_en = 0; + + if (fm10k_check_ftag(dev->pci_dev->devargs)) + rx_ftag_en = 1; + + /* In order to allow Vector Rx there are a few configuration + * conditions to be met. + */ + if (!fm10k_rx_vec_condition_check(dev) && + dev_info->rx_vec_allowed && !rx_ftag_en) { + if (dev->data->scattered_rx) + dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec; + else + dev->rx_pkt_burst = fm10k_recv_pkts_vec; + } else if (dev->data->scattered_rx) + dev->rx_pkt_burst = fm10k_recv_scattered_pkts; + else + dev->rx_pkt_burst = fm10k_recv_pkts; + + rx_using_sse = + (dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec || + dev->rx_pkt_burst == fm10k_recv_pkts_vec); + + if (rx_using_sse) + PMD_INIT_LOG(DEBUG, "Use vector Rx func"); + else + PMD_INIT_LOG(DEBUG, "Use regular Rx func"); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct fm10k_rx_queue *rxq = dev->data->rx_queues[i]; + + rxq->rx_using_sse = rx_using_sse; + rxq->rx_ftag_en = rx_ftag_en; + } +} + +static void +fm10k_params_init(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct fm10k_dev_info *info = FM10K_DEV_PRIVATE_TO_INFO(dev); + + /* Inialize bus info. Normally we would call fm10k_get_bus_info(), but + * there is no way to get link status without reading BAR4. Until this + * works, assume we have maximum bandwidth. 
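fm10k_check_ftag() above is the standard rte_kvargs pattern for reading a device argument string. For reference, a minimal sketch of the same pattern for a hypothetical integer-valued key; "example_key" and the helper names are invented for illustration, while the rte_kvargs_* calls are the same library API used above.

#include <stdlib.h>
#include <rte_common.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>

/* Sketch only: reads a hypothetical "example_key=<n>" device argument.
 * The key and helper names are invented; the rte_kvargs_* calls mirror
 * fm10k_check_ftag() above.
 */
static int
example_key_handler(__rte_unused const char *key, const char *value,
	void *opaque)
{
	*(int *)opaque = atoi(value);
	return 0;
}

static int
parse_example_key(struct rte_devargs *devargs, int *out)
{
	struct rte_kvargs *kvlist;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;

	if (rte_kvargs_count(kvlist, "example_key"))
		rte_kvargs_process(kvlist, "example_key",
			example_key_handler, out);

	rte_kvargs_free(kvlist);
	return 0;
}

The driver's own key, enable_ftag, is stricter: ftag_check_handler() only accepts the literal value "1".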
+ * @todo - fix bus info + */ + hw->bus_caps.speed = fm10k_bus_speed_8000; + hw->bus_caps.width = fm10k_bus_width_pcie_x8; + hw->bus_caps.payload = fm10k_bus_payload_512; + hw->bus.speed = fm10k_bus_speed_8000; + hw->bus.width = fm10k_bus_width_pcie_x8; + hw->bus.payload = fm10k_bus_payload_256; + + info->rx_vec_allowed = true; +} + +static int +eth_fm10k_dev_init(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int diag, i; + struct fm10k_macvlan_filter_info *macvlan; + + PMD_INIT_FUNC_TRACE(); + + dev->dev_ops = &fm10k_eth_dev_ops; + dev->rx_pkt_burst = &fm10k_recv_pkts; + dev->tx_pkt_burst = &fm10k_xmit_pkts; + + /* only initialize in the primary process */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + rte_eth_copy_pci_info(dev, dev->pci_dev); + + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + memset(macvlan, 0, sizeof(*macvlan)); + /* Vendor and Device ID need to be set before init of shared code */ + memset(hw, 0, sizeof(*hw)); + hw->device_id = dev->pci_dev->id.device_id; + hw->vendor_id = dev->pci_dev->id.vendor_id; + hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id; + hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id; + hw->revision_id = 0; + hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr; + if (hw->hw_addr == NULL) { + PMD_INIT_LOG(ERR, "Bad mem resource." + " Try to blacklist unused devices."); + return -EIO; + } + + /* Store fm10k_adapter pointer */ + hw->back = dev->data->dev_private; + + /* Initialize the shared code */ + diag = fm10k_init_shared_code(hw); + if (diag != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag); + return -EIO; + } + + /* Initialize parameters */ + fm10k_params_init(dev); + + /* Initialize the hw */ + diag = fm10k_init_hw(hw); + if (diag != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag); + return -EIO; + } + + /* Initialize MAC address(es) */ + dev->data->mac_addrs = rte_zmalloc("fm10k", + ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0); + if (dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses"); + return -ENOMEM; + } + + diag = fm10k_read_mac_addr(hw); + + ether_addr_copy((const struct ether_addr *)hw->mac.addr, + &dev->data->mac_addrs[0]); + + if (diag != FM10K_SUCCESS || + !is_valid_assigned_ether_addr(dev->data->mac_addrs)) { + + /* Generate a random addr */ + eth_random_addr(hw->mac.addr); + memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN); + ether_addr_copy((const struct ether_addr *)hw->mac.addr, + &dev->data->mac_addrs[0]); + } + + /* Reset the hw statistics */ + fm10k_stats_reset(dev); + + /* Reset the hw */ + diag = fm10k_reset_hw(hw); + if (diag != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag); + return -EIO; + } + + /* Setup mailbox service */ + diag = fm10k_setup_mbx_service(hw); + if (diag != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag); + return -EIO; + } + + /*PF/VF has different interrupt handling mechanism */ + if (hw->mac.type == fm10k_mac_pf) { + /* register callback func to eal lib */ + rte_intr_callback_register(&(dev->pci_dev->intr_handle), + fm10k_dev_interrupt_handler_pf, (void *)dev); + + /* enable MISC interrupt */ + fm10k_dev_enable_intr_pf(dev); + } else { /* VF */ + rte_intr_callback_register(&(dev->pci_dev->intr_handle), + fm10k_dev_interrupt_handler_vf, (void *)dev); + + fm10k_dev_enable_intr_vf(dev); + } + + /* Enable intr after callback registered */ + 
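eth_fm10k_dev_init() above keeps the address read from hardware only when it is a valid assigned unicast MAC and otherwise falls back to a random one. A stripped-down sketch of that fallback using only rte_ether.h helpers; the function name is illustrative.

#include <rte_ether.h>

/* Sketch of the MAC fallback in eth_fm10k_dev_init() above: keep the
 * hardware-provided address when it is a valid assigned unicast MAC,
 * otherwise generate a random one.
 */
static void
pick_usable_mac(struct ether_addr *mac)
{
	if (!is_valid_assigned_ether_addr(mac))
		eth_random_addr(mac->addr_bytes);
}

In the driver the regenerated address is also copied back into hw->mac.perm_addr and dev->data->mac_addrs[0], so later MAC filter programming sees the same value.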
rte_intr_enable(&(dev->pci_dev->intr_handle)); + + hw->mac.ops.update_int_moderator(hw); + + /* Make sure Switch Manager is ready before going forward. */ + if (hw->mac.type == fm10k_mac_pf) { + int switch_ready = 0; + + for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) { + fm10k_mbx_lock(hw); + hw->mac.ops.get_host_state(hw, &switch_ready); + fm10k_mbx_unlock(hw); + if (switch_ready) + break; + /* Delay some time to acquire async LPORT_MAP info. */ + rte_delay_us(WAIT_SWITCH_MSG_US); + } + + if (switch_ready == 0) { + PMD_INIT_LOG(ERR, "switch is not ready"); + return -1; + } + } + + /* + * Below function will trigger operations on mailbox, acquire lock to + * avoid race condition from interrupt handler. Operations on mailbox + * FIFO will trigger interrupt to PF/SM, in which interrupt handler + * will handle and generate an interrupt to our side. Then, FIFO in + * mailbox will be touched. + */ + fm10k_mbx_lock(hw); + /* Enable port first */ + hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, + MAX_LPORT_NUM, 1); + + /* Set unicast mode by default. App can change to other mode in other + * API func. + */ + hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map, + FM10K_XCAST_MODE_NONE); + + fm10k_mbx_unlock(hw); + + /* Make sure default VID is ready before going forward. */ + if (hw->mac.type == fm10k_mac_pf) { + for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) { + if (hw->mac.default_vid) + break; + /* Delay some time to acquire async port VLAN info. */ + rte_delay_us(WAIT_SWITCH_MSG_US); + } + + if (!hw->mac.default_vid) { + PMD_INIT_LOG(ERR, "default VID is not ready"); + return -1; + } + } + + /* Add default mac address */ + fm10k_MAC_filter_set(dev, hw->mac.addr, true, + MAIN_VSI_POOL_NUMBER); + + return 0; +} + +static int +eth_fm10k_dev_uninit(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + /* only uninitialize in the primary process */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + /* safe to close dev here */ + fm10k_dev_close(dev); + + dev->dev_ops = NULL; + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + + /* disable uio/vfio intr */ + rte_intr_disable(&(dev->pci_dev->intr_handle)); + + /*PF/VF has different interrupt handling mechanism */ + if (hw->mac.type == fm10k_mac_pf) { + /* disable interrupt */ + fm10k_dev_disable_intr_pf(dev); + + /* unregister callback func to eal lib */ + rte_intr_callback_unregister(&(dev->pci_dev->intr_handle), + fm10k_dev_interrupt_handler_pf, (void *)dev); + } else { + /* disable interrupt */ + fm10k_dev_disable_intr_vf(dev); + + rte_intr_callback_unregister(&(dev->pci_dev->intr_handle), + fm10k_dev_interrupt_handler_vf, (void *)dev); + } + + /* free mac memory */ + if (dev->data->mac_addrs) { + rte_free(dev->data->mac_addrs); + dev->data->mac_addrs = NULL; + } + + memset(hw, 0, sizeof(*hw)); + + return 0; +} + +/* + * The set of PCI devices this driver supports. This driver will enable both PF + * and SRIOV-VF devices. 
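Both the switch-ready and the default-VID checks above poll a mailbox-updated field a bounded number of times instead of blocking. The retry idiom, factored out as a sketch; the names, the generic callback and the return convention are illustrative, and rte_delay_us() is the only DPDK call used.

#include <stdbool.h>
#include <rte_cycles.h>

/* Sketch of the bounded-poll idiom used above: retry a predicate a fixed
 * number of times with a short delay rather than waiting indefinitely on
 * the mailbox.
 */
static int
poll_until(bool (*ready)(void *arg), void *arg,
	unsigned int max_tries, unsigned int delay_us)
{
	unsigned int i;

	for (i = 0; i < max_tries; i++) {
		if (ready(arg))
			return 0;
		rte_delay_us(delay_us);
	}
	return -1; /* timed out */
}

MAX_QUERY_SWITCH_STATE_TIMES and WAIT_SWITCH_MSG_US above play the roles of max_tries and delay_us.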
+ */ +static const struct rte_pci_id pci_id_fm10k_map[] = { +#define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) }, +#define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) }, +#include "rte_pci_dev_ids.h" + { .vendor_id = 0, /* sentinel */ }, +}; + +static struct eth_driver rte_pmd_fm10k = { + .pci_drv = { + .name = "rte_pmd_fm10k", + .id_table = pci_id_fm10k_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | + RTE_PCI_DRV_DETACHABLE, + }, + .eth_dev_init = eth_fm10k_dev_init, + .eth_dev_uninit = eth_fm10k_dev_uninit, + .dev_private_size = sizeof(struct fm10k_adapter), +}; + +/* + * Driver initialization routine. + * Invoked once at EAL init time. + * Register itself as the [Poll Mode] Driver of PCI FM10K devices. + */ +static int +rte_pmd_fm10k_init(__rte_unused const char *name, + __rte_unused const char *params) +{ + PMD_INIT_FUNC_TRACE(); + rte_eth_driver_register(&rte_pmd_fm10k); + return 0; +} + +static struct rte_driver rte_fm10k_driver = { + .type = PMD_PDEV, + .init = rte_pmd_fm10k_init, +}; + +PMD_REGISTER_DRIVER(rte_fm10k_driver); diff --git a/drivers/net/fm10k/fm10k_logs.h b/drivers/net/fm10k/fm10k_logs.h new file mode 100644 index 00000000..31384c9e --- /dev/null +++ b/drivers/net/fm10k/fm10k_logs.h @@ -0,0 +1,79 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _FM10K_LOGS_H_ +#define _FM10K_LOGS_H_ + +#include <rte_log.h> + +#define PMD_INIT_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args) + +#ifdef RTE_LIBRTE_FM10K_DEBUG_INIT +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") +#else +#define PMD_INIT_FUNC_TRACE() do { } while (0) +#endif + +#ifdef RTE_LIBRTE_FM10K_DEBUG_RX +#define PMD_RX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) 
do { } while (0) +#endif + +#ifdef RTE_LIBRTE_FM10K_DEBUG_TX +#define PMD_TX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_FM10K_DEBUG_TX_FREE +#define PMD_TX_FREE_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_FM10K_DEBUG_DRIVER +#define PMD_DRV_LOG_RAW(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args) +#else +#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0) +#endif + +#define PMD_DRV_LOG(level, fmt, args...) \ + PMD_DRV_LOG_RAW(level, fmt "\n", ## args) + +#endif /* _FM10K_LOGS_H_ */ diff --git a/drivers/net/fm10k/fm10k_rxtx.c b/drivers/net/fm10k/fm10k_rxtx.c new file mode 100644 index 00000000..81ed4e79 --- /dev/null +++ b/drivers/net/fm10k/fm10k_rxtx.c @@ -0,0 +1,601 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
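The PMD_*_LOG macros in fm10k_logs.h compile to no-ops unless the corresponding RTE_LIBRTE_FM10K_DEBUG_* symbol is defined at build time (normally via the matching CONFIG_ line in the DPDK build configuration). Once compiled in, they still go through rte_log, so the application controls visibility at run time; a sketch using the log API of this DPDK generation (later releases renamed these calls).

#include <rte_log.h>

/* Sketch: raise the runtime log threshold so that PMD debug messages,
 * once compiled in, actually reach the log stream.
 */
static void
enable_pmd_debug_logs(void)
{
	rte_set_log_level(RTE_LOG_DEBUG);     /* global level threshold */
	rte_set_log_type(RTE_LOGTYPE_PMD, 1); /* keep the PMD log type enabled */
}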
+ */ + +#include <inttypes.h> + +#include <rte_ethdev.h> +#include <rte_common.h> +#include "fm10k.h" +#include "base/fm10k_type.h" + +#ifdef RTE_PMD_PACKET_PREFETCH +#define rte_packet_prefetch(p) rte_prefetch1(p) +#else +#define rte_packet_prefetch(p) do {} while (0) +#endif + +#ifdef RTE_LIBRTE_FM10K_DEBUG_RX +static inline void dump_rxd(union fm10k_rx_desc *rxd) +{ + PMD_RX_LOG(DEBUG, "+----------------|----------------+"); + PMD_RX_LOG(DEBUG, "| GLORT | PKT HDR & TYPE |"); + PMD_RX_LOG(DEBUG, "| 0x%08x | 0x%08x |", rxd->d.glort, + rxd->d.data); + PMD_RX_LOG(DEBUG, "+----------------|----------------+"); + PMD_RX_LOG(DEBUG, "| VLAN & LEN | STATUS |"); + PMD_RX_LOG(DEBUG, "| 0x%08x | 0x%08x |", rxd->d.vlan_len, + rxd->d.staterr); + PMD_RX_LOG(DEBUG, "+----------------|----------------+"); + PMD_RX_LOG(DEBUG, "| RESERVED | RSS_HASH |"); + PMD_RX_LOG(DEBUG, "| 0x%08x | 0x%08x |", 0, rxd->d.rss); + PMD_RX_LOG(DEBUG, "+----------------|----------------+"); + PMD_RX_LOG(DEBUG, "| TIME TAG |"); + PMD_RX_LOG(DEBUG, "| 0x%016"PRIx64" |", rxd->q.timestamp); + PMD_RX_LOG(DEBUG, "+----------------|----------------+"); +} +#endif + +/* @note: When this function is changed, make corresponding change to + * fm10k_dev_supported_ptypes_get() + */ +static inline void +rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d) +{ + static const uint32_t + ptype_table[FM10K_RXD_PKTTYPE_MASK >> FM10K_RXD_PKTTYPE_SHIFT] + __rte_cache_aligned = { + [FM10K_PKTTYPE_OTHER] = RTE_PTYPE_L2_ETHER, + [FM10K_PKTTYPE_IPV4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4, + [FM10K_PKTTYPE_IPV4_EX] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT, + [FM10K_PKTTYPE_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6, + [FM10K_PKTTYPE_IPV6_EX] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT, + [FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, + [FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP, + [FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP, + [FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP, + }; + + m->packet_type = ptype_table[(d->w.pkt_info & FM10K_RXD_PKTTYPE_MASK) + >> FM10K_RXD_PKTTYPE_SHIFT]; + + if (d->w.pkt_info & FM10K_RXD_RSSTYPE_MASK) + m->ol_flags |= PKT_RX_RSS_HASH; + + if (unlikely((d->d.staterr & + (FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)) == + (FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE))) + m->ol_flags |= PKT_RX_IP_CKSUM_BAD; + + if (unlikely((d->d.staterr & + (FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)) == + (FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E))) + m->ol_flags |= PKT_RX_L4_CKSUM_BAD; + + if (unlikely(d->d.staterr & FM10K_RXD_STATUS_HBO)) + m->ol_flags |= PKT_RX_HBUF_OVERFLOW; + + if (unlikely(d->d.staterr & FM10K_RXD_STATUS_RXE)) + m->ol_flags |= PKT_RX_RECIP_ERR; +} + +uint16_t +fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct rte_mbuf *mbuf; + union fm10k_rx_desc desc; + struct fm10k_rx_queue *q = rx_queue; + uint16_t count = 0; + int alloc = 0; + uint16_t next_dd; + int ret; + + next_dd = q->next_dd; + + nb_pkts = RTE_MIN(nb_pkts, q->alloc_thresh); + for (count = 0; count < nb_pkts; ++count) { + mbuf = q->sw_ring[next_dd]; + desc = q->hw_ring[next_dd]; + if (!(desc.d.staterr & FM10K_RXD_STATUS_DD)) + break; +#ifdef RTE_LIBRTE_FM10K_DEBUG_RX + dump_rxd(&desc); +#endif + rte_pktmbuf_pkt_len(mbuf) = desc.w.length; + 
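rx_desc_to_ol_flags() above publishes the descriptor's packet type and checksum verdicts through mbuf fields, which is where an application picks them up after rte_eth_rx_burst(). A sketch of that consuming side; the burst size, port/queue ids and the drop policy are illustrative.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Sketch: consume the packet_type and checksum flags the PMD filled in. */
static void
rx_poll_once(uint8_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t i, nb;

	nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
	for (i = 0; i < nb; i++) {
		struct rte_mbuf *m = pkts[i];

		if (m->ol_flags & (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD)) {
			rte_pktmbuf_free(m); /* checksum reported bad: drop */
			continue;
		}
		if ((m->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) {
			/* IPv4-specific processing would go here */
		}
		rte_pktmbuf_free(m);
	}
}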
rte_pktmbuf_data_len(mbuf) = desc.w.length; + + mbuf->ol_flags = 0; +#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE + rx_desc_to_ol_flags(mbuf, &desc); +#endif + + mbuf->hash.rss = desc.d.rss; + /** + * Packets in fm10k device always carry at least one VLAN tag. + * For those packets coming in without VLAN tag, + * the port default VLAN tag will be used. + * So, always PKT_RX_VLAN_PKT flag is set and vlan_tci + * is valid for each RX packet's mbuf. + */ + mbuf->ol_flags |= PKT_RX_VLAN_PKT; + mbuf->vlan_tci = desc.w.vlan; + /** + * mbuf->vlan_tci_outer is an idle field in fm10k driver, + * so it can be selected to store sglort value. + */ + if (q->rx_ftag_en) + mbuf->vlan_tci_outer = rte_le_to_cpu_16(desc.w.sglort); + + rx_pkts[count] = mbuf; + if (++next_dd == q->nb_desc) { + next_dd = 0; + alloc = 1; + } + + /* Prefetch next mbuf while processing current one. */ + rte_prefetch0(q->sw_ring[next_dd]); + + /* + * When next RX descriptor is on a cache-line boundary, + * prefetch the next 4 RX descriptors and the next 8 pointers + * to mbufs. + */ + if ((next_dd & 0x3) == 0) { + rte_prefetch0(&q->hw_ring[next_dd]); + rte_prefetch0(&q->sw_ring[next_dd]); + } + } + + q->next_dd = next_dd; + + if ((q->next_dd > q->next_trigger) || (alloc == 1)) { + ret = rte_mempool_get_bulk(q->mp, + (void **)&q->sw_ring[q->next_alloc], + q->alloc_thresh); + + if (unlikely(ret != 0)) { + uint8_t port = q->port_id; + PMD_RX_LOG(ERR, "Failed to alloc mbuf"); + /* + * Need to restore next_dd if we cannot allocate new + * buffers to replenish the old ones. + */ + q->next_dd = (q->next_dd + q->nb_desc - count) % + q->nb_desc; + rte_eth_devices[port].data->rx_mbuf_alloc_failed++; + return 0; + } + + for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) { + mbuf = q->sw_ring[q->next_alloc]; + + /* setup static mbuf fields */ + fm10k_pktmbuf_reset(mbuf, q->port_id); + + /* write descriptor */ + desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf); + desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf); + q->hw_ring[q->next_alloc] = desc; + } + FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger); + q->next_trigger += q->alloc_thresh; + if (q->next_trigger >= q->nb_desc) { + q->next_trigger = q->alloc_thresh - 1; + q->next_alloc = 0; + } + } + + return count; +} + +uint16_t +fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct rte_mbuf *mbuf; + union fm10k_rx_desc desc; + struct fm10k_rx_queue *q = rx_queue; + uint16_t count = 0; + uint16_t nb_rcv, nb_seg; + int alloc = 0; + uint16_t next_dd; + struct rte_mbuf *first_seg = q->pkt_first_seg; + struct rte_mbuf *last_seg = q->pkt_last_seg; + int ret; + + next_dd = q->next_dd; + nb_rcv = 0; + + nb_seg = RTE_MIN(nb_pkts, q->alloc_thresh); + for (count = 0; count < nb_seg; count++) { + mbuf = q->sw_ring[next_dd]; + desc = q->hw_ring[next_dd]; + if (!(desc.d.staterr & FM10K_RXD_STATUS_DD)) + break; +#ifdef RTE_LIBRTE_FM10K_DEBUG_RX + dump_rxd(&desc); +#endif + + if (++next_dd == q->nb_desc) { + next_dd = 0; + alloc = 1; + } + + /* Prefetch next mbuf while processing current one. */ + rte_prefetch0(q->sw_ring[next_dd]); + + /* + * When next RX descriptor is on a cache-line boundary, + * prefetch the next 4 RX descriptors and the next 8 pointers + * to mbufs. 
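The scattered receive path implemented below is selected when the device ends up in multi-segment RX mode, typically because the application allows scatter or configures a maximum frame size that does not fit a single mbuf data buffer. One illustrative rte_eth_conf fragment for this DPDK generation; the length is an example value.

#include <rte_ethdev.h>

/* Illustrative port configuration that leads to the scattered RX path:
 * jumbo frames larger than one mbuf data buffer, with multi-segment
 * receive explicitly allowed.
 */
static const struct rte_eth_conf scatter_port_conf = {
	.rxmode = {
		.jumbo_frame    = 1,    /* accept frames above the default max */
		.enable_scatter = 1,    /* let one packet span several mbufs  */
		.max_rx_pkt_len = 9000,
	},
};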
+ */ + if ((next_dd & 0x3) == 0) { + rte_prefetch0(&q->hw_ring[next_dd]); + rte_prefetch0(&q->sw_ring[next_dd]); + } + + /* Fill data length */ + rte_pktmbuf_data_len(mbuf) = desc.w.length; + + /* + * If this is the first buffer of the received packet, + * set the pointer to the first mbuf of the packet and + * initialize its context. + * Otherwise, update the total length and the number of segments + * of the current scattered packet, and update the pointer to + * the last mbuf of the current packet. + */ + if (!first_seg) { + first_seg = mbuf; + first_seg->pkt_len = desc.w.length; + } else { + first_seg->pkt_len = + (uint16_t)(first_seg->pkt_len + + rte_pktmbuf_data_len(mbuf)); + first_seg->nb_segs++; + last_seg->next = mbuf; + } + + /* + * If this is not the last buffer of the received packet, + * update the pointer to the last mbuf of the current scattered + * packet and continue to parse the RX ring. + */ + if (!(desc.d.staterr & FM10K_RXD_STATUS_EOP)) { + last_seg = mbuf; + continue; + } + + first_seg->ol_flags = 0; +#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE + rx_desc_to_ol_flags(first_seg, &desc); +#endif + first_seg->hash.rss = desc.d.rss; + /** + * Packets in fm10k device always carry at least one VLAN tag. + * For those packets coming in without VLAN tag, + * the port default VLAN tag will be used. + * So, always PKT_RX_VLAN_PKT flag is set and vlan_tci + * is valid for each RX packet's mbuf. + */ + first_seg->ol_flags |= PKT_RX_VLAN_PKT; + first_seg->vlan_tci = desc.w.vlan; + /** + * mbuf->vlan_tci_outer is an idle field in fm10k driver, + * so it can be selected to store sglort value. + */ + if (q->rx_ftag_en) + first_seg->vlan_tci_outer = + rte_le_to_cpu_16(desc.w.sglort); + + /* Prefetch data of first segment, if configured to do so. */ + rte_packet_prefetch((char *)first_seg->buf_addr + + first_seg->data_off); + + /* + * Store the mbuf address into the next entry of the array + * of returned packets. + */ + rx_pkts[nb_rcv++] = first_seg; + + /* + * Setup receipt context for a new packet. + */ + first_seg = NULL; + } + + q->next_dd = next_dd; + + if ((q->next_dd > q->next_trigger) || (alloc == 1)) { + ret = rte_mempool_get_bulk(q->mp, + (void **)&q->sw_ring[q->next_alloc], + q->alloc_thresh); + + if (unlikely(ret != 0)) { + uint8_t port = q->port_id; + PMD_RX_LOG(ERR, "Failed to alloc mbuf"); + /* + * Need to restore next_dd if we cannot allocate new + * buffers to replenish the old ones. 
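The replenish step above relies on rte_mempool_get_bulk() being all-or-nothing: on failure no mbufs are taken, so the queue can roll back next_dd and simply count the allocation failure without leaking buffers. Reduced to its core, the pattern looks like the sketch below; the names are illustrative.

#include <rte_mempool.h>

/* Sketch of the all-or-nothing refill used above: either every requested
 * mbuf is obtained or none is, so the caller's ring state stays untouched
 * on failure.
 */
static int
refill_slots(struct rte_mempool *mp, void **slots, unsigned int n)
{
	if (rte_mempool_get_bulk(mp, slots, n) != 0)
		return -1; /* nothing was taken; caller keeps its old state */

	/* caller now rewrites the corresponding RX descriptors */
	return 0;
}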
+ */ + q->next_dd = (q->next_dd + q->nb_desc - count) % + q->nb_desc; + rte_eth_devices[port].data->rx_mbuf_alloc_failed++; + return 0; + } + + for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) { + mbuf = q->sw_ring[q->next_alloc]; + + /* setup static mbuf fields */ + fm10k_pktmbuf_reset(mbuf, q->port_id); + + /* write descriptor */ + desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf); + desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf); + q->hw_ring[q->next_alloc] = desc; + } + FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger); + q->next_trigger += q->alloc_thresh; + if (q->next_trigger >= q->nb_desc) { + q->next_trigger = q->alloc_thresh - 1; + q->next_alloc = 0; + } + } + + q->pkt_first_seg = first_seg; + q->pkt_last_seg = last_seg; + + return nb_rcv; +} + +int +fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset) +{ + volatile union fm10k_rx_desc *rxdp; + struct fm10k_rx_queue *rxq = rx_queue; + uint16_t desc; + int ret; + + if (unlikely(offset >= rxq->nb_desc)) { + PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset); + return 0; + } + + desc = rxq->next_dd + offset; + if (desc >= rxq->nb_desc) + desc -= rxq->nb_desc; + + rxdp = &rxq->hw_ring[desc]; + + ret = !!(rxdp->w.status & + rte_cpu_to_le_16(FM10K_RXD_STATUS_DD)); + + return ret; +} + +/* + * Free multiple TX mbuf at a time if they are in the same pool + * + * @txep: software desc ring index that starts to free + * @num: number of descs to free + * + */ +static inline void tx_free_bulk_mbuf(struct rte_mbuf **txep, int num) +{ + struct rte_mbuf *m, *free[RTE_FM10K_TX_MAX_FREE_BUF_SZ]; + int i; + int nb_free = 0; + + if (unlikely(num == 0)) + return; + + m = __rte_pktmbuf_prefree_seg(txep[0]); + if (likely(m != NULL)) { + free[0] = m; + nb_free = 1; + for (i = 1; i < num; i++) { + m = __rte_pktmbuf_prefree_seg(txep[i]); + if (likely(m != NULL)) { + if (likely(m->pool == free[0]->pool)) + free[nb_free++] = m; + else { + rte_mempool_put_bulk(free[0]->pool, + (void *)free, nb_free); + free[0] = m; + nb_free = 1; + } + } + txep[i] = NULL; + } + rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free); + } else { + for (i = 1; i < num; i++) { + m = __rte_pktmbuf_prefree_seg(txep[i]); + if (m != NULL) + rte_mempool_put(m->pool, m); + txep[i] = NULL; + } + } +} + +static inline void tx_free_descriptors(struct fm10k_tx_queue *q) +{ + uint16_t next_rs, count = 0; + + next_rs = fifo_peek(&q->rs_tracker); + if (!(q->hw_ring[next_rs].flags & FM10K_TXD_FLAG_DONE)) + return; + + /* the DONE flag is set on this descriptor so remove the ID + * from the RS bit tracker and free the buffers */ + fifo_remove(&q->rs_tracker); + + /* wrap around? 
if so, free buffers from last_free up to but NOT + * including nb_desc */ + if (q->last_free > next_rs) { + count = q->nb_desc - q->last_free; + tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count); + q->last_free = 0; + } + + /* adjust free descriptor count before the next loop */ + q->nb_free += count + (next_rs + 1 - q->last_free); + + /* free buffers from last_free, up to and including next_rs */ + if (q->last_free <= next_rs) { + count = next_rs - q->last_free + 1; + tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count); + q->last_free += count; + } + + if (q->last_free == q->nb_desc) + q->last_free = 0; +} + +static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb) +{ + uint16_t last_id; + uint8_t flags, hdrlen; + + /* always set the LAST flag on the last descriptor used to + * transmit the packet */ + flags = FM10K_TXD_FLAG_LAST; + last_id = q->next_free + mb->nb_segs - 1; + if (last_id >= q->nb_desc) + last_id = last_id - q->nb_desc; + + /* but only set the RS flag on the last descriptor if rs_thresh + * descriptors will be used since the RS flag was last set */ + if ((q->nb_used + mb->nb_segs) >= q->rs_thresh) { + flags |= FM10K_TXD_FLAG_RS; + fifo_insert(&q->rs_tracker, last_id); + q->nb_used = 0; + } else { + q->nb_used = q->nb_used + mb->nb_segs; + } + + q->nb_free -= mb->nb_segs; + + q->hw_ring[q->next_free].flags = 0; + if (q->tx_ftag_en) + q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_FTAG; + /* set checksum flags on first descriptor of packet. SCTP checksum + * offload is not supported, but we do not explicitly check for this + * case in favor of greatly simplified processing. */ + if (mb->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) + q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_CSUM; + + /* set vlan if requested */ + if (mb->ol_flags & PKT_TX_VLAN_PKT) + q->hw_ring[q->next_free].vlan = mb->vlan_tci; + + q->sw_ring[q->next_free] = mb; + q->hw_ring[q->next_free].buffer_addr = + rte_cpu_to_le_64(MBUF_DMA_ADDR(mb)); + q->hw_ring[q->next_free].buflen = + rte_cpu_to_le_16(rte_pktmbuf_data_len(mb)); + + if (mb->ol_flags & PKT_TX_TCP_SEG) { + hdrlen = mb->outer_l2_len + mb->outer_l3_len + mb->l2_len + + mb->l3_len + mb->l4_len; + if (q->hw_ring[q->next_free].flags & FM10K_TXD_FLAG_FTAG) + hdrlen += sizeof(struct fm10k_ftag); + + if (likely((hdrlen >= FM10K_TSO_MIN_HEADERLEN) && + (hdrlen <= FM10K_TSO_MAX_HEADERLEN) && + (mb->tso_segsz >= FM10K_TSO_MINMSS))) { + q->hw_ring[q->next_free].mss = mb->tso_segsz; + q->hw_ring[q->next_free].hdrlen = hdrlen; + } + } + + if (++q->next_free == q->nb_desc) + q->next_free = 0; + + /* fill up the rings */ + for (mb = mb->next; mb != NULL; mb = mb->next) { + q->sw_ring[q->next_free] = mb; + q->hw_ring[q->next_free].buffer_addr = + rte_cpu_to_le_64(MBUF_DMA_ADDR(mb)); + q->hw_ring[q->next_free].buflen = + rte_cpu_to_le_16(rte_pktmbuf_data_len(mb)); + q->hw_ring[q->next_free].flags = 0; + if (++q->next_free == q->nb_desc) + q->next_free = 0; + } + + q->hw_ring[last_id].flags |= flags; +} + +uint16_t +fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct fm10k_tx_queue *q = tx_queue; + struct rte_mbuf *mb; + uint16_t count; + + for (count = 0; count < nb_pkts; ++count) { + mb = tx_pkts[count]; + + /* running low on descriptors? try to free some... 
*/ + if (q->nb_free < q->free_thresh) + tx_free_descriptors(q); + + /* make sure there are enough free descriptors to transmit the + * entire packet before doing anything */ + if (q->nb_free < mb->nb_segs) + break; + + /* sanity check to make sure the mbuf is valid */ + if ((mb->nb_segs == 0) || + ((mb->nb_segs > 1) && (mb->next == NULL))) + break; + + /* process the packet */ + tx_xmit_pkt(q, mb); + } + + /* update the tail pointer if any packets were processed */ + if (likely(count > 0)) + FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_free); + + return count; +} diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c new file mode 100644 index 00000000..f8efe8f5 --- /dev/null +++ b/drivers/net/fm10k/fm10k_rxtx_vec.c @@ -0,0 +1,875 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <inttypes.h> + +#include <rte_ethdev.h> +#include <rte_common.h> +#include "fm10k.h" +#include "base/fm10k_type.h" + +#include <tmmintrin.h> + +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +static void +fm10k_reset_tx_queue(struct fm10k_tx_queue *txq); + +/* Handling the offload flags (olflags) field takes computation + * time when receiving packets. Therefore we provide a flag to disable + * the processing of the olflags field when they are not needed. 
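tx_xmit_pkt() above turns per-mbuf metadata into descriptor flags: checksum offload, VLAN insertion and, when the header lengths and MSS are plausible, TSO. A sketch of the fields an application would set before handing such a packet to rte_eth_tx_burst(); the values assume an untagged IPv4/TCP frame and the helper name is invented.

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_mbuf.h>

/* Sketch: mbuf metadata that makes the scalar TX path above request
 * checksum insertion, VLAN tagging and (optionally) TSO.
 */
static void
prepare_tx_offloads(struct rte_mbuf *m, uint16_t vlan_id, uint16_t mss)
{
	m->l2_len = sizeof(struct ether_hdr);
	m->l3_len = sizeof(struct ipv4_hdr);
	m->l4_len = sizeof(struct tcp_hdr);

	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;

	m->ol_flags |= PKT_TX_VLAN_PKT; /* hardware inserts the tag */
	m->vlan_tci = vlan_id;

	if (mss != 0) {                 /* ask hardware to segment */
		m->ol_flags |= PKT_TX_TCP_SEG;
		m->tso_segsz = mss;
	}
}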
This + * gives improved performance, at the cost of losing the offload info + * in the received packet + */ +#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE + +/* Vlan present flag shift */ +#define VP_SHIFT (2) +/* L3 type shift */ +#define L3TYPE_SHIFT (4) +/* L4 type shift */ +#define L4TYPE_SHIFT (7) +/* HBO flag shift */ +#define HBOFLAG_SHIFT (10) +/* RXE flag shift */ +#define RXEFLAG_SHIFT (13) +/* IPE/L4E flag shift */ +#define L3L4EFLAG_SHIFT (14) + +static inline void +fm10k_desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) +{ + __m128i ptype0, ptype1, vtag0, vtag1, eflag0, eflag1, cksumflag; + union { + uint16_t e[4]; + uint64_t dword; + } vol; + + const __m128i pkttype_msk = _mm_set_epi16( + 0x0000, 0x0000, 0x0000, 0x0000, + PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT, + PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT); + + /* mask everything except rss type */ + const __m128i rsstype_msk = _mm_set_epi16( + 0x0000, 0x0000, 0x0000, 0x0000, + 0x000F, 0x000F, 0x000F, 0x000F); + + /* mask for HBO and RXE flag flags */ + const __m128i rxe_msk = _mm_set_epi16( + 0x0000, 0x0000, 0x0000, 0x0000, + 0x0001, 0x0001, 0x0001, 0x0001); + + const __m128i l3l4cksum_flag = _mm_set_epi8(0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD, + PKT_RX_IP_CKSUM_BAD, PKT_RX_L4_CKSUM_BAD, 0); + + const __m128i rxe_flag = _mm_set_epi8(0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, PKT_RX_RECIP_ERR, 0); + + /* map rss type to rss hash flag */ + const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0, + 0, 0, 0, PKT_RX_RSS_HASH, + PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0, + PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0); + + /* Calculate RSS_hash and Vlan fields */ + ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]); + ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]); + vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]); + vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]); + + ptype0 = _mm_unpacklo_epi32(ptype0, ptype1); + ptype0 = _mm_and_si128(ptype0, rsstype_msk); + ptype0 = _mm_shuffle_epi8(rss_flags, ptype0); + + vtag1 = _mm_unpacklo_epi32(vtag0, vtag1); + eflag0 = vtag1; + cksumflag = vtag1; + vtag1 = _mm_srli_epi16(vtag1, VP_SHIFT); + vtag1 = _mm_and_si128(vtag1, pkttype_msk); + + vtag1 = _mm_or_si128(ptype0, vtag1); + + /* Process err flags, simply set RECIP_ERR bit if HBO/IXE is set */ + eflag1 = _mm_srli_epi16(eflag0, RXEFLAG_SHIFT); + eflag0 = _mm_srli_epi16(eflag0, HBOFLAG_SHIFT); + eflag0 = _mm_or_si128(eflag0, eflag1); + eflag0 = _mm_and_si128(eflag0, rxe_msk); + eflag0 = _mm_shuffle_epi8(rxe_flag, eflag0); + + vtag1 = _mm_or_si128(eflag0, vtag1); + + /* Process L4/L3 checksum error flags */ + cksumflag = _mm_srli_epi16(cksumflag, L3L4EFLAG_SHIFT); + cksumflag = _mm_shuffle_epi8(l3l4cksum_flag, cksumflag); + vtag1 = _mm_or_si128(cksumflag, vtag1); + + vol.dword = _mm_cvtsi128_si64(vtag1); + + rx_pkts[0]->ol_flags = vol.e[0]; + rx_pkts[1]->ol_flags = vol.e[1]; + rx_pkts[2]->ol_flags = vol.e[2]; + rx_pkts[3]->ol_flags = vol.e[3]; +} + +/* @note: When this function is changed, make corresponding change to + * fm10k_dev_supported_ptypes_get(). 
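fm10k_desc_to_olflags_v() above uses _mm_shuffle_epi8() as a 16-entry, byte-wide lookup table: a 4-bit code in each lane selects one table byte, so sixteen translations happen in a single instruction. A standalone demo of just that trick; the table contents are an arbitrary demo mapping, not the driver's RSS table. Build with SSSE3 enabled (e.g. -mssse3).

#include <stdint.h>
#include <stdio.h>
#include <tmmintrin.h> /* SSSE3: _mm_shuffle_epi8 */

/* Demo of the shuffle-as-lookup-table trick: each byte lane of 'codes'
 * holds a 4-bit code that selects one byte of 'table'. The demo mapping
 * is even code -> 0x10, odd code -> 0x01.
 */
int
main(void)
{
	const __m128i table = _mm_set_epi8(
		0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10,
		0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10);
	const __m128i codes = _mm_set_epi8(
		15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
	uint8_t out[16];

	/* out[i] = table[codes[i] & 0x0f], 16 lookups in one instruction */
	_mm_storeu_si128((__m128i *)out, _mm_shuffle_epi8(table, codes));

	printf("code 0 -> 0x%02x, code 1 -> 0x%02x\n", out[0], out[1]);
	return 0;
}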
+ */ +static inline void +fm10k_desc_to_pktype_v(__m128i descs[4], struct rte_mbuf **rx_pkts) +{ + __m128i l3l4type0, l3l4type1, l3type, l4type; + union { + uint16_t e[4]; + uint64_t dword; + } vol; + + /* L3 pkt type mask Bit4 to Bit6 */ + const __m128i l3type_msk = _mm_set_epi16( + 0x0000, 0x0000, 0x0000, 0x0000, + 0x0070, 0x0070, 0x0070, 0x0070); + + /* L4 pkt type mask Bit7 to Bit9 */ + const __m128i l4type_msk = _mm_set_epi16( + 0x0000, 0x0000, 0x0000, 0x0000, + 0x0380, 0x0380, 0x0380, 0x0380); + + /* convert RRC l3 type to mbuf format */ + const __m128i l3type_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L3_IPV6, RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV4, 0); + + /* Convert RRC l4 type to mbuf format l4type_flags shift-left 8 bits + * to fill into8 bits length. + */ + const __m128i l4type_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, + RTE_PTYPE_TUNNEL_GENEVE >> 8, + RTE_PTYPE_TUNNEL_NVGRE >> 8, + RTE_PTYPE_TUNNEL_VXLAN >> 8, + RTE_PTYPE_TUNNEL_GRE >> 8, + RTE_PTYPE_L4_UDP >> 8, + RTE_PTYPE_L4_TCP >> 8, + 0); + + l3l4type0 = _mm_unpacklo_epi16(descs[0], descs[1]); + l3l4type1 = _mm_unpacklo_epi16(descs[2], descs[3]); + l3l4type0 = _mm_unpacklo_epi32(l3l4type0, l3l4type1); + + l3type = _mm_and_si128(l3l4type0, l3type_msk); + l4type = _mm_and_si128(l3l4type0, l4type_msk); + + l3type = _mm_srli_epi16(l3type, L3TYPE_SHIFT); + l4type = _mm_srli_epi16(l4type, L4TYPE_SHIFT); + + l3type = _mm_shuffle_epi8(l3type_flags, l3type); + /* l4type_flags shift-left for 8 bits, need shift-right back */ + l4type = _mm_shuffle_epi8(l4type_flags, l4type); + + l4type = _mm_slli_epi16(l4type, 8); + l3l4type0 = _mm_or_si128(l3type, l4type); + vol.dword = _mm_cvtsi128_si64(l3l4type0); + + rx_pkts[0]->packet_type = vol.e[0]; + rx_pkts[1]->packet_type = vol.e[1]; + rx_pkts[2]->packet_type = vol.e[2]; + rx_pkts[3]->packet_type = vol.e[3]; +} +#else +#define fm10k_desc_to_olflags_v(desc, rx_pkts) do {} while (0) +#define fm10k_desc_to_pktype_v(desc, rx_pkts) do {} while (0) +#endif + +int __attribute__((cold)) +fm10k_rx_vec_condition_check(struct rte_eth_dev *dev) +{ +#ifndef RTE_LIBRTE_IEEE1588 + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf; + +#ifndef RTE_FM10K_RX_OLFLAGS_ENABLE + /* whithout rx ol_flags, no VP flag report */ + if (rxmode->hw_vlan_extend != 0) + return -1; +#endif + + /* no fdir support */ + if (fconf->mode != RTE_FDIR_MODE_NONE) + return -1; + + /* - no csum error report support + * - no header split support + */ + if (rxmode->hw_ip_checksum == 1 || + rxmode->header_split == 1) + return -1; + + return 0; +#else + RTE_SET_USED(dev); + return -1; +#endif +} + +int __attribute__((cold)) +fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq) +{ + uintptr_t p; + struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */ + + mb_def.nb_segs = 1; + /* data_off will be ajusted after new mbuf allocated for 512-byte + * alignment. 
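fm10k_rxq_vec_setup() above captures the per-mbuf fields that must be reset on reuse (data_off, refcnt, nb_segs, port) as a single 64-bit template taken behind the rearm_data marker; fm10k_rxq_rearm() below restores it with one store per mbuf, accepting (as the driver comment notes) that the first bytes of ol_flags are clipped because the RX path overwrites them anyway. A sketch of the idea in isolation; the helper names are illustrative.

#include <stdint.h>
#include <string.h>
#include <rte_mbuf.h>

/* Sketch of the rearm-template idea used above: prepare the 8 bytes that
 * start at the rearm_data marker once, then restore them per mbuf with a
 * single copy.
 */
static uint64_t
make_rearm_template(uint8_t port_id)
{
	struct rte_mbuf mb = { .buf_addr = 0 }; /* zeroed template mbuf */
	uint64_t tmpl;

	mb.data_off = RTE_PKTMBUF_HEADROOM;
	mb.nb_segs = 1;
	mb.port = port_id;
	rte_mbuf_refcnt_set(&mb, 1);

	memcpy(&tmpl, &mb.rearm_data, sizeof(tmpl));
	return tmpl;
}

static void
rearm_one_mbuf(struct rte_mbuf *m, uint64_t tmpl)
{
	memcpy(&m->rearm_data, &tmpl, sizeof(tmpl)); /* one 8-byte store */
}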
+ */ + mb_def.data_off = RTE_PKTMBUF_HEADROOM; + mb_def.port = rxq->port_id; + rte_mbuf_refcnt_set(&mb_def, 1); + + /* prevent compiler reordering: rearm_data covers previous fields */ + rte_compiler_barrier(); + p = (uintptr_t)&mb_def.rearm_data; + rxq->mbuf_initializer = *(uint64_t *)p; + return 0; +} + +static inline void +fm10k_rxq_rearm(struct fm10k_rx_queue *rxq) +{ + int i; + uint16_t rx_id; + volatile union fm10k_rx_desc *rxdp; + struct rte_mbuf **mb_alloc = &rxq->sw_ring[rxq->rxrearm_start]; + struct rte_mbuf *mb0, *mb1; + __m128i head_off = _mm_set_epi64x( + RTE_PKTMBUF_HEADROOM + FM10K_RX_DATABUF_ALIGN - 1, + RTE_PKTMBUF_HEADROOM + FM10K_RX_DATABUF_ALIGN - 1); + __m128i dma_addr0, dma_addr1; + /* Rx buffer need to be aligned with 512 byte */ + const __m128i hba_msk = _mm_set_epi64x(0, + UINT64_MAX - FM10K_RX_DATABUF_ALIGN + 1); + + rxdp = rxq->hw_ring + rxq->rxrearm_start; + + /* Pull 'n' more MBUFs into the software ring */ + if (rte_mempool_get_bulk(rxq->mp, + (void *)mb_alloc, + RTE_FM10K_RXQ_REARM_THRESH) < 0) { + dma_addr0 = _mm_setzero_si128(); + /* Clean up all the HW/SW ring content */ + for (i = 0; i < RTE_FM10K_RXQ_REARM_THRESH; i++) { + mb_alloc[i] = &rxq->fake_mbuf; + _mm_store_si128((__m128i *)&rxdp[i].q, + dma_addr0); + } + + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += + RTE_FM10K_RXQ_REARM_THRESH; + return; + } + + /* Initialize the mbufs in vector, process 2 mbufs in one loop */ + for (i = 0; i < RTE_FM10K_RXQ_REARM_THRESH; i += 2, mb_alloc += 2) { + __m128i vaddr0, vaddr1; + uintptr_t p0, p1; + + mb0 = mb_alloc[0]; + mb1 = mb_alloc[1]; + + /* Flush mbuf with pkt template. + * Data to be rearmed is 6 bytes long. + * Though, RX will overwrite ol_flags that are coming next + * anyway. So overwrite whole 8 bytes with one load: + * 6 bytes of rearm_data plus first 2 bytes of ol_flags. + */ + p0 = (uintptr_t)&mb0->rearm_data; + *(uint64_t *)p0 = rxq->mbuf_initializer; + p1 = (uintptr_t)&mb1->rearm_data; + *(uint64_t *)p1 = rxq->mbuf_initializer; + + /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */ + vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr); + vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr); + + /* convert pa to dma_addr hdr/data */ + dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0); + dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1); + + /* add headroom to pa values */ + dma_addr0 = _mm_add_epi64(dma_addr0, head_off); + dma_addr1 = _mm_add_epi64(dma_addr1, head_off); + + /* Do 512 byte alignment to satisfy HW requirement, in the + * meanwhile, set Header Buffer Address to zero. + */ + dma_addr0 = _mm_and_si128(dma_addr0, hba_msk); + dma_addr1 = _mm_and_si128(dma_addr1, hba_msk); + + /* flush desc with pa dma_addr */ + _mm_store_si128((__m128i *)&rxdp++->q, dma_addr0); + _mm_store_si128((__m128i *)&rxdp++->q, dma_addr1); + + /* enforce 512B alignment on default Rx virtual addresses */ + mb0->data_off = (uint16_t)(RTE_PTR_ALIGN((char *)mb0->buf_addr + + RTE_PKTMBUF_HEADROOM, FM10K_RX_DATABUF_ALIGN) + - (char *)mb0->buf_addr); + mb1->data_off = (uint16_t)(RTE_PTR_ALIGN((char *)mb1->buf_addr + + RTE_PKTMBUF_HEADROOM, FM10K_RX_DATABUF_ALIGN) + - (char *)mb1->buf_addr); + } + + rxq->rxrearm_start += RTE_FM10K_RXQ_REARM_THRESH; + if (rxq->rxrearm_start >= rxq->nb_desc) + rxq->rxrearm_start = 0; + + rxq->rxrearm_nb -= RTE_FM10K_RXQ_REARM_THRESH; + + rx_id = (uint16_t)((rxq->rxrearm_start == 0) ? 
+ (rxq->nb_desc - 1) : (rxq->rxrearm_start - 1)); + + /* Update the tail pointer on the NIC */ + FM10K_PCI_REG_WRITE(rxq->tail_ptr, rx_id); +} + +void __attribute__((cold)) +fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq) +{ + const unsigned mask = rxq->nb_desc - 1; + unsigned i; + + if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_desc) + return; + + /* free all mbufs that are valid in the ring */ + for (i = rxq->next_dd; i != rxq->rxrearm_start; i = (i + 1) & mask) + rte_pktmbuf_free_seg(rxq->sw_ring[i]); + rxq->rxrearm_nb = rxq->nb_desc; + + /* set all entries to NULL */ + memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_desc); +} + +static inline uint16_t +fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, uint8_t *split_packet) +{ + volatile union fm10k_rx_desc *rxdp; + struct rte_mbuf **mbufp; + uint16_t nb_pkts_recd; + int pos; + struct fm10k_rx_queue *rxq = rx_queue; + uint64_t var; + __m128i shuf_msk; + __m128i dd_check, eop_check; + uint16_t next_dd; + + next_dd = rxq->next_dd; + + /* Just the act of getting into the function from the application is + * going to cost about 7 cycles + */ + rxdp = rxq->hw_ring + next_dd; + + _mm_prefetch((const void *)rxdp, _MM_HINT_T0); + + /* See if we need to rearm the RX queue - gives the prefetch a bit + * of time to act + */ + if (rxq->rxrearm_nb > RTE_FM10K_RXQ_REARM_THRESH) + fm10k_rxq_rearm(rxq); + + /* Before we start moving massive data around, check to see if + * there is actually a packet available + */ + if (!(rxdp->d.staterr & FM10K_RXD_STATUS_DD)) + return 0; + + /* Vecotr RX will process 4 packets at a time, strip the unaligned + * tails in case it's not multiple of 4. + */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_FM10K_DESCS_PER_LOOP); + + /* 4 packets DD mask */ + dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL); + + /* 4 packets EOP mask */ + eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL); + + /* mask to shuffle from desc. to mbuf */ + shuf_msk = _mm_set_epi8( + 7, 6, 5, 4, /* octet 4~7, 32bits rss */ + 15, 14, /* octet 14~15, low 16 bits vlan_macip */ + 13, 12, /* octet 12~13, 16 bits data_len */ + 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ + 13, 12, /* octet 12~13, low 16 bits pkt_len */ + 0xFF, 0xFF, /* skip high 16 bits pkt_type */ + 0xFF, 0xFF /* Skip pkt_type field in shuffle operation */ + ); + + /* Cache is empty -> need to scan the buffer rings, but first move + * the next 'n' mbufs into the cache + */ + mbufp = &rxq->sw_ring[next_dd]; + + /* A. load 4 packet in one loop + * [A*. mask out 4 unused dirty field in desc] + * B. copy 4 mbuf point from swring to rx_pkts + * C. calc the number of DD bits among the 4 packets + * [C*. extract the end-of-packet bit, if requested] + * D. fill info. from desc to mbuf + */ + for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts; + pos += RTE_FM10K_DESCS_PER_LOOP, + rxdp += RTE_FM10K_DESCS_PER_LOOP) { + __m128i descs0[RTE_FM10K_DESCS_PER_LOOP]; + __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4; + __m128i zero, staterr, sterr_tmp1, sterr_tmp2; + __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. 
*/ + + /* B.1 load 1 mbuf point */ + mbp1 = _mm_loadu_si128((__m128i *)&mbufp[pos]); + + /* Read desc statuses backwards to avoid race condition */ + /* A.1 load 4 pkts desc */ + descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3)); + + /* B.2 copy 2 mbuf point into rx_pkts */ + _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1); + + /* B.1 load 1 mbuf point */ + mbp2 = _mm_loadu_si128((__m128i *)&mbufp[pos+2]); + + descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2)); + /* B.1 load 2 mbuf point */ + descs0[1] = _mm_loadu_si128((__m128i *)(rxdp + 1)); + descs0[0] = _mm_loadu_si128((__m128i *)(rxdp)); + + /* B.2 copy 2 mbuf point into rx_pkts */ + _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2); + + /* avoid compiler reorder optimization */ + rte_compiler_barrier(); + + if (split_packet) { + rte_prefetch0(&rx_pkts[pos]->cacheline1); + rte_prefetch0(&rx_pkts[pos + 1]->cacheline1); + rte_prefetch0(&rx_pkts[pos + 2]->cacheline1); + rte_prefetch0(&rx_pkts[pos + 3]->cacheline1); + } + + /* D.1 pkt 3,4 convert format from desc to pktmbuf */ + pkt_mb4 = _mm_shuffle_epi8(descs0[3], shuf_msk); + pkt_mb3 = _mm_shuffle_epi8(descs0[2], shuf_msk); + + /* C.1 4=>2 filter staterr info only */ + sterr_tmp2 = _mm_unpackhi_epi32(descs0[3], descs0[2]); + /* C.1 4=>2 filter staterr info only */ + sterr_tmp1 = _mm_unpackhi_epi32(descs0[1], descs0[0]); + + /* set ol_flags with vlan packet type */ + fm10k_desc_to_olflags_v(descs0, &rx_pkts[pos]); + + /* D.1 pkt 1,2 convert format from desc to pktmbuf */ + pkt_mb2 = _mm_shuffle_epi8(descs0[1], shuf_msk); + pkt_mb1 = _mm_shuffle_epi8(descs0[0], shuf_msk); + + /* C.2 get 4 pkts staterr value */ + zero = _mm_xor_si128(dd_check, dd_check); + staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2); + + /* D.3 copy final 3,4 data to rx_pkts */ + _mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1, + pkt_mb4); + _mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1, + pkt_mb3); + + /* C* extract and record EOP bit */ + if (split_packet) { + __m128i eop_shuf_mask = _mm_set_epi8( + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, + 0x04, 0x0C, 0x00, 0x08 + ); + + /* and with mask to extract bits, flipping 1-0 */ + __m128i eop_bits = _mm_andnot_si128(staterr, eop_check); + /* the staterr values are not in order, as the count + * count of dd bits doesn't care. However, for end of + * packet tracking, we do care, so shuffle. 
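The status-word handling at the end of this loop packs the DD bits of four descriptors together and popcounts them, so the burst advances only over descriptors the NIC has completed and stops after the first group of four that is not fully done. A scalar paraphrase of what that accounting computes; illustrative only.

#include <stdint.h>

/* Scalar sketch of the descriptor-done accounting: count the leading run
 * of descriptors whose DD bit is set (the vector code checks them four at
 * a time).
 */
static unsigned int
count_leading_dd(const uint32_t *staterr, unsigned int n, uint32_t dd_bit)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (!(staterr[i] & dd_bit))
			break;
	return i;
}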
This also + * compresses the 32-bit values to 8-bit + */ + eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask); + /* store the resulting 32-bit value */ + *(int *)split_packet = _mm_cvtsi128_si32(eop_bits); + split_packet += RTE_FM10K_DESCS_PER_LOOP; + + /* zero-out next pointers */ + rx_pkts[pos]->next = NULL; + rx_pkts[pos + 1]->next = NULL; + rx_pkts[pos + 2]->next = NULL; + rx_pkts[pos + 3]->next = NULL; + } + + /* C.3 calc available number of desc */ + staterr = _mm_and_si128(staterr, dd_check); + staterr = _mm_packs_epi32(staterr, zero); + + /* D.3 copy final 1,2 data to rx_pkts */ + _mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1, + pkt_mb2); + _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1, + pkt_mb1); + + fm10k_desc_to_pktype_v(descs0, &rx_pkts[pos]); + + /* C.4 calc avaialbe number of desc */ + var = __builtin_popcountll(_mm_cvtsi128_si64(staterr)); + nb_pkts_recd += var; + if (likely(var != RTE_FM10K_DESCS_PER_LOOP)) + break; + } + + /* Update our internal tail pointer */ + rxq->next_dd = (uint16_t)(rxq->next_dd + nb_pkts_recd); + rxq->next_dd = (uint16_t)(rxq->next_dd & (rxq->nb_desc - 1)); + rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd); + + return nb_pkts_recd; +} + +/* vPMD receive routine + * + * Notice: + * - don't support ol_flags for rss and csum err + */ +uint16_t +fm10k_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return fm10k_recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL); +} + +static inline uint16_t +fm10k_reassemble_packets(struct fm10k_rx_queue *rxq, + struct rte_mbuf **rx_bufs, + uint16_t nb_bufs, uint8_t *split_flags) +{ + struct rte_mbuf *pkts[RTE_FM10K_MAX_RX_BURST]; /*finished pkts*/ + struct rte_mbuf *start = rxq->pkt_first_seg; + struct rte_mbuf *end = rxq->pkt_last_seg; + unsigned pkt_idx, buf_idx; + + for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) { + if (end != NULL) { + /* processing a split packet */ + end->next = rx_bufs[buf_idx]; + start->nb_segs++; + start->pkt_len += rx_bufs[buf_idx]->data_len; + end = end->next; + + if (!split_flags[buf_idx]) { + /* it's the last packet of the set */ + start->hash = end->hash; + start->ol_flags = end->ol_flags; + pkts[pkt_idx++] = start; + start = end = NULL; + } + } else { + /* not processing a split packet */ + if (!split_flags[buf_idx]) { + /* not a split packet, save and skip */ + pkts[pkt_idx++] = rx_bufs[buf_idx]; + continue; + } + end = start = rx_bufs[buf_idx]; + } + } + + /* save the partial packet for next time */ + rxq->pkt_first_seg = start; + rxq->pkt_last_seg = end; + memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts))); + return pkt_idx; +} + +/* + * vPMD receive routine that reassembles scattered packets + * + * Notice: + * - don't support ol_flags for rss and csum err + * - nb_pkts > RTE_FM10K_MAX_RX_BURST, only scan RTE_FM10K_MAX_RX_BURST + * numbers of DD bit + */ +uint16_t +fm10k_recv_scattered_pkts_vec(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct fm10k_rx_queue *rxq = rx_queue; + uint8_t split_flags[RTE_FM10K_MAX_RX_BURST] = {0}; + unsigned i = 0; + + /* Split_flags only can support max of RTE_FM10K_MAX_RX_BURST */ + nb_pkts = RTE_MIN(nb_pkts, RTE_FM10K_MAX_RX_BURST); + /* get some new buffers */ + uint16_t nb_bufs = fm10k_recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts, + split_flags); + if (nb_bufs == 0) + return 0; + + /* happy day case, full burst + no packets to be joined */ + const uint64_t *split_fl64 = (uint64_t *)split_flags; + + if (rxq->pkt_first_seg == NULL && 
+ split_fl64[0] == 0 && split_fl64[1] == 0 && + split_fl64[2] == 0 && split_fl64[3] == 0) + return nb_bufs; + + /* reassemble any packets that need reassembly*/ + if (rxq->pkt_first_seg == NULL) { + /* find the first split flag, and only reassemble then*/ + while (i < nb_bufs && !split_flags[i]) + i++; + if (i == nb_bufs) + return nb_bufs; + } + return i + fm10k_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, + &split_flags[i]); +} + +static const struct fm10k_txq_ops vec_txq_ops = { + .reset = fm10k_reset_tx_queue, +}; + +void __attribute__((cold)) +fm10k_txq_vec_setup(struct fm10k_tx_queue *txq) +{ + txq->ops = &vec_txq_ops; +} + +int __attribute__((cold)) +fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq) +{ + /* Vector TX can't offload any features yet */ + if ((txq->txq_flags & FM10K_SIMPLE_TX_FLAG) != FM10K_SIMPLE_TX_FLAG) + return -1; + + if (txq->tx_ftag_en) + return -1; + + return 0; +} + +static inline void +vtx1(volatile struct fm10k_tx_desc *txdp, + struct rte_mbuf *pkt, uint64_t flags) +{ + __m128i descriptor = _mm_set_epi64x(flags << 56 | + pkt->vlan_tci << 16 | pkt->data_len, + MBUF_DMA_ADDR(pkt)); + _mm_store_si128((__m128i *)txdp, descriptor); +} + +static inline void +vtx(volatile struct fm10k_tx_desc *txdp, + struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags) +{ + int i; + + for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt) + vtx1(txdp, *pkt, flags); +} + +static inline int __attribute__((always_inline)) +fm10k_tx_free_bufs(struct fm10k_tx_queue *txq) +{ + struct rte_mbuf **txep; + uint8_t flags; + uint32_t n; + uint32_t i; + int nb_free = 0; + struct rte_mbuf *m, *free[RTE_FM10K_TX_MAX_FREE_BUF_SZ]; + + /* check DD bit on threshold descriptor */ + flags = txq->hw_ring[txq->next_dd].flags; + if (!(flags & FM10K_TXD_FLAG_DONE)) + return 0; + + n = txq->rs_thresh; + + /* First buffer to free from S/W ring is at index + * next_dd - (rs_thresh-1) + */ + txep = &txq->sw_ring[txq->next_dd - (n - 1)]; + m = __rte_pktmbuf_prefree_seg(txep[0]); + if (likely(m != NULL)) { + free[0] = m; + nb_free = 1; + for (i = 1; i < n; i++) { + m = __rte_pktmbuf_prefree_seg(txep[i]); + if (likely(m != NULL)) { + if (likely(m->pool == free[0]->pool)) + free[nb_free++] = m; + else { + rte_mempool_put_bulk(free[0]->pool, + (void *)free, nb_free); + free[0] = m; + nb_free = 1; + } + } + } + rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free); + } else { + for (i = 1; i < n; i++) { + m = __rte_pktmbuf_prefree_seg(txep[i]); + if (m != NULL) + rte_mempool_put(m->pool, m); + } + } + + /* buffers were freed, update counters */ + txq->nb_free = (uint16_t)(txq->nb_free + txq->rs_thresh); + txq->next_dd = (uint16_t)(txq->next_dd + txq->rs_thresh); + if (txq->next_dd >= txq->nb_desc) + txq->next_dd = (uint16_t)(txq->rs_thresh - 1); + + return txq->rs_thresh; +} + +static inline void __attribute__((always_inline)) +tx_backlog_entry(struct rte_mbuf **txep, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + int i; + + for (i = 0; i < (int)nb_pkts; ++i) + txep[i] = tx_pkts[i]; +} + +uint16_t +fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue; + volatile struct fm10k_tx_desc *txdp; + struct rte_mbuf **txep; + uint16_t n, nb_commit, tx_id; + uint64_t flags = FM10K_TXD_FLAG_LAST; + uint64_t rs = FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_LAST; + int i; + + /* cross rx_thresh boundary is not allowed */ + nb_pkts = RTE_MIN(nb_pkts, txq->rs_thresh); + + if (txq->nb_free < txq->free_thresh) + 
fm10k_tx_free_bufs(txq); + + nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts); + if (unlikely(nb_pkts == 0)) + return 0; + + tx_id = txq->next_free; + txdp = &txq->hw_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + + txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts); + + n = (uint16_t)(txq->nb_desc - tx_id); + if (nb_commit >= n) { + tx_backlog_entry(txep, tx_pkts, n); + + for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp) + vtx1(txdp, *tx_pkts, flags); + + vtx1(txdp, *tx_pkts++, rs); + + nb_commit = (uint16_t)(nb_commit - n); + + tx_id = 0; + txq->next_rs = (uint16_t)(txq->rs_thresh - 1); + + /* avoid reach the end of ring */ + txdp = &(txq->hw_ring[tx_id]); + txep = &txq->sw_ring[tx_id]; + } + + tx_backlog_entry(txep, tx_pkts, nb_commit); + + vtx(txdp, tx_pkts, nb_commit, flags); + + tx_id = (uint16_t)(tx_id + nb_commit); + if (tx_id > txq->next_rs) { + txq->hw_ring[txq->next_rs].flags |= FM10K_TXD_FLAG_RS; + txq->next_rs = (uint16_t)(txq->next_rs + txq->rs_thresh); + } + + txq->next_free = tx_id; + + FM10K_PCI_REG_WRITE(txq->tail_ptr, txq->next_free); + + return nb_pkts; +} + +static void __attribute__((cold)) +fm10k_reset_tx_queue(struct fm10k_tx_queue *txq) +{ + static const struct fm10k_tx_desc zeroed_desc = {0}; + struct rte_mbuf **txe = txq->sw_ring; + uint16_t i; + + /* Zero out HW ring memory */ + for (i = 0; i < txq->nb_desc; i++) + txq->hw_ring[i] = zeroed_desc; + + /* Initialize SW ring entries */ + for (i = 0; i < txq->nb_desc; i++) + txe[i] = NULL; + + txq->next_dd = (uint16_t)(txq->rs_thresh - 1); + txq->next_rs = (uint16_t)(txq->rs_thresh - 1); + + txq->next_free = 0; + txq->nb_used = 0; + /* Always allow 1 descriptor to be un-allocated to avoid + * a H/W race condition + */ + txq->nb_free = (uint16_t)(txq->nb_desc - 1); + FM10K_PCI_REG_WRITE(txq->tail_ptr, 0); +} diff --git a/drivers/net/fm10k/rte_pmd_fm10k_version.map b/drivers/net/fm10k/rte_pmd_fm10k_version.map new file mode 100644 index 00000000..ef353984 --- /dev/null +++ b/drivers/net/fm10k/rte_pmd_fm10k_version.map @@ -0,0 +1,4 @@ +DPDK_2.0 { + + local: *; +}; diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile new file mode 100644 index 00000000..6dd6eaab --- /dev/null +++ b/drivers/net/i40e/Makefile @@ -0,0 +1,110 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2015 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_i40e.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) -DPF_DRIVER -DVF_DRIVER -DINTEGRATED_VF +CFLAGS += -DX722_SUPPORT -DX722_A0_SUPPORT + +EXPORT_MAP := rte_pmd_i40e_version.map + +LIBABIVER := 1 + +# +# Add extra flags for base driver files (also known as shared code) +# to disable warnings +# +ifeq ($(CC), icc) +CFLAGS_BASE_DRIVER = -wd593 -wd188 +else ifeq ($(CC), clang) +CFLAGS_BASE_DRIVER += -Wno-sign-compare +CFLAGS_BASE_DRIVER += -Wno-unused-value +CFLAGS_BASE_DRIVER += -Wno-unused-parameter +CFLAGS_BASE_DRIVER += -Wno-strict-aliasing +CFLAGS_BASE_DRIVER += -Wno-format +CFLAGS_BASE_DRIVER += -Wno-missing-field-initializers +CFLAGS_BASE_DRIVER += -Wno-pointer-to-int-cast +CFLAGS_BASE_DRIVER += -Wno-format-nonliteral +CFLAGS_BASE_DRIVER += -Wno-unused-variable +else +CFLAGS_BASE_DRIVER = -Wno-sign-compare +CFLAGS_BASE_DRIVER += -Wno-unused-value +CFLAGS_BASE_DRIVER += -Wno-unused-parameter +CFLAGS_BASE_DRIVER += -Wno-strict-aliasing +CFLAGS_BASE_DRIVER += -Wno-format +CFLAGS_BASE_DRIVER += -Wno-missing-field-initializers +CFLAGS_BASE_DRIVER += -Wno-pointer-to-int-cast +CFLAGS_BASE_DRIVER += -Wno-format-nonliteral +CFLAGS_BASE_DRIVER += -Wno-format-security +CFLAGS_BASE_DRIVER += -Wno-unused-variable + +ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1) +CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable +endif + +CFLAGS_i40e_lan_hmc.o += -Wno-error +endif +OBJS_BASE_DRIVER=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))) +$(foreach obj, $(OBJS_BASE_DRIVER), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER))) + +VPATH += $(SRCDIR)/base + +# +# all source are stored in SRCS-y +# base driver is based on the package of dpdk-i40e.2016.01.07.14.tar.gz. 
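# The $(foreach ...)/$(eval ...) pair above applies CFLAGS_BASE_DRIVER to
# every object built from base/*.c; for instance it effectively generates
# CFLAGS_i40e_adminq.o += $(CFLAGS_BASE_DRIVER), so the relaxed warning
# options cover only the shared base code.  The SRCS-y lists below follow
# the usual DPDK pattern: a file is compiled only when the corresponding
# CONFIG_RTE_LIBRTE_* option is set to "y", which turns the variable name
# into SRCS-y.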
+# +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_adminq.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_common.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_diag.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_hmc.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_lan_hmc.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_nvm.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_dcb.c + +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c + +# this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_eal lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_mempool lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_net + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/i40e/base/i40e_adminq.c b/drivers/net/i40e/base/i40e_adminq.c new file mode 100644 index 00000000..222add40 --- /dev/null +++ b/drivers/net/i40e/base/i40e_adminq.c @@ -0,0 +1,1166 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#include "i40e_status.h" +#include "i40e_type.h" +#include "i40e_register.h" +#include "i40e_adminq.h" +#include "i40e_prototype.h" + +#ifdef PF_DRIVER +/** + * i40e_is_nvm_update_op - return true if this is an NVM update operation + * @desc: API request descriptor + **/ +STATIC INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) +{ + return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) || + desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update)); +} + +#endif /* PF_DRIVER */ +/** + * i40e_adminq_init_regs - Initialize AdminQ registers + * @hw: pointer to the hardware structure + * + * This assumes the alloc_asq and alloc_arq functions have already been called + **/ +STATIC void i40e_adminq_init_regs(struct i40e_hw *hw) +{ + /* set head and tail registers in our local struct */ + if (i40e_is_vf(hw)) { + hw->aq.asq.tail = I40E_VF_ATQT1; + hw->aq.asq.head = I40E_VF_ATQH1; + hw->aq.asq.len = I40E_VF_ATQLEN1; + hw->aq.asq.bal = I40E_VF_ATQBAL1; + hw->aq.asq.bah = I40E_VF_ATQBAH1; + hw->aq.arq.tail = I40E_VF_ARQT1; + hw->aq.arq.head = I40E_VF_ARQH1; + hw->aq.arq.len = I40E_VF_ARQLEN1; + hw->aq.arq.bal = I40E_VF_ARQBAL1; + hw->aq.arq.bah = I40E_VF_ARQBAH1; +#ifdef PF_DRIVER + } else { + hw->aq.asq.tail = I40E_PF_ATQT; + hw->aq.asq.head = I40E_PF_ATQH; + hw->aq.asq.len = I40E_PF_ATQLEN; + hw->aq.asq.bal = I40E_PF_ATQBAL; + hw->aq.asq.bah = I40E_PF_ATQBAH; + hw->aq.arq.tail = I40E_PF_ARQT; + hw->aq.arq.head = I40E_PF_ARQH; + hw->aq.arq.len = I40E_PF_ARQLEN; + hw->aq.arq.bal = I40E_PF_ARQBAL; + hw->aq.arq.bah = I40E_PF_ARQBAH; +#endif + } +} + +/** + * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings + * @hw: pointer to the hardware structure + **/ +enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code; + + ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, + i40e_mem_atq_ring, + (hw->aq.num_asq_entries * + sizeof(struct i40e_aq_desc)), + I40E_ADMINQ_DESC_ALIGNMENT); + if (ret_code) + return ret_code; + + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf, + (hw->aq.num_asq_entries * + sizeof(struct i40e_asq_cmd_details))); + if (ret_code) { + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); + return ret_code; + } + + return ret_code; +} + +/** + * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings + * @hw: pointer to the hardware structure + **/ +enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code; + + ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, + i40e_mem_arq_ring, + (hw->aq.num_arq_entries * + sizeof(struct i40e_aq_desc)), + I40E_ADMINQ_DESC_ALIGNMENT); + + return ret_code; +} + +/** + * i40e_free_adminq_asq - Free Admin Queue send rings + * @hw: pointer to the hardware structure + * + * This assumes the posted send buffers have already been cleaned + * and de-allocated + **/ +void i40e_free_adminq_asq(struct i40e_hw *hw) +{ + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); +} + +/** + * i40e_free_adminq_arq - Free Admin Queue receive rings + * @hw: pointer to the hardware structure + * + * This assumes the posted receive buffers have already been cleaned + * and de-allocated + **/ +void i40e_free_adminq_arq(struct i40e_hw *hw) +{ + i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); +} + +/** + * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue + * @hw: pointer to the hardware structure + **/ +STATIC enum i40e_status_code 
i40e_alloc_arq_bufs(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code; + struct i40e_aq_desc *desc; + struct i40e_dma_mem *bi; + int i; + + /* We'll be allocating the buffer info memory first, then we can + * allocate the mapped buffers for the event processing + */ + + /* buffer_info structures do not need alignment */ + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head, + (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem))); + if (ret_code) + goto alloc_arq_bufs; + hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va; + + /* allocate the mapped buffers */ + for (i = 0; i < hw->aq.num_arq_entries; i++) { + bi = &hw->aq.arq.r.arq_bi[i]; + ret_code = i40e_allocate_dma_mem(hw, bi, + i40e_mem_arq_buf, + hw->aq.arq_buf_size, + I40E_ADMINQ_DESC_ALIGNMENT); + if (ret_code) + goto unwind_alloc_arq_bufs; + + /* now configure the descriptors for use */ + desc = I40E_ADMINQ_DESC(hw->aq.arq, i); + + desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF); + if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) + desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB); + desc->opcode = 0; + /* This is in accordance with Admin queue design, there is no + * register for buffer size configuration + */ + desc->datalen = CPU_TO_LE16((u16)bi->size); + desc->retval = 0; + desc->cookie_high = 0; + desc->cookie_low = 0; + desc->params.external.addr_high = + CPU_TO_LE32(I40E_HI_DWORD(bi->pa)); + desc->params.external.addr_low = + CPU_TO_LE32(I40E_LO_DWORD(bi->pa)); + desc->params.external.param0 = 0; + desc->params.external.param1 = 0; + } + +alloc_arq_bufs: + return ret_code; + +unwind_alloc_arq_bufs: + /* don't try to free the one that failed... */ + i--; + for (; i >= 0; i--) + i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); + i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); + + return ret_code; +} + +/** + * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue + * @hw: pointer to the hardware structure + **/ +STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code; + struct i40e_dma_mem *bi; + int i; + + /* No mapped memory needed yet, just the buffer info structures */ + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head, + (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem))); + if (ret_code) + goto alloc_asq_bufs; + hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va; + + /* allocate the mapped buffers */ + for (i = 0; i < hw->aq.num_asq_entries; i++) { + bi = &hw->aq.asq.r.asq_bi[i]; + ret_code = i40e_allocate_dma_mem(hw, bi, + i40e_mem_asq_buf, + hw->aq.asq_buf_size, + I40E_ADMINQ_DESC_ALIGNMENT); + if (ret_code) + goto unwind_alloc_asq_bufs; + } +alloc_asq_bufs: + return ret_code; + +unwind_alloc_asq_bufs: + /* don't try to free the one that failed... 
*/ + i--; + for (; i >= 0; i--) + i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); + i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); + + return ret_code; +} + +/** + * i40e_free_arq_bufs - Free receive queue buffer info elements + * @hw: pointer to the hardware structure + **/ +STATIC void i40e_free_arq_bufs(struct i40e_hw *hw) +{ + int i; + + /* free descriptors */ + for (i = 0; i < hw->aq.num_arq_entries; i++) + i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); + + /* free the descriptor memory */ + i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); + + /* free the dma header */ + i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); +} + +/** + * i40e_free_asq_bufs - Free send queue buffer info elements + * @hw: pointer to the hardware structure + **/ +STATIC void i40e_free_asq_bufs(struct i40e_hw *hw) +{ + int i; + + /* only unmap if the address is non-NULL */ + for (i = 0; i < hw->aq.num_asq_entries; i++) + if (hw->aq.asq.r.asq_bi[i].pa) + i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); + + /* free the buffer info list */ + i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf); + + /* free the descriptor memory */ + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); + + /* free the dma header */ + i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); +} + +/** + * i40e_config_asq_regs - configure ASQ registers + * @hw: pointer to the hardware structure + * + * Configure base address and length registers for the transmit queue + **/ +STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + u32 reg = 0; + + /* Clear Head and Tail */ + wr32(hw, hw->aq.asq.head, 0); + wr32(hw, hw->aq.asq.tail, 0); + + /* set starting point */ +#ifdef PF_DRIVER +#ifdef INTEGRATED_VF + if (!i40e_is_vf(hw)) + wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | + I40E_PF_ATQLEN_ATQENABLE_MASK)); +#else + wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | + I40E_PF_ATQLEN_ATQENABLE_MASK)); +#endif /* INTEGRATED_VF */ +#endif /* PF_DRIVER */ +#ifdef VF_DRIVER +#ifdef INTEGRATED_VF + if (i40e_is_vf(hw)) + wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | + I40E_VF_ATQLEN1_ATQENABLE_MASK)); +#else + wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | + I40E_VF_ATQLEN1_ATQENABLE_MASK)); +#endif /* INTEGRATED_VF */ +#endif /* VF_DRIVER */ + wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa)); + wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa)); + + /* Check one register to verify that config was applied */ + reg = rd32(hw, hw->aq.asq.bal); + if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa)) + ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + + return ret_code; +} + +/** + * i40e_config_arq_regs - ARQ register configuration + * @hw: pointer to the hardware structure + * + * Configure base address and length registers for the receive (event queue) + **/ +STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + u32 reg = 0; + + /* Clear Head and Tail */ + wr32(hw, hw->aq.arq.head, 0); + wr32(hw, hw->aq.arq.tail, 0); + + /* set starting point */ +#ifdef PF_DRIVER +#ifdef INTEGRATED_VF + if (!i40e_is_vf(hw)) + wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | + I40E_PF_ARQLEN_ARQENABLE_MASK)); +#else + wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | + I40E_PF_ARQLEN_ARQENABLE_MASK)); +#endif /* INTEGRATED_VF */ +#endif /* PF_DRIVER */ +#ifdef VF_DRIVER +#ifdef INTEGRATED_VF + if (i40e_is_vf(hw)) + wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | + I40E_VF_ARQLEN1_ARQENABLE_MASK)); +#else + wr32(hw, 
hw->aq.arq.len, (hw->aq.num_arq_entries | + I40E_VF_ARQLEN1_ARQENABLE_MASK)); +#endif /* INTEGRATED_VF */ +#endif /* VF_DRIVER */ + wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa)); + wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa)); + + /* Update tail in the HW to post pre-allocated buffers */ + wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); + + /* Check one register to verify that config was applied */ + reg = rd32(hw, hw->aq.arq.bal); + if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa)) + ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + + return ret_code; +} + +/** + * i40e_init_asq - main initialization routine for ASQ + * @hw: pointer to the hardware structure + * + * This is the main initialization routine for the Admin Send Queue + * Prior to calling this function, drivers *MUST* set the following fields + * in the hw->aq structure: + * - hw->aq.num_asq_entries + * - hw->aq.arq_buf_size + * + * Do *NOT* hold the lock when calling this as the memory allocation routines + * called are not going to be atomic context safe + **/ +enum i40e_status_code i40e_init_asq(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + + if (hw->aq.asq.count > 0) { + /* queue already initialized */ + ret_code = I40E_ERR_NOT_READY; + goto init_adminq_exit; + } + + /* verify input for valid configuration */ + if ((hw->aq.num_asq_entries == 0) || + (hw->aq.asq_buf_size == 0)) { + ret_code = I40E_ERR_CONFIG; + goto init_adminq_exit; + } + + hw->aq.asq.next_to_use = 0; + hw->aq.asq.next_to_clean = 0; + + /* allocate the ring memory */ + ret_code = i40e_alloc_adminq_asq_ring(hw); + if (ret_code != I40E_SUCCESS) + goto init_adminq_exit; + + /* allocate buffers in the rings */ + ret_code = i40e_alloc_asq_bufs(hw); + if (ret_code != I40E_SUCCESS) + goto init_adminq_free_rings; + + /* initialize base registers */ + ret_code = i40e_config_asq_regs(hw); + if (ret_code != I40E_SUCCESS) + goto init_adminq_free_rings; + + /* success! */ + hw->aq.asq.count = hw->aq.num_asq_entries; + goto init_adminq_exit; + +init_adminq_free_rings: + i40e_free_adminq_asq(hw); + +init_adminq_exit: + return ret_code; +} + +/** + * i40e_init_arq - initialize ARQ + * @hw: pointer to the hardware structure + * + * The main initialization routine for the Admin Receive (Event) Queue. + * Prior to calling this function, drivers *MUST* set the following fields + * in the hw->aq structure: + * - hw->aq.num_asq_entries + * - hw->aq.arq_buf_size + * + * Do *NOT* hold the lock when calling this as the memory allocation routines + * called are not going to be atomic context safe + **/ +enum i40e_status_code i40e_init_arq(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + + if (hw->aq.arq.count > 0) { + /* queue already initialized */ + ret_code = I40E_ERR_NOT_READY; + goto init_adminq_exit; + } + + /* verify input for valid configuration */ + if ((hw->aq.num_arq_entries == 0) || + (hw->aq.arq_buf_size == 0)) { + ret_code = I40E_ERR_CONFIG; + goto init_adminq_exit; + } + + hw->aq.arq.next_to_use = 0; + hw->aq.arq.next_to_clean = 0; + + /* allocate the ring memory */ + ret_code = i40e_alloc_adminq_arq_ring(hw); + if (ret_code != I40E_SUCCESS) + goto init_adminq_exit; + + /* allocate buffers in the rings */ + ret_code = i40e_alloc_arq_bufs(hw); + if (ret_code != I40E_SUCCESS) + goto init_adminq_free_rings; + + /* initialize base registers */ + ret_code = i40e_config_arq_regs(hw); + if (ret_code != I40E_SUCCESS) + goto init_adminq_free_rings; + + /* success! 
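 * arq.count stays at zero until this point, so a second call to
 * i40e_init_arq() returns I40E_ERR_NOT_READY, and i40e_shutdown_arq()
 * clears count again to mark the queue as uninitialized.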
*/ + hw->aq.arq.count = hw->aq.num_arq_entries; + goto init_adminq_exit; + +init_adminq_free_rings: + i40e_free_adminq_arq(hw); + +init_adminq_exit: + return ret_code; +} + +/** + * i40e_shutdown_asq - shutdown the ASQ + * @hw: pointer to the hardware structure + * + * The main shutdown routine for the Admin Send Queue + **/ +enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + + i40e_acquire_spinlock(&hw->aq.asq_spinlock); + + if (hw->aq.asq.count == 0) { + ret_code = I40E_ERR_NOT_READY; + goto shutdown_asq_out; + } + + /* Stop firmware AdminQ processing */ + wr32(hw, hw->aq.asq.head, 0); + wr32(hw, hw->aq.asq.tail, 0); + wr32(hw, hw->aq.asq.len, 0); + wr32(hw, hw->aq.asq.bal, 0); + wr32(hw, hw->aq.asq.bah, 0); + + hw->aq.asq.count = 0; /* to indicate uninitialized queue */ + + /* free ring buffers */ + i40e_free_asq_bufs(hw); + +shutdown_asq_out: + i40e_release_spinlock(&hw->aq.asq_spinlock); + return ret_code; +} + +/** + * i40e_shutdown_arq - shutdown ARQ + * @hw: pointer to the hardware structure + * + * The main shutdown routine for the Admin Receive Queue + **/ +enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + + i40e_acquire_spinlock(&hw->aq.arq_spinlock); + + if (hw->aq.arq.count == 0) { + ret_code = I40E_ERR_NOT_READY; + goto shutdown_arq_out; + } + + /* Stop firmware AdminQ processing */ + wr32(hw, hw->aq.arq.head, 0); + wr32(hw, hw->aq.arq.tail, 0); + wr32(hw, hw->aq.arq.len, 0); + wr32(hw, hw->aq.arq.bal, 0); + wr32(hw, hw->aq.arq.bah, 0); + + hw->aq.arq.count = 0; /* to indicate uninitialized queue */ + + /* free ring buffers */ + i40e_free_arq_bufs(hw); + +shutdown_arq_out: + i40e_release_spinlock(&hw->aq.arq_spinlock); + return ret_code; +} + +/** + * i40e_init_adminq - main initialization routine for Admin Queue + * @hw: pointer to the hardware structure + * + * Prior to calling this function, drivers *MUST* set the following fields + * in the hw->aq structure: + * - hw->aq.num_asq_entries + * - hw->aq.num_arq_entries + * - hw->aq.arq_buf_size + * - hw->aq.asq_buf_size + **/ +enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code; +#ifdef PF_DRIVER + u16 eetrack_lo, eetrack_hi; + u16 cfg_ptr, oem_hi, oem_lo; + int retry = 0; +#endif + /* verify input for valid configuration */ + if ((hw->aq.num_arq_entries == 0) || + (hw->aq.num_asq_entries == 0) || + (hw->aq.arq_buf_size == 0) || + (hw->aq.asq_buf_size == 0)) { + ret_code = I40E_ERR_CONFIG; + goto init_adminq_exit; + } + + /* initialize spin locks */ + i40e_init_spinlock(&hw->aq.asq_spinlock); + i40e_init_spinlock(&hw->aq.arq_spinlock); + + /* Set up register offsets */ + i40e_adminq_init_regs(hw); + + /* setup ASQ command write back timeout */ + hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT; + + /* allocate the ASQ */ + ret_code = i40e_init_asq(hw); + if (ret_code != I40E_SUCCESS) + goto init_adminq_destroy_spinlocks; + + /* allocate the ARQ */ + ret_code = i40e_init_arq(hw); + if (ret_code != I40E_SUCCESS) + goto init_adminq_free_asq; + +#ifdef PF_DRIVER +#ifdef INTEGRATED_VF + /* VF has no need of firmware */ + if (i40e_is_vf(hw)) + goto init_adminq_exit; +#endif + /* There are some cases where the firmware may not be quite ready + * for AdminQ operations, so we retry the AdminQ setup a few times + * if we see timeouts in this first AQ call. 
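 * The loop below makes up to 10 attempts at i40e_aq_get_firmware_version(),
 * waiting 100 ms and calling i40e_resume_aq() between attempts, and stops
 * as soon as the call returns anything other than
 * I40E_ERR_ADMIN_QUEUE_TIMEOUT (including success).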
+ */ + do { + ret_code = i40e_aq_get_firmware_version(hw, + &hw->aq.fw_maj_ver, + &hw->aq.fw_min_ver, + &hw->aq.fw_build, + &hw->aq.api_maj_ver, + &hw->aq.api_min_ver, + NULL); + if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT) + break; + retry++; + i40e_msec_delay(100); + i40e_resume_aq(hw); + } while (retry < 10); + if (ret_code != I40E_SUCCESS) + goto init_adminq_free_arq; + + /* get the NVM version info */ + i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION, + &hw->nvm.version); + i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo); + i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi); + hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo; + i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr); + i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF), + &oem_hi); + i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)), + &oem_lo); + hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo; + + if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) { + ret_code = I40E_ERR_FIRMWARE_API_VERSION; + goto init_adminq_free_arq; + } + + /* pre-emptive resource lock release */ + i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); + hw->aq.nvm_release_on_done = false; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + + ret_code = i40e_aq_set_hmc_resource_profile(hw, + I40E_HMC_PROFILE_DEFAULT, + 0, + NULL); +#endif /* PF_DRIVER */ + ret_code = I40E_SUCCESS; + + /* success! */ + goto init_adminq_exit; + +#ifdef PF_DRIVER +init_adminq_free_arq: + i40e_shutdown_arq(hw); +#endif +init_adminq_free_asq: + i40e_shutdown_asq(hw); +init_adminq_destroy_spinlocks: + i40e_destroy_spinlock(&hw->aq.asq_spinlock); + i40e_destroy_spinlock(&hw->aq.arq_spinlock); + +init_adminq_exit: + return ret_code; +} + +/** + * i40e_shutdown_adminq - shutdown routine for the Admin Queue + * @hw: pointer to the hardware structure + **/ +enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + + if (i40e_check_asq_alive(hw)) + i40e_aq_queue_shutdown(hw, true); + + i40e_shutdown_asq(hw); + i40e_shutdown_arq(hw); + + /* destroy the spinlocks */ + i40e_destroy_spinlock(&hw->aq.asq_spinlock); + i40e_destroy_spinlock(&hw->aq.arq_spinlock); + + if (hw->nvm_buff.va) + i40e_free_virt_mem(hw, &hw->nvm_buff); + + return ret_code; +} + +/** + * i40e_clean_asq - cleans Admin send queue + * @hw: pointer to the hardware structure + * + * returns the number of free desc + **/ +u16 i40e_clean_asq(struct i40e_hw *hw) +{ + struct i40e_adminq_ring *asq = &(hw->aq.asq); + struct i40e_asq_cmd_details *details; + u16 ntc = asq->next_to_clean; + struct i40e_aq_desc desc_cb; + struct i40e_aq_desc *desc; + + desc = I40E_ADMINQ_DESC(*asq, ntc); + details = I40E_ADMINQ_DETAILS(*asq, ntc); + + while (rd32(hw, hw->aq.asq.head) != ntc) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); + + if (details->callback) { + I40E_ADMINQ_CALLBACK cb_func = + (I40E_ADMINQ_CALLBACK)details->callback; + i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc), + I40E_DMA_TO_DMA); + cb_func(hw, &desc_cb); + } + i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM); + i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM); + ntc++; + if (ntc == asq->count) + ntc = 0; + desc = I40E_ADMINQ_DESC(*asq, ntc); + details = I40E_ADMINQ_DETAILS(*asq, ntc); + } + + asq->next_to_clean = ntc; + + return I40E_DESC_UNUSED(asq); +} + +/** + * i40e_asq_done - check if FW has processed the Admin Send Queue + * @hw: pointer to the hw struct + * + * Returns true 
if the firmware has processed all descriptors on the + * admin send queue. Returns false if there are still requests pending. + **/ +bool i40e_asq_done(struct i40e_hw *hw) +{ + /* AQ designers suggest use of head for better + * timing reliability than DD bit + */ + return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use; + +} + +/** + * i40e_asq_send_command - send command to Admin Queue + * @hw: pointer to the hw struct + * @desc: prefilled descriptor describing the command (non DMA mem) + * @buff: buffer to use for indirect commands + * @buff_size: size of buffer for indirect commands + * @cmd_details: pointer to command details structure + * + * This is the main send command driver routine for the Admin Queue send + * queue. It runs the queue, cleans the queue, etc + **/ +enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw, + struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) +{ + enum i40e_status_code status = I40E_SUCCESS; + struct i40e_dma_mem *dma_buff = NULL; + struct i40e_asq_cmd_details *details; + struct i40e_aq_desc *desc_on_ring; + bool cmd_completed = false; + u16 retval = 0; + u32 val = 0; + + i40e_acquire_spinlock(&hw->aq.asq_spinlock); + + hw->aq.asq_last_status = I40E_AQ_RC_OK; + + if (hw->aq.asq.count == 0) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: Admin queue not initialized.\n"); + status = I40E_ERR_QUEUE_EMPTY; + goto asq_send_command_error; + } + + val = rd32(hw, hw->aq.asq.head); + if (val >= hw->aq.num_asq_entries) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: head overrun at %d\n", val); + status = I40E_ERR_QUEUE_EMPTY; + goto asq_send_command_error; + } + + details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); + if (cmd_details) { + i40e_memcpy(details, + cmd_details, + sizeof(struct i40e_asq_cmd_details), + I40E_NONDMA_TO_NONDMA); + + /* If the cmd_details are defined copy the cookie. 
The + * CPU_TO_LE32 is not needed here because the data is ignored + * by the FW, only used by the driver + */ + if (details->cookie) { + desc->cookie_high = + CPU_TO_LE32(I40E_HI_DWORD(details->cookie)); + desc->cookie_low = + CPU_TO_LE32(I40E_LO_DWORD(details->cookie)); + } + } else { + i40e_memset(details, 0, + sizeof(struct i40e_asq_cmd_details), + I40E_NONDMA_MEM); + } + + /* clear requested flags and then set additional flags if defined */ + desc->flags &= ~CPU_TO_LE16(details->flags_dis); + desc->flags |= CPU_TO_LE16(details->flags_ena); + + if (buff_size > hw->aq.asq_buf_size) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Invalid buffer size: %d.\n", + buff_size); + status = I40E_ERR_INVALID_SIZE; + goto asq_send_command_error; + } + + if (details->postpone && !details->async) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Async flag not set along with postpone flag"); + status = I40E_ERR_PARAM; + goto asq_send_command_error; + } + + /* call clean and check queue available function to reclaim the + * descriptors that were processed by FW, the function returns the + * number of desc available + */ + /* the clean function called here could be called in a separate thread + * in case of asynchronous completions + */ + if (i40e_clean_asq(hw) == 0) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Error queue is full.\n"); + status = I40E_ERR_ADMIN_QUEUE_FULL; + goto asq_send_command_error; + } + + /* initialize the temp desc pointer with the right desc */ + desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); + + /* if the desc is available copy the temp desc to the right place */ + i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc), + I40E_NONDMA_TO_DMA); + + /* if buff is not NULL assume indirect command */ + if (buff != NULL) { + dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]); + /* copy the user buff into the respective DMA buff */ + i40e_memcpy(dma_buff->va, buff, buff_size, + I40E_NONDMA_TO_DMA); + desc_on_ring->datalen = CPU_TO_LE16(buff_size); + + /* Update the address values in the desc with the pa value + * for respective buffer + */ + desc_on_ring->params.external.addr_high = + CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa)); + desc_on_ring->params.external.addr_low = + CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa)); + } + + /* bump the tail */ + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n"); + i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, + buff, buff_size); + (hw->aq.asq.next_to_use)++; + if (hw->aq.asq.next_to_use == hw->aq.asq.count) + hw->aq.asq.next_to_use = 0; + if (!details->postpone) + wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use); + + /* if cmd_details are not defined or async flag is not set, + * we need to wait for desc write back + */ + if (!details->async && !details->postpone) { + u32 total_delay = 0; + + do { + /* AQ designers suggest use of head for better + * timing reliability than DD bit + */ + if (i40e_asq_done(hw)) + break; + /* ugh! 
delay while spin_lock */ + i40e_msec_delay(1); + total_delay++; + } while (total_delay < hw->aq.asq_cmd_timeout); + } + + /* if ready, copy the desc back to temp */ + if (i40e_asq_done(hw)) { + i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc), + I40E_DMA_TO_NONDMA); + if (buff != NULL) + i40e_memcpy(buff, dma_buff->va, buff_size, + I40E_DMA_TO_NONDMA); + retval = LE16_TO_CPU(desc->retval); + if (retval != 0) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Command completed with error 0x%X.\n", + retval); + + /* strip off FW internal code */ + retval &= 0xff; + } + cmd_completed = true; + if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK) + status = I40E_SUCCESS; + else + status = I40E_ERR_ADMIN_QUEUE_ERROR; + hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; + } + + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: desc and buffer writeback:\n"); + i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size); + + /* save writeback aq if requested */ + if (details->wb_desc) + i40e_memcpy(details->wb_desc, desc_on_ring, + sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA); + + /* update the error if time out occurred */ + if ((!cmd_completed) && + (!details->async && !details->postpone)) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Writeback timeout.\n"); + status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; + } + +asq_send_command_error: + i40e_release_spinlock(&hw->aq.asq_spinlock); + return status; +} + +/** + * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function + * @desc: pointer to the temp descriptor (non DMA mem) + * @opcode: the opcode can be used to decide which flags to turn off or on + * + * Fill the desc with default values + **/ +void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, + u16 opcode) +{ + /* zero out the desc */ + i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), + I40E_NONDMA_MEM); + desc->opcode = CPU_TO_LE16(opcode); + desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI); +} + +/** + * i40e_clean_arq_element + * @hw: pointer to the hw struct + * @e: event info from the receive descriptor, includes any buffers + * @pending: number of events that could be left to process + * + * This function cleans one Admin Receive Queue element and returns + * the contents through e. 
It can also return how many events are + * left to process through 'pending' + **/ +enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw, + struct i40e_arq_event_info *e, + u16 *pending) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + u16 ntc = hw->aq.arq.next_to_clean; + struct i40e_aq_desc *desc; + struct i40e_dma_mem *bi; + u16 desc_idx; + u16 datalen; + u16 flags; + u16 ntu; + + /* pre-clean the event info */ + i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM); + + /* take the lock before we start messing with the ring */ + i40e_acquire_spinlock(&hw->aq.arq_spinlock); + + if (hw->aq.arq.count == 0) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQRX: Admin queue not initialized.\n"); + ret_code = I40E_ERR_QUEUE_EMPTY; + goto clean_arq_element_err; + } + + /* set next_to_use to head */ +#ifdef PF_DRIVER +#ifdef INTEGRATED_VF + if (!i40e_is_vf(hw)) + ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); +#else + ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); +#endif /* INTEGRATED_VF */ +#endif /* PF_DRIVER */ +#ifdef VF_DRIVER +#ifdef INTEGRATED_VF + if (i40e_is_vf(hw)) + ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK); +#else + ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK); +#endif /* INTEGRATED_VF */ +#endif /* VF_DRIVER */ + if (ntu == ntc) { + /* nothing to do - shouldn't need to update ring's values */ + ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; + goto clean_arq_element_out; + } + + /* now clean the next descriptor */ + desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc); + desc_idx = ntc; + + flags = LE16_TO_CPU(desc->flags); + if (flags & I40E_AQ_FLAG_ERR) { + ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + hw->aq.arq_last_status = + (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval); + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQRX: Event received with error 0x%X.\n", + hw->aq.arq_last_status); + } + + i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc), + I40E_DMA_TO_NONDMA); + datalen = LE16_TO_CPU(desc->datalen); + e->msg_len = min(datalen, e->buf_len); + if (e->msg_buf != NULL && (e->msg_len != 0)) + i40e_memcpy(e->msg_buf, + hw->aq.arq.r.arq_bi[desc_idx].va, + e->msg_len, I40E_DMA_TO_NONDMA); + + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); + i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, + hw->aq.arq_buf_size); + + /* Restore the original datalen and buffer address in the desc, + * FW updates datalen to indicate the event message + * size + */ + bi = &hw->aq.arq.r.arq_bi[ntc]; + i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM); + + desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF); + if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) + desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB); + desc->datalen = CPU_TO_LE16((u16)bi->size); + desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa)); + desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa)); + + /* set tail = the last cleaned desc index. 
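 * Writing the just-cleaned index to the ARQ tail hands the re-armed
 * descriptor (flags, datalen and buffer address restored above) back to
 * firmware so the same buffer can receive a later event.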
*/ + wr32(hw, hw->aq.arq.tail, ntc); + /* ntc is updated to tail + 1 */ + ntc++; + if (ntc == hw->aq.num_arq_entries) + ntc = 0; + hw->aq.arq.next_to_clean = ntc; + hw->aq.arq.next_to_use = ntu; + +#ifdef PF_DRIVER + if (i40e_is_nvm_update_op(&e->desc)) { + if (hw->aq.nvm_release_on_done) { + i40e_release_nvm(hw); + hw->aq.nvm_release_on_done = false; + } + + switch (hw->nvmupd_state) { + case I40E_NVMUPD_STATE_INIT_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + break; + + case I40E_NVMUPD_STATE_WRITE_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; + break; + + default: + break; + } + } + +#endif +clean_arq_element_out: + /* Set pending if needed, unlock and return */ + if (pending != NULL) + *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); +clean_arq_element_err: + i40e_release_spinlock(&hw->aq.arq_spinlock); + + return ret_code; +} + +void i40e_resume_aq(struct i40e_hw *hw) +{ + /* Registers are reset after PF reset */ + hw->aq.asq.next_to_use = 0; + hw->aq.asq.next_to_clean = 0; + + i40e_config_asq_regs(hw); + + hw->aq.arq.next_to_use = 0; + hw->aq.arq.next_to_clean = 0; + + i40e_config_arq_regs(hw); +} diff --git a/drivers/net/i40e/base/i40e_adminq.h b/drivers/net/i40e/base/i40e_adminq.h new file mode 100644 index 00000000..40c86d9d --- /dev/null +++ b/drivers/net/i40e/base/i40e_adminq.h @@ -0,0 +1,170 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#ifndef _I40E_ADMINQ_H_ +#define _I40E_ADMINQ_H_ + +#include "i40e_osdep.h" +#include "i40e_status.h" +#include "i40e_adminq_cmd.h" + +#define I40E_ADMINQ_DESC(R, i) \ + (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i])) + +#define I40E_ADMINQ_DESC_ALIGNMENT 4096 + +struct i40e_adminq_ring { + struct i40e_virt_mem dma_head; /* space for dma structures */ + struct i40e_dma_mem desc_buf; /* descriptor ring memory */ + struct i40e_virt_mem cmd_buf; /* command buffer memory */ + + union { + struct i40e_dma_mem *asq_bi; + struct i40e_dma_mem *arq_bi; + } r; + + u16 count; /* Number of descriptors */ + u16 rx_buf_len; /* Admin Receive Queue buffer length */ + + /* used for interrupt processing */ + u16 next_to_use; + u16 next_to_clean; + + /* used for queue tracking */ + u32 head; + u32 tail; + u32 len; + u32 bah; + u32 bal; +}; + +/* ASQ transaction details */ +struct i40e_asq_cmd_details { + void *callback; /* cast from type I40E_ADMINQ_CALLBACK */ + u64 cookie; + u16 flags_ena; + u16 flags_dis; + bool async; + bool postpone; + struct i40e_aq_desc *wb_desc; +}; + +#define I40E_ADMINQ_DETAILS(R, i) \ + (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i])) + +/* ARQ event information */ +struct i40e_arq_event_info { + struct i40e_aq_desc desc; + u16 msg_len; + u16 buf_len; + u8 *msg_buf; +}; + +/* Admin Queue information */ +struct i40e_adminq_info { + struct i40e_adminq_ring arq; /* receive queue */ + struct i40e_adminq_ring asq; /* send queue */ + u32 asq_cmd_timeout; /* send queue cmd write back timeout*/ + u16 num_arq_entries; /* receive queue depth */ + u16 num_asq_entries; /* send queue depth */ + u16 arq_buf_size; /* receive queue buffer size */ + u16 asq_buf_size; /* send queue buffer size */ + u16 fw_maj_ver; /* firmware major version */ + u16 fw_min_ver; /* firmware minor version */ + u32 fw_build; /* firmware build number */ + u16 api_maj_ver; /* api major version */ + u16 api_min_ver; /* api minor version */ + bool nvm_release_on_done; + + struct i40e_spinlock asq_spinlock; /* Send queue spinlock */ + struct i40e_spinlock arq_spinlock; /* Receive queue spinlock */ + + /* last status values on send and receive queues */ + enum i40e_admin_queue_err asq_last_status; + enum i40e_admin_queue_err arq_last_status; +}; + +/** + * i40e_aq_rc_to_posix - convert errors to user-land codes + * aq_ret: AdminQ handler error code can override aq_rc + * aq_rc: AdminQ firmware error code to convert + **/ +STATIC INLINE int i40e_aq_rc_to_posix(int aq_ret, int aq_rc) +{ + int aq_to_posix[] = { + 0, /* I40E_AQ_RC_OK */ + -EPERM, /* I40E_AQ_RC_EPERM */ + -ENOENT, /* I40E_AQ_RC_ENOENT */ + -ESRCH, /* I40E_AQ_RC_ESRCH */ + -EINTR, /* I40E_AQ_RC_EINTR */ + -EIO, /* I40E_AQ_RC_EIO */ + -ENXIO, /* I40E_AQ_RC_ENXIO */ + -E2BIG, /* I40E_AQ_RC_E2BIG */ + -EAGAIN, /* I40E_AQ_RC_EAGAIN */ + -ENOMEM, /* I40E_AQ_RC_ENOMEM */ + -EACCES, /* I40E_AQ_RC_EACCES */ + -EFAULT, /* I40E_AQ_RC_EFAULT */ + -EBUSY, /* I40E_AQ_RC_EBUSY */ + -EEXIST, /* I40E_AQ_RC_EEXIST */ + -EINVAL, /* I40E_AQ_RC_EINVAL */ + -ENOTTY, /* I40E_AQ_RC_ENOTTY */ + -ENOSPC, /* I40E_AQ_RC_ENOSPC */ + -ENOSYS, /* I40E_AQ_RC_ENOSYS */ + -ERANGE, /* I40E_AQ_RC_ERANGE */ + -EPIPE, /* I40E_AQ_RC_EFLUSHED */ + -ESPIPE, /* I40E_AQ_RC_BAD_ADDR */ + -EROFS, /* I40E_AQ_RC_EMODE */ + -EFBIG, /* I40E_AQ_RC_EFBIG */ + }; + + /* aq_rc is invalid if AQ timed out */ + if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT) + return -EAGAIN; + + if (!((u32)aq_rc < (sizeof(aq_to_posix) / 
sizeof((aq_to_posix)[0])))) + return -ERANGE; + + return aq_to_posix[aq_rc]; +} + +/* general information */ +#define I40E_AQ_LARGE_BUF 512 +#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */ +#ifdef I40E_ESS_SUPPORT +#define I40E_ASQ_CMD_TIMEOUT_ESS 50000 /* msecs */ +#endif + +void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, + u16 opcode); + +#endif /* _I40E_ADMINQ_H_ */ diff --git a/drivers/net/i40e/base/i40e_adminq_cmd.h b/drivers/net/i40e/base/i40e_adminq_cmd.h new file mode 100644 index 00000000..fe9d5b51 --- /dev/null +++ b/drivers/net/i40e/base/i40e_adminq_cmd.h @@ -0,0 +1,2557 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _I40E_ADMINQ_CMD_H_ +#define _I40E_ADMINQ_CMD_H_ + +/* This header file defines the i40e Admin Queue commands and is shared between + * i40e Firmware and Software. + * + * This file needs to comply with the Linux Kernel coding style. 
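 * The i40e_aq_desc defined below is the 32-byte descriptor used on both the
 * send and receive queues: direct commands keep their arguments in the
 * 16-byte params union, while indirect commands put a buffer address in
 * params.external.addr_high/addr_low and the buffer length in datalen.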
+ */ + +#define I40E_FW_API_VERSION_MAJOR 0x0001 +#define I40E_FW_API_VERSION_MINOR 0x0005 + +struct i40e_aq_desc { + __le16 flags; + __le16 opcode; + __le16 datalen; + __le16 retval; + __le32 cookie_high; + __le32 cookie_low; + union { + struct { + __le32 param0; + __le32 param1; + __le32 param2; + __le32 param3; + } internal; + struct { + __le32 param0; + __le32 param1; + __le32 addr_high; + __le32 addr_low; + } external; + u8 raw[16]; + } params; +}; + +/* Flags sub-structure + * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | + * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | + */ + +/* command flags and offsets*/ +#define I40E_AQ_FLAG_DD_SHIFT 0 +#define I40E_AQ_FLAG_CMP_SHIFT 1 +#define I40E_AQ_FLAG_ERR_SHIFT 2 +#define I40E_AQ_FLAG_VFE_SHIFT 3 +#define I40E_AQ_FLAG_LB_SHIFT 9 +#define I40E_AQ_FLAG_RD_SHIFT 10 +#define I40E_AQ_FLAG_VFC_SHIFT 11 +#define I40E_AQ_FLAG_BUF_SHIFT 12 +#define I40E_AQ_FLAG_SI_SHIFT 13 +#define I40E_AQ_FLAG_EI_SHIFT 14 +#define I40E_AQ_FLAG_FE_SHIFT 15 + +#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ + +/* error codes */ +enum i40e_admin_queue_err { + I40E_AQ_RC_OK = 0, /* success */ + I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ + I40E_AQ_RC_ENOENT = 2, /* No such element */ + I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ + I40E_AQ_RC_EINTR = 4, /* operation interrupted */ + I40E_AQ_RC_EIO = 5, /* I/O error */ + I40E_AQ_RC_ENXIO = 6, /* No such resource */ + I40E_AQ_RC_E2BIG = 7, /* Arg too long */ + I40E_AQ_RC_EAGAIN = 8, /* Try again */ + I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ + I40E_AQ_RC_EACCES = 10, /* Permission denied */ + I40E_AQ_RC_EFAULT = 11, /* Bad address */ + I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ + I40E_AQ_RC_EEXIST = 13, /* object already exists */ + I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ + I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ + I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ + I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ + I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ + I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ + I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ + I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ + I40E_AQ_RC_EFBIG = 22, /* File too large */ +}; + +/* Admin Queue command opcodes */ +enum i40e_admin_queue_opc { + /* aq commands */ + i40e_aqc_opc_get_version = 0x0001, + i40e_aqc_opc_driver_version = 0x0002, + i40e_aqc_opc_queue_shutdown = 0x0003, + i40e_aqc_opc_set_pf_context = 0x0004, + + /* resource ownership */ + i40e_aqc_opc_request_resource = 0x0008, + i40e_aqc_opc_release_resource = 0x0009, + + i40e_aqc_opc_list_func_capabilities = 0x000A, + i40e_aqc_opc_list_dev_capabilities = 0x000B, + +#ifdef X722_SUPPORT + /* Proxy commands */ + i40e_aqc_opc_set_proxy_config = 0x0104, + 
i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105, + +#endif + /* LAA */ + i40e_aqc_opc_mac_address_read = 0x0107, + i40e_aqc_opc_mac_address_write = 0x0108, + + /* PXE */ + i40e_aqc_opc_clear_pxe_mode = 0x0110, + +#ifdef X722_SUPPORT + /* WoL commands */ + i40e_aqc_opc_set_wol_filter = 0x0120, + i40e_aqc_opc_get_wake_reason = 0x0121, + +#endif + /* internal switch commands */ + i40e_aqc_opc_get_switch_config = 0x0200, + i40e_aqc_opc_add_statistics = 0x0201, + i40e_aqc_opc_remove_statistics = 0x0202, + i40e_aqc_opc_set_port_parameters = 0x0203, + i40e_aqc_opc_get_switch_resource_alloc = 0x0204, + i40e_aqc_opc_set_switch_config = 0x0205, + i40e_aqc_opc_rx_ctl_reg_read = 0x0206, + i40e_aqc_opc_rx_ctl_reg_write = 0x0207, + + i40e_aqc_opc_add_vsi = 0x0210, + i40e_aqc_opc_update_vsi_parameters = 0x0211, + i40e_aqc_opc_get_vsi_parameters = 0x0212, + + i40e_aqc_opc_add_pv = 0x0220, + i40e_aqc_opc_update_pv_parameters = 0x0221, + i40e_aqc_opc_get_pv_parameters = 0x0222, + + i40e_aqc_opc_add_veb = 0x0230, + i40e_aqc_opc_update_veb_parameters = 0x0231, + i40e_aqc_opc_get_veb_parameters = 0x0232, + + i40e_aqc_opc_delete_element = 0x0243, + + i40e_aqc_opc_add_macvlan = 0x0250, + i40e_aqc_opc_remove_macvlan = 0x0251, + i40e_aqc_opc_add_vlan = 0x0252, + i40e_aqc_opc_remove_vlan = 0x0253, + i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, + i40e_aqc_opc_add_tag = 0x0255, + i40e_aqc_opc_remove_tag = 0x0256, + i40e_aqc_opc_add_multicast_etag = 0x0257, + i40e_aqc_opc_remove_multicast_etag = 0x0258, + i40e_aqc_opc_update_tag = 0x0259, + i40e_aqc_opc_add_control_packet_filter = 0x025A, + i40e_aqc_opc_remove_control_packet_filter = 0x025B, + i40e_aqc_opc_add_cloud_filters = 0x025C, + i40e_aqc_opc_remove_cloud_filters = 0x025D, + + i40e_aqc_opc_add_mirror_rule = 0x0260, + i40e_aqc_opc_delete_mirror_rule = 0x0261, + + /* DCB commands */ + i40e_aqc_opc_dcb_ignore_pfc = 0x0301, + i40e_aqc_opc_dcb_updated = 0x0302, + + /* TX scheduler */ + i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, + i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, + i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, + i40e_aqc_opc_query_vsi_bw_config = 0x0408, + i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, + i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, + + i40e_aqc_opc_enable_switching_comp_ets = 0x0413, + i40e_aqc_opc_modify_switching_comp_ets = 0x0414, + i40e_aqc_opc_disable_switching_comp_ets = 0x0415, + i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, + i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, + i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, + i40e_aqc_opc_query_port_ets_config = 0x0419, + i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, + i40e_aqc_opc_suspend_port_tx = 0x041B, + i40e_aqc_opc_resume_port_tx = 0x041C, + i40e_aqc_opc_configure_partition_bw = 0x041D, + + /* hmc */ + i40e_aqc_opc_query_hmc_resource_profile = 0x0500, + i40e_aqc_opc_set_hmc_resource_profile = 0x0501, + + /* phy commands*/ + i40e_aqc_opc_get_phy_abilities = 0x0600, + i40e_aqc_opc_set_phy_config = 0x0601, + i40e_aqc_opc_set_mac_config = 0x0603, + i40e_aqc_opc_set_link_restart_an = 0x0605, + i40e_aqc_opc_get_link_status = 0x0607, + i40e_aqc_opc_set_phy_int_mask = 0x0613, + i40e_aqc_opc_get_local_advt_reg = 0x0614, + i40e_aqc_opc_set_local_advt_reg = 0x0615, + i40e_aqc_opc_get_partner_advt = 0x0616, + i40e_aqc_opc_set_lb_modes = 0x0618, + i40e_aqc_opc_get_phy_wol_caps = 0x0621, + i40e_aqc_opc_set_phy_debug = 0x0622, + i40e_aqc_opc_upload_ext_phy_fm = 0x0625, + i40e_aqc_opc_run_phy_activity = 0x0626, + + /* NVM 
commands */ + i40e_aqc_opc_nvm_read = 0x0701, + i40e_aqc_opc_nvm_erase = 0x0702, + i40e_aqc_opc_nvm_update = 0x0703, + i40e_aqc_opc_nvm_config_read = 0x0704, + i40e_aqc_opc_nvm_config_write = 0x0705, + i40e_aqc_opc_oem_post_update = 0x0720, + i40e_aqc_opc_thermal_sensor = 0x0721, + + /* virtualization commands */ + i40e_aqc_opc_send_msg_to_pf = 0x0801, + i40e_aqc_opc_send_msg_to_vf = 0x0802, + i40e_aqc_opc_send_msg_to_peer = 0x0803, + + /* alternate structure */ + i40e_aqc_opc_alternate_write = 0x0900, + i40e_aqc_opc_alternate_write_indirect = 0x0901, + i40e_aqc_opc_alternate_read = 0x0902, + i40e_aqc_opc_alternate_read_indirect = 0x0903, + i40e_aqc_opc_alternate_write_done = 0x0904, + i40e_aqc_opc_alternate_set_mode = 0x0905, + i40e_aqc_opc_alternate_clear_port = 0x0906, + + /* LLDP commands */ + i40e_aqc_opc_lldp_get_mib = 0x0A00, + i40e_aqc_opc_lldp_update_mib = 0x0A01, + i40e_aqc_opc_lldp_add_tlv = 0x0A02, + i40e_aqc_opc_lldp_update_tlv = 0x0A03, + i40e_aqc_opc_lldp_delete_tlv = 0x0A04, + i40e_aqc_opc_lldp_stop = 0x0A05, + i40e_aqc_opc_lldp_start = 0x0A06, + i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07, + i40e_aqc_opc_lldp_set_local_mib = 0x0A08, + i40e_aqc_opc_lldp_stop_start_spec_agent = 0x0A09, + + /* Tunnel commands */ + i40e_aqc_opc_add_udp_tunnel = 0x0B00, + i40e_aqc_opc_del_udp_tunnel = 0x0B01, +#ifdef X722_SUPPORT + i40e_aqc_opc_set_rss_key = 0x0B02, + i40e_aqc_opc_set_rss_lut = 0x0B03, + i40e_aqc_opc_get_rss_key = 0x0B04, + i40e_aqc_opc_get_rss_lut = 0x0B05, +#endif + + /* Async Events */ + i40e_aqc_opc_event_lan_overflow = 0x1001, + + /* OEM commands */ + i40e_aqc_opc_oem_parameter_change = 0xFE00, + i40e_aqc_opc_oem_device_status_change = 0xFE01, + i40e_aqc_opc_oem_ocsd_initialize = 0xFE02, + i40e_aqc_opc_oem_ocbb_initialize = 0xFE03, + + /* debug commands */ + i40e_aqc_opc_debug_read_reg = 0xFF03, + i40e_aqc_opc_debug_write_reg = 0xFF04, + i40e_aqc_opc_debug_modify_reg = 0xFF07, + i40e_aqc_opc_debug_dump_internals = 0xFF08, +}; + +/* command structures and indirect data structures */ + +/* Structure naming conventions: + * - no suffix for direct command descriptor structures + * - _data for indirect sent data + * - _resp for indirect return data (data which is both will use _data) + * - _completion for direct return data + * - _element_ for repeated elements (may also be _data or _resp) + * + * Command structures are expected to overlay the params.raw member of the basic + * descriptor, and as such cannot exceed 16 bytes in length. + */ + +/* This macro is used to generate a compilation error if a structure + * is not exactly the correct length. It gives a divide by zero error if the + * structure is not of the correct size, otherwise it creates an enum that is + * never used. + */ +#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \ + { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } + +/* This macro is used extensively to ensure that command structures are 16 + * bytes in length as they have to map to the raw array of that size. 
+ */ +#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) + +/* internal (0x00XX) commands */ + +/* Get version (direct 0x0001) */ +struct i40e_aqc_get_version { + __le32 rom_ver; + __le32 fw_build; + __le16 fw_major; + __le16 fw_minor; + __le16 api_major; + __le16 api_minor; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); + +/* Send driver version (indirect 0x0002) */ +struct i40e_aqc_driver_version { + u8 driver_major_ver; + u8 driver_minor_ver; + u8 driver_build_ver; + u8 driver_subbuild_ver; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); + +/* Queue Shutdown (direct 0x0003) */ +struct i40e_aqc_queue_shutdown { + __le32 driver_unloading; +#define I40E_AQ_DRIVER_UNLOADING 0x1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); + +/* Set PF context (0x0004, direct) */ +struct i40e_aqc_set_pf_context { + u8 pf_id; + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); + +/* Request resource ownership (direct 0x0008) + * Release resource ownership (direct 0x0009) + */ +#define I40E_AQ_RESOURCE_NVM 1 +#define I40E_AQ_RESOURCE_SDP 2 +#define I40E_AQ_RESOURCE_ACCESS_READ 1 +#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 +#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 +#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 + +struct i40e_aqc_request_resource { + __le16 resource_id; + __le16 access_type; + __le32 timeout; + __le32 resource_number; + u8 reserved[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); + +/* Get function capabilities (indirect 0x000A) + * Get device capabilities (indirect 0x000B) + */ +struct i40e_aqc_list_capabilites { + u8 command_flags; +#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 + u8 pf_index; + u8 reserved[2]; + __le32 count; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); + +struct i40e_aqc_list_capabilities_element_resp { + __le16 id; + u8 major_rev; + u8 minor_rev; + __le32 number; + __le32 logical_id; + __le32 phys_id; + u8 reserved[16]; +}; + +/* list of caps */ + +#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 +#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 +#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 +#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 +#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 +#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 +#define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008 +#define I40E_AQ_CAP_ID_SRIOV 0x0012 +#define I40E_AQ_CAP_ID_VF 0x0013 +#define I40E_AQ_CAP_ID_VMDQ 0x0014 +#define I40E_AQ_CAP_ID_8021QBG 0x0015 +#define I40E_AQ_CAP_ID_8021QBR 0x0016 +#define I40E_AQ_CAP_ID_VSI 0x0017 +#define I40E_AQ_CAP_ID_DCB 0x0018 +#define I40E_AQ_CAP_ID_FCOE 0x0021 +#define I40E_AQ_CAP_ID_ISCSI 0x0022 +#define I40E_AQ_CAP_ID_RSS 0x0040 +#define I40E_AQ_CAP_ID_RXQ 0x0041 +#define I40E_AQ_CAP_ID_TXQ 0x0042 +#define I40E_AQ_CAP_ID_MSIX 0x0043 +#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 +#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 +#define I40E_AQ_CAP_ID_1588 0x0046 +#define I40E_AQ_CAP_ID_IWARP 0x0051 +#define I40E_AQ_CAP_ID_LED 0x0061 +#define I40E_AQ_CAP_ID_SDP 0x0062 +#define I40E_AQ_CAP_ID_MDIO 0x0063 +#define I40E_AQ_CAP_ID_WSR_PROT 0x0064 +#define I40E_AQ_CAP_ID_FLEX10 0x00F1 +#define I40E_AQ_CAP_ID_CEM 0x00F2 + +/* Set CPPM Configuration (direct 0x0103) */ +struct i40e_aqc_cppm_configuration { + __le16 command_flags; +#define I40E_AQ_CPPM_EN_LTRC 0x0800 +#define I40E_AQ_CPPM_EN_DMCTH 0x1000 +#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 +#define I40E_AQ_CPPM_EN_HPTC 0x4000 +#define I40E_AQ_CPPM_EN_DMARC 0x8000 + __le16 
ttlx; + __le32 dmacr; + __le16 dmcth; + u8 hptc; + u8 reserved; + __le32 pfltrc; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); + +/* Set ARP Proxy command / response (indirect 0x0104) */ +struct i40e_aqc_arp_proxy_data { + __le16 command_flags; +#define I40E_AQ_ARP_INIT_IPV4 0x0008 +#define I40E_AQ_ARP_UNSUP_CTL 0x0010 +#define I40E_AQ_ARP_ENA 0x0020 +#define I40E_AQ_ARP_ADD_IPV4 0x0040 +#define I40E_AQ_ARP_DEL_IPV4 0x0080 + __le16 table_id; + __le32 pfpm_proxyfc; + __le32 ip_addr; + u8 mac_addr[6]; + u8 reserved[2]; +}; + +I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data); + +/* Set NS Proxy Table Entry Command (indirect 0x0105) */ +struct i40e_aqc_ns_proxy_data { + __le16 table_idx_mac_addr_0; + __le16 table_idx_mac_addr_1; + __le16 table_idx_ipv6_0; + __le16 table_idx_ipv6_1; + __le16 control; +#define I40E_AQ_NS_PROXY_ADD_0 0x0100 +#define I40E_AQ_NS_PROXY_DEL_0 0x0200 +#define I40E_AQ_NS_PROXY_ADD_1 0x0400 +#define I40E_AQ_NS_PROXY_DEL_1 0x0800 +#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 +#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 +#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 +#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 +#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 + u8 mac_addr_0[6]; + u8 mac_addr_1[6]; + u8 local_mac_addr[6]; + u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */ + u8 ipv6_addr_1[16]; +}; + +I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data); + +/* Manage LAA Command (0x0106) - obsolete */ +struct i40e_aqc_mng_laa { + __le16 command_flags; +#define I40E_AQ_LAA_FLAG_WR 0x8000 + u8 reserved[2]; + __le32 sal; + __le16 sah; + u8 reserved2[6]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa); + +/* Manage MAC Address Read Command (indirect 0x0107) */ +struct i40e_aqc_mac_address_read { + __le16 command_flags; +#define I40E_AQC_LAN_ADDR_VALID 0x10 +#define I40E_AQC_SAN_ADDR_VALID 0x20 +#define I40E_AQC_PORT_ADDR_VALID 0x40 +#define I40E_AQC_WOL_ADDR_VALID 0x80 +#define I40E_AQC_MC_MAG_EN_VALID 0x100 +#define I40E_AQC_ADDR_VALID_MASK 0x1F0 + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); + +struct i40e_aqc_mac_address_read_data { + u8 pf_lan_mac[6]; + u8 pf_san_mac[6]; + u8 port_mac[6]; + u8 pf_wol_mac[6]; +}; + +I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); + +/* Manage MAC Address Write Command (0x0108) */ +struct i40e_aqc_mac_address_write { + __le16 command_flags; +#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 +#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 +#define I40E_AQC_WRITE_TYPE_PORT 0x8000 +#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000 +#define I40E_AQC_WRITE_TYPE_MASK 0xC000 + + __le16 mac_sah; + __le32 mac_sal; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); + +/* PXE commands (0x011x) */ + +/* Clear PXE Command and response (direct 0x0110) */ +struct i40e_aqc_clear_pxe { + u8 rx_cnt; + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); + +#ifdef X722_SUPPORT +/* Set WoL Filter (0x0120) */ + +struct i40e_aqc_set_wol_filter { + __le16 filter_index; +#define I40E_AQC_MAX_NUM_WOL_FILTERS 8 + __le16 cmd_flags; +#define I40E_AQC_SET_WOL_FILTER 0x8000 +#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000 + __le16 valid_flags; +#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000 +#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000 + u8 reserved[2]; + __le32 address_high; + __le32 address_low; +}; + 
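Editor's note (illustrative sketch, not part of the patch): the overlay convention described earlier in this header (command structures map onto the 16-byte params.raw area of struct i40e_aq_desc, enforced by I40E_CHECK_CMD_LENGTH) is easiest to see with a small example. The sketch below assumes this header is included; for brevity it writes fields in host byte order where a real driver would use its little-endian conversion helpers.

/* Sketch: build a direct Queue Shutdown (0x0003) command by overlaying
 * the command struct on the descriptor's 16-byte params.raw area, then
 * check completion status via the DD/ERR flag bits defined above.
 */
static void example_fill_queue_shutdown(struct i40e_aq_desc *desc)
{
	struct i40e_aqc_queue_shutdown *cmd =
		(struct i40e_aqc_queue_shutdown *)&desc->params.raw;

	desc->opcode = i40e_aqc_opc_queue_shutdown; /* host order for brevity */
	desc->datalen = 0;                          /* direct command, no buffer */
	cmd->driver_unloading = I40E_AQ_DRIVER_UNLOADING;
}

static int example_desc_completed_ok(const struct i40e_aq_desc *desc)
{
	/* DD set: firmware wrote the descriptor back.
	 * ERR set: command failed; retval then holds an i40e_admin_queue_err code.
	 */
	return (desc->flags & I40E_AQ_FLAG_DD) &&
	       !(desc->flags & I40E_AQ_FLAG_ERR);
}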
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter); + +/* Get Wake Reason (0x0121) */ + +struct i40e_aqc_get_wake_reason_completion { + u8 reserved_1[2]; + __le16 wake_reason; + u8 reserved_2[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion); + +struct i40e_aqc_set_wol_filter_data { + u8 filter[128]; + u8 mask[16]; +}; + +I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data); + +#endif /* X722_SUPPORT */ +/* Switch configuration commands (0x02xx) */ + +/* Used by many indirect commands that only pass an seid and a buffer in the + * command + */ +struct i40e_aqc_switch_seid { + __le16 seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); + +/* Get Switch Configuration command (indirect 0x0200) + * uses i40e_aqc_switch_seid for the descriptor + */ +struct i40e_aqc_get_switch_config_header_resp { + __le16 num_reported; + __le16 num_total; + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp); + +struct i40e_aqc_switch_config_element_resp { + u8 element_type; +#define I40E_AQ_SW_ELEM_TYPE_MAC 1 +#define I40E_AQ_SW_ELEM_TYPE_PF 2 +#define I40E_AQ_SW_ELEM_TYPE_VF 3 +#define I40E_AQ_SW_ELEM_TYPE_EMP 4 +#define I40E_AQ_SW_ELEM_TYPE_BMC 5 +#define I40E_AQ_SW_ELEM_TYPE_PV 16 +#define I40E_AQ_SW_ELEM_TYPE_VEB 17 +#define I40E_AQ_SW_ELEM_TYPE_PA 18 +#define I40E_AQ_SW_ELEM_TYPE_VSI 19 + u8 revision; +#define I40E_AQ_SW_ELEM_REV_1 1 + __le16 seid; + __le16 uplink_seid; + __le16 downlink_seid; + u8 reserved[3]; + u8 connection_type; +#define I40E_AQ_CONN_TYPE_REGULAR 0x1 +#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_CONN_TYPE_CASCADED 0x3 + __le16 scheduler_id; + __le16 element_info; +}; + +I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp); + +/* Get Switch Configuration (indirect 0x0200) + * an array of elements are returned in the response buffer + * the first in the array is the header, remainder are elements + */ +struct i40e_aqc_get_switch_config_resp { + struct i40e_aqc_get_switch_config_header_resp header; + struct i40e_aqc_switch_config_element_resp element[1]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp); + +/* Add Statistics (direct 0x0201) + * Remove Statistics (direct 0x0202) + */ +struct i40e_aqc_add_remove_statistics { + __le16 seid; + __le16 vlan; + __le16 stat_index; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); + +/* Set Port Parameters command (direct 0x0203) */ +struct i40e_aqc_set_port_parameters { + __le16 command_flags; +#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 +#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! 
*/ +#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 + __le16 bad_frame_vsi; + __le16 default_seid; /* reserved for command */ + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); + +/* Get Switch Resource Allocation (indirect 0x0204) */ +struct i40e_aqc_get_switch_resource_alloc { + u8 num_entries; /* reserved for command */ + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); + +/* expect an array of these structs in the response buffer */ +struct i40e_aqc_switch_resource_alloc_element_resp { + u8 resource_type; +#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 +#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 +#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 +#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 +#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 +#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 +#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 +#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 +#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 +#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 +#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA +#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB +#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC +#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD +#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF +#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 +#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 +#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 +#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 + u8 reserved1; + __le16 guaranteed; + __le16 total; + __le16 used; + __le16 total_unalloced; + u8 reserved2[6]; +}; + +I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp); + +/* Set Switch Configuration (direct 0x0205) */ +struct i40e_aqc_set_switch_config { + __le16 flags; +#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 +#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 + __le16 valid_flags; + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config); + +/* Read Receive control registers (direct 0x0206) + * Write Receive control registers (direct 0x0207) + * used for accessing Rx control registers that can be + * slow and need special handling when under high Rx load + */ +struct i40e_aqc_rx_ctl_reg_read_write { + __le32 reserved1; + __le32 address; + __le32 reserved2; + __le32 value; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write); + +/* Add VSI (indirect 0x0210) + * this indirect command uses struct i40e_aqc_vsi_properties_data + * as the indirect buffer (128 bytes) + * + * Update VSI (indirect 0x211) + * uses the same data structure as Add VSI + * + * Get VSI (indirect 0x0212) + * uses the same completion and data structure as Add VSI + */ +struct i40e_aqc_add_get_update_vsi { + __le16 uplink_seid; + u8 connection_type; +#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 +#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 + u8 reserved1; + u8 vf_id; + u8 reserved2; + __le16 vsi_flags; +#define I40E_AQ_VSI_TYPE_SHIFT 0x0 +#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) +#define I40E_AQ_VSI_TYPE_VF 0x0 +#define I40E_AQ_VSI_TYPE_VMDQ2 0x1 +#define I40E_AQ_VSI_TYPE_PF 0x2 +#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 +#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi); + +struct i40e_aqc_add_get_update_vsi_completion { + __le16 seid; + __le16 vsi_number; + __le16 vsi_used; + __le16 vsi_free; + __le32 addr_high; + __le32 addr_low; +}; + 
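Editor's note (illustrative sketch, not part of the patch): Add VSI (0x0210) is an indirect command, so only the 16-byte header above is filled in the descriptor while the 128-byte struct i40e_aqc_vsi_properties_data (defined just below) travels in a separate DMA buffer. In the sketch, dma_hi/dma_lo are hypothetical parameters carrying that buffer's physical address, u16/u32 come from the driver's osdep typedefs, and little-endian conversion is omitted.

/* Sketch: direct portion of an Add VSI (0x0210) command for a PF-owned VSI. */
static void example_fill_add_vsi(struct i40e_aq_desc *desc,
				 u16 uplink_seid, u32 dma_hi, u32 dma_lo)
{
	struct i40e_aqc_add_get_update_vsi *cmd =
		(struct i40e_aqc_add_get_update_vsi *)&desc->params.raw;

	desc->opcode = i40e_aqc_opc_add_vsi;
	desc->flags |= I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD; /* indirect; FW reads buffer */
	desc->datalen = sizeof(struct i40e_aqc_vsi_properties_data); /* 128 bytes */

	cmd->uplink_seid = uplink_seid;
	cmd->connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
	cmd->vsi_flags = I40E_AQ_VSI_TYPE_PF << I40E_AQ_VSI_TYPE_SHIFT;
	cmd->addr_high = dma_hi;
	cmd->addr_low = dma_low == 0 ? dma_lo : dma_lo; /* physical address, low 32 bits */
}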
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion); + +struct i40e_aqc_vsi_properties_data { + /* first 96 byte are written by SW */ + __le16 valid_sections; +#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 +#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 +#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 +#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 +#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 +#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 +#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 +#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 +#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 +#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 + /* switch section */ + __le16 switch_id; /* 12bit id combined with flags below */ +#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 +#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) +#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 +#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 +#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 + u8 sw_reserved[2]; + /* security section */ + u8 sec_flags; +#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 + u8 sec_reserved; + /* VLAN section */ + __le16 pvid; /* VLANS include priority bits */ + __le16 fcoe_pvid; + u8 port_vlan_flags; +#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 +#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ + I40E_AQ_VSI_PVLAN_MODE_SHIFT) +#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 +#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 +#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 +#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 +#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 +#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ + I40E_AQ_VSI_PVLAN_EMOD_SHIFT) +#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 +#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 +#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 +#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 + u8 pvlan_reserved[3]; + /* ingress egress up sections */ + __le32 ingress_table; /* bitmap, 3 bits per up */ +#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 +#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 +#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 +#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 +#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 +#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 +#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 +#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 +#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) + __le32 egress_table; /* same defines as for ingress table */ + /* cascaded PV section */ + __le16 cas_pv_tag; + u8 cas_pv_flags; +#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ + I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) +#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 +#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 +#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 +#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 +#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 
0x40 + u8 cas_pv_reserved; + /* queue mapping section */ + __le16 mapping_flags; +#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 +#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 + __le16 queue_mapping[16]; +#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 +#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) + __le16 tc_mapping[8]; +#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 +#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) +#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 +#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) + /* queueing option section */ + u8 queueing_opt_flags; +#ifdef X722_SUPPORT +#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04 +#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08 +#endif +#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 +#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 +#ifdef X722_SUPPORT +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00 +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40 +#endif + u8 queueing_opt_reserved[3]; + /* scheduler section */ + u8 up_enable_bits; + u8 sched_reserved; + /* outer up section */ + __le32 outer_up_table; /* same structure and defines as ingress table */ + u8 cmd_reserved[8]; + /* last 32 bytes are written by FW */ + __le16 qs_handle[8]; +#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF + __le16 stat_counter_idx; + __le16 sched_id; + u8 resp_reserved[12]; +}; + +I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); + +/* Add Port Virtualizer (direct 0x0220) + * also used for update PV (direct 0x0221) but only flags are used + * (IS_CTRL_PORT only works on add PV) + */ +struct i40e_aqc_add_update_pv { + __le16 command_flags; +#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 +#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 + __le16 uplink_seid; + __le16 connected_seid; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); + +struct i40e_aqc_add_update_pv_completion { + /* reserved for update; for add also encodes error if rc == ENOSPC */ + __le16 pv_seid; +#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 +#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); + +/* Get PV Params (direct 0x0222) + * uses i40e_aqc_switch_seid for the descriptor + */ + +struct i40e_aqc_get_pv_params_completion { + __le16 seid; + __le16 default_stag; + __le16 pv_flags; /* same flags as add_pv */ +#define I40E_AQC_GET_PV_PV_TYPE 0x1 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 + u8 reserved[8]; + __le16 default_port_seid; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion); + +/* Add VEB (direct 0x0230) */ +struct i40e_aqc_add_veb { + __le16 uplink_seid; + __le16 downlink_seid; + __le16 veb_flags; +#define I40E_AQC_ADD_VEB_FLOATING 0x1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ + I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) +#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 +#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 +#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */ +#define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10 + u8 enable_tcs; + u8 reserved[9]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb); + +struct i40e_aqc_add_veb_completion { + u8 reserved[6]; + __le16 switch_seid; + /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ + __le16 
veb_seid; +#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 +#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); + +/* Get VEB Parameters (direct 0x0232) + * uses i40e_aqc_switch_seid for the descriptor + */ +struct i40e_aqc_get_veb_parameters_completion { + __le16 seid; + __le16 switch_id; + __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; + u8 reserved[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); + +/* Delete Element (direct 0x0243) + * uses the generic i40e_aqc_switch_seid + */ + +/* Add MAC-VLAN (indirect 0x0250) */ + +/* used for the command for most vlan commands */ +struct i40e_aqc_macvlan { + __le16 num_addresses; + __le16 seid[3]; +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) +#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan); + +/* indirect data for command and response */ +struct i40e_aqc_add_macvlan_element_data { + u8 mac_addr[6]; + __le16 vlan_tag; + __le16 flags; +#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 +#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 +#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 +#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 +#define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010 + __le16 queue_number; +#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ + I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) + /* response section */ + u8 match_method; +#define I40E_AQC_MM_PERFECT_MATCH 0x01 +#define I40E_AQC_MM_HASH_MATCH 0x02 +#define I40E_AQC_MM_ERR_NO_RES 0xFF + u8 reserved1[3]; +}; + +struct i40e_aqc_add_remove_macvlan_completion { + __le16 perfect_mac_used; + __le16 perfect_mac_free; + __le16 unicast_hash_free; + __le16 multicast_hash_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion); + +/* Remove MAC-VLAN (indirect 0x0251) + * uses i40e_aqc_macvlan for the descriptor + * data points to an array of num_addresses of elements + */ + +struct i40e_aqc_remove_macvlan_element_data { + u8 mac_addr[6]; + __le16 vlan_tag; + u8 flags; +#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 +#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 +#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 +#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 + u8 reserved[3]; + /* reply section */ + u8 error_code; +#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF + u8 reply_reserved[3]; +}; + +/* Add VLAN (indirect 0x0252) + * Remove VLAN (indirect 0x0253) + * use the generic i40e_aqc_macvlan for the command + */ +struct i40e_aqc_add_remove_vlan_element_data { + __le16 vlan_tag; + u8 vlan_flags; +/* flags for add VLAN */ +#define I40E_AQC_ADD_VLAN_LOCAL 0x1 +#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 +#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT) +#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 +#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 +#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 +#define I40E_AQC_VLAN_PTYPE_SHIFT 3 +#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) +#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 +#define 
I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 +#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 +#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 +/* flags for remove VLAN */ +#define I40E_AQC_REMOVE_VLAN_ALL 0x1 + u8 reserved; + u8 result; +/* flags for add VLAN */ +#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 +#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE +#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF +/* flags for remove VLAN */ +#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF + u8 reserved1[3]; +}; + +struct i40e_aqc_add_remove_vlan_completion { + u8 reserved[4]; + __le16 vlans_used; + __le16 vlans_free; + __le32 addr_high; + __le32 addr_low; +}; + +/* Set VSI Promiscuous Modes (direct 0x0254) */ +struct i40e_aqc_set_vsi_promiscuous_modes { + __le16 promiscuous_flags; + __le16 valid_flags; +/* flags used for both fields above */ +#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 +#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 +#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 +#define I40E_AQC_SET_VSI_DEFAULT 0x08 +#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 +#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 + __le16 seid; +#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF + __le16 vlan_tag; +#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF +#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); + +/* Add S/E-tag command (direct 0x0255) + * Uses generic i40e_aqc_add_remove_tag_completion for completion + */ +struct i40e_aqc_add_tag { + __le16 flags; +#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 + __le16 seid; +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) + __le16 tag; + __le16 queue_number; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag); + +struct i40e_aqc_add_remove_tag_completion { + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); + +/* Remove S/E-tag command (direct 0x0256) + * Uses generic i40e_aqc_add_remove_tag_completion for completion + */ +struct i40e_aqc_remove_tag { + __le16 seid; +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) + __le16 tag; + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag); + +/* Add multicast E-Tag (direct 0x0257) + * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields + * and no external data + */ +struct i40e_aqc_add_remove_mcast_etag { + __le16 pv_seid; + __le16 etag; + u8 num_unicast_etags; + u8 reserved[3]; + __le32 addr_high; /* address of array of 2-byte s-tags */ + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag); + +struct i40e_aqc_add_remove_mcast_etag_completion { + u8 reserved[4]; + __le16 mcast_etags_used; + __le16 mcast_etags_free; + __le32 addr_high; + __le32 addr_low; + +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); + +/* Update S/E-Tag (direct 0x0259) */ +struct i40e_aqc_update_tag { + __le16 seid; +#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) + __le16 old_tag; + __le16 new_tag; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag); + +struct i40e_aqc_update_tag_completion { + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; +}; + 
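Editor's note (illustrative sketch, not part of the patch): the MAC-VLAN commands above (0x0250/0x0251) take the generic i40e_aqc_macvlan header in the descriptor plus an array of element structs in an indirect buffer. The sketch below prepares one such element and encodes the target VSI SEID; endianness conversion is omitted for brevity.

/* Sketch: one element of the Add MAC-VLAN (0x0250) indirect buffer plus
 * the SEID encoding in the command header.
 */
static void example_prepare_add_macvlan(struct i40e_aqc_macvlan *cmd,
					struct i40e_aqc_add_macvlan_element_data *elem,
					u16 vsi_seid, const u8 mac[6])
{
	int i;

	cmd->num_addresses = 1;
	cmd->seid[0] = I40E_AQC_MACVLAN_CMD_SEID_VALID |
		       (vsi_seid & I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK);
	cmd->seid[1] = 0;
	cmd->seid[2] = 0;

	for (i = 0; i < 6; i++)
		elem->mac_addr[i] = mac[i];
	elem->vlan_tag = 0;
	/* Perfect match on the MAC address, ignore the VLAN field entirely. */
	elem->flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
		      I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
}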
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); + +/* Add Control Packet filter (direct 0x025A) + * Remove Control Packet filter (direct 0x025B) + * uses the i40e_aqc_add_oveb_cloud, + * and the generic direct completion structure + */ +struct i40e_aqc_add_remove_control_packet_filter { + u8 mac[6]; + __le16 etype; + __le16 flags; +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 + __le16 seid; +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) + __le16 queue; + u8 reserved[2]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter); + +struct i40e_aqc_add_remove_control_packet_filter_completion { + __le16 mac_etype_used; + __le16 etype_used; + __le16 mac_etype_free; + __le16 etype_free; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); + +/* Add Cloud filters (indirect 0x025C) + * Remove Cloud filters (indirect 0x025D) + * uses the i40e_aqc_add_remove_cloud_filters, + * and the generic indirect completion structure + */ +struct i40e_aqc_add_remove_cloud_filters { + u8 num_filters; + u8 reserved; + __le16 seid; +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); + +struct i40e_aqc_add_remove_cloud_filters_element_data { + u8 outer_mac[6]; + u8 inner_mac[6]; + __le16 inner_vlan; + union { + struct { + u8 reserved[12]; + u8 data[4]; + } v4; + struct { + u8 data[16]; + } v6; + } ipaddr; + __le16 flags; +#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ + I40E_AQC_ADD_CLOUD_FILTER_SHIFT) +/* 0x0000 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 +/* 0x0002 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 +/* 0x0005 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 +/* 0x0007 reserved */ +/* 0x0008 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B +#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C + +#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 +#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 +#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 + +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5 + +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000 +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000 +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000 + + __le32 tenant_id; + u8 reserved[4]; + __le16 queue_number; +#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << 
\ + I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) + u8 reserved2[14]; + /* response section */ + u8 allocation_result; +#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 +#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF + u8 response_reserved[7]; +}; + +struct i40e_aqc_remove_cloud_filters_completion { + __le16 perfect_ovlan_used; + __le16 perfect_ovlan_free; + __le16 vlan_used; + __le16 vlan_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion); + +/* Add Mirror Rule (indirect or direct 0x0260) + * Delete Mirror Rule (indirect or direct 0x0261) + * note: some rule types (4,5) do not use an external buffer. + * take care to set the flags correctly. + */ +struct i40e_aqc_add_delete_mirror_rule { + __le16 seid; + __le16 rule_type; +#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 +#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ + I40E_AQC_MIRROR_RULE_TYPE_SHIFT) +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 +#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 + __le16 num_entries; + __le16 destination; /* VSI for add, rule id for delete */ + __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */ + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule); + +struct i40e_aqc_add_delete_mirror_rule_completion { + u8 reserved[2]; + __le16 rule_id; /* only used on add */ + __le16 mirror_rules_used; + __le16 mirror_rules_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); + +/* DCB 0x03xx*/ + +/* PFC Ignore (direct 0x0301) + * the command and response use the same descriptor structure + */ +struct i40e_aqc_pfc_ignore { + u8 tc_bitmap; + u8 command_flags; /* unused on response */ +#define I40E_AQC_PFC_IGNORE_SET 0x80 +#define I40E_AQC_PFC_IGNORE_CLEAR 0x0 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); + +/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure + * with no parameters + */ + +/* TX scheduler 0x04xx */ + +/* Almost all the indirect commands use + * this generic struct to pass the SEID in param0 + */ +struct i40e_aqc_tx_sched_ind { + __le16 vsi_seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind); + +/* Several commands respond with a set of queue set handles */ +struct i40e_aqc_qs_handles_resp { + __le16 qs_handles[8]; +}; + +/* Configure VSI BW limits (direct 0x0400) */ +struct i40e_aqc_configure_vsi_bw_limit { + __le16 vsi_seid; + u8 reserved[2]; + __le16 credit; + u8 reserved1[2]; + u8 max_credit; /* 0-3, limit = 2^max */ + u8 reserved2[7]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); + +/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406) + * responds with i40e_aqc_qs_handles_resp + */ +struct i40e_aqc_configure_vsi_ets_sla_bw_data { + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved1[28]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data); + +/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) + * responds with i40e_aqc_qs_handles_resp + */ +struct i40e_aqc_configure_vsi_tc_bw_data { + u8 tc_valid_bits; + u8 reserved[3]; + u8 tc_bw_credits[8]; + u8 reserved1[4]; + __le16 qs_handles[8]; +}; + 
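Editor's note (illustrative sketch, not part of the patch): the indirect buffer for Configure VSI TC BW (0x0407) just above carries per-TC relative credit shares; firmware writes the resulting QS handles back into the same buffer. A minimal sketch splitting bandwidth evenly between two traffic classes:

/* Sketch: fill i40e_aqc_configure_vsi_tc_bw_data for TC0/TC1 at equal shares. */
static void example_fill_vsi_tc_bw(struct i40e_aqc_configure_vsi_tc_bw_data *bw)
{
	int tc;

	for (tc = 0; tc < 8; tc++)
		bw->tc_bw_credits[tc] = 0;

	bw->tc_valid_bits = 0x03;   /* TC0 and TC1 enabled */
	bw->tc_bw_credits[0] = 50;  /* relative share for TC0 */
	bw->tc_bw_credits[1] = 50;  /* relative share for TC1 */
	/* qs_handles[] is written back by firmware on completion. */
}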
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data); + +/* Query vsi bw configuration (indirect 0x0408) */ +struct i40e_aqc_query_vsi_bw_config_resp { + u8 tc_valid_bits; + u8 tc_suspended_bits; + u8 reserved[14]; + __le16 qs_handles[8]; + u8 reserved1[4]; + __le16 port_bw_limit; + u8 reserved2[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved3[23]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp); + +/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ +struct i40e_aqc_query_vsi_ets_sla_config_resp { + u8 tc_valid_bits; + u8 reserved[3]; + u8 share_credits[8]; + __le16 credits[8]; + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp); + +/* Configure Switching Component Bandwidth Limit (direct 0x0410) */ +struct i40e_aqc_configure_switching_comp_bw_limit { + __le16 seid; + u8 reserved[2]; + __le16 credit; + u8 reserved1[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved2[7]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); + +/* Enable Physical Port ETS (indirect 0x0413) + * Modify Physical Port ETS (indirect 0x0414) + * Disable Physical Port ETS (indirect 0x0415) + */ +struct i40e_aqc_configure_switching_comp_ets_data { + u8 reserved[4]; + u8 tc_valid_bits; + u8 seepage; +#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 + u8 tc_strict_priority_flags; + u8 reserved1[17]; + u8 tc_bw_share_credits[8]; + u8 reserved2[96]; +}; + +I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data); + +/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ +struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credit[8]; + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved1[28]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_switching_comp_ets_bw_limit_data); + +/* Configure Switching Component Bandwidth Allocation per Tc + * (indirect 0x0417) + */ +struct i40e_aqc_configure_switching_comp_bw_config_data { + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits; /* bool */ + u8 tc_bw_share_credits[8]; + u8 reserved1[20]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data); + +/* Query Switching Component Configuration (indirect 0x0418) */ +struct i40e_aqc_query_switching_comp_ets_config_resp { + u8 tc_valid_bits; + u8 reserved[35]; + __le16 port_bw_limit; + u8 reserved1[2]; + u8 tc_bw_max; /* 0-3, limit = 2^max */ + u8 reserved2[23]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp); + +/* Query PhysicalPort ETS Configuration (indirect 0x0419) */ +struct i40e_aqc_query_port_ets_config_resp { + u8 reserved[4]; + u8 tc_valid_bits; + u8 reserved1; + u8 tc_strict_priority_bits; + u8 reserved2; + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; + + /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved3[32]; +}; + +I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp); + +/* Query Switching Component Bandwidth Allocation per Traffic Type + * (indirect 0x041A) + */ +struct i40e_aqc_query_switching_comp_bw_config_resp { + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits_enable; /* bool */ + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; +}; + 
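Editor's note (illustrative sketch, not part of the patch): several of the bandwidth structures above pack a per-TC maximum as "4 bits per tc 0-7, 4th bit is reserved, limit = 2^max" across the two tc_bw_max words. The decode below assumes low-nibble-first ordering (TC0..TC3 in word 0, TC4..TC7 in word 1), which is an inference from that comment rather than something the header states explicitly; endianness conversion is omitted.

/* Sketch: extract the max-bandwidth exponent for a given TC; limit = 2^exponent. */
static u8 example_tc_bw_max_exponent(const __le16 tc_bw_max[2], int tc)
{
	u16 word = tc_bw_max[tc / 4];   /* host order assumed for brevity */
	int shift = (tc % 4) * 4;

	return (word >> shift) & 0x7;   /* 4th bit of each nibble is reserved */
}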
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp); + +/* Suspend/resume port TX traffic + * (direct 0x041B and 0x041C) uses the generic SEID struct + */ + +/* Configure partition BW + * (indirect 0x041D) + */ +struct i40e_aqc_configure_partition_bw_data { + __le16 pf_valid_bits; + u8 min_bw[16]; /* guaranteed bandwidth */ + u8 max_bw[16]; /* bandwidth limit */ +}; + +I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data); + +/* Get and set the active HMC resource profile and status. + * (direct 0x0500) and (direct 0x0501) + */ +struct i40e_aq_get_set_hmc_resource_profile { + u8 pm_profile; + u8 pe_vf_enabled; + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); + +enum i40e_aq_hmc_profile { + /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ + I40E_HMC_PROFILE_DEFAULT = 1, + I40E_HMC_PROFILE_FAVOR_VF = 2, + I40E_HMC_PROFILE_EQUAL = 3, +}; + +#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF +#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F + +/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ + +/* set in param0 for get phy abilities to report qualified modules */ +#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 +#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 + +enum i40e_aq_phy_type { + I40E_PHY_TYPE_SGMII = 0x0, + I40E_PHY_TYPE_1000BASE_KX = 0x1, + I40E_PHY_TYPE_10GBASE_KX4 = 0x2, + I40E_PHY_TYPE_10GBASE_KR = 0x3, + I40E_PHY_TYPE_40GBASE_KR4 = 0x4, + I40E_PHY_TYPE_XAUI = 0x5, + I40E_PHY_TYPE_XFI = 0x6, + I40E_PHY_TYPE_SFI = 0x7, + I40E_PHY_TYPE_XLAUI = 0x8, + I40E_PHY_TYPE_XLPPI = 0x9, + I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA, + I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB, + I40E_PHY_TYPE_10GBASE_AOC = 0xC, + I40E_PHY_TYPE_40GBASE_AOC = 0xD, + I40E_PHY_TYPE_100BASE_TX = 0x11, + I40E_PHY_TYPE_1000BASE_T = 0x12, + I40E_PHY_TYPE_10GBASE_T = 0x13, + I40E_PHY_TYPE_10GBASE_SR = 0x14, + I40E_PHY_TYPE_10GBASE_LR = 0x15, + I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16, + I40E_PHY_TYPE_10GBASE_CR1 = 0x17, + I40E_PHY_TYPE_40GBASE_CR4 = 0x18, + I40E_PHY_TYPE_40GBASE_SR4 = 0x19, + I40E_PHY_TYPE_40GBASE_LR4 = 0x1A, + I40E_PHY_TYPE_1000BASE_SX = 0x1B, + I40E_PHY_TYPE_1000BASE_LX = 0x1C, + I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D, + I40E_PHY_TYPE_20GBASE_KR2 = 0x1E, + I40E_PHY_TYPE_MAX +}; + +#define I40E_LINK_SPEED_100MB_SHIFT 0x1 +#define I40E_LINK_SPEED_1000MB_SHIFT 0x2 +#define I40E_LINK_SPEED_10GB_SHIFT 0x3 +#define I40E_LINK_SPEED_40GB_SHIFT 0x4 +#define I40E_LINK_SPEED_20GB_SHIFT 0x5 + +enum i40e_aq_link_speed { + I40E_LINK_SPEED_UNKNOWN = 0, + I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT), + I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT), + I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT), + I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT), + I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT) +}; + +struct i40e_aqc_module_desc { + u8 oui[3]; + u8 reserved1; + u8 part_number[16]; + u8 revision[4]; + u8 reserved2[8]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc); + +struct i40e_aq_get_phy_abilities_resp { + __le32 phy_type; /* bitmap using the above enum for offsets */ + u8 link_speed; /* bitmap using the above enum bit patterns */ + u8 abilities; +#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 +#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 +#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 +#define I40E_AQ_PHY_LINK_ENABLED 0x08 +#define I40E_AQ_PHY_AN_ENABLED 0x10 +#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 + __le16 eee_capability; +#define I40E_AQ_EEE_100BASE_TX 0x0002 +#define 
I40E_AQ_EEE_1000BASE_T 0x0004 +#define I40E_AQ_EEE_10GBASE_T 0x0008 +#define I40E_AQ_EEE_1000BASE_KX 0x0010 +#define I40E_AQ_EEE_10GBASE_KX4 0x0020 +#define I40E_AQ_EEE_10GBASE_KR 0x0040 + __le32 eeer_val; + u8 d3_lpan; +#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 + u8 reserved[3]; + u8 phy_id[4]; + u8 module_type[3]; + u8 qualified_module_count; +#define I40E_AQ_PHY_MAX_QMS 16 + struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; +}; + +I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp); + +/* Set PHY Config (direct 0x0601) */ +struct i40e_aq_set_phy_config { /* same bits as above in all */ + __le32 phy_type; + u8 link_speed; + u8 abilities; +/* bits 0-2 use the values from get_phy_abilities_resp */ +#define I40E_AQ_PHY_ENABLE_LINK 0x08 +#define I40E_AQ_PHY_ENABLE_AN 0x10 +#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 + __le16 eee_capability; + __le32 eeer; + u8 low_power_ctrl; + u8 reserved[3]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); + +/* Set MAC Config command data structure (direct 0x0603) */ +struct i40e_aq_set_mac_config { + __le16 max_frame_size; + u8 params; +#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 +#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 +#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 +#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 +#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 +#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 + u8 tx_timer_priority; /* bitmap */ + __le16 tx_timer_value; + __le16 fc_refresh_threshold; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config); + +/* Restart Auto-Negotiation (direct 0x605) */ +struct i40e_aqc_set_link_restart_an { + u8 command; +#define I40E_AQ_PHY_RESTART_AN 0x02 +#define I40E_AQ_PHY_LINK_ENABLE 0x04 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); + +/* Get Link Status cmd & response data structure (direct 0x0607) */ +struct i40e_aqc_get_link_status { + __le16 command_flags; /* only field set on command */ +#define I40E_AQ_LSE_MASK 0x3 +#define I40E_AQ_LSE_NOP 0x0 +#define I40E_AQ_LSE_DISABLE 0x2 +#define I40E_AQ_LSE_ENABLE 0x3 +/* only response uses this flag */ +#define I40E_AQ_LSE_IS_ENABLED 0x1 + u8 phy_type; /* i40e_aq_phy_type */ + u8 link_speed; /* i40e_aq_link_speed */ + u8 link_info; +#define I40E_AQ_LINK_UP 0x01 /* obsolete */ +#define I40E_AQ_LINK_UP_FUNCTION 0x01 +#define I40E_AQ_LINK_FAULT 0x02 +#define I40E_AQ_LINK_FAULT_TX 0x04 +#define I40E_AQ_LINK_FAULT_RX 0x08 +#define I40E_AQ_LINK_FAULT_REMOTE 0x10 +#define I40E_AQ_LINK_UP_PORT 0x20 +#define I40E_AQ_MEDIA_AVAILABLE 0x40 +#define I40E_AQ_SIGNAL_DETECT 0x80 + u8 an_info; +#define I40E_AQ_AN_COMPLETED 0x01 +#define I40E_AQ_LP_AN_ABILITY 0x02 +#define I40E_AQ_PD_FAULT 0x04 +#define I40E_AQ_FEC_EN 0x08 +#define I40E_AQ_PHY_LOW_POWER 0x10 +#define I40E_AQ_LINK_PAUSE_TX 0x20 +#define I40E_AQ_LINK_PAUSE_RX 0x40 +#define I40E_AQ_QUALIFIED_MODULE 0x80 + u8 ext_info; +#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 +#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 +#define I40E_AQ_LINK_TX_SHIFT 0x02 +#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) +#define I40E_AQ_LINK_TX_ACTIVE 
0x00 +#define I40E_AQ_LINK_TX_DRAINED 0x01 +#define I40E_AQ_LINK_TX_FLUSHED 0x03 +#define I40E_AQ_LINK_FORCED_40G 0x10 + u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ + __le16 max_frame_size; + u8 config; +#define I40E_AQ_CONFIG_CRC_ENA 0x04 +#define I40E_AQ_CONFIG_PACING_MASK 0x78 + u8 external_power_ability; +#define I40E_AQ_LINK_POWER_CLASS_1 0x00 +#define I40E_AQ_LINK_POWER_CLASS_2 0x01 +#define I40E_AQ_LINK_POWER_CLASS_3 0x02 +#define I40E_AQ_LINK_POWER_CLASS_4 0x03 + u8 reserved[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); + +/* Set event mask command (direct 0x613) */ +struct i40e_aqc_set_phy_int_mask { + u8 reserved[8]; + __le16 event_mask; +#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 +#define I40E_AQ_EVENT_MEDIA_NA 0x0004 +#define I40E_AQ_EVENT_LINK_FAULT 0x0008 +#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 +#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 +#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 +#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 +#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 +#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 + u8 reserved1[6]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); + +/* Get Local AN advt register (direct 0x0614) + * Set Local AN advt register (direct 0x0615) + * Get Link Partner AN advt register (direct 0x0616) + */ +struct i40e_aqc_an_advt_reg { + __le32 local_an_reg0; + __le16 local_an_reg1; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); + +/* Set Loopback mode (0x0618) */ +struct i40e_aqc_set_lb_mode { + __le16 lb_mode; +#define I40E_AQ_LB_PHY_LOCAL 0x01 +#define I40E_AQ_LB_PHY_REMOTE 0x02 +#define I40E_AQ_LB_MAC_LOCAL 0x04 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); + +/* Set PHY Debug command (0x0622) */ +struct i40e_aqc_set_phy_debug { + u8 command_flags; +#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ + I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT) +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 +#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug); + +enum i40e_aq_phy_reg_type { + I40E_AQC_PHY_REG_INTERNAL = 0x1, + I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, + I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 +}; + +/* Run PHY Activity (0x0626) */ +struct i40e_aqc_run_phy_activity { + __le16 activity_id; + u8 flags; + u8 reserved1; + __le32 control; + __le32 data; + u8 reserved2[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity); + +/* NVM Read command (indirect 0x0701) + * NVM Erase commands (direct 0x0702) + * NVM Update commands (indirect 0x0703) + */ +struct i40e_aqc_nvm_update { + u8 command_flags; +#define I40E_AQ_NVM_LAST_CMD 0x01 +#define I40E_AQ_NVM_FLASH_ONLY 0x80 + u8 module_pointer; + __le16 length; + __le32 offset; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); + +/* NVM Config Read (indirect 0x0704) */ +struct i40e_aqc_nvm_config_read { + __le16 cmd_flags; +#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 +#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0 +#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1 + __le16 element_count; + __le16 element_id; /* Feature/field ID */ + __le16 element_id_msw; /* MSWord of field ID */ + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read); + +/* NVM Config Write 
(indirect 0x0705) */ +struct i40e_aqc_nvm_config_write { + __le16 cmd_flags; + __le16 element_count; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); + +/* Used for 0x0704 as well as for 0x0705 commands */ +#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1 +#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) +#define I40E_AQ_ANVM_FEATURE 0 +#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT) +struct i40e_aqc_nvm_config_data_feature { + __le16 feature_id; +#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01 +#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08 +#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10 + __le16 feature_options; + __le16 feature_selection; +}; + +I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature); + +struct i40e_aqc_nvm_config_data_immediate_field { + __le32 field_id; + __le32 field_value; + __le16 field_options; + __le16 reserved; +}; + +I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field); + +/* OEM Post Update (indirect 0x0720) + * no command data struct used + */ + struct i40e_aqc_nvm_oem_post_update { +#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01 + u8 sel_data; + u8 reserved[7]; +}; + +I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update); + +struct i40e_aqc_nvm_oem_post_update_buffer { + u8 str_len; + u8 dev_addr; + __le16 eeprom_addr; + u8 data[36]; +}; + +I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer); + +/* Thermal Sensor (indirect 0x0721) + * read or set thermal sensor configs and values + * takes a sensor and command specific data buffer, not detailed here + */ +struct i40e_aqc_thermal_sensor { + u8 sensor_action; +#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0 +#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1 +#define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2 + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor); + +/* Send to PF command (indirect 0x0801) id is only used by PF + * Send to VF command (indirect 0x0802) id is only used by PF + * Send to Peer PF command (indirect 0x0803) + */ +struct i40e_aqc_pf_vf_message { + __le32 id; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); + +/* Alternate structure */ + +/* Direct write (direct 0x0900) + * Direct read (direct 0x0902) + */ +struct i40e_aqc_alternate_write { + __le32 address0; + __le32 data0; + __le32 address1; + __le32 data1; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write); + +/* Indirect write (indirect 0x0901) + * Indirect read (indirect 0x0903) + */ + +struct i40e_aqc_alternate_ind_write { + __le32 address; + __le32 length; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write); + +/* Done alternate write (direct 0x0904) + * uses i40e_aq_desc + */ +struct i40e_aqc_alternate_write_done { + __le16 cmd_flags; +#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 +#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 +#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 +#define I40E_AQ_ALTERNATE_RESET_NEEDED 2 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); + +/* Set OEM mode (direct 0x0905) */ +struct i40e_aqc_alternate_set_mode { + __le32 mode; +#define I40E_AQ_ALTERNATE_MODE_NONE 0 +#define I40E_AQ_ALTERNATE_MODE_OEM 1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); + +/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc 
*/ + +/* async events 0x10xx */ + +/* Lan Queue Overflow Event (direct, 0x1001) */ +struct i40e_aqc_lan_overflow { + __le32 prtdcb_rupto; + __le32 otx_ctl; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow); + +/* Get LLDP MIB (indirect 0x0A00) */ +struct i40e_aqc_lldp_get_mib { + u8 type; + u8 reserved1; +#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 +#define I40E_AQ_LLDP_MIB_LOCAL 0x0 +#define I40E_AQ_LLDP_MIB_REMOTE 0x1 +#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 +#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC +#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 +#define I40E_AQ_LLDP_TX_SHIFT 0x4 +#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) +/* TX pause flags use I40E_AQ_LINK_TX_* above */ + __le16 local_len; + __le16 remote_len; + u8 reserved2[2]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); + +/* Configure LLDP MIB Change Event (direct 0x0A01) + * also used for the event (with type in the command field) + */ +struct i40e_aqc_lldp_update_mib { + u8 command; +#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 +#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); + +/* Add LLDP TLV (indirect 0x0A02) + * Delete LLDP TLV (indirect 0x0A04) + */ +struct i40e_aqc_lldp_add_tlv { + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved1[1]; + __le16 len; + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv); + +/* Update LLDP TLV (indirect 0x0A03) */ +struct i40e_aqc_lldp_update_tlv { + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved; + __le16 old_len; + __le16 new_offset; + __le16 new_len; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); + +/* Stop LLDP (direct 0x0A05) */ +struct i40e_aqc_lldp_stop { + u8 command; +#define I40E_AQ_LLDP_AGENT_STOP 0x0 +#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); + +/* Start LLDP (direct 0x0A06) */ + +struct i40e_aqc_lldp_start { + u8 command; +#define I40E_AQ_LLDP_AGENT_START 0x1 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); + +/* Get CEE DCBX Oper Config (0x0A07) + * uses the generic descriptor struct + * returns below as indirect response + */ + +#define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0 +#define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT) +#define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3 +#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT) +#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8 +#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT) + +#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0 +#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT) +#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3 +#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT) +#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8 +#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT) +#define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8 +#define I40E_AQC_CEE_FCOE_STATUS_MASK (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT) +#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xB +#define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT) +#define I40E_AQC_CEE_FIP_STATUS_SHIFT 0x10 +#define I40E_AQC_CEE_FIP_STATUS_MASK (0x7 << 
I40E_AQC_CEE_FIP_STATUS_SHIFT) + +/* struct i40e_aqc_get_cee_dcb_cfg_v1_resp was originally defined with + * word boundary layout issues, which the Linux compilers silently deal + * with by adding padding, making the actual struct larger than designed. + * However, the FW compiler for the NIC is less lenient and complains + * about the struct. Hence, the struct defined here has an extra byte in + * fields reserved3 and reserved4 to directly acknowledge that padding, + * and the new length is used in the length check macro. + */ +struct i40e_aqc_get_cee_dcb_cfg_v1_resp { + u8 reserved1; + u8 oper_num_tc; + u8 oper_prio_tc[4]; + u8 reserved2; + u8 oper_tc_bw[8]; + u8 oper_pfc_en; + u8 reserved3[2]; + __le16 oper_app_prio; + u8 reserved4[2]; + __le16 tlv_status; +}; + +I40E_CHECK_STRUCT_LEN(0x18, i40e_aqc_get_cee_dcb_cfg_v1_resp); + +struct i40e_aqc_get_cee_dcb_cfg_resp { + u8 oper_num_tc; + u8 oper_prio_tc[4]; + u8 oper_tc_bw[8]; + u8 oper_pfc_en; + __le16 oper_app_prio; + __le32 tlv_status; + u8 reserved[12]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp); + +/* Set Local LLDP MIB (indirect 0x0A08) + * Used to replace the local MIB of a given LLDP agent. e.g. DCBx + */ +struct i40e_aqc_lldp_set_local_mib { +#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0 +#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \ + SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) +#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0 +#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1) +#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \ + SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT) +#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1 + u8 type; + u8 reserved0; + __le16 length; + u8 reserved1[4]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib); + +struct i40e_aqc_lldp_set_local_mib_resp { +#define SET_LOCAL_MIB_RESP_EVENT_TRIGGERED_MASK 0x01 + u8 status; + u8 reserved[15]; +}; + +I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_lldp_set_local_mib_resp); + +/* Stop/Start LLDP Agent (direct 0x0A09) + * Used for stopping/starting specific LLDP agent. e.g. 
DCBx + */ +struct i40e_aqc_lldp_stop_start_specific_agent { +#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0 +#define I40E_AQC_START_SPECIFIC_AGENT_MASK (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT) + u8 command; + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent); + +/* Add Udp Tunnel command and completion (direct 0x0B00) */ +struct i40e_aqc_add_udp_tunnel { + __le16 udp_port; + u8 reserved0[3]; + u8 protocol_type; +#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 +#define I40E_AQC_TUNNEL_TYPE_NGE 0x01 +#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 +#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11 + u8 reserved1[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); + +struct i40e_aqc_add_udp_tunnel_completion { + __le16 udp_port; + u8 filter_entry_index; + u8 multiple_pfs; +#define I40E_AQC_SINGLE_PF 0x0 +#define I40E_AQC_MULTIPLE_PFS 0x1 + u8 total_filters; + u8 reserved[11]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion); + +/* remove UDP Tunnel command (0x0B01) */ +struct i40e_aqc_remove_udp_tunnel { + u8 reserved[2]; + u8 index; /* 0 to 15 */ + u8 reserved2[13]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel); + +struct i40e_aqc_del_udp_tunnel_completion { + __le16 udp_port; + u8 index; /* 0 to 15 */ + u8 multiple_pfs; + u8 total_filters_used; + u8 reserved1[11]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); +#ifdef X722_SUPPORT + +struct i40e_aqc_get_set_rss_key { +#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ + I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) + __le16 vsi_id; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key); + +struct i40e_aqc_get_set_rss_key_data { + u8 standard_rss_key[0x28]; + u8 extended_hash_key[0xc]; +}; + +I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); + +struct i40e_aqc_get_set_rss_lut { +#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ + I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) + __le16 vsi_id; +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \ + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) + +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 + __le16 flags; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut); +#endif + +/* tunnel key structure 0x0B10 */ + +struct i40e_aqc_tunnel_key_structure { + u8 key1_off; + u8 key2_off; + u8 key1_len; /* 0 to 15 */ + u8 key2_len; /* 0 to 15 */ + u8 flags; +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 +/* response flags */ +#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 +#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 + u8 network_key_index; +#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 +#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 +#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2 +#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3 + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); + +/* OEM mode commands (direct 0xFE0x) */ +struct i40e_aqc_oem_param_change { + __le32 param_type; +#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 +#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 +#define I40E_AQ_OEM_PARAM_MAC 2 + __le32 param_value1; + __le16 param_value2; + u8 reserved[6]; +}; + 
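/*
 * Illustrative note (not part of the original change): the
 * I40E_CHECK_CMD_LENGTH()/I40E_CHECK_STRUCT_LEN() lines that follow each
 * structure in this header are compile-time size assertions -- a direct
 * admin-queue command must exactly fill the 16-byte parameter area of the
 * descriptor. A minimal sketch of the idiom, using a hypothetical
 * MY_CHECK_STRUCT_LEN name rather than the driver's own macro:
 *
 *   #define MY_CHECK_STRUCT_LEN(n, X) \
 *           typedef char assert_sizeof_##X[(sizeof(struct X) == (n)) ? 1 : -1]
 *
 *   MY_CHECK_STRUCT_LEN(16, i40e_aqc_oem_param_change);
 *
 * If a field is added or resized, the array length turns negative and the
 * build fails, catching layout drift before a descriptor ever reaches the
 * firmware.
 */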
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); + +struct i40e_aqc_oem_state_change { + __le32 state; +#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 +#define I40E_AQ_OEM_STATE_LINK_UP 0x1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); + +/* Initialize OCSD (0xFE02, direct) */ +struct i40e_aqc_opc_oem_ocsd_initialize { + u8 type_status; + u8 reserved1[3]; + __le32 ocsd_memory_block_addr_high; + __le32 ocsd_memory_block_addr_low; + __le32 requested_update_interval; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize); + +/* Initialize OCBB (0xFE03, direct) */ +struct i40e_aqc_opc_oem_ocbb_initialize { + u8 type_status; + u8 reserved1[3]; + __le32 ocbb_memory_block_addr_high; + __le32 ocbb_memory_block_addr_low; + u8 reserved2[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize); + +/* debug commands */ + +/* get device id (0xFF00) uses the generic structure */ + +/* set test more (0xFF01, internal) */ + +struct i40e_acq_set_test_mode { + u8 mode; +#define I40E_AQ_TEST_PARTIAL 0 +#define I40E_AQ_TEST_FULL 1 +#define I40E_AQ_TEST_NVM 2 + u8 reserved[3]; + u8 command; +#define I40E_AQ_TEST_OPEN 0 +#define I40E_AQ_TEST_CLOSE 1 +#define I40E_AQ_TEST_INC 2 + u8 reserved2[3]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); + +/* Debug Read Register command (0xFF03) + * Debug Write Register command (0xFF04) + */ +struct i40e_aqc_debug_reg_read_write { + __le32 reserved; + __le32 address; + __le32 value_high; + __le32 value_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write); + +/* Scatter/gather Reg Read (indirect 0xFF05) + * Scatter/gather Reg Write (indirect 0xFF06) + */ + +/* i40e_aq_desc is used for the command */ +struct i40e_aqc_debug_reg_sg_element_data { + __le32 address; + __le32 value; +}; + +/* Debug Modify register (direct 0xFF07) */ +struct i40e_aqc_debug_modify_reg { + __le32 address; + __le32 value; + __le32 clear_mask; + __le32 set_mask; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); + +/* dump internal data (0xFF08, indirect) */ + +#define I40E_AQ_CLUSTER_ID_AUX 0 +#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1 +#define I40E_AQ_CLUSTER_ID_TXSCHED 2 +#define I40E_AQ_CLUSTER_ID_HMC 3 +#define I40E_AQ_CLUSTER_ID_MAC0 4 +#define I40E_AQ_CLUSTER_ID_MAC1 5 +#define I40E_AQ_CLUSTER_ID_MAC2 6 +#define I40E_AQ_CLUSTER_ID_MAC3 7 +#define I40E_AQ_CLUSTER_ID_DCB 8 +#define I40E_AQ_CLUSTER_ID_EMP_MEM 9 +#define I40E_AQ_CLUSTER_ID_PKT_BUF 10 +#define I40E_AQ_CLUSTER_ID_ALTRAM 11 + +struct i40e_aqc_debug_dump_internals { + u8 cluster_id; + u8 table_id; + __le16 data_size; + __le32 idx; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); + +struct i40e_aqc_debug_modify_internals { + u8 cluster_id; + u8 cluster_specific_params[7]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); + +#endif /* _I40E_ADMINQ_CMD_H_ */ diff --git a/drivers/net/i40e/base/i40e_alloc.h b/drivers/net/i40e/base/i40e_alloc.h new file mode 100644 index 00000000..38c2f655 --- /dev/null +++ b/drivers/net/i40e/base/i40e_alloc.h @@ -0,0 +1,65 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. 
Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _I40E_ALLOC_H_ +#define _I40E_ALLOC_H_ + +struct i40e_hw; + +/* Memory allocation types */ +enum i40e_memory_type { + i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */ + i40e_mem_asq_buf = 1, + i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */ + i40e_mem_arq_ring = 3, /* ARQ descriptor ring */ + i40e_mem_atq_ring = 4, /* ATQ descriptor ring */ + i40e_mem_pd = 5, /* Page Descriptor */ + i40e_mem_bp = 6, /* Backing Page - 4KB */ + i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */ + i40e_mem_reserved +}; + +/* prototype for functions used for dynamic memory allocation */ +enum i40e_status_code i40e_allocate_dma_mem(struct i40e_hw *hw, + struct i40e_dma_mem *mem, + enum i40e_memory_type type, + u64 size, u32 alignment); +enum i40e_status_code i40e_free_dma_mem(struct i40e_hw *hw, + struct i40e_dma_mem *mem); +enum i40e_status_code i40e_allocate_virt_mem(struct i40e_hw *hw, + struct i40e_virt_mem *mem, + u32 size); +enum i40e_status_code i40e_free_virt_mem(struct i40e_hw *hw, + struct i40e_virt_mem *mem); + +#endif /* _I40E_ALLOC_H_ */ diff --git a/drivers/net/i40e/base/i40e_common.c b/drivers/net/i40e/base/i40e_common.c new file mode 100644 index 00000000..ef3425e1 --- /dev/null +++ b/drivers/net/i40e/base/i40e_common.c @@ -0,0 +1,6698 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#include "i40e_type.h" +#include "i40e_adminq.h" +#include "i40e_prototype.h" +#include "i40e_virtchnl.h" + + +/** + * i40e_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the mac type of the adapter based on the + * vendor ID and device ID stored in the hw structure. + **/ +#if defined(INTEGRATED_VF) || defined(VF_DRIVER) +enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw) +#else +STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw) +#endif +{ + enum i40e_status_code status = I40E_SUCCESS; + + DEBUGFUNC("i40e_set_mac_type\n"); + + if (hw->vendor_id == I40E_INTEL_VENDOR_ID) { + switch (hw->device_id) { + case I40E_DEV_ID_SFP_XL710: + case I40E_DEV_ID_QEMU: + case I40E_DEV_ID_KX_B: + case I40E_DEV_ID_KX_C: + case I40E_DEV_ID_QSFP_A: + case I40E_DEV_ID_QSFP_B: + case I40E_DEV_ID_QSFP_C: + case I40E_DEV_ID_10G_BASE_T: + case I40E_DEV_ID_10G_BASE_T4: + case I40E_DEV_ID_20G_KR2: + case I40E_DEV_ID_20G_KR2_A: + hw->mac.type = I40E_MAC_XL710; + break; +#ifdef X722_SUPPORT +#ifdef X722_A0_SUPPORT + case I40E_DEV_ID_X722_A0: +#endif + case I40E_DEV_ID_KX_X722: + case I40E_DEV_ID_QSFP_X722: + case I40E_DEV_ID_SFP_X722: + case I40E_DEV_ID_1G_BASE_T_X722: + case I40E_DEV_ID_10G_BASE_T_X722: + hw->mac.type = I40E_MAC_X722; + break; +#endif +#ifdef X722_SUPPORT +#if defined(INTEGRATED_VF) || defined(VF_DRIVER) + case I40E_DEV_ID_X722_VF: + case I40E_DEV_ID_X722_VF_HV: +#ifdef X722_A0_SUPPORT + case I40E_DEV_ID_X722_A0_VF: +#endif + hw->mac.type = I40E_MAC_X722_VF; + break; +#endif /* INTEGRATED_VF || VF_DRIVER */ +#endif /* X722_SUPPORT */ +#if defined(INTEGRATED_VF) || defined(VF_DRIVER) + case I40E_DEV_ID_VF: + case I40E_DEV_ID_VF_HV: + hw->mac.type = I40E_MAC_VF; + break; +#endif + default: + hw->mac.type = I40E_MAC_GENERIC; + break; + } + } else { + status = I40E_ERR_DEVICE_NOT_SUPPORTED; + } + + DEBUGOUT2("i40e_set_mac_type found mac: %d, returns: %d\n", + hw->mac.type, status); + return status; +} + +#ifndef I40E_NDIS_SUPPORT +/** + * i40e_aq_str - convert AQ err code to a string + * @hw: pointer to the HW structure + * @aq_err: the AQ error code to convert + **/ +const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) +{ + switch (aq_err) { + case I40E_AQ_RC_OK: + return "OK"; + case I40E_AQ_RC_EPERM: + return "I40E_AQ_RC_EPERM"; + case I40E_AQ_RC_ENOENT: + return "I40E_AQ_RC_ENOENT"; + case I40E_AQ_RC_ESRCH: + return "I40E_AQ_RC_ESRCH"; + case I40E_AQ_RC_EINTR: + return "I40E_AQ_RC_EINTR"; + case I40E_AQ_RC_EIO: + return "I40E_AQ_RC_EIO"; + case I40E_AQ_RC_ENXIO: + return "I40E_AQ_RC_ENXIO"; + case I40E_AQ_RC_E2BIG: + return "I40E_AQ_RC_E2BIG"; + case I40E_AQ_RC_EAGAIN: + 
return "I40E_AQ_RC_EAGAIN"; + case I40E_AQ_RC_ENOMEM: + return "I40E_AQ_RC_ENOMEM"; + case I40E_AQ_RC_EACCES: + return "I40E_AQ_RC_EACCES"; + case I40E_AQ_RC_EFAULT: + return "I40E_AQ_RC_EFAULT"; + case I40E_AQ_RC_EBUSY: + return "I40E_AQ_RC_EBUSY"; + case I40E_AQ_RC_EEXIST: + return "I40E_AQ_RC_EEXIST"; + case I40E_AQ_RC_EINVAL: + return "I40E_AQ_RC_EINVAL"; + case I40E_AQ_RC_ENOTTY: + return "I40E_AQ_RC_ENOTTY"; + case I40E_AQ_RC_ENOSPC: + return "I40E_AQ_RC_ENOSPC"; + case I40E_AQ_RC_ENOSYS: + return "I40E_AQ_RC_ENOSYS"; + case I40E_AQ_RC_ERANGE: + return "I40E_AQ_RC_ERANGE"; + case I40E_AQ_RC_EFLUSHED: + return "I40E_AQ_RC_EFLUSHED"; + case I40E_AQ_RC_BAD_ADDR: + return "I40E_AQ_RC_BAD_ADDR"; + case I40E_AQ_RC_EMODE: + return "I40E_AQ_RC_EMODE"; + case I40E_AQ_RC_EFBIG: + return "I40E_AQ_RC_EFBIG"; + } + + snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err); + return hw->err_str; +} + +/** + * i40e_stat_str - convert status err code to a string + * @hw: pointer to the HW structure + * @stat_err: the status error code to convert + **/ +const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err) +{ + switch (stat_err) { + case I40E_SUCCESS: + return "OK"; + case I40E_ERR_NVM: + return "I40E_ERR_NVM"; + case I40E_ERR_NVM_CHECKSUM: + return "I40E_ERR_NVM_CHECKSUM"; + case I40E_ERR_PHY: + return "I40E_ERR_PHY"; + case I40E_ERR_CONFIG: + return "I40E_ERR_CONFIG"; + case I40E_ERR_PARAM: + return "I40E_ERR_PARAM"; + case I40E_ERR_MAC_TYPE: + return "I40E_ERR_MAC_TYPE"; + case I40E_ERR_UNKNOWN_PHY: + return "I40E_ERR_UNKNOWN_PHY"; + case I40E_ERR_LINK_SETUP: + return "I40E_ERR_LINK_SETUP"; + case I40E_ERR_ADAPTER_STOPPED: + return "I40E_ERR_ADAPTER_STOPPED"; + case I40E_ERR_INVALID_MAC_ADDR: + return "I40E_ERR_INVALID_MAC_ADDR"; + case I40E_ERR_DEVICE_NOT_SUPPORTED: + return "I40E_ERR_DEVICE_NOT_SUPPORTED"; + case I40E_ERR_MASTER_REQUESTS_PENDING: + return "I40E_ERR_MASTER_REQUESTS_PENDING"; + case I40E_ERR_INVALID_LINK_SETTINGS: + return "I40E_ERR_INVALID_LINK_SETTINGS"; + case I40E_ERR_AUTONEG_NOT_COMPLETE: + return "I40E_ERR_AUTONEG_NOT_COMPLETE"; + case I40E_ERR_RESET_FAILED: + return "I40E_ERR_RESET_FAILED"; + case I40E_ERR_SWFW_SYNC: + return "I40E_ERR_SWFW_SYNC"; + case I40E_ERR_NO_AVAILABLE_VSI: + return "I40E_ERR_NO_AVAILABLE_VSI"; + case I40E_ERR_NO_MEMORY: + return "I40E_ERR_NO_MEMORY"; + case I40E_ERR_BAD_PTR: + return "I40E_ERR_BAD_PTR"; + case I40E_ERR_RING_FULL: + return "I40E_ERR_RING_FULL"; + case I40E_ERR_INVALID_PD_ID: + return "I40E_ERR_INVALID_PD_ID"; + case I40E_ERR_INVALID_QP_ID: + return "I40E_ERR_INVALID_QP_ID"; + case I40E_ERR_INVALID_CQ_ID: + return "I40E_ERR_INVALID_CQ_ID"; + case I40E_ERR_INVALID_CEQ_ID: + return "I40E_ERR_INVALID_CEQ_ID"; + case I40E_ERR_INVALID_AEQ_ID: + return "I40E_ERR_INVALID_AEQ_ID"; + case I40E_ERR_INVALID_SIZE: + return "I40E_ERR_INVALID_SIZE"; + case I40E_ERR_INVALID_ARP_INDEX: + return "I40E_ERR_INVALID_ARP_INDEX"; + case I40E_ERR_INVALID_FPM_FUNC_ID: + return "I40E_ERR_INVALID_FPM_FUNC_ID"; + case I40E_ERR_QP_INVALID_MSG_SIZE: + return "I40E_ERR_QP_INVALID_MSG_SIZE"; + case I40E_ERR_QP_TOOMANY_WRS_POSTED: + return "I40E_ERR_QP_TOOMANY_WRS_POSTED"; + case I40E_ERR_INVALID_FRAG_COUNT: + return "I40E_ERR_INVALID_FRAG_COUNT"; + case I40E_ERR_QUEUE_EMPTY: + return "I40E_ERR_QUEUE_EMPTY"; + case I40E_ERR_INVALID_ALIGNMENT: + return "I40E_ERR_INVALID_ALIGNMENT"; + case I40E_ERR_FLUSHED_QUEUE: + return "I40E_ERR_FLUSHED_QUEUE"; + case I40E_ERR_INVALID_PUSH_PAGE_INDEX: + return "I40E_ERR_INVALID_PUSH_PAGE_INDEX"; + case 
I40E_ERR_INVALID_IMM_DATA_SIZE: + return "I40E_ERR_INVALID_IMM_DATA_SIZE"; + case I40E_ERR_TIMEOUT: + return "I40E_ERR_TIMEOUT"; + case I40E_ERR_OPCODE_MISMATCH: + return "I40E_ERR_OPCODE_MISMATCH"; + case I40E_ERR_CQP_COMPL_ERROR: + return "I40E_ERR_CQP_COMPL_ERROR"; + case I40E_ERR_INVALID_VF_ID: + return "I40E_ERR_INVALID_VF_ID"; + case I40E_ERR_INVALID_HMCFN_ID: + return "I40E_ERR_INVALID_HMCFN_ID"; + case I40E_ERR_BACKING_PAGE_ERROR: + return "I40E_ERR_BACKING_PAGE_ERROR"; + case I40E_ERR_NO_PBLCHUNKS_AVAILABLE: + return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE"; + case I40E_ERR_INVALID_PBLE_INDEX: + return "I40E_ERR_INVALID_PBLE_INDEX"; + case I40E_ERR_INVALID_SD_INDEX: + return "I40E_ERR_INVALID_SD_INDEX"; + case I40E_ERR_INVALID_PAGE_DESC_INDEX: + return "I40E_ERR_INVALID_PAGE_DESC_INDEX"; + case I40E_ERR_INVALID_SD_TYPE: + return "I40E_ERR_INVALID_SD_TYPE"; + case I40E_ERR_MEMCPY_FAILED: + return "I40E_ERR_MEMCPY_FAILED"; + case I40E_ERR_INVALID_HMC_OBJ_INDEX: + return "I40E_ERR_INVALID_HMC_OBJ_INDEX"; + case I40E_ERR_INVALID_HMC_OBJ_COUNT: + return "I40E_ERR_INVALID_HMC_OBJ_COUNT"; + case I40E_ERR_INVALID_SRQ_ARM_LIMIT: + return "I40E_ERR_INVALID_SRQ_ARM_LIMIT"; + case I40E_ERR_SRQ_ENABLED: + return "I40E_ERR_SRQ_ENABLED"; + case I40E_ERR_ADMIN_QUEUE_ERROR: + return "I40E_ERR_ADMIN_QUEUE_ERROR"; + case I40E_ERR_ADMIN_QUEUE_TIMEOUT: + return "I40E_ERR_ADMIN_QUEUE_TIMEOUT"; + case I40E_ERR_BUF_TOO_SHORT: + return "I40E_ERR_BUF_TOO_SHORT"; + case I40E_ERR_ADMIN_QUEUE_FULL: + return "I40E_ERR_ADMIN_QUEUE_FULL"; + case I40E_ERR_ADMIN_QUEUE_NO_WORK: + return "I40E_ERR_ADMIN_QUEUE_NO_WORK"; + case I40E_ERR_BAD_IWARP_CQE: + return "I40E_ERR_BAD_IWARP_CQE"; + case I40E_ERR_NVM_BLANK_MODE: + return "I40E_ERR_NVM_BLANK_MODE"; + case I40E_ERR_NOT_IMPLEMENTED: + return "I40E_ERR_NOT_IMPLEMENTED"; + case I40E_ERR_PE_DOORBELL_NOT_ENABLED: + return "I40E_ERR_PE_DOORBELL_NOT_ENABLED"; + case I40E_ERR_DIAG_TEST_FAILED: + return "I40E_ERR_DIAG_TEST_FAILED"; + case I40E_ERR_NOT_READY: + return "I40E_ERR_NOT_READY"; + case I40E_NOT_SUPPORTED: + return "I40E_NOT_SUPPORTED"; + case I40E_ERR_FIRMWARE_API_VERSION: + return "I40E_ERR_FIRMWARE_API_VERSION"; + } + + snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); + return hw->err_str; +} + +#endif /* I40E_NDIS_SUPPORT */ +/** + * i40e_debug_aq + * @hw: debug mask related to admin queue + * @mask: debug mask + * @desc: pointer to admin queue descriptor + * @buffer: pointer to command buffer + * @buf_len: max length of buffer + * + * Dumps debug log about adminq command with descriptor contents. 
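 *
 * Illustrative note (not part of the original change): callers gate this
 * dump on a bit in hw->debug_mask, so a typical invocation -- assuming
 * I40E_DEBUG_AQ_MESSAGE is the mask bit of interest and desc/buf are the
 * descriptor and indirect buffer just sent -- looks like:
 *
 *   i40e_debug_aq(hw, I40E_DEBUG_AQ_MESSAGE, (void *)&desc, buf, buf_len);
 *
 * The body prints the descriptor header fields, then the buffer in
 * 16-byte rows, zero-padding the final partial row.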
+ **/ +void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, + void *buffer, u16 buf_len) +{ + struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; + u16 len = LE16_TO_CPU(aq_desc->datalen); + u8 *buf = (u8 *)buffer; + u16 i = 0; + + if ((!(mask & hw->debug_mask)) || (desc == NULL)) + return; + + i40e_debug(hw, mask, + "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", + LE16_TO_CPU(aq_desc->opcode), + LE16_TO_CPU(aq_desc->flags), + LE16_TO_CPU(aq_desc->datalen), + LE16_TO_CPU(aq_desc->retval)); + i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", + LE32_TO_CPU(aq_desc->cookie_high), + LE32_TO_CPU(aq_desc->cookie_low)); + i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", + LE32_TO_CPU(aq_desc->params.internal.param0), + LE32_TO_CPU(aq_desc->params.internal.param1)); + i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", + LE32_TO_CPU(aq_desc->params.external.addr_high), + LE32_TO_CPU(aq_desc->params.external.addr_low)); + + if ((buffer != NULL) && (aq_desc->datalen != 0)) { + i40e_debug(hw, mask, "AQ CMD Buffer:\n"); + if (buf_len < len) + len = buf_len; + /* write the full 16-byte chunks */ + for (i = 0; i < (len - 16); i += 16) + i40e_debug(hw, mask, + "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", + i, buf[i], buf[i+1], buf[i+2], buf[i+3], + buf[i+4], buf[i+5], buf[i+6], buf[i+7], + buf[i+8], buf[i+9], buf[i+10], buf[i+11], + buf[i+12], buf[i+13], buf[i+14], buf[i+15]); + /* the most we could have left is 16 bytes, pad with zeros */ + if (i < len) { + char d_buf[16]; + int j; + + memset(d_buf, 0, sizeof(d_buf)); + for (j = 0; i < len; j++, i++) + d_buf[j] = buf[i]; + i40e_debug(hw, mask, + "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", + i, d_buf[0], d_buf[1], d_buf[2], d_buf[3], + d_buf[4], d_buf[5], d_buf[6], d_buf[7], + d_buf[8], d_buf[9], d_buf[10], d_buf[11], + d_buf[12], d_buf[13], d_buf[14], d_buf[15]); + } + } +} + +/** + * i40e_check_asq_alive + * @hw: pointer to the hw struct + * + * Returns true if Queue is enabled else false. + **/ +bool i40e_check_asq_alive(struct i40e_hw *hw) +{ + if (hw->aq.asq.len) +#ifdef PF_DRIVER +#ifdef INTEGRATED_VF + if (!i40e_is_vf(hw)) + return !!(rd32(hw, hw->aq.asq.len) & + I40E_PF_ATQLEN_ATQENABLE_MASK); +#else + return !!(rd32(hw, hw->aq.asq.len) & + I40E_PF_ATQLEN_ATQENABLE_MASK); +#endif /* INTEGRATED_VF */ +#endif /* PF_DRIVER */ +#ifdef VF_DRIVER +#ifdef INTEGRATED_VF + if (i40e_is_vf(hw)) + return !!(rd32(hw, hw->aq.asq.len) & + I40E_VF_ATQLEN1_ATQENABLE_MASK); +#else + return !!(rd32(hw, hw->aq.asq.len) & + I40E_VF_ATQLEN1_ATQENABLE_MASK); +#endif /* INTEGRATED_VF */ +#endif /* VF_DRIVER */ + return false; +} + +/** + * i40e_aq_queue_shutdown + * @hw: pointer to the hw struct + * @unloading: is the driver unloading itself + * + * Tell the Firmware that we're shutting down the AdminQ and whether + * or not the driver is unloading as well. 
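 *
 * Illustrative note (not part of the original change): this function shows
 * the canonical shape of a direct AQ command in this file -- fill a default
 * descriptor for the opcode, overlay a command struct on desc.params.raw,
 * set its fields, then pass the descriptor to i40e_asq_send_command() with
 * no buffer. Indirect commands differ only in supplying a buffer and
 * setting I40E_AQ_FLAG_BUF (and, in these helpers, I40E_AQ_FLAG_RD), as the
 * RSS LUT/key functions further below show.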
+ **/ +enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, + bool unloading) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_queue_shutdown *cmd = + (struct i40e_aqc_queue_shutdown *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_queue_shutdown); + + if (unloading) + cmd->driver_unloading = CPU_TO_LE32(I40E_AQ_DRIVER_UNLOADING); + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + + return status; +} +#ifdef X722_SUPPORT + +/** + * i40e_aq_get_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * @set: set true to set the table, false to get the table + * + * Internal function to get or set RSS look up table + **/ +STATIC enum i40e_status_code i40e_aq_get_set_rss_lut(struct i40e_hw *hw, + u16 vsi_id, bool pf_lut, + u8 *lut, u16 lut_size, + bool set) +{ + enum i40e_status_code status; + struct i40e_aq_desc desc; + struct i40e_aqc_get_set_rss_lut *cmd_resp = + (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; + + if (set) + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_rss_lut); + else + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_rss_lut); + + /* Indirect command */ + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); + + cmd_resp->vsi_id = + CPU_TO_LE16((u16)((vsi_id << + I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & + I40E_AQC_SET_RSS_LUT_VSI_ID_MASK)); + cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); + + if (pf_lut) + cmd_resp->flags |= CPU_TO_LE16((u16) + ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF << + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + else + cmd_resp->flags |= CPU_TO_LE16((u16) + ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + + status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL); + + return status; +} + +/** + * i40e_aq_get_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * + * get the RSS lookup table, PF or VSI type + **/ +enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) +{ + return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, + false); +} + +/** + * i40e_aq_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * + * set the RSS lookup table, PF or VSI type + **/ +enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) +{ + return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true); +} + +/** + * i40e_aq_get_set_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * @set: set true to set the key, false to get the key + * + * get the RSS key per VSI + **/ +STATIC enum i40e_status_code i40e_aq_get_set_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key, + bool set) +{ + enum i40e_status_code 
status; + struct i40e_aq_desc desc; + struct i40e_aqc_get_set_rss_key *cmd_resp = + (struct i40e_aqc_get_set_rss_key *)&desc.params.raw; + u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data); + + if (set) + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_rss_key); + else + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_rss_key); + + /* Indirect command */ + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); + + cmd_resp->vsi_id = + CPU_TO_LE16((u16)((vsi_id << + I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & + I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); + cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); + + status = i40e_asq_send_command(hw, &desc, key, key_size, NULL); + + return status; +} + +/** + * i40e_aq_get_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * + **/ +enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key) +{ + return i40e_aq_get_set_rss_key(hw, vsi_id, key, false); +} + +/** + * i40e_aq_set_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * + * set the RSS key per VSI + **/ +enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key) +{ + return i40e_aq_get_set_rss_key(hw, vsi_id, key, true); +} +#endif /* X722_SUPPORT */ + +/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the + * hardware to a bit-field that can be used by SW to more easily determine the + * packet type. + * + * Macros are used to shorten the table lines and make this table human + * readable. + * + * We store the PTYPE in the top byte of the bit field - this is just so that + * we can check that the table doesn't have a row missing, as the index into + * the table should be the PTYPE. 
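 *
 * Illustrative note (not part of the original change): a receive routine
 * decoding a hardware ptype would index this table and test the decoded
 * bit-fields, e.g. (is_tcp is a hypothetical local):
 *
 *   struct i40e_rx_ptype_decoded d = i40e_ptype_lookup[ptype];
 *
 *   if (d.known && d.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
 *       d.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP)
 *           is_tcp = true;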
+ * + * Typical work flow: + * + * IF NOT i40e_ptype_lookup[ptype].known + * THEN + * Packet is unknown + * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP + * Use the rest of the fields to look at the tunnels, inner protocols, etc + * ELSE + * Use the enum i40e_rx_l2_ptype to decode the packet type + * ENDIF + */ + +/* macro to make the table lines short */ +#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ + { PTYPE, \ + 1, \ + I40E_RX_PTYPE_OUTER_##OUTER_IP, \ + I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ + I40E_RX_PTYPE_##OUTER_FRAG, \ + I40E_RX_PTYPE_TUNNEL_##T, \ + I40E_RX_PTYPE_TUNNEL_END_##TE, \ + I40E_RX_PTYPE_##TEF, \ + I40E_RX_PTYPE_INNER_PROT_##I, \ + I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } + +#define I40E_PTT_UNUSED_ENTRY(PTYPE) \ + { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } + +/* shorter macros makes the table fit but are terse */ +#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG +#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG +#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = { + /* L2 Packet types */ + I40E_PTT_UNUSED_ENTRY(0), + I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), + I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT_UNUSED_ENTRY(4), + I40E_PTT_UNUSED_ENTRY(5), + I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT_UNUSED_ENTRY(8), + I40E_PTT_UNUSED_ENTRY(9), + I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + + /* Non Tunneled IPv4 */ + I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(25), + I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), + I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), + I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv4 --> IPv4 */ + I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(32), + I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> IPv6 */ + I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(39), + I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + 
I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT */ + I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> IPv4 */ + I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(47), + I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> IPv6 */ + I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(54), + I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC */ + I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ + I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(62), + I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ + I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(69), + I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC/VLAN */ + I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ + I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(77), + I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), + I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ + I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(84), + I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* Non Tunneled IPv6 */ + I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), + I40E_PTT_UNUSED_ENTRY(91), + I40E_PTT(92, IP, IPV6, NOF, NONE, 
NONE, NOF, TCP, PAY4), + I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), + I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv6 --> IPv4 */ + I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(98), + I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> IPv6 */ + I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(105), + I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT */ + I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> IPv4 */ + I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(113), + I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> IPv6 */ + I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(120), + I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC */ + I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ + I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(128), + I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ + I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(135), + I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN */ + I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ + I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(143), + I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, 
NOF, TCP, PAY4), + I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ + I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(150), + I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* unused entries */ + I40E_PTT_UNUSED_ENTRY(154), + I40E_PTT_UNUSED_ENTRY(155), + I40E_PTT_UNUSED_ENTRY(156), + I40E_PTT_UNUSED_ENTRY(157), + I40E_PTT_UNUSED_ENTRY(158), + I40E_PTT_UNUSED_ENTRY(159), + + I40E_PTT_UNUSED_ENTRY(160), + I40E_PTT_UNUSED_ENTRY(161), + I40E_PTT_UNUSED_ENTRY(162), + I40E_PTT_UNUSED_ENTRY(163), + I40E_PTT_UNUSED_ENTRY(164), + I40E_PTT_UNUSED_ENTRY(165), + I40E_PTT_UNUSED_ENTRY(166), + I40E_PTT_UNUSED_ENTRY(167), + I40E_PTT_UNUSED_ENTRY(168), + I40E_PTT_UNUSED_ENTRY(169), + + I40E_PTT_UNUSED_ENTRY(170), + I40E_PTT_UNUSED_ENTRY(171), + I40E_PTT_UNUSED_ENTRY(172), + I40E_PTT_UNUSED_ENTRY(173), + I40E_PTT_UNUSED_ENTRY(174), + I40E_PTT_UNUSED_ENTRY(175), + I40E_PTT_UNUSED_ENTRY(176), + I40E_PTT_UNUSED_ENTRY(177), + I40E_PTT_UNUSED_ENTRY(178), + I40E_PTT_UNUSED_ENTRY(179), + + I40E_PTT_UNUSED_ENTRY(180), + I40E_PTT_UNUSED_ENTRY(181), + I40E_PTT_UNUSED_ENTRY(182), + I40E_PTT_UNUSED_ENTRY(183), + I40E_PTT_UNUSED_ENTRY(184), + I40E_PTT_UNUSED_ENTRY(185), + I40E_PTT_UNUSED_ENTRY(186), + I40E_PTT_UNUSED_ENTRY(187), + I40E_PTT_UNUSED_ENTRY(188), + I40E_PTT_UNUSED_ENTRY(189), + + I40E_PTT_UNUSED_ENTRY(190), + I40E_PTT_UNUSED_ENTRY(191), + I40E_PTT_UNUSED_ENTRY(192), + I40E_PTT_UNUSED_ENTRY(193), + I40E_PTT_UNUSED_ENTRY(194), + I40E_PTT_UNUSED_ENTRY(195), + I40E_PTT_UNUSED_ENTRY(196), + I40E_PTT_UNUSED_ENTRY(197), + I40E_PTT_UNUSED_ENTRY(198), + I40E_PTT_UNUSED_ENTRY(199), + + I40E_PTT_UNUSED_ENTRY(200), + I40E_PTT_UNUSED_ENTRY(201), + I40E_PTT_UNUSED_ENTRY(202), + I40E_PTT_UNUSED_ENTRY(203), + I40E_PTT_UNUSED_ENTRY(204), + I40E_PTT_UNUSED_ENTRY(205), + I40E_PTT_UNUSED_ENTRY(206), + I40E_PTT_UNUSED_ENTRY(207), + I40E_PTT_UNUSED_ENTRY(208), + I40E_PTT_UNUSED_ENTRY(209), + + I40E_PTT_UNUSED_ENTRY(210), + I40E_PTT_UNUSED_ENTRY(211), + I40E_PTT_UNUSED_ENTRY(212), + I40E_PTT_UNUSED_ENTRY(213), + I40E_PTT_UNUSED_ENTRY(214), + I40E_PTT_UNUSED_ENTRY(215), + I40E_PTT_UNUSED_ENTRY(216), + I40E_PTT_UNUSED_ENTRY(217), + I40E_PTT_UNUSED_ENTRY(218), + I40E_PTT_UNUSED_ENTRY(219), + + I40E_PTT_UNUSED_ENTRY(220), + I40E_PTT_UNUSED_ENTRY(221), + I40E_PTT_UNUSED_ENTRY(222), + I40E_PTT_UNUSED_ENTRY(223), + I40E_PTT_UNUSED_ENTRY(224), + I40E_PTT_UNUSED_ENTRY(225), + I40E_PTT_UNUSED_ENTRY(226), + I40E_PTT_UNUSED_ENTRY(227), + I40E_PTT_UNUSED_ENTRY(228), + I40E_PTT_UNUSED_ENTRY(229), + + I40E_PTT_UNUSED_ENTRY(230), + I40E_PTT_UNUSED_ENTRY(231), + I40E_PTT_UNUSED_ENTRY(232), + I40E_PTT_UNUSED_ENTRY(233), + I40E_PTT_UNUSED_ENTRY(234), + I40E_PTT_UNUSED_ENTRY(235), + I40E_PTT_UNUSED_ENTRY(236), + I40E_PTT_UNUSED_ENTRY(237), + I40E_PTT_UNUSED_ENTRY(238), + I40E_PTT_UNUSED_ENTRY(239), + + I40E_PTT_UNUSED_ENTRY(240), + I40E_PTT_UNUSED_ENTRY(241), + I40E_PTT_UNUSED_ENTRY(242), + I40E_PTT_UNUSED_ENTRY(243), + I40E_PTT_UNUSED_ENTRY(244), + I40E_PTT_UNUSED_ENTRY(245), + I40E_PTT_UNUSED_ENTRY(246), + 
I40E_PTT_UNUSED_ENTRY(247), + I40E_PTT_UNUSED_ENTRY(248), + I40E_PTT_UNUSED_ENTRY(249), + + I40E_PTT_UNUSED_ENTRY(250), + I40E_PTT_UNUSED_ENTRY(251), + I40E_PTT_UNUSED_ENTRY(252), + I40E_PTT_UNUSED_ENTRY(253), + I40E_PTT_UNUSED_ENTRY(254), + I40E_PTT_UNUSED_ENTRY(255) +}; + + +/** + * i40e_validate_mac_addr - Validate unicast MAC address + * @mac_addr: pointer to MAC address + * + * Tests a MAC address to ensure it is a valid Individual Address + **/ +enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr) +{ + enum i40e_status_code status = I40E_SUCCESS; + + DEBUGFUNC("i40e_validate_mac_addr"); + + /* Broadcast addresses ARE multicast addresses + * Make sure it is not a multicast address + * Reject the zero address + */ + if (I40E_IS_MULTICAST(mac_addr) || + (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && + mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)) + status = I40E_ERR_INVALID_MAC_ADDR; + + return status; +} +#ifdef PF_DRIVER + +/** + * i40e_init_shared_code - Initialize the shared code + * @hw: pointer to hardware structure + * + * This assigns the MAC type and PHY code and inits the NVM. + * Does not touch the hardware. This function must be called prior to any + * other function in the shared code. The i40e_hw structure should be + * memset to 0 prior to calling this function. The following fields in + * hw structure should be filled in prior to calling this function: + * hw_addr, back, device_id, vendor_id, subsystem_device_id, + * subsystem_vendor_id, and revision_id + **/ +enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw) +{ + enum i40e_status_code status = I40E_SUCCESS; + u32 port, ari, func_rid; + + DEBUGFUNC("i40e_init_shared_code"); + + i40e_set_mac_type(hw); + + switch (hw->mac.type) { + case I40E_MAC_XL710: +#ifdef X722_SUPPORT + case I40E_MAC_X722: +#endif + break; + default: + return I40E_ERR_DEVICE_NOT_SUPPORTED; + } + + hw->phy.get_link_info = true; + + /* Determine port number and PF number*/ + port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) + >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT; + hw->port = (u8)port; + ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >> + I40E_GLPCI_CAPSUP_ARI_EN_SHIFT; + func_rid = rd32(hw, I40E_PF_FUNC_RID); + if (ari) + hw->pf_id = (u8)(func_rid & 0xff); + else + hw->pf_id = (u8)(func_rid & 0x7); + +#ifdef X722_SUPPORT + if (hw->mac.type == I40E_MAC_X722) + hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE; + +#endif + status = i40e_init_nvm(hw); + return status; +} + +/** + * i40e_aq_mac_address_read - Retrieve the MAC addresses + * @hw: pointer to the hw struct + * @flags: a return indicator of what addresses were added to the addr store + * @addrs: the requestor's mac addr store + * @cmd_details: pointer to command details structure or NULL + **/ +STATIC enum i40e_status_code i40e_aq_mac_address_read(struct i40e_hw *hw, + u16 *flags, + struct i40e_aqc_mac_address_read_data *addrs, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_mac_address_read *cmd_data = + (struct i40e_aqc_mac_address_read *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read); + desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF); + + status = i40e_asq_send_command(hw, &desc, addrs, + sizeof(*addrs), cmd_details); + *flags = LE16_TO_CPU(cmd_data->command_flags); + + return status; +} + +/** + * i40e_aq_mac_address_write - Change the MAC addresses + * @hw: pointer to the hw struct + 
* @flags: indicates which MAC to be written + * @mac_addr: address to write + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw, + u16 flags, u8 *mac_addr, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_mac_address_write *cmd_data = + (struct i40e_aqc_mac_address_write *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_mac_address_write); + cmd_data->command_flags = CPU_TO_LE16(flags); + cmd_data->mac_sah = CPU_TO_LE16((u16)mac_addr[0] << 8 | mac_addr[1]); + cmd_data->mac_sal = CPU_TO_LE32(((u32)mac_addr[2] << 24) | + ((u32)mac_addr[3] << 16) | + ((u32)mac_addr[4] << 8) | + mac_addr[5]); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_get_mac_addr - get MAC address + * @hw: pointer to the HW structure + * @mac_addr: pointer to MAC address + * + * Reads the adapter's MAC address from register + **/ +enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr) +{ + struct i40e_aqc_mac_address_read_data addrs; + enum i40e_status_code status; + u16 flags = 0; + + status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); + + if (flags & I40E_AQC_LAN_ADDR_VALID) + memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac)); + + return status; +} + +/** + * i40e_get_port_mac_addr - get Port MAC address + * @hw: pointer to the HW structure + * @mac_addr: pointer to Port MAC address + * + * Reads the adapter's Port MAC address + **/ +enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr) +{ + struct i40e_aqc_mac_address_read_data addrs; + enum i40e_status_code status; + u16 flags = 0; + + status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); + if (status) + return status; + + if (flags & I40E_AQC_PORT_ADDR_VALID) + memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac)); + else + status = I40E_ERR_INVALID_MAC_ADDR; + + return status; +} + +/** + * i40e_pre_tx_queue_cfg - pre tx queue configure + * @hw: pointer to the HW structure + * @queue: target pf queue index + * @enable: state change request + * + * Handles hw requirement to indicate intention to enable + * or disable target queue. + **/ +void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable) +{ + u32 abs_queue_idx = hw->func_caps.base_queue + queue; + u32 reg_block = 0; + u32 reg_val; + + if (abs_queue_idx >= 128) { + reg_block = abs_queue_idx / 128; + abs_queue_idx %= 128; + } + + reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block)); + reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; + reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); + + if (enable) + reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK; + else + reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; + + wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val); +} + +/** + * i40e_read_pba_string - Reads part number string from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number string from the EEPROM + * @pba_num_size: part number string buffer length + * + * Reads the part number string from the EEPROM. 
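 *
 * Illustrative note (not part of the original change): the caller must
 * provide room for two characters per PBA data word plus a terminating
 * NUL (pba_num_size >= 2 * pba_size + 1), otherwise I40E_ERR_PARAM is
 * returned. A hypothetical caller:
 *
 *   u8 pba[64];   -- arbitrary, comfortably large buffer
 *   if (i40e_read_pba_string(hw, pba, sizeof(pba)) == I40E_SUCCESS)
 *           DEBUGOUT1("PBA: %s\n", pba);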
+ **/ +enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + enum i40e_status_code status = I40E_SUCCESS; + u16 pba_word = 0; + u16 pba_size = 0; + u16 pba_ptr = 0; + u16 i = 0; + + status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word); + if ((status != I40E_SUCCESS) || (pba_word != 0xFAFA)) { + DEBUGOUT("Failed to read PBA flags or flag is invalid.\n"); + return status; + } + + status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr); + if (status != I40E_SUCCESS) { + DEBUGOUT("Failed to read PBA Block pointer.\n"); + return status; + } + + status = i40e_read_nvm_word(hw, pba_ptr, &pba_size); + if (status != I40E_SUCCESS) { + DEBUGOUT("Failed to read PBA Block size.\n"); + return status; + } + + /* Subtract one to get PBA word count (PBA Size word is included in + * total size) + */ + pba_size--; + if (pba_num_size < (((u32)pba_size * 2) + 1)) { + DEBUGOUT("Buffer to small for PBA data.\n"); + return I40E_ERR_PARAM; + } + + for (i = 0; i < pba_size; i++) { + status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word); + if (status != I40E_SUCCESS) { + DEBUGOUT1("Failed to read PBA Block word %d.\n", i); + return status; + } + + pba_num[(i * 2)] = (pba_word >> 8) & 0xFF; + pba_num[(i * 2) + 1] = pba_word & 0xFF; + } + pba_num[(pba_size * 2)] = '\0'; + + return status; +} + +/** + * i40e_get_media_type - Gets media type + * @hw: pointer to the hardware structure + **/ +STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) +{ + enum i40e_media_type media; + + switch (hw->phy.link_info.phy_type) { + case I40E_PHY_TYPE_10GBASE_SR: + case I40E_PHY_TYPE_10GBASE_LR: + case I40E_PHY_TYPE_1000BASE_SX: + case I40E_PHY_TYPE_1000BASE_LX: + case I40E_PHY_TYPE_40GBASE_SR4: + case I40E_PHY_TYPE_40GBASE_LR4: + media = I40E_MEDIA_TYPE_FIBER; + break; + case I40E_PHY_TYPE_100BASE_TX: + case I40E_PHY_TYPE_1000BASE_T: + case I40E_PHY_TYPE_10GBASE_T: + media = I40E_MEDIA_TYPE_BASET; + break; + case I40E_PHY_TYPE_10GBASE_CR1_CU: + case I40E_PHY_TYPE_40GBASE_CR4_CU: + case I40E_PHY_TYPE_10GBASE_CR1: + case I40E_PHY_TYPE_40GBASE_CR4: + case I40E_PHY_TYPE_10GBASE_SFPP_CU: + case I40E_PHY_TYPE_40GBASE_AOC: + case I40E_PHY_TYPE_10GBASE_AOC: + media = I40E_MEDIA_TYPE_DA; + break; + case I40E_PHY_TYPE_1000BASE_KX: + case I40E_PHY_TYPE_10GBASE_KX4: + case I40E_PHY_TYPE_10GBASE_KR: + case I40E_PHY_TYPE_40GBASE_KR4: + case I40E_PHY_TYPE_20GBASE_KR2: + media = I40E_MEDIA_TYPE_BACKPLANE; + break; + case I40E_PHY_TYPE_SGMII: + case I40E_PHY_TYPE_XAUI: + case I40E_PHY_TYPE_XFI: + case I40E_PHY_TYPE_XLAUI: + case I40E_PHY_TYPE_XLPPI: + default: + media = I40E_MEDIA_TYPE_UNKNOWN; + break; + } + + return media; +} + +#define I40E_PF_RESET_WAIT_COUNT 200 +/** + * i40e_pf_reset - Reset the PF + * @hw: pointer to the hardware structure + * + * Assuming someone else has triggered a global reset, + * assure the global reset is complete and then reset the PF + **/ +enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw) +{ + u32 cnt = 0; + u32 cnt1 = 0; + u32 reg = 0; + u32 grst_del; + + /* Poll for Global Reset steady state in case of recent GRST. + * The grst delay value is in 100ms units, and we'll wait a + * couple counts longer to be sure we don't just miss the end. 
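 * (With i40e_msec_delay(100) per iteration and a bound of grst_del * 20
 *  iterations, the poll below allows roughly twenty times the delay
 *  advertised in GRSTDEL before declaring the reset failed.)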
+ */ + grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & + I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >> + I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; + + /* It can take upto 15 secs for GRST steady state */ + grst_del = grst_del * 20; /* bump it to 16 secs max to be safe */ + + for (cnt = 0; cnt < grst_del; cnt++) { + reg = rd32(hw, I40E_GLGEN_RSTAT); + if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) + break; + i40e_msec_delay(100); + } + if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { + DEBUGOUT("Global reset polling failed to complete.\n"); + return I40E_ERR_RESET_FAILED; + } + + /* Now Wait for the FW to be ready */ + for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) { + reg = rd32(hw, I40E_GLNVM_ULD); + reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | + I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK); + if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | + I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) { + DEBUGOUT1("Core and Global modules ready %d\n", cnt1); + break; + } + i40e_msec_delay(10); + } + if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | + I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) { + DEBUGOUT("wait for FW Reset complete timedout\n"); + DEBUGOUT1("I40E_GLNVM_ULD = 0x%x\n", reg); + return I40E_ERR_RESET_FAILED; + } + + /* If there was a Global Reset in progress when we got here, + * we don't need to do the PF Reset + */ + if (!cnt) { + reg = rd32(hw, I40E_PFGEN_CTRL); + wr32(hw, I40E_PFGEN_CTRL, + (reg | I40E_PFGEN_CTRL_PFSWR_MASK)); + for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) { + reg = rd32(hw, I40E_PFGEN_CTRL); + if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK)) + break; + i40e_msec_delay(1); + } + if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) { + DEBUGOUT("PF reset polling failed to complete.\n"); + return I40E_ERR_RESET_FAILED; + } + } + + i40e_clear_pxe_mode(hw); + + + return I40E_SUCCESS; +} + +/** + * i40e_clear_hw - clear out any left over hw state + * @hw: pointer to the hw struct + * + * Clear queues and interrupts, typically called at init time, + * but after the capabilities have been found so we know how many + * queues and msix vectors have been allocated. 
+ **/ +void i40e_clear_hw(struct i40e_hw *hw) +{ + u32 num_queues, base_queue; + u32 num_pf_int; + u32 num_vf_int; + u32 num_vfs; + u32 i, j; + u32 val; + u32 eol = 0x7ff; + + /* get number of interrupts, queues, and vfs */ + val = rd32(hw, I40E_GLPCI_CNF2); + num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >> + I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; + num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >> + I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT; + + val = rd32(hw, I40E_PFLAN_QALLOC); + base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >> + I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; + j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >> + I40E_PFLAN_QALLOC_LASTQ_SHIFT; + if (val & I40E_PFLAN_QALLOC_VALID_MASK) + num_queues = (j - base_queue) + 1; + else + num_queues = 0; + + val = rd32(hw, I40E_PF_VT_PFALLOC); + i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >> + I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT; + j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >> + I40E_PF_VT_PFALLOC_LASTVF_SHIFT; + if (val & I40E_PF_VT_PFALLOC_VALID_MASK) + num_vfs = (j - i) + 1; + else + num_vfs = 0; + + /* stop all the interrupts */ + wr32(hw, I40E_PFINT_ICR0_ENA, 0); + val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; + for (i = 0; i < num_pf_int - 2; i++) + wr32(hw, I40E_PFINT_DYN_CTLN(i), val); + + /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */ + val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; + wr32(hw, I40E_PFINT_LNKLST0, val); + for (i = 0; i < num_pf_int - 2; i++) + wr32(hw, I40E_PFINT_LNKLSTN(i), val); + val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT; + for (i = 0; i < num_vfs; i++) + wr32(hw, I40E_VPINT_LNKLST0(i), val); + for (i = 0; i < num_vf_int - 2; i++) + wr32(hw, I40E_VPINT_LNKLSTN(i), val); + + /* warn the HW of the coming Tx disables */ + for (i = 0; i < num_queues; i++) { + u32 abs_queue_idx = base_queue + i; + u32 reg_block = 0; + + if (abs_queue_idx >= 128) { + reg_block = abs_queue_idx / 128; + abs_queue_idx %= 128; + } + + val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block)); + val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; + val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); + val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; + + wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val); + } + i40e_usec_delay(400); + + /* stop all the queues */ + for (i = 0; i < num_queues; i++) { + wr32(hw, I40E_QINT_TQCTL(i), 0); + wr32(hw, I40E_QTX_ENA(i), 0); + wr32(hw, I40E_QINT_RQCTL(i), 0); + wr32(hw, I40E_QRX_ENA(i), 0); + } + + /* short wait for all queue disables to settle */ + i40e_usec_delay(50); +} + +/** + * i40e_clear_pxe_mode - clear pxe operations mode + * @hw: pointer to the hw struct + * + * Make sure all PXE mode settings are cleared, including things + * like descriptor fetch/write-back mode. 
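For reference, a short worked example of the queue-index split used in the Tx pre-disable loop of i40e_clear_hw() above; the divisor 128 comes from the code, the queue number 300 is illustrative:

/* abs_queue_idx = base_queue + i, e.g. 300:
 *	reg_block     = 300 / 128 = 2
 *	abs_queue_idx = 300 % 128 = 44
 * so queue 300 is disarmed through I40E_GLLAN_TXPRE_QDIS(2) with
 * QINDX = 44.
 */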
+ **/ +void i40e_clear_pxe_mode(struct i40e_hw *hw) +{ + if (i40e_check_asq_alive(hw)) + i40e_aq_clear_pxe_mode(hw, NULL); +} + +/** + * i40e_led_is_mine - helper to find matching led + * @hw: pointer to the hw struct + * @idx: index into GPIO registers + * + * returns: 0 if no match, otherwise the value of the GPIO_CTL register + */ +static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) +{ + u32 gpio_val = 0; + u32 port; + + if (!hw->func_caps.led[idx]) + return 0; + + gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx)); + port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >> + I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT; + + /* if PRT_NUM_NA is 1 then this LED is not port specific, OR + * if it is not our port then ignore + */ + if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) || + (port != hw->port)) + return 0; + + return gpio_val; +} + +#define I40E_COMBINED_ACTIVITY 0xA +#define I40E_FILTER_ACTIVITY 0xE +#define I40E_LINK_ACTIVITY 0xC +#define I40E_MAC_ACTIVITY 0xD +#define I40E_LED0 22 + +/** + * i40e_led_get - return current on/off mode + * @hw: pointer to the hw struct + * + * The value returned is the 'mode' field as defined in the + * GPIO register definitions: 0x0 = off, 0xf = on, and other + * values are variations of possible behaviors relating to + * blink, link, and wire. + **/ +u32 i40e_led_get(struct i40e_hw *hw) +{ + u32 current_mode = 0; + u32 mode = 0; + int i; + + /* as per the documentation GPIO 22-29 are the LED + * GPIO pins named LED0..LED7 + */ + for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { + u32 gpio_val = i40e_led_is_mine(hw, i); + + if (!gpio_val) + continue; + + /* ignore gpio LED src mode entries related to the activity + * LEDs + */ + current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) + >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); + switch (current_mode) { + case I40E_COMBINED_ACTIVITY: + case I40E_FILTER_ACTIVITY: + case I40E_MAC_ACTIVITY: + continue; + default: + break; + } + + mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> + I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT; + break; + } + + return mode; +} + +/** + * i40e_led_set - set new on/off mode + * @hw: pointer to the hw struct + * @mode: 0=off, 0xf=on (else see manual for mode details) + * @blink: true if the LED should blink when on, false if steady + * + * if this function is used to turn on the blink it should + * be used to disable the blink when restoring the original state. 
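A minimal caller-side sketch of how the two LED helpers here (i40e_led_get() above and i40e_led_set() defined next) are typically paired for a port-identify blink. The wrapper names and the saved_led_mode variable are illustrative; only the helper signatures and the 0xf/0x0 mode semantics come from this file.

/* Illustrative only: blink the port LED, then restore the old mode. */
static u32 saved_led_mode;

static void example_port_identify_start(struct i40e_hw *hw)
{
	saved_led_mode = i40e_led_get(hw);	/* remember current mode */
	i40e_led_set(hw, 0xf, true);		/* full on, blinking */
}

static void example_port_identify_stop(struct i40e_hw *hw)
{
	/* the same helper must also clear the blink bit again */
	i40e_led_set(hw, saved_led_mode, false);
}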
+ **/ +void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) +{ + u32 current_mode = 0; + int i; + + if (mode & 0xfffffff0) + DEBUGOUT1("invalid mode passed in %X\n", mode); + + /* as per the documentation GPIO 22-29 are the LED + * GPIO pins named LED0..LED7 + */ + for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { + u32 gpio_val = i40e_led_is_mine(hw, i); + + if (!gpio_val) + continue; + + /* ignore gpio LED src mode entries related to the activity + * LEDs + */ + current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) + >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); + switch (current_mode) { + case I40E_COMBINED_ACTIVITY: + case I40E_FILTER_ACTIVITY: + case I40E_MAC_ACTIVITY: + continue; + default: + break; + } + + gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK; + /* this & is a bit of paranoia, but serves as a range check */ + gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & + I40E_GLGEN_GPIO_CTL_LED_MODE_MASK); + + if (mode == I40E_LINK_ACTIVITY) + blink = false; + + if (blink) + gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); + else + gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); + + wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val); + break; + } +} + +/* Admin command wrappers */ + +/** + * i40e_aq_get_phy_capabilities + * @hw: pointer to the hw struct + * @abilities: structure for PHY capabilities to be filled + * @qualified_modules: report Qualified Modules + * @report_init: report init capabilities (active are default) + * @cmd_details: pointer to command details structure or NULL + * + * Returns the various PHY abilities supported on the Port. + **/ +enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw, + bool qualified_modules, bool report_init, + struct i40e_aq_get_phy_abilities_resp *abilities, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + enum i40e_status_code status; + u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp); + + if (!abilities) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_phy_abilities); + + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + if (abilities_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + if (qualified_modules) + desc.params.external.param0 |= + CPU_TO_LE32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES); + + if (report_init) + desc.params.external.param0 |= + CPU_TO_LE32(I40E_AQ_PHY_REPORT_INITIAL_VALUES); + + status = i40e_asq_send_command(hw, &desc, abilities, abilities_size, + cmd_details); + + if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) + status = I40E_ERR_UNKNOWN_PHY; + + if (report_init) + hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type); + + return status; +} + +/** + * i40e_aq_set_phy_config + * @hw: pointer to the hw struct + * @config: structure with PHY configuration to be set + * @cmd_details: pointer to command details structure or NULL + * + * Set the various PHY configuration parameters + * supported on the Port.One or more of the Set PHY config parameters may be + * ignored in an MFP mode as the PF may not have the privilege to set some + * of the PHY Config parameters. This status will be indicated by the + * command response. 
+ **/ +enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, + struct i40e_aq_set_phy_config *config, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aq_set_phy_config *cmd = + (struct i40e_aq_set_phy_config *)&desc.params.raw; + enum i40e_status_code status; + + if (!config) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_phy_config); + + *cmd = *config; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_set_fc + * @hw: pointer to the hw struct + * + * Set the requested flow control mode using set_phy_config. + **/ +enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, + bool atomic_restart) +{ + enum i40e_fc_mode fc_mode = hw->fc.requested_mode; + struct i40e_aq_get_phy_abilities_resp abilities; + struct i40e_aq_set_phy_config config; + enum i40e_status_code status; + u8 pause_mask = 0x0; + + *aq_failures = 0x0; + + switch (fc_mode) { + case I40E_FC_FULL: + pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX; + pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX; + break; + case I40E_FC_RX_PAUSE: + pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX; + break; + case I40E_FC_TX_PAUSE: + pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX; + break; + default: + break; + } + + /* Get the current phy config */ + status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, + NULL); + if (status) { + *aq_failures |= I40E_SET_FC_AQ_FAIL_GET; + return status; + } + + memset(&config, 0, sizeof(config)); + /* clear the old pause settings */ + config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) & + ~(I40E_AQ_PHY_FLAG_PAUSE_RX); + /* set the new abilities */ + config.abilities |= pause_mask; + /* If the abilities have changed, then set the new config */ + if (config.abilities != abilities.abilities) { + /* Auto restart link so settings take effect */ + if (atomic_restart) + config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + /* Copy over all the old settings */ + config.phy_type = abilities.phy_type; + config.link_speed = abilities.link_speed; + config.eee_capability = abilities.eee_capability; + config.eeer = abilities.eeer_val; + config.low_power_ctrl = abilities.d3_lpan; + status = i40e_aq_set_phy_config(hw, &config, NULL); + + if (status) + *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; + } + /* Update the link info */ + status = i40e_update_link_info(hw); + if (status) { + /* Wait a little bit (on 40G cards it sometimes takes a really + * long time for link to come back from the atomic reset) + * and try once more + */ + i40e_msec_delay(1000); + status = i40e_update_link_info(hw); + } + if (status) + *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE; + + return status; +} + +/** + * i40e_aq_set_mac_config + * @hw: pointer to the hw struct + * @max_frame_size: Maximum Frame Size to be supported by the port + * @crc_en: Tell HW to append a CRC to outgoing frames + * @pacing: Pacing configurations + * @cmd_details: pointer to command details structure or NULL + * + * Configure MAC settings for frame size, jumbo frame support and the + * addition of a CRC by the hardware. 
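A hedged caller-side sketch of the i40e_set_fc() helper above, as a driver might use it when full flow control is requested. Only the hw->fc field, the I40E_FC_* / I40E_SET_FC_AQ_FAIL_* names and the function signature are taken from this hunk; the wrapper itself is illustrative.

static enum i40e_status_code example_enable_full_fc(struct i40e_hw *hw)
{
	enum i40e_status_code status;
	u8 aq_failures = 0;

	hw->fc.requested_mode = I40E_FC_FULL;	/* request Tx + Rx pause */

	/* atomic_restart = true so the link is restarted automatically */
	status = i40e_set_fc(hw, &aq_failures, true);

	if (aq_failures & I40E_SET_FC_AQ_FAIL_SET)
		DEBUGOUT("set_phy_config for flow control failed\n");

	return status;
}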
+ **/ +enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw, + u16 max_frame_size, + bool crc_en, u16 pacing, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aq_set_mac_config *cmd = + (struct i40e_aq_set_mac_config *)&desc.params.raw; + enum i40e_status_code status; + + if (max_frame_size == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_mac_config); + + cmd->max_frame_size = CPU_TO_LE16(max_frame_size); + cmd->params = ((u8)pacing & 0x0F) << 3; + if (crc_en) + cmd->params |= I40E_AQ_SET_MAC_CONFIG_CRC_EN; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_clear_pxe_mode + * @hw: pointer to the hw struct + * @cmd_details: pointer to command details structure or NULL + * + * Tell the firmware that the driver is taking over from PXE + **/ +enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) +{ + enum i40e_status_code status; + struct i40e_aq_desc desc; + struct i40e_aqc_clear_pxe *cmd = + (struct i40e_aqc_clear_pxe *)&desc.params.raw; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_clear_pxe_mode); + + cmd->rx_cnt = 0x2; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + wr32(hw, I40E_GLLAN_RCTL_0, 0x1); + + return status; +} + +/** + * i40e_aq_set_link_restart_an + * @hw: pointer to the hw struct + * @enable_link: if true: enable link, if false: disable link + * @cmd_details: pointer to command details structure or NULL + * + * Sets up the link and restarts the Auto-Negotiation over the link. + **/ +enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw, + bool enable_link, struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_link_restart_an *cmd = + (struct i40e_aqc_set_link_restart_an *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_link_restart_an); + + cmd->command = I40E_AQ_PHY_RESTART_AN; + if (enable_link) + cmd->command |= I40E_AQ_PHY_LINK_ENABLE; + else + cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_get_link_info + * @hw: pointer to the hw struct + * @enable_lse: enable/disable LinkStatusEvent reporting + * @link: pointer to link status structure - optional + * @cmd_details: pointer to command details structure or NULL + * + * Returns the link status of the adapter. 
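A small sketch of driving the MAC config wrapper above for a jumbo-frame setup; the 9728 byte figure is an assumption (a common i40e maximum elsewhere), not a value stated in this hunk.

static enum i40e_status_code example_set_jumbo(struct i40e_hw *hw)
{
	/* 9728 bytes illustrative; crc_en = true, pacing = 0 (none) */
	return i40e_aq_set_mac_config(hw, 9728, true, 0, NULL);
}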
+ **/ +enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw, + bool enable_lse, struct i40e_link_status *link, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_get_link_status *resp = + (struct i40e_aqc_get_link_status *)&desc.params.raw; + struct i40e_link_status *hw_link_info = &hw->phy.link_info; + enum i40e_status_code status; + bool tx_pause, rx_pause; + u16 command_flags; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); + + if (enable_lse) + command_flags = I40E_AQ_LSE_ENABLE; + else + command_flags = I40E_AQ_LSE_DISABLE; + resp->command_flags = CPU_TO_LE16(command_flags); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (status != I40E_SUCCESS) + goto aq_get_link_info_exit; + + /* save off old link status information */ + i40e_memcpy(&hw->phy.link_info_old, hw_link_info, + sizeof(*hw_link_info), I40E_NONDMA_TO_NONDMA); + + /* update link status */ + hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type; + hw->phy.media_type = i40e_get_media_type(hw); + hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed; + hw_link_info->link_info = resp->link_info; + hw_link_info->an_info = resp->an_info; + hw_link_info->ext_info = resp->ext_info; + hw_link_info->loopback = resp->loopback; + hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size); + hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK; + + /* update fc info */ + tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX); + rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX); + if (tx_pause & rx_pause) + hw->fc.current_mode = I40E_FC_FULL; + else if (tx_pause) + hw->fc.current_mode = I40E_FC_TX_PAUSE; + else if (rx_pause) + hw->fc.current_mode = I40E_FC_RX_PAUSE; + else + hw->fc.current_mode = I40E_FC_NONE; + + if (resp->config & I40E_AQ_CONFIG_CRC_ENA) + hw_link_info->crc_enable = true; + else + hw_link_info->crc_enable = false; + + if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_ENABLE)) + hw_link_info->lse_enable = true; + else + hw_link_info->lse_enable = false; + + if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 && + hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) + hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; + + /* save link status information */ + if (link) + i40e_memcpy(link, hw_link_info, sizeof(*hw_link_info), + I40E_NONDMA_TO_NONDMA); + + /* flag cleared so helper functions don't call AQ again */ + hw->phy.get_link_info = false; + +aq_get_link_info_exit: + return status; +} + +/** + * i40e_aq_set_phy_int_mask + * @hw: pointer to the hw struct + * @mask: interrupt mask to be set + * @cmd_details: pointer to command details structure or NULL + * + * Set link interrupt mask. + **/ +enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw, + u16 mask, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_phy_int_mask *cmd = + (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_phy_int_mask); + + cmd->event_mask = CPU_TO_LE16(mask); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_get_local_advt_reg + * @hw: pointer to the hw struct + * @advt_reg: local AN advertisement register value + * @cmd_details: pointer to command details structure or NULL + * + * Get the Local AN advertisement register value. 
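A sketch of how a caller might combine i40e_aq_get_link_info() and i40e_aq_set_phy_int_mask() above to poll the link and enable Link Status Event reporting. The mask bit I40E_AQ_EVENT_LINK_UPDOWN is assumed from the adminq command definitions and does not appear in this hunk.

static void example_query_link(struct i40e_hw *hw)
{
	struct i40e_link_status link;
	enum i40e_status_code status;

	/* enable_lse = true also turns on LinkStatusEvent reporting */
	status = i40e_aq_get_link_info(hw, true, &link, NULL);
	if (status != I40E_SUCCESS)
		return;

	if (link.link_info & I40E_AQ_LINK_UP)
		DEBUGOUT1("link up, speed enum %d\n", link.link_speed);

	/* assumed constant: only report link up/down PHY events */
	i40e_aq_set_phy_int_mask(hw, I40E_AQ_EVENT_LINK_UPDOWN, NULL);
}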
+ **/ +enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw, + u64 *advt_reg, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_an_advt_reg *resp = + (struct i40e_aqc_an_advt_reg *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_local_advt_reg); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (status != I40E_SUCCESS) + goto aq_get_local_advt_reg_exit; + + *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32; + *advt_reg |= LE32_TO_CPU(resp->local_an_reg0); + +aq_get_local_advt_reg_exit: + return status; +} + +/** + * i40e_aq_set_local_advt_reg + * @hw: pointer to the hw struct + * @advt_reg: local AN advertisement register value + * @cmd_details: pointer to command details structure or NULL + * + * Get the Local AN advertisement register value. + **/ +enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw, + u64 advt_reg, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_an_advt_reg *cmd = + (struct i40e_aqc_an_advt_reg *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_local_advt_reg); + + cmd->local_an_reg0 = CPU_TO_LE32(I40E_LO_DWORD(advt_reg)); + cmd->local_an_reg1 = CPU_TO_LE16(I40E_HI_DWORD(advt_reg)); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_get_partner_advt + * @hw: pointer to the hw struct + * @advt_reg: AN partner advertisement register value + * @cmd_details: pointer to command details structure or NULL + * + * Get the link partner AN advertisement register value. + **/ +enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw, + u64 *advt_reg, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_an_advt_reg *resp = + (struct i40e_aqc_an_advt_reg *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_partner_advt); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (status != I40E_SUCCESS) + goto aq_get_partner_advt_exit; + + *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32; + *advt_reg |= LE32_TO_CPU(resp->local_an_reg0); + +aq_get_partner_advt_exit: + return status; +} + +/** + * i40e_aq_set_lb_modes + * @hw: pointer to the hw struct + * @lb_modes: loopback mode to be set + * @cmd_details: pointer to command details structure or NULL + * + * Sets loopback modes. + **/ +enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw, + u16 lb_modes, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_lb_mode *cmd = + (struct i40e_aqc_set_lb_mode *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_lb_modes); + + cmd->lb_mode = CPU_TO_LE16(lb_modes); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_phy_debug + * @hw: pointer to the hw struct + * @cmd_flags: debug command flags + * @cmd_details: pointer to command details structure or NULL + * + * Reset the external PHY. 
+ **/ +enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_phy_debug *cmd = + (struct i40e_aqc_set_phy_debug *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_phy_debug); + + cmd->command_flags = cmd_flags; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_add_vsi + * @hw: pointer to the hw struct + * @vsi_ctx: pointer to a vsi context struct + * @cmd_details: pointer to command details structure or NULL + * + * Add a VSI context to the hardware. +**/ +enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_get_update_vsi *cmd = + (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; + struct i40e_aqc_add_get_update_vsi_completion *resp = + (struct i40e_aqc_add_get_update_vsi_completion *) + &desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_add_vsi); + + cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->uplink_seid); + cmd->connection_type = vsi_ctx->connection_type; + cmd->vf_id = vsi_ctx->vf_num; + cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags); + + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + + status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, + sizeof(vsi_ctx->info), cmd_details); + + if (status != I40E_SUCCESS) + goto aq_add_vsi_exit; + + vsi_ctx->seid = LE16_TO_CPU(resp->seid); + vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number); + vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used); + vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free); + +aq_add_vsi_exit: + return status; +} + +/** + * i40e_aq_set_default_vsi + * @hw: pointer to the hw struct + * @seid: vsi number + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, + u16 seid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *) + &desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + cmd->promiscuous_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT); + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT); + cmd->seid = CPU_TO_LE16(seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_vsi_unicast_promiscuous + * @hw: pointer to the hw struct + * @seid: vsi number + * @set: set unicast promiscuous enable/disable + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, + u16 seid, bool set, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + enum i40e_status_code status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + if (set) { + flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; + if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || + (hw->aq.api_maj_ver > 1)) + flags |= I40E_AQC_SET_VSI_PROMISC_TX; + } + + 
cmd->promiscuous_flags = CPU_TO_LE16(flags); + + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST); + if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) || + (hw->aq.api_maj_ver > 1)) + cmd->valid_flags |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_TX); + + cmd->seid = CPU_TO_LE16(seid); + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_vsi_multicast_promiscuous + * @hw: pointer to the hw struct + * @seid: vsi number + * @set: set multicast promiscuous enable/disable + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, + u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + enum i40e_status_code status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + if (set) + flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; + + cmd->promiscuous_flags = CPU_TO_LE16(flags); + + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); + + cmd->seid = CPU_TO_LE16(seid); + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_vsi_mc_promisc_on_vlan + * @hw: pointer to the hw struct + * @seid: vsi number + * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN + * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, u16 vid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + enum i40e_status_code status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + if (enable) + flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; + + cmd->promiscuous_flags = CPU_TO_LE16(flags); + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); + cmd->seid = CPU_TO_LE16(seid); + cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_vsi_uc_promisc_on_vlan + * @hw: pointer to the hw struct + * @seid: vsi number + * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN + * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, u16 vid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + enum i40e_status_code status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + if (enable) + flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; + + cmd->promiscuous_flags = CPU_TO_LE16(flags); + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST); + cmd->seid = CPU_TO_LE16(seid); + cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID); + + 
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_vsi_broadcast + * @hw: pointer to the hw struct + * @seid: vsi number + * @set_filter: true to set filter, false to clear filter + * @cmd_details: pointer to command details structure or NULL + * + * Set or clear the broadcast promiscuous flag (filter) for a given VSI. + **/ +enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, + u16 seid, bool set_filter, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + if (set_filter) + cmd->promiscuous_flags + |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); + else + cmd->promiscuous_flags + &= CPU_TO_LE16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST); + + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); + cmd->seid = CPU_TO_LE16(seid); + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting + * @hw: pointer to the hw struct + * @seid: vsi number + * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, + u16 seid, bool enable, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + enum i40e_status_code status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + if (enable) + flags |= I40E_AQC_SET_VSI_PROMISC_VLAN; + + cmd->promiscuous_flags = CPU_TO_LE16(flags); + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_VLAN); + cmd->seid = CPU_TO_LE16(seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_get_vsi_params - get VSI configuration info + * @hw: pointer to the hw struct + * @vsi_ctx: pointer to a vsi context struct + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_get_update_vsi *cmd = + (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; + struct i40e_aqc_add_get_update_vsi_completion *resp = + (struct i40e_aqc_add_get_update_vsi_completion *) + &desc.params.raw; + enum i40e_status_code status; + + UNREFERENCED_1PARAMETER(cmd_details); + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_vsi_parameters); + + cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid); + + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + + status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, + sizeof(vsi_ctx->info), NULL); + + if (status != I40E_SUCCESS) + goto aq_get_vsi_params_exit; + + vsi_ctx->seid = LE16_TO_CPU(resp->seid); + vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number); + vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used); + vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free); + +aq_get_vsi_params_exit: + return status; +} + +/** + * i40e_aq_update_vsi_params + * @hw: pointer to the hw struct + * 
@vsi_ctx: pointer to a vsi context struct + * @cmd_details: pointer to command details structure or NULL + * + * Update a VSI context. + **/ +enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_get_update_vsi *cmd = + (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; + struct i40e_aqc_add_get_update_vsi_completion *resp = + (struct i40e_aqc_add_get_update_vsi_completion *) + &desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_update_vsi_parameters); + cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid); + + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + + status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, + sizeof(vsi_ctx->info), cmd_details); + + vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used); + vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free); + + return status; +} + +/** + * i40e_aq_get_switch_config + * @hw: pointer to the hardware structure + * @buf: pointer to the result buffer + * @buf_size: length of input buffer + * @start_seid: seid to start for the report, 0 == beginning + * @cmd_details: pointer to command details structure or NULL + * + * Fill the buf with switch configuration returned from AdminQ command + **/ +enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw, + struct i40e_aqc_get_switch_config_resp *buf, + u16 buf_size, u16 *start_seid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_switch_seid *scfg = + (struct i40e_aqc_switch_seid *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_switch_config); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + scfg->seid = CPU_TO_LE16(*start_seid); + + status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details); + *start_seid = LE16_TO_CPU(scfg->seid); + + return status; +} + +/** + * i40e_aq_set_switch_config + * @hw: pointer to the hardware structure + * @flags: bit flag values to set + * @valid_flags: which bit flags to set + * @cmd_details: pointer to command details structure or NULL + * + * Set switch configuration bits + **/ +enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, + u16 flags, u16 valid_flags, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_switch_config *scfg = + (struct i40e_aqc_set_switch_config *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_switch_config); + scfg->flags = CPU_TO_LE16(flags); + scfg->valid_flags = CPU_TO_LE16(valid_flags); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_get_firmware_version + * @hw: pointer to the hw struct + * @fw_major_version: firmware major version + * @fw_minor_version: firmware minor version + * @fw_build: firmware build number + * @api_major_version: major queue version + * @api_minor_version: minor queue version + * @cmd_details: pointer to command details structure or NULL + * + * Get the firmware version from the admin queue commands + **/ +enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw, + u16 *fw_major_version, u16 *fw_minor_version, + u32 *fw_build, + u16 *api_major_version, u16 
*api_minor_version, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_get_version *resp = + (struct i40e_aqc_get_version *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (status == I40E_SUCCESS) { + if (fw_major_version != NULL) + *fw_major_version = LE16_TO_CPU(resp->fw_major); + if (fw_minor_version != NULL) + *fw_minor_version = LE16_TO_CPU(resp->fw_minor); + if (fw_build != NULL) + *fw_build = LE32_TO_CPU(resp->fw_build); + if (api_major_version != NULL) + *api_major_version = LE16_TO_CPU(resp->api_major); + if (api_minor_version != NULL) + *api_minor_version = LE16_TO_CPU(resp->api_minor); + + /* A workaround to fix the API version in SW */ + if (api_major_version && api_minor_version && + fw_major_version && fw_minor_version && + ((*api_major_version == 1) && (*api_minor_version == 1)) && + (((*fw_major_version == 4) && (*fw_minor_version >= 2)) || + (*fw_major_version > 4))) + *api_minor_version = 2; + } + + return status; +} + +/** + * i40e_aq_send_driver_version + * @hw: pointer to the hw struct + * @dv: driver's major, minor version + * @cmd_details: pointer to command details structure or NULL + * + * Send the driver version to the firmware + **/ +enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw, + struct i40e_driver_version *dv, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_driver_version *cmd = + (struct i40e_aqc_driver_version *)&desc.params.raw; + enum i40e_status_code status; + u16 len; + + if (dv == NULL) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); + + desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); + cmd->driver_major_ver = dv->major_version; + cmd->driver_minor_ver = dv->minor_version; + cmd->driver_build_ver = dv->build_version; + cmd->driver_subbuild_ver = dv->subbuild_version; + + len = 0; + while (len < sizeof(dv->driver_string) && + (dv->driver_string[len] < 0x80) && + dv->driver_string[len]) + len++; + status = i40e_asq_send_command(hw, &desc, dv->driver_string, + len, cmd_details); + + return status; +} + +/** + * i40e_get_link_status - get status of the HW network link + * @hw: pointer to the hw struct + * @link_up: pointer to bool (true/false = linkup/linkdown) + * + * Variable link_up true if link is up, false if link is down. 
+ * The variable link_up is invalid if the returned value of status != I40E_SUCCESS
+ *
+ * Side effect: LinkStatusEvent reporting becomes enabled
+ **/
+enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
+{
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	if (hw->phy.get_link_info) {
+		status = i40e_update_link_info(hw);
+
+		if (status != I40E_SUCCESS)
+			i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
+				   status);
+	}
+
+	*link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+
+	return status;
+}
+
+/**
+ * i40e_update_link_info - update status of the HW network link
+ * @hw: pointer to the hw struct
+ **/
+enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw)
+{
+	struct i40e_aq_get_phy_abilities_resp abilities;
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+	if (status)
+		return status;
+
+	if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) {
+		status = i40e_aq_get_phy_capabilities(hw, false, false,
+						      &abilities, NULL);
+		if (status)
+			return status;
+
+		memcpy(hw->phy.link_info.module_type, &abilities.module_type,
+		       sizeof(hw->phy.link_info.module_type));
+	}
+	return status;
+}
+
+/**
+ * i40e_get_link_speed
+ * @hw: pointer to the hw struct
+ *
+ * Returns the link speed of the adapter.
+ **/
+enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw)
+{
+	enum i40e_aq_link_speed speed = I40E_LINK_SPEED_UNKNOWN;
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	if (hw->phy.get_link_info) {
+		status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+
+		if (status != I40E_SUCCESS)
+			goto i40e_link_speed_exit;
+	}
+
+	speed = hw->phy.link_info.link_speed;
+
+i40e_link_speed_exit:
+	return speed;
+}
+
+/**
+ * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
+ * @hw: pointer to the hw struct
+ * @uplink_seid: the MAC or other gizmo SEID
+ * @downlink_seid: the VSI SEID
+ * @enabled_tc: bitmap of TCs to be enabled
+ * @default_port: true for default port VSI, false for control port
+ * @veb_seid: pointer to where to put the resulting VEB SEID
+ * @enable_stats: true to turn on VEB stats
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This asks the FW to add a VEB between the uplink and downlink
+ * elements. If the uplink SEID is 0, this will be a floating VEB.
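A minimal sketch of how a link-update path might consume the two link helpers above; the wrapper name is illustrative, only i40e_get_link_status() and i40e_get_link_speed() come from this hunk.

static void example_link_update(struct i40e_hw *hw)
{
	enum i40e_aq_link_speed speed = I40E_LINK_SPEED_UNKNOWN;
	bool link_up = false;

	/* side effect noted above: LSE reporting becomes enabled */
	if (i40e_get_link_status(hw, &link_up) != I40E_SUCCESS)
		return;

	if (link_up)
		speed = i40e_get_link_speed(hw);

	DEBUGOUT1("link speed enum %d\n", speed);
}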
+ **/ +enum i40e_status_code i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, + u16 downlink_seid, u8 enabled_tc, + bool default_port, u16 *veb_seid, + bool enable_stats, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_veb *cmd = + (struct i40e_aqc_add_veb *)&desc.params.raw; + struct i40e_aqc_add_veb_completion *resp = + (struct i40e_aqc_add_veb_completion *)&desc.params.raw; + enum i40e_status_code status; + u16 veb_flags = 0; + + /* SEIDs need to either both be set or both be 0 for floating VEB */ + if (!!uplink_seid != !!downlink_seid) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb); + + cmd->uplink_seid = CPU_TO_LE16(uplink_seid); + cmd->downlink_seid = CPU_TO_LE16(downlink_seid); + cmd->enable_tcs = enabled_tc; + if (!uplink_seid) + veb_flags |= I40E_AQC_ADD_VEB_FLOATING; + if (default_port) + veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT; + else + veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; + + /* reverse logic here: set the bitflag to disable the stats */ + if (!enable_stats) + veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS; + + cmd->veb_flags = CPU_TO_LE16(veb_flags); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status && veb_seid) + *veb_seid = LE16_TO_CPU(resp->veb_seid); + + return status; +} + +/** + * i40e_aq_get_veb_parameters - Retrieve VEB parameters + * @hw: pointer to the hw struct + * @veb_seid: the SEID of the VEB to query + * @switch_id: the uplink switch id + * @floating: set to true if the VEB is floating + * @statistic_index: index of the stats counter block for this VEB + * @vebs_used: number of VEB's used by function + * @vebs_free: total VEB's not reserved by any function + * @cmd_details: pointer to command details structure or NULL + * + * This retrieves the parameters for a particular VEB, specified by + * uplink_seid, and returns them to the caller. 
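A hedged sketch of creating a VEB with the wrapper above and reading its parameters back with the query documented here; the SEID arguments are placeholders, the call signatures are taken from this hunk.

static enum i40e_status_code example_add_veb(struct i40e_hw *hw,
					     u16 mac_seid, u16 vsi_seid)
{
	u16 veb_seid = 0, switch_id = 0, stat_idx = 0;
	u16 vebs_used = 0, vebs_free = 0;
	bool floating = false;
	enum i40e_status_code status;

	/* enabled_tc 0x1 = TC0 only, not a default port, stats enabled */
	status = i40e_aq_add_veb(hw, mac_seid, vsi_seid, 0x1,
				 false, &veb_seid, true, NULL);
	if (status != I40E_SUCCESS)
		return status;

	return i40e_aq_get_veb_parameters(hw, veb_seid, &switch_id,
					  &floating, &stat_idx,
					  &vebs_used, &vebs_free, NULL);
}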
+ **/ +enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw, + u16 veb_seid, u16 *switch_id, + bool *floating, u16 *statistic_index, + u16 *vebs_used, u16 *vebs_free, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_get_veb_parameters_completion *cmd_resp = + (struct i40e_aqc_get_veb_parameters_completion *) + &desc.params.raw; + enum i40e_status_code status; + + if (veb_seid == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_veb_parameters); + cmd_resp->seid = CPU_TO_LE16(veb_seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + if (status) + goto get_veb_exit; + + if (switch_id) + *switch_id = LE16_TO_CPU(cmd_resp->switch_id); + if (statistic_index) + *statistic_index = LE16_TO_CPU(cmd_resp->statistic_index); + if (vebs_used) + *vebs_used = LE16_TO_CPU(cmd_resp->vebs_used); + if (vebs_free) + *vebs_free = LE16_TO_CPU(cmd_resp->vebs_free); + if (floating) { + u16 flags = LE16_TO_CPU(cmd_resp->veb_flags); + + if (flags & I40E_AQC_ADD_VEB_FLOATING) + *floating = true; + else + *floating = false; + } + +get_veb_exit: + return status; +} + +/** + * i40e_aq_add_macvlan + * @hw: pointer to the hw struct + * @seid: VSI for the mac address + * @mv_list: list of macvlans to be added + * @count: length of the list + * @cmd_details: pointer to command details structure or NULL + * + * Add MAC/VLAN addresses to the HW filtering + **/ +enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_add_macvlan_element_data *mv_list, + u16 count, struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_macvlan *cmd = + (struct i40e_aqc_macvlan *)&desc.params.raw; + enum i40e_status_code status; + u16 buf_size; + int i; + + if (count == 0 || !mv_list || !hw) + return I40E_ERR_PARAM; + + buf_size = count * sizeof(*mv_list); + + /* prep the rest of the request */ + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan); + cmd->num_addresses = CPU_TO_LE16(count); + cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); + cmd->seid[1] = 0; + cmd->seid[2] = 0; + + for (i = 0; i < count; i++) + if (I40E_IS_MULTICAST(mv_list[i].mac_addr)) + mv_list[i].flags |= + CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); + + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, + cmd_details); + + return status; +} + +/** + * i40e_aq_remove_macvlan + * @hw: pointer to the hw struct + * @seid: VSI for the mac address + * @mv_list: list of macvlans to be removed + * @count: length of the list + * @cmd_details: pointer to command details structure or NULL + * + * Remove MAC/VLAN addresses from the HW filtering + **/ +enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_remove_macvlan_element_data *mv_list, + u16 count, struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_macvlan *cmd = + (struct i40e_aqc_macvlan *)&desc.params.raw; + enum i40e_status_code status; + u16 buf_size; + + if (count == 0 || !mv_list || !hw) + return I40E_ERR_PARAM; + + buf_size = count * sizeof(*mv_list); + + /* prep the rest of the request */ + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); + cmd->num_addresses = CPU_TO_LE16(count); + cmd->seid[0] = 
CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); + cmd->seid[1] = 0; + cmd->seid[2] = 0; + + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, + cmd_details); + + return status; +} + +/** + * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule + * @hw: pointer to the hw struct + * @opcode: AQ opcode for add or delete mirror rule + * @sw_seid: Switch SEID (to which rule refers) + * @rule_type: Rule Type (ingress/egress/VLAN) + * @id: Destination VSI SEID or Rule ID + * @count: length of the list + * @mr_list: list of mirrored VSI SEIDs or VLAN IDs + * @cmd_details: pointer to command details structure or NULL + * @rule_id: Rule ID returned from FW + * @rule_used: Number of rules used in internal switch + * @rule_free: Number of rules free in internal switch + * + * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for + * VEBs/VEPA elements only + **/ +static enum i40e_status_code i40e_mirrorrule_op(struct i40e_hw *hw, + u16 opcode, u16 sw_seid, u16 rule_type, u16 id, + u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rule_id, u16 *rules_used, u16 *rules_free) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_delete_mirror_rule *cmd = + (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; + struct i40e_aqc_add_delete_mirror_rule_completion *resp = + (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; + enum i40e_status_code status; + u16 buf_size; + + buf_size = count * sizeof(*mr_list); + + /* prep the rest of the request */ + i40e_fill_default_direct_cmd_desc(&desc, opcode); + cmd->seid = CPU_TO_LE16(sw_seid); + cmd->rule_type = CPU_TO_LE16(rule_type & + I40E_AQC_MIRROR_RULE_TYPE_MASK); + cmd->num_entries = CPU_TO_LE16(count); + /* Dest VSI for add, rule_id for delete */ + cmd->destination = CPU_TO_LE16(id); + if (mr_list) { + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | + I40E_AQ_FLAG_RD)); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + } + + status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, + cmd_details); + if (status == I40E_SUCCESS || + hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { + if (rule_id) + *rule_id = LE16_TO_CPU(resp->rule_id); + if (rules_used) + *rules_used = LE16_TO_CPU(resp->mirror_rules_used); + if (rules_free) + *rules_free = LE16_TO_CPU(resp->mirror_rules_free); + } + return status; +} + +/** + * i40e_aq_add_mirrorrule - add a mirror rule + * @hw: pointer to the hw struct + * @sw_seid: Switch SEID (to which rule refers) + * @rule_type: Rule Type (ingress/egress/VLAN) + * @dest_vsi: SEID of VSI to which packets will be mirrored + * @count: length of the list + * @mr_list: list of mirrored VSI SEIDs or VLAN IDs + * @cmd_details: pointer to command details structure or NULL + * @rule_id: Rule ID returned from FW + * @rule_used: Number of rules used in internal switch + * @rule_free: Number of rules free in internal switch + * + * Add mirror rule. 
Mirror rules are supported for VEBs or VEPA elements only + **/ +enum i40e_status_code i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rule_id, u16 *rules_used, u16 *rules_free) +{ + if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || + rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { + if (count == 0 || !mr_list) + return I40E_ERR_PARAM; + } + + return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, + rule_type, dest_vsi, count, mr_list, + cmd_details, rule_id, rules_used, rules_free); +} + +/** + * i40e_aq_delete_mirrorrule - delete a mirror rule + * @hw: pointer to the hw struct + * @sw_seid: Switch SEID (to which rule refers) + * @rule_type: Rule Type (ingress/egress/VLAN) + * @count: length of the list + * @rule_id: Rule ID that is returned in the receive desc as part of + * add_mirrorrule. + * @mr_list: list of mirrored VLAN IDs to be removed + * @cmd_details: pointer to command details structure or NULL + * @rule_used: Number of rules used in internal switch + * @rule_free: Number of rules free in internal switch + * + * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only + **/ +enum i40e_status_code i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rules_used, u16 *rules_free) +{ + /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ + if (rule_type != I40E_AQC_MIRROR_RULE_TYPE_VLAN) { + if (!rule_id) + return I40E_ERR_PARAM; + } else { + /* count and mr_list shall be valid for rule_type INGRESS VLAN + * mirroring. For other rule_type, count and rule_type should + * not matter. 
+ */ + if (count == 0 || !mr_list) + return I40E_ERR_PARAM; + } + + return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, + rule_type, rule_id, count, mr_list, + cmd_details, NULL, rules_used, rules_free); +} + +/** + * i40e_aq_add_vlan - Add VLAN ids to the HW filtering + * @hw: pointer to the hw struct + * @seid: VSI for the vlan filters + * @v_list: list of vlan filters to be added + * @count: length of the list + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_add_remove_vlan_element_data *v_list, + u8 count, struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_macvlan *cmd = + (struct i40e_aqc_macvlan *)&desc.params.raw; + enum i40e_status_code status; + u16 buf_size; + + if (count == 0 || !v_list || !hw) + return I40E_ERR_PARAM; + + buf_size = count * sizeof(*v_list); + + /* prep the rest of the request */ + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan); + cmd->num_addresses = CPU_TO_LE16(count); + cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID); + cmd->seid[1] = 0; + cmd->seid[2] = 0; + + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, v_list, buf_size, + cmd_details); + + return status; +} + +/** + * i40e_aq_remove_vlan - Remove VLANs from the HW filtering + * @hw: pointer to the hw struct + * @seid: VSI for the vlan filters + * @v_list: list of macvlans to be removed + * @count: length of the list + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_add_remove_vlan_element_data *v_list, + u8 count, struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_macvlan *cmd = + (struct i40e_aqc_macvlan *)&desc.params.raw; + enum i40e_status_code status; + u16 buf_size; + + if (count == 0 || !v_list || !hw) + return I40E_ERR_PARAM; + + buf_size = count * sizeof(*v_list); + + /* prep the rest of the request */ + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan); + cmd->num_addresses = CPU_TO_LE16(count); + cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID); + cmd->seid[1] = 0; + cmd->seid[2] = 0; + + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, v_list, buf_size, + cmd_details); + + return status; +} + +/** + * i40e_aq_send_msg_to_vf + * @hw: pointer to the hardware structure + * @vfid: vf id to send msg + * @v_opcode: opcodes for VF-PF communication + * @v_retval: return error code + * @msg: pointer to the msg buffer + * @msglen: msg length + * @cmd_details: pointer to command details + * + * send msg to vf + **/ +enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, + u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_pf_vf_message *cmd = + (struct i40e_aqc_pf_vf_message *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); + cmd->id = CPU_TO_LE32(vfid); + desc.cookie_high = CPU_TO_LE32(v_opcode); + desc.cookie_low = 
CPU_TO_LE32(v_retval); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI); + if (msglen) { + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | + I40E_AQ_FLAG_RD)); + if (msglen > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + desc.datalen = CPU_TO_LE16(msglen); + } + status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details); + + return status; +} + +/** + * i40e_aq_debug_read_register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: register value + * @cmd_details: pointer to command details structure or NULL + * + * Read the register using the admin queue commands + **/ +enum i40e_status_code i40e_aq_debug_read_register(struct i40e_hw *hw, + u32 reg_addr, u64 *reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_debug_reg_read_write *cmd_resp = + (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; + enum i40e_status_code status; + + if (reg_val == NULL) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); + + cmd_resp->address = CPU_TO_LE32(reg_addr); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (status == I40E_SUCCESS) { + *reg_val = ((u64)LE32_TO_CPU(cmd_resp->value_high) << 32) | + (u64)LE32_TO_CPU(cmd_resp->value_low); + } + + return status; +} + +/** + * i40e_aq_debug_write_register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: register value + * @cmd_details: pointer to command details structure or NULL + * + * Write to a register using the admin queue commands + **/ +enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw, + u32 reg_addr, u64 reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_debug_reg_read_write *cmd = + (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); + + cmd->address = CPU_TO_LE32(reg_addr); + cmd->value_high = CPU_TO_LE32((u32)(reg_val >> 32)); + cmd->value_low = CPU_TO_LE32((u32)(reg_val & 0xFFFFFFFF)); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_get_hmc_resource_profile + * @hw: pointer to the hw struct + * @profile: type of profile the HMC is to be set as + * @pe_vf_enabled_count: the number of PE enabled VFs the system has + * @cmd_details: pointer to command details structure or NULL + * + * query the HMC profile of the device. 
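A small read-back sketch built from the two debug register wrappers above; the register address and value are placeholders.

static enum i40e_status_code example_reg_roundtrip(struct i40e_hw *hw,
						   u32 reg, u64 val)
{
	enum i40e_status_code status;
	u64 readback = 0;

	status = i40e_aq_debug_write_register(hw, reg, val, NULL);
	if (status != I40E_SUCCESS)
		return status;

	status = i40e_aq_debug_read_register(hw, reg, &readback, NULL);
	if (status == I40E_SUCCESS && readback != val)
		DEBUGOUT("debug register read-back mismatch\n");

	return status;
}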
+ **/ +enum i40e_status_code i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw, + enum i40e_aq_hmc_profile *profile, + u8 *pe_vf_enabled_count, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aq_get_set_hmc_resource_profile *resp = + (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_query_hmc_resource_profile); + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + *profile = (enum i40e_aq_hmc_profile)(resp->pm_profile & + I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK); + *pe_vf_enabled_count = resp->pe_vf_enabled & + I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK; + + return status; +} + +/** + * i40e_aq_set_hmc_resource_profile + * @hw: pointer to the hw struct + * @profile: type of profile the HMC is to be set as + * @pe_vf_enabled_count: the number of PE enabled VFs the system has + * @cmd_details: pointer to command details structure or NULL + * + * set the HMC profile of the device. + **/ +enum i40e_status_code i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw, + enum i40e_aq_hmc_profile profile, + u8 pe_vf_enabled_count, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aq_get_set_hmc_resource_profile *cmd = + (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_hmc_resource_profile); + + cmd->pm_profile = (u8)profile; + cmd->pe_vf_enabled = pe_vf_enabled_count; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_request_resource + * @hw: pointer to the hw struct + * @resource: resource id + * @access: access type + * @sdp_number: resource number + * @timeout: the maximum time in ms that the driver may hold the resource + * @cmd_details: pointer to command details structure or NULL + * + * requests common resource using the admin queue commands + **/ +enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw, + enum i40e_aq_resources_ids resource, + enum i40e_aq_resource_access_type access, + u8 sdp_number, u64 *timeout, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_request_resource *cmd_resp = + (struct i40e_aqc_request_resource *)&desc.params.raw; + enum i40e_status_code status; + + DEBUGFUNC("i40e_aq_request_resource"); + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); + + cmd_resp->resource_id = CPU_TO_LE16(resource); + cmd_resp->access_type = CPU_TO_LE16(access); + cmd_resp->resource_number = CPU_TO_LE32(sdp_number); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + /* The completion specifies the maximum time in ms that the driver + * may hold the resource in the Timeout field. + * If the resource is held by someone else, the command completes with + * busy return value and the timeout field indicates the maximum time + * the current owner of the resource has to free it. 
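A sketch of the acquire/use/release pattern that i40e_aq_request_resource() and i40e_aq_release_resource() in this hunk support; the enumerators I40E_NVM_RESOURCE_ID and I40E_RESOURCE_READ are assumed from i40e_type.h and are not shown here.

static enum i40e_status_code example_with_nvm_lock(struct i40e_hw *hw)
{
	enum i40e_status_code status;
	u64 timeout = 0;

	/* assumed enumerators: NVM resource, read access, sdp 0 */
	status = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
					  I40E_RESOURCE_READ, 0,
					  &timeout, NULL);
	if (status != I40E_SUCCESS)
		return status;	/* on EBUSY, timeout is the wait hint */

	/* ... NVM reads would go here ... */

	return i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
}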
+ */ + if (status == I40E_SUCCESS || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) + *timeout = LE32_TO_CPU(cmd_resp->timeout); + + return status; +} + +/** + * i40e_aq_release_resource + * @hw: pointer to the hw struct + * @resource: resource id + * @sdp_number: resource number + * @cmd_details: pointer to command details structure or NULL + * + * release common resource using the admin queue commands + **/ +enum i40e_status_code i40e_aq_release_resource(struct i40e_hw *hw, + enum i40e_aq_resources_ids resource, + u8 sdp_number, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_request_resource *cmd = + (struct i40e_aqc_request_resource *)&desc.params.raw; + enum i40e_status_code status; + + DEBUGFUNC("i40e_aq_release_resource"); + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); + + cmd->resource_id = CPU_TO_LE16(resource); + cmd->resource_number = CPU_TO_LE32(sdp_number); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_read_nvm + * @hw: pointer to the hw struct + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: byte offset from the module beginning + * @length: length of the section to be read (in bytes from the offset) + * @data: command buffer (size [bytes] = length) + * @last_command: tells if this is the last command in a series + * @cmd_details: pointer to command details structure or NULL + * + * Read the NVM using the admin queue commands + **/ +enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, void *data, + bool last_command, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_nvm_update *cmd = + (struct i40e_aqc_nvm_update *)&desc.params.raw; + enum i40e_status_code status; + + DEBUGFUNC("i40e_aq_read_nvm"); + + /* In offset the highest byte must be zeroed. */ + if (offset & 0xFF000000) { + status = I40E_ERR_PARAM; + goto i40e_aq_read_nvm_exit; + } + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read); + + /* If this is the last command in a series, set the proper flag. 
*/ + if (last_command) + cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; + cmd->module_pointer = module_pointer; + cmd->offset = CPU_TO_LE32(offset); + cmd->length = CPU_TO_LE16(length); + + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + if (length > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); + +i40e_aq_read_nvm_exit: + return status; +} + +/** + * i40e_aq_read_nvm_config - read an nvm config block + * @hw: pointer to the hw struct + * @cmd_flags: NVM access admin command bits + * @field_id: field or feature id + * @data: buffer for result + * @buf_size: buffer size + * @element_count: pointer to count of elements read by FW + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw, + u8 cmd_flags, u32 field_id, void *data, + u16 buf_size, u16 *element_count, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_nvm_config_read *cmd = + (struct i40e_aqc_nvm_config_read *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_read); + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF)); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + cmd->cmd_flags = CPU_TO_LE16(cmd_flags); + cmd->element_id = CPU_TO_LE16((u16)(0xffff & field_id)); + if (cmd_flags & I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK) + cmd->element_id_msw = CPU_TO_LE16((u16)(field_id >> 16)); + else + cmd->element_id_msw = 0; + + status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details); + + if (!status && element_count) + *element_count = LE16_TO_CPU(cmd->element_count); + + return status; +} + +/** + * i40e_aq_write_nvm_config - write an nvm config block + * @hw: pointer to the hw struct + * @cmd_flags: NVM access admin command bits + * @data: buffer for result + * @buf_size: buffer size + * @element_count: count of elements to be written + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw, + u8 cmd_flags, void *data, u16 buf_size, + u16 element_count, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_nvm_config_write *cmd = + (struct i40e_aqc_nvm_config_write *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_write); + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + cmd->element_count = CPU_TO_LE16(element_count); + cmd->cmd_flags = CPU_TO_LE16(cmd_flags); + status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details); + + return status; +} + +/** + * i40e_aq_oem_post_update - triggers an OEM specific flow after update + * @hw: pointer to the hw struct + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw, + void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + enum i40e_status_code status; + + UNREFERENCED_2PARAMETER(buff, buff_size); + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_oem_post_update); + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + if (status && LE16_TO_CPU(desc.retval) == I40E_AQ_RC_ESRCH) + status = 
I40E_ERR_NOT_IMPLEMENTED; + + return status; +} + +/** + * i40e_aq_erase_nvm + * @hw: pointer to the hw struct + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: offset in the module (expressed in 4 KB from module's beginning) + * @length: length of the section to be erased (expressed in 4 KB) + * @last_command: tells if this is the last command in a series + * @cmd_details: pointer to command details structure or NULL + * + * Erase the NVM sector using the admin queue commands + **/ +enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, bool last_command, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_nvm_update *cmd = + (struct i40e_aqc_nvm_update *)&desc.params.raw; + enum i40e_status_code status; + + DEBUGFUNC("i40e_aq_erase_nvm"); + + /* In offset the highest byte must be zeroed. */ + if (offset & 0xFF000000) { + status = I40E_ERR_PARAM; + goto i40e_aq_erase_nvm_exit; + } + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase); + + /* If this is the last command in a series, set the proper flag. */ + if (last_command) + cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; + cmd->module_pointer = module_pointer; + cmd->offset = CPU_TO_LE32(offset); + cmd->length = CPU_TO_LE16(length); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + +i40e_aq_erase_nvm_exit: + return status; +} + +/** + * i40e_parse_discover_capabilities + * @hw: pointer to the hw struct + * @buff: pointer to a buffer containing device/function capability records + * @cap_count: number of capability records in the list + * @list_type_opc: type of capabilities list to parse + * + * Parse the device/function capabilities list. 
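+ *
+ * The buffer is the raw AQ response: cap_count packed
+ * struct i40e_aqc_list_capabilities_element_resp records whose fields
+ * are little-endian, i.e. conceptually:
+ *
+ *    cap = (struct i40e_aqc_list_capabilities_element_resp *)buff;
+ *    for (i = 0; i < cap_count; i++, cap++)
+ *        ... decode LE16_TO_CPU(cap->id), LE32_TO_CPU(cap->number), ...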
+ **/ +STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, + u32 cap_count, + enum i40e_admin_queue_opc list_type_opc) +{ + struct i40e_aqc_list_capabilities_element_resp *cap; + u32 valid_functions, num_functions; + u32 number, logical_id, phys_id; + struct i40e_hw_capabilities *p; + u8 major_rev; + u32 i = 0; + u16 id; + + cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; + + if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) + p = (struct i40e_hw_capabilities *)&hw->dev_caps; + else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) + p = (struct i40e_hw_capabilities *)&hw->func_caps; + else + return; + + for (i = 0; i < cap_count; i++, cap++) { + id = LE16_TO_CPU(cap->id); + number = LE32_TO_CPU(cap->number); + logical_id = LE32_TO_CPU(cap->logical_id); + phys_id = LE32_TO_CPU(cap->phys_id); + major_rev = cap->major_rev; + + switch (id) { + case I40E_AQ_CAP_ID_SWITCH_MODE: + p->switch_mode = number; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Switch mode = %d\n", + p->switch_mode); + break; + case I40E_AQ_CAP_ID_MNG_MODE: + p->management_mode = number; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Management Mode = %d\n", + p->management_mode); + break; + case I40E_AQ_CAP_ID_NPAR_ACTIVE: + p->npar_enable = number; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: NPAR enable = %d\n", + p->npar_enable); + break; + case I40E_AQ_CAP_ID_OS2BMC_CAP: + p->os2bmc = number; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: OS2BMC = %d\n", p->os2bmc); + break; + case I40E_AQ_CAP_ID_FUNCTIONS_VALID: + p->valid_functions = number; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Valid Functions = %d\n", + p->valid_functions); + break; + case I40E_AQ_CAP_ID_SRIOV: + if (number == 1) + p->sr_iov_1_1 = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: SR-IOV = %d\n", + p->sr_iov_1_1); + break; + case I40E_AQ_CAP_ID_VF: + p->num_vfs = number; + p->vf_base_id = logical_id; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: VF count = %d\n", + p->num_vfs); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: VF base_id = %d\n", + p->vf_base_id); + break; + case I40E_AQ_CAP_ID_VMDQ: + if (number == 1) + p->vmdq = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: VMDQ = %d\n", p->vmdq); + break; + case I40E_AQ_CAP_ID_8021QBG: + if (number == 1) + p->evb_802_1_qbg = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: 802.1Qbg = %d\n", number); + break; + case I40E_AQ_CAP_ID_8021QBR: + if (number == 1) + p->evb_802_1_qbh = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: 802.1Qbh = %d\n", number); + break; + case I40E_AQ_CAP_ID_VSI: + p->num_vsis = number; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: VSI count = %d\n", + p->num_vsis); + break; + case I40E_AQ_CAP_ID_DCB: + if (number == 1) { + p->dcb = true; + p->enabled_tcmap = logical_id; + p->maxtc = phys_id; + } + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: DCB = %d\n", p->dcb); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: TC Mapping = %d\n", + logical_id); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: TC Max = %d\n", p->maxtc); + break; + case I40E_AQ_CAP_ID_FCOE: + if (number == 1) + p->fcoe = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: FCOE = %d\n", p->fcoe); + break; + case I40E_AQ_CAP_ID_ISCSI: + if (number == 1) + p->iscsi = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: iSCSI = %d\n", p->iscsi); + break; + case I40E_AQ_CAP_ID_RSS: + p->rss = true; + p->rss_table_size = number; + 
p->rss_table_entry_width = logical_id; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: RSS = %d\n", p->rss); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: RSS table size = %d\n", + p->rss_table_size); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: RSS table width = %d\n", + p->rss_table_entry_width); + break; + case I40E_AQ_CAP_ID_RXQ: + p->num_rx_qp = number; + p->base_queue = phys_id; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Rx QP = %d\n", number); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: base_queue = %d\n", + p->base_queue); + break; + case I40E_AQ_CAP_ID_TXQ: + p->num_tx_qp = number; + p->base_queue = phys_id; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Tx QP = %d\n", number); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: base_queue = %d\n", + p->base_queue); + break; + case I40E_AQ_CAP_ID_MSIX: + p->num_msix_vectors = number; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: MSIX vector count = %d\n", + p->num_msix_vectors_vf); + break; + case I40E_AQ_CAP_ID_VF_MSIX: + p->num_msix_vectors_vf = number; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: MSIX VF vector count = %d\n", + p->num_msix_vectors_vf); + break; + case I40E_AQ_CAP_ID_FLEX10: + if (major_rev == 1) { + if (number == 1) { + p->flex10_enable = true; + p->flex10_capable = true; + } + } else { + /* Capability revision >= 2 */ + if (number & 1) + p->flex10_enable = true; + if (number & 2) + p->flex10_capable = true; + } + p->flex10_mode = logical_id; + p->flex10_status = phys_id; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Flex10 mode = %d\n", + p->flex10_mode); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Flex10 status = %d\n", + p->flex10_status); + break; + case I40E_AQ_CAP_ID_CEM: + if (number == 1) + p->mgmt_cem = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: CEM = %d\n", p->mgmt_cem); + break; + case I40E_AQ_CAP_ID_IWARP: + if (number == 1) + p->iwarp = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: iWARP = %d\n", p->iwarp); + break; + case I40E_AQ_CAP_ID_LED: + if (phys_id < I40E_HW_CAP_MAX_GPIO) + p->led[phys_id] = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: LED - PIN %d\n", phys_id); + break; + case I40E_AQ_CAP_ID_SDP: + if (phys_id < I40E_HW_CAP_MAX_GPIO) + p->sdp[phys_id] = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: SDP - PIN %d\n", phys_id); + break; + case I40E_AQ_CAP_ID_MDIO: + if (number == 1) { + p->mdio_port_num = phys_id; + p->mdio_port_mode = logical_id; + } + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: MDIO port number = %d\n", + p->mdio_port_num); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: MDIO port mode = %d\n", + p->mdio_port_mode); + break; + case I40E_AQ_CAP_ID_1588: + if (number == 1) + p->ieee_1588 = true; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: IEEE 1588 = %d\n", + p->ieee_1588); + break; + case I40E_AQ_CAP_ID_FLOW_DIRECTOR: + p->fd = true; + p->fd_filters_guaranteed = number; + p->fd_filters_best_effort = logical_id; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Flow Director = 1\n"); + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: Guaranteed FD filters = %d\n", + p->fd_filters_guaranteed); + break; + case I40E_AQ_CAP_ID_WSR_PROT: + p->wr_csr_prot = (u64)number; + p->wr_csr_prot |= (u64)logical_id << 32; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: wr_csr_prot = 0x%llX\n\n", + (p->wr_csr_prot & 0xffff)); + break; +#ifdef X722_SUPPORT + case I40E_AQ_CAP_ID_WOL_AND_PROXY: + hw->num_wol_proxy_filters = (u16)number; + 
hw->wol_proxy_vsi_seid = (u16)logical_id; + p->apm_wol_support = phys_id & I40E_WOL_SUPPORT_MASK; + if (phys_id & I40E_ACPI_PROGRAMMING_METHOD_MASK) + p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK; + else + p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_HW_FVL; + p->proxy_support = (phys_id & I40E_PROXY_SUPPORT_MASK) ? 1 : 0; + p->proxy_support = p->proxy_support; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: WOL proxy filters = %d\n", + hw->num_wol_proxy_filters); + break; +#endif + default: + break; + } + } + + if (p->fcoe) + i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); + +#ifdef I40E_FCOE_ENA + /* Software override ensuring FCoE is disabled if npar or mfp + * mode because it is not supported in these modes. + */ + if (p->npar_enable || p->flex10_enable) + p->fcoe = false; +#else + /* Always disable FCoE if compiled without the I40E_FCOE_ENA flag */ + p->fcoe = false; +#endif + + /* count the enabled ports (aka the "not disabled" ports) */ + hw->num_ports = 0; + for (i = 0; i < 4; i++) { + u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i); + u64 port_cfg = 0; + + /* use AQ read to get the physical register offset instead + * of the port relative offset + */ + i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); + if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) + hw->num_ports++; + } + + valid_functions = p->valid_functions; + num_functions = 0; + while (valid_functions) { + if (valid_functions & 1) + num_functions++; + valid_functions >>= 1; + } + + /* partition id is 1-based, and functions are evenly spread + * across the ports as partitions + */ + hw->partition_id = (hw->pf_id / hw->num_ports) + 1; + hw->num_partitions = num_functions / hw->num_ports; + + /* additional HW specific goodies that might + * someday be HW version specific + */ + p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS; +} + +/** + * i40e_aq_discover_capabilities + * @hw: pointer to the hw struct + * @buff: a virtual buffer to hold the capabilities + * @buff_size: Size of the virtual buffer + * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM + * @list_type_opc: capabilities type to discover - pass in the command opcode + * @cmd_details: pointer to command details structure or NULL + * + * Get the device capabilities descriptions from the firmware + **/ +enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw, + void *buff, u16 buff_size, u16 *data_size, + enum i40e_admin_queue_opc list_type_opc, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aqc_list_capabilites *cmd; + struct i40e_aq_desc desc; + enum i40e_status_code status = I40E_SUCCESS; + + cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; + + if (list_type_opc != i40e_aqc_opc_list_func_capabilities && + list_type_opc != i40e_aqc_opc_list_dev_capabilities) { + status = I40E_ERR_PARAM; + goto exit; + } + + i40e_fill_default_direct_cmd_desc(&desc, list_type_opc); + + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + *data_size = LE16_TO_CPU(desc.datalen); + + if (status) + goto exit; + + i40e_parse_discover_capabilities(hw, buff, LE32_TO_CPU(cmd->count), + list_type_opc); + +exit: + return status; +} + +/** + * i40e_aq_update_nvm + * @hw: pointer to the hw struct + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: byte offset from the module 
beginning + * @length: length of the section to be written (in bytes from the offset) + * @data: command buffer (size [bytes] = length) + * @last_command: tells if this is the last command in a series + * @cmd_details: pointer to command details structure or NULL + * + * Update the NVM using the admin queue commands + **/ +enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, void *data, + bool last_command, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_nvm_update *cmd = + (struct i40e_aqc_nvm_update *)&desc.params.raw; + enum i40e_status_code status; + + DEBUGFUNC("i40e_aq_update_nvm"); + + /* In offset the highest byte must be zeroed. */ + if (offset & 0xFF000000) { + status = I40E_ERR_PARAM; + goto i40e_aq_update_nvm_exit; + } + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); + + /* If this is the last command in a series, set the proper flag. */ + if (last_command) + cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; + cmd->module_pointer = module_pointer; + cmd->offset = CPU_TO_LE32(offset); + cmd->length = CPU_TO_LE16(length); + + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (length > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); + +i40e_aq_update_nvm_exit: + return status; +} + +/** + * i40e_aq_get_lldp_mib + * @hw: pointer to the hw struct + * @bridge_type: type of bridge requested + * @mib_type: Local, Remote or both Local and Remote MIBs + * @buff: pointer to a user supplied buffer to store the MIB block + * @buff_size: size of the buffer (in bytes) + * @local_len : length of the returned Local LLDP MIB + * @remote_len: length of the returned Remote LLDP MIB + * @cmd_details: pointer to command details structure or NULL + * + * Requests the complete LLDP MIB (entire packet). 
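+ *
+ * Usage sketch (illustrative; the buffer size and the bridge/MIB type
+ * values are example choices made by the caller):
+ *
+ *    u8 mib[1024];
+ *    u16 local_len = 0, remote_len = 0;
+ *
+ *    status = i40e_aq_get_lldp_mib(hw, bridge_type, mib_type, mib,
+ *                                  sizeof(mib), &local_len,
+ *                                  &remote_len, NULL);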
+ **/ +enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, + u8 mib_type, void *buff, u16 buff_size, + u16 *local_len, u16 *remote_len, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_get_mib *cmd = + (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; + struct i40e_aqc_lldp_get_mib *resp = + (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; + enum i40e_status_code status; + + if (buff_size == 0 || !buff) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); + /* Indirect Command */ + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + + cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; + cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & + I40E_AQ_LLDP_BRIDGE_TYPE_MASK); + + desc.datalen = CPU_TO_LE16(buff_size); + + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + if (!status) { + if (local_len != NULL) + *local_len = LE16_TO_CPU(resp->local_len); + if (remote_len != NULL) + *remote_len = LE16_TO_CPU(resp->remote_len); + } + + return status; +} + + /** + * i40e_aq_set_lldp_mib - Set the LLDP MIB + * @hw: pointer to the hw struct + * @mib_type: Local, Remote or both Local and Remote MIBs + * @buff: pointer to a user supplied buffer to store the MIB block + * @buff_size: size of the buffer (in bytes) + * @cmd_details: pointer to command details structure or NULL + * + * Set the LLDP MIB. + **/ +enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw, + u8 mib_type, void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_set_local_mib *cmd = + (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw; + enum i40e_status_code status; + + if (buff_size == 0 || !buff) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_lldp_set_local_mib); + /* Indirect Command */ + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + desc.datalen = CPU_TO_LE16(buff_size); + + cmd->type = mib_type; + cmd->length = CPU_TO_LE16(buff_size); + cmd->address_high = CPU_TO_LE32(I40E_HI_WORD((u64)buff)); + cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buff)); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + return status; +} + +/** + * i40e_aq_cfg_lldp_mib_change_event + * @hw: pointer to the hw struct + * @enable_update: Enable or Disable event posting + * @cmd_details: pointer to command details structure or NULL + * + * Enable or Disable posting of an event on ARQ when LLDP MIB + * associated with the interface changes + **/ +enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, + bool enable_update, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_update_mib *cmd = + (struct i40e_aqc_lldp_update_mib *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); + + if (!enable_update) + cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_add_lldp_tlv + * @hw: pointer to the hw struct + * @bridge_type: type of bridge + * @buff: buffer with 
TLV to add + * @buff_size: length of the buffer + * @tlv_len: length of the TLV to be added + * @mib_len: length of the LLDP MIB returned in response + * @cmd_details: pointer to command details structure or NULL + * + * Add the specified TLV to LLDP Local MIB for the given bridge type, + * it is responsibility of the caller to make sure that the TLV is not + * already present in the LLDPDU. + * In return firmware will write the complete LLDP MIB with the newly + * added TLV in the response buffer. + **/ +enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type, + void *buff, u16 buff_size, u16 tlv_len, + u16 *mib_len, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_add_tlv *cmd = + (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw; + enum i40e_status_code status; + + if (buff_size == 0 || !buff || tlv_len == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_add_tlv); + + /* Indirect Command */ + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + desc.datalen = CPU_TO_LE16(buff_size); + + cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & + I40E_AQ_LLDP_BRIDGE_TYPE_MASK); + cmd->len = CPU_TO_LE16(tlv_len); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + if (!status) { + if (mib_len != NULL) + *mib_len = LE16_TO_CPU(desc.datalen); + } + + return status; +} + +/** + * i40e_aq_update_lldp_tlv + * @hw: pointer to the hw struct + * @bridge_type: type of bridge + * @buff: buffer with TLV to update + * @buff_size: size of the buffer holding original and updated TLVs + * @old_len: Length of the Original TLV + * @new_len: Length of the Updated TLV + * @offset: offset of the updated TLV in the buff + * @mib_len: length of the returned LLDP MIB + * @cmd_details: pointer to command details structure or NULL + * + * Update the specified TLV to the LLDP Local MIB for the given bridge type. + * Firmware will place the complete LLDP MIB in response buffer with the + * updated TLV. 
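+ *
+ * Usage sketch (illustrative; the caller builds the buffer with the
+ * original TLV at the start and the replacement TLV at "offset"):
+ *
+ *    status = i40e_aq_update_lldp_tlv(hw, bridge_type, buf, buf_size,
+ *                                     old_len, new_len, offset,
+ *                                     &mib_len, NULL);
+ *    ... on success, mib_len is the length of the MIB returned in buf ...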
+ **/ +enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw, + u8 bridge_type, void *buff, u16 buff_size, + u16 old_len, u16 new_len, u16 offset, + u16 *mib_len, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_update_tlv *cmd = + (struct i40e_aqc_lldp_update_tlv *)&desc.params.raw; + enum i40e_status_code status; + + if (buff_size == 0 || !buff || offset == 0 || + old_len == 0 || new_len == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_tlv); + + /* Indirect Command */ + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + desc.datalen = CPU_TO_LE16(buff_size); + + cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & + I40E_AQ_LLDP_BRIDGE_TYPE_MASK); + cmd->old_len = CPU_TO_LE16(old_len); + cmd->new_offset = CPU_TO_LE16(offset); + cmd->new_len = CPU_TO_LE16(new_len); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + if (!status) { + if (mib_len != NULL) + *mib_len = LE16_TO_CPU(desc.datalen); + } + + return status; +} + +/** + * i40e_aq_delete_lldp_tlv + * @hw: pointer to the hw struct + * @bridge_type: type of bridge + * @buff: pointer to a user supplied buffer that has the TLV + * @buff_size: length of the buffer + * @tlv_len: length of the TLV to be deleted + * @mib_len: length of the returned LLDP MIB + * @cmd_details: pointer to command details structure or NULL + * + * Delete the specified TLV from LLDP Local MIB for the given bridge type. + * The firmware places the entire LLDP MIB in the response buffer. + **/ +enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw, + u8 bridge_type, void *buff, u16 buff_size, + u16 tlv_len, u16 *mib_len, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_add_tlv *cmd = + (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw; + enum i40e_status_code status; + + if (buff_size == 0 || !buff) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_delete_tlv); + + /* Indirect Command */ + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + desc.datalen = CPU_TO_LE16(buff_size); + cmd->len = CPU_TO_LE16(tlv_len); + cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & + I40E_AQ_LLDP_BRIDGE_TYPE_MASK); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + if (!status) { + if (mib_len != NULL) + *mib_len = LE16_TO_CPU(desc.datalen); + } + + return status; +} + +/** + * i40e_aq_stop_lldp + * @hw: pointer to the hw struct + * @shutdown_agent: True if LLDP Agent needs to be Shutdown + * @cmd_details: pointer to command details structure or NULL + * + * Stop or Shutdown the embedded LLDP Agent + **/ +enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_stop *cmd = + (struct i40e_aqc_lldp_stop *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop); + + if (shutdown_agent) + cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_start_lldp + * @hw: pointer to the hw struct + * @cmd_details: pointer 
to command details structure or NULL + * + * Start the embedded LLDP Agent on all ports. + **/ +enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_start *cmd = + (struct i40e_aqc_lldp_start *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); + + cmd->command = I40E_AQ_LLDP_AGENT_START; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_get_cee_dcb_config + * @hw: pointer to the hw struct + * @buff: response buffer that stores CEE operational configuration + * @buff_size: size of the buffer passed + * @cmd_details: pointer to command details structure or NULL + * + * Get CEE DCBX mode operational configuration from firmware + **/ +enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, + void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + enum i40e_status_code status; + + if (buff_size == 0 || !buff) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); + + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, + cmd_details); + + return status; +} + +/** + * i40e_aq_start_stop_dcbx - Start/Stop DCBx service in FW + * @hw: pointer to the hw struct + * @start_agent: True if DCBx Agent needs to be Started + * False if DCBx Agent needs to be Stopped + * @cmd_details: pointer to command details structure or NULL + * + * Start/Stop the embedded dcbx Agent + **/ +enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw, + bool start_agent, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_stop_start_specific_agent *cmd = + (struct i40e_aqc_lldp_stop_start_specific_agent *) + &desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_lldp_stop_start_spec_agent); + + if (start_agent) + cmd->command = I40E_AQC_START_SPECIFIC_AGENT_MASK; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_add_udp_tunnel + * @hw: pointer to the hw struct + * @udp_port: the UDP port to add + * @header_len: length of the tunneling header length in DWords + * @protocol_index: protocol index type + * @filter_index: pointer to filter index + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw, + u16 udp_port, u8 protocol_index, + u8 *filter_index, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_udp_tunnel *cmd = + (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; + struct i40e_aqc_del_udp_tunnel_completion *resp = + (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); + + cmd->udp_port = CPU_TO_LE16(udp_port); + cmd->protocol_type = protocol_index; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status && filter_index) + *filter_index = resp->index; + + return status; +} + +/** + * i40e_aq_del_udp_tunnel + * @hw: pointer to the hw struct + * @index: filter index + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_del_udp_tunnel(struct 
i40e_hw *hw, u8 index, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_remove_udp_tunnel *cmd = + (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); + + cmd->index = index; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_get_switch_resource_alloc (0x0204) + * @hw: pointer to the hw struct + * @num_entries: pointer to u8 to store the number of resource entries returned + * @buf: pointer to a user supplied buffer. This buffer must be large enough + * to store the resource information for all resource types. Each + * resource type is a i40e_aqc_switch_resource_alloc_data structure. + * @count: size, in bytes, of the buffer provided + * @cmd_details: pointer to command details structure or NULL + * + * Query the resources allocated to a function. + **/ +enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw, + u8 *num_entries, + struct i40e_aqc_switch_resource_alloc_element_resp *buf, + u16 count, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_get_switch_resource_alloc *cmd_resp = + (struct i40e_aqc_get_switch_resource_alloc *)&desc.params.raw; + enum i40e_status_code status; + u16 length = count * sizeof(*buf); + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_switch_resource_alloc); + + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + if (length > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details); + + if (!status && num_entries) + *num_entries = cmd_resp->num_entries; + + return status; +} + +/** + * i40e_aq_delete_element - Delete switch element + * @hw: pointer to the hw struct + * @seid: the SEID to delete from the switch + * @cmd_details: pointer to command details structure or NULL + * + * This deletes a switch element from the switch. + **/ +enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_switch_seid *cmd = + (struct i40e_aqc_switch_seid *)&desc.params.raw; + enum i40e_status_code status; + + if (seid == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); + + cmd->seid = CPU_TO_LE16(seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40_aq_add_pvirt - Instantiate a Port Virtualizer on a port + * @hw: pointer to the hw struct + * @flags: component flags + * @mac_seid: uplink seid (MAC SEID) + * @vsi_seid: connected vsi seid + * @ret_seid: seid of create pv component + * + * This instantiates an i40e port virtualizer with specified flags. + * Depending on specified flags the port virtualizer can act as a + * 802.1Qbr port virtualizer or a 802.1Qbg S-component. 
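+ *
+ * Call sketch (the flag value and the use of the returned SEID are up
+ * to the caller):
+ *
+ *    u16 pv_seid = 0;
+ *
+ *    status = i40e_aq_add_pvirt(hw, flags, mac_seid, vsi_seid, &pv_seid);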
+ */ +enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags, + u16 mac_seid, u16 vsi_seid, + u16 *ret_seid) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_update_pv *cmd = + (struct i40e_aqc_add_update_pv *)&desc.params.raw; + struct i40e_aqc_add_update_pv_completion *resp = + (struct i40e_aqc_add_update_pv_completion *)&desc.params.raw; + enum i40e_status_code status; + + if (vsi_seid == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_pv); + cmd->command_flags = CPU_TO_LE16(flags); + cmd->uplink_seid = CPU_TO_LE16(mac_seid); + cmd->connected_seid = CPU_TO_LE16(vsi_seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + if (!status && ret_seid) + *ret_seid = LE16_TO_CPU(resp->pv_seid); + + return status; +} + +/** + * i40e_aq_add_tag - Add an S/E-tag + * @hw: pointer to the hw struct + * @direct_to_queue: should s-tag direct flow to a specific queue + * @vsi_seid: VSI SEID to use this tag + * @tag: value of the tag + * @queue_num: queue number, only valid is direct_to_queue is true + * @tags_used: return value, number of tags in use by this PF + * @tags_free: return value, number of unallocated tags + * @cmd_details: pointer to command details structure or NULL + * + * This associates an S- or E-tag to a VSI in the switch complex. It returns + * the number of tags allocated by the PF, and the number of unallocated + * tags available. + **/ +enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue, + u16 vsi_seid, u16 tag, u16 queue_num, + u16 *tags_used, u16 *tags_free, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_tag *cmd = + (struct i40e_aqc_add_tag *)&desc.params.raw; + struct i40e_aqc_add_remove_tag_completion *resp = + (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw; + enum i40e_status_code status; + + if (vsi_seid == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_tag); + + cmd->seid = CPU_TO_LE16(vsi_seid); + cmd->tag = CPU_TO_LE16(tag); + if (direct_to_queue) { + cmd->flags = CPU_TO_LE16(I40E_AQC_ADD_TAG_FLAG_TO_QUEUE); + cmd->queue_number = CPU_TO_LE16(queue_num); + } + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status) { + if (tags_used != NULL) + *tags_used = LE16_TO_CPU(resp->tags_used); + if (tags_free != NULL) + *tags_free = LE16_TO_CPU(resp->tags_free); + } + + return status; +} + +/** + * i40e_aq_remove_tag - Remove an S- or E-tag + * @hw: pointer to the hw struct + * @vsi_seid: VSI SEID this tag is associated with + * @tag: value of the S-tag to delete + * @tags_used: return value, number of tags in use by this PF + * @tags_free: return value, number of unallocated tags + * @cmd_details: pointer to command details structure or NULL + * + * This deletes an S- or E-tag from a VSI in the switch complex. It returns + * the number of tags allocated by the PF, and the number of unallocated + * tags available. 
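+ *
+ * Sketch of the typical pairing with i40e_aq_add_tag() (the tag value
+ * and SEID are caller-supplied examples):
+ *
+ *    u16 used = 0, free_cnt = 0;
+ *
+ *    status = i40e_aq_remove_tag(hw, vsi_seid, tag, &used, &free_cnt,
+ *                                NULL);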
+ **/ +enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid, + u16 tag, u16 *tags_used, u16 *tags_free, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_remove_tag *cmd = + (struct i40e_aqc_remove_tag *)&desc.params.raw; + struct i40e_aqc_add_remove_tag_completion *resp = + (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw; + enum i40e_status_code status; + + if (vsi_seid == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_tag); + + cmd->seid = CPU_TO_LE16(vsi_seid); + cmd->tag = CPU_TO_LE16(tag); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status) { + if (tags_used != NULL) + *tags_used = LE16_TO_CPU(resp->tags_used); + if (tags_free != NULL) + *tags_free = LE16_TO_CPU(resp->tags_free); + } + + return status; +} + +/** + * i40e_aq_add_mcast_etag - Add a multicast E-tag + * @hw: pointer to the hw struct + * @pv_seid: Port Virtualizer of this SEID to associate E-tag with + * @etag: value of E-tag to add + * @num_tags_in_buf: number of unicast E-tags in indirect buffer + * @buf: address of indirect buffer + * @tags_used: return value, number of E-tags in use by this port + * @tags_free: return value, number of unallocated M-tags + * @cmd_details: pointer to command details structure or NULL + * + * This associates a multicast E-tag to a port virtualizer. It will return + * the number of tags allocated by the PF, and the number of unallocated + * tags available. + * + * The indirect buffer pointed to by buf is a list of 2-byte E-tags, + * num_tags_in_buf long. + **/ +enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pv_seid, + u16 etag, u8 num_tags_in_buf, void *buf, + u16 *tags_used, u16 *tags_free, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_mcast_etag *cmd = + (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw; + struct i40e_aqc_add_remove_mcast_etag_completion *resp = + (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw; + enum i40e_status_code status; + u16 length = sizeof(u16) * num_tags_in_buf; + + if ((pv_seid == 0) || (buf == NULL) || (num_tags_in_buf == 0)) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_add_multicast_etag); + + cmd->pv_seid = CPU_TO_LE16(pv_seid); + cmd->etag = CPU_TO_LE16(etag); + cmd->num_unicast_etags = num_tags_in_buf; + + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (length > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details); + + if (!status) { + if (tags_used != NULL) + *tags_used = LE16_TO_CPU(resp->mcast_etags_used); + if (tags_free != NULL) + *tags_free = LE16_TO_CPU(resp->mcast_etags_free); + } + + return status; +} + +/** + * i40e_aq_remove_mcast_etag - Remove a multicast E-tag + * @hw: pointer to the hw struct + * @pv_seid: Port Virtualizer SEID this M-tag is associated with + * @etag: value of the E-tag to remove + * @tags_used: return value, number of tags in use by this port + * @tags_free: return value, number of unallocated tags + * @cmd_details: pointer to command details structure or NULL + * + * This deletes an E-tag from the port virtualizer. It will return + * the number of tags allocated by the port, and the number of unallocated + * tags available. 
+ **/ +enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pv_seid, + u16 etag, u16 *tags_used, u16 *tags_free, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_mcast_etag *cmd = + (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw; + struct i40e_aqc_add_remove_mcast_etag_completion *resp = + (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw; + enum i40e_status_code status; + + + if (pv_seid == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_remove_multicast_etag); + + cmd->pv_seid = CPU_TO_LE16(pv_seid); + cmd->etag = CPU_TO_LE16(etag); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status) { + if (tags_used != NULL) + *tags_used = LE16_TO_CPU(resp->mcast_etags_used); + if (tags_free != NULL) + *tags_free = LE16_TO_CPU(resp->mcast_etags_free); + } + + return status; +} + +/** + * i40e_aq_update_tag - Update an S/E-tag + * @hw: pointer to the hw struct + * @vsi_seid: VSI SEID using this S-tag + * @old_tag: old tag value + * @new_tag: new tag value + * @tags_used: return value, number of tags in use by this PF + * @tags_free: return value, number of unallocated tags + * @cmd_details: pointer to command details structure or NULL + * + * This updates the value of the tag currently attached to this VSI + * in the switch complex. It will return the number of tags allocated + * by the PF, and the number of unallocated tags available. + **/ +enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid, + u16 old_tag, u16 new_tag, u16 *tags_used, + u16 *tags_free, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_update_tag *cmd = + (struct i40e_aqc_update_tag *)&desc.params.raw; + struct i40e_aqc_update_tag_completion *resp = + (struct i40e_aqc_update_tag_completion *)&desc.params.raw; + enum i40e_status_code status; + + if (vsi_seid == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_update_tag); + + cmd->seid = CPU_TO_LE16(vsi_seid); + cmd->old_tag = CPU_TO_LE16(old_tag); + cmd->new_tag = CPU_TO_LE16(new_tag); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status) { + if (tags_used != NULL) + *tags_used = LE16_TO_CPU(resp->tags_used); + if (tags_free != NULL) + *tags_free = LE16_TO_CPU(resp->tags_free); + } + + return status; +} + +/** + * i40e_aq_dcb_ignore_pfc - Ignore PFC for given TCs + * @hw: pointer to the hw struct + * @tcmap: TC map for request/release any ignore PFC condition + * @request: request or release ignore PFC condition + * @tcmap_ret: return TCs for which PFC is currently ignored + * @cmd_details: pointer to command details structure or NULL + * + * This sends out request/release to ignore PFC condition for a TC. + * It will return the TCs for which PFC is currently ignored. 
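+ *
+ * Sketch of requesting, then releasing, the ignore-PFC condition for
+ * TC0 (the 0x01 bitmap is an example value):
+ *
+ *    u8 ret_map = 0;
+ *
+ *    status = i40e_aq_dcb_ignore_pfc(hw, 0x01, true, &ret_map, NULL);
+ *    ...
+ *    status = i40e_aq_dcb_ignore_pfc(hw, 0x01, false, &ret_map, NULL);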
+ **/ +enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw, u8 tcmap, + bool request, u8 *tcmap_ret, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_pfc_ignore *cmd_resp = + (struct i40e_aqc_pfc_ignore *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_ignore_pfc); + + if (request) + cmd_resp->command_flags = I40E_AQC_PFC_IGNORE_SET; + + cmd_resp->tc_bitmap = tcmap; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status) { + if (tcmap_ret != NULL) + *tcmap_ret = cmd_resp->tc_bitmap; + } + + return status; +} + +/** + * i40e_aq_dcb_updated - DCB Updated Command + * @hw: pointer to the hw struct + * @cmd_details: pointer to command details structure or NULL + * + * When LLDP is handled in PF this command is used by the PF + * to notify EMP that a DCB setting is modified. + * When LLDP is handled in EMP this command is used by the PF + * to notify EMP whenever one of the following parameters get + * modified: + * - PFCLinkDelayAllowance in PRTDCB_GENC.PFCLDA + * - PCIRTT in PRTDCB_GENC.PCIRTT + * - Maximum Frame Size for non-FCoE TCs set by PRTDCB_TDPUC.MAX_TXFRAME. + * EMP will return when the shared RPB settings have been + * recomputed and modified. The retval field in the descriptor + * will be set to 0 when RPB is modified. + **/ +enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_add_statistics - Add a statistics block to a VLAN in a switch. + * @hw: pointer to the hw struct + * @seid: defines the SEID of the switch for which the stats are requested + * @vlan_id: the VLAN ID for which the statistics are requested + * @stat_index: index of the statistics counters block assigned to this VLAN + * @cmd_details: pointer to command details structure or NULL + * + * XL710 supports 128 smonVlanStats counters.This command is used to + * allocate a set of smonVlanStats counters to a specific VLAN in a specific + * switch. + **/ +enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid, + u16 vlan_id, u16 *stat_index, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_statistics *cmd_resp = + (struct i40e_aqc_add_remove_statistics *)&desc.params.raw; + enum i40e_status_code status; + + if ((seid == 0) || (stat_index == NULL)) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_statistics); + + cmd_resp->seid = CPU_TO_LE16(seid); + cmd_resp->vlan = CPU_TO_LE16(vlan_id); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status && stat_index) + *stat_index = LE16_TO_CPU(cmd_resp->stat_index); + + return status; +} + +/** + * i40e_aq_remove_statistics - Remove a statistics block to a VLAN in a switch. 
+ * @hw: pointer to the hw struct + * @seid: defines the SEID of the switch for which the stats are requested + * @vlan_id: the VLAN ID for which the statistics are requested + * @stat_index: index of the statistics counters block assigned to this VLAN + * @cmd_details: pointer to command details structure or NULL + * + * XL710 supports 128 smonVlanStats counters.This command is used to + * deallocate a set of smonVlanStats counters to a specific VLAN in a specific + * switch. + **/ +enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid, + u16 vlan_id, u16 stat_index, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_statistics *cmd = + (struct i40e_aqc_add_remove_statistics *)&desc.params.raw; + enum i40e_status_code status; + + if (seid == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_remove_statistics); + + cmd->seid = CPU_TO_LE16(seid); + cmd->vlan = CPU_TO_LE16(vlan_id); + cmd->stat_index = CPU_TO_LE16(stat_index); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_port_parameters - set physical port parameters. + * @hw: pointer to the hw struct + * @bad_frame_vsi: defines the VSI to which bad frames are forwarded + * @save_bad_pac: if set packets with errors are forwarded to the bad frames VSI + * @pad_short_pac: if set transmit packets smaller than 60 bytes are padded + * @double_vlan: if set double VLAN is enabled + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_set_port_parameters(struct i40e_hw *hw, + u16 bad_frame_vsi, bool save_bad_pac, + bool pad_short_pac, bool double_vlan, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aqc_set_port_parameters *cmd; + enum i40e_status_code status; + struct i40e_aq_desc desc; + u16 command_flags = 0; + + cmd = (struct i40e_aqc_set_port_parameters *)&desc.params.raw; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_port_parameters); + + cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi); + if (save_bad_pac) + command_flags |= I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS; + if (pad_short_pac) + command_flags |= I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS; + if (double_vlan) + command_flags |= I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA; + cmd->command_flags = CPU_TO_LE16(command_flags); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler + * @hw: pointer to the hw struct + * @seid: seid for the physical port/switching component/vsi + * @buff: Indirect buffer to hold data parameters and response + * @buff_size: Indirect buffer size + * @opcode: Tx scheduler AQ command opcode + * @cmd_details: pointer to command details structure or NULL + * + * Generic command handler for Tx scheduler AQ commands + **/ +static enum i40e_status_code i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, + void *buff, u16 buff_size, + enum i40e_admin_queue_opc opcode, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_tx_sched_ind *cmd = + (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; + enum i40e_status_code status; + bool cmd_param_flag = false; + + switch (opcode) { + case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit: + case i40e_aqc_opc_configure_vsi_tc_bw: + case i40e_aqc_opc_enable_switching_comp_ets: + case i40e_aqc_opc_modify_switching_comp_ets: + case 
i40e_aqc_opc_disable_switching_comp_ets: + case i40e_aqc_opc_configure_switching_comp_ets_bw_limit: + case i40e_aqc_opc_configure_switching_comp_bw_config: + cmd_param_flag = true; + break; + case i40e_aqc_opc_query_vsi_bw_config: + case i40e_aqc_opc_query_vsi_ets_sla_config: + case i40e_aqc_opc_query_switching_comp_ets_config: + case i40e_aqc_opc_query_port_ets_config: + case i40e_aqc_opc_query_switching_comp_bw_config: + cmd_param_flag = false; + break; + default: + return I40E_ERR_PARAM; + } + + i40e_fill_default_direct_cmd_desc(&desc, opcode); + + /* Indirect command */ + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + if (cmd_param_flag) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + desc.datalen = CPU_TO_LE16(buff_size); + + cmd->vsi_seid = CPU_TO_LE16(seid); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + + return status; +} + +/** + * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit + * @hw: pointer to the hw struct + * @seid: VSI seid + * @credit: BW limit credits (0 = disabled) + * @max_credit: Max BW limit credits + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, + u16 seid, u16 credit, u8 max_credit, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_configure_vsi_bw_limit *cmd = + (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_configure_vsi_bw_limit); + + cmd->vsi_seid = CPU_TO_LE16(seid); + cmd->credit = CPU_TO_LE16(credit); + cmd->max_credit = max_credit; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_config_switch_comp_bw_limit - Configure Switching component BW Limit + * @hw: pointer to the hw struct + * @seid: switching component seid + * @credit: BW limit credits (0 = disabled) + * @max_bw: Max BW limit credits + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw, + u16 seid, u16 credit, u8 max_bw, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_configure_switching_comp_bw_limit *cmd = + (struct i40e_aqc_configure_switching_comp_bw_limit *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_configure_switching_comp_bw_limit); + + cmd->seid = CPU_TO_LE16(seid); + cmd->credit = CPU_TO_LE16(credit); + cmd->max_bw = max_bw; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_config_vsi_ets_sla_bw_limit - Config VSI BW Limit per TC + * @hw: pointer to the hw struct + * @seid: VSI seid + * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_configure_vsi_ets_sla_bw_limit, + cmd_details); +} + +/** + * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC + * @hw: pointer to the hw struct + * @seid: VSI seid + * @bw_data: Buffer holding 
enabled TCs, relative TC BW limit/credits + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_configure_vsi_tc_bw, + cmd_details); +} + +/** + * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port + * @hw: pointer to the hw struct + * @seid: seid of the switching component connected to Physical Port + * @ets_data: Buffer holding ETS parameters + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_switching_comp_ets_data *ets_data, + enum i40e_admin_queue_opc opcode, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data, + sizeof(*ets_data), opcode, cmd_details); +} + +/** + * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC + * @hw: pointer to the hw struct + * @seid: seid of the switching component + * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_configure_switching_comp_bw_config, + cmd_details); +} + +/** + * i40e_aq_config_switch_comp_ets_bw_limit - Config Switch comp BW Limit per TC + * @hw: pointer to the hw struct + * @seid: seid of the switching component + * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit( + struct i40e_hw *hw, u16 seid, + struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_configure_switching_comp_ets_bw_limit, + cmd_details); +} + +/** + * i40e_aq_query_vsi_bw_config - Query VSI BW configuration + * @hw: pointer to the hw struct + * @seid: seid of the VSI + * @bw_data: Buffer to hold VSI BW configuration + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_vsi_bw_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_query_vsi_bw_config, + cmd_details); +} + +/** + * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC + * @hw: pointer to the hw struct + * @seid: seid of the VSI + * @bw_data: Buffer to hold VSI BW configuration per TC + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_query_vsi_ets_sla_config, + cmd_details); +} + +/** + * 
i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC + * @hw: pointer to the hw struct + * @seid: seid of the switching component + * @bw_data: Buffer to hold switching component's per TC BW config + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_query_switching_comp_ets_config, + cmd_details); +} + +/** + * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration + * @hw: pointer to the hw struct + * @seid: seid of the VSI or switching component connected to Physical Port + * @bw_data: Buffer to hold current ETS configuration for the Physical Port + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_query_port_ets_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_port_ets_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_query_port_ets_config, + cmd_details); +} + +/** + * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration + * @hw: pointer to the hw struct + * @seid: seid of the switching component + * @bw_data: Buffer to hold switching component's BW configuration + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_query_switching_comp_bw_config, + cmd_details); +} + +/** + * i40e_validate_filter_settings + * @hw: pointer to the hardware structure + * @settings: Filter control settings + * + * Check and validate the filter control settings passed. + * The function checks for the valid filter/context sizes being + * passed for FCoE and PE. + * + * Returns I40E_SUCCESS if the values passed are valid and within + * range else returns an error. 
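+ *
+ * Editorial note (not from the original source): each *_filt_num and
+ * *_cntx_num field is an enum shift count, so the effective table sizes
+ * are derived as, e.g.:
+ *
+ *	fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE << settings->fcoe_filt_num;
+ *	fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE << settings->fcoe_cntx_num;
+ *
+ * and the combined FCoE filter and context size must fit within the
+ * PMFCOEFMAX field of I40E_GLHMC_FCOEFMAX, as checked below.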
+ **/ +STATIC enum i40e_status_code i40e_validate_filter_settings(struct i40e_hw *hw, + struct i40e_filter_control_settings *settings) +{ + u32 fcoe_cntx_size, fcoe_filt_size; + u32 pe_cntx_size, pe_filt_size; + u32 fcoe_fmax; + + u32 val; + + /* Validate FCoE settings passed */ + switch (settings->fcoe_filt_num) { + case I40E_HASH_FILTER_SIZE_1K: + case I40E_HASH_FILTER_SIZE_2K: + case I40E_HASH_FILTER_SIZE_4K: + case I40E_HASH_FILTER_SIZE_8K: + case I40E_HASH_FILTER_SIZE_16K: + case I40E_HASH_FILTER_SIZE_32K: + fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE; + fcoe_filt_size <<= (u32)settings->fcoe_filt_num; + break; + default: + return I40E_ERR_PARAM; + } + + switch (settings->fcoe_cntx_num) { + case I40E_DMA_CNTX_SIZE_512: + case I40E_DMA_CNTX_SIZE_1K: + case I40E_DMA_CNTX_SIZE_2K: + case I40E_DMA_CNTX_SIZE_4K: + fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; + fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; + break; + default: + return I40E_ERR_PARAM; + } + + /* Validate PE settings passed */ + switch (settings->pe_filt_num) { + case I40E_HASH_FILTER_SIZE_1K: + case I40E_HASH_FILTER_SIZE_2K: + case I40E_HASH_FILTER_SIZE_4K: + case I40E_HASH_FILTER_SIZE_8K: + case I40E_HASH_FILTER_SIZE_16K: + case I40E_HASH_FILTER_SIZE_32K: + case I40E_HASH_FILTER_SIZE_64K: + case I40E_HASH_FILTER_SIZE_128K: + case I40E_HASH_FILTER_SIZE_256K: + case I40E_HASH_FILTER_SIZE_512K: + case I40E_HASH_FILTER_SIZE_1M: + pe_filt_size = I40E_HASH_FILTER_BASE_SIZE; + pe_filt_size <<= (u32)settings->pe_filt_num; + break; + default: + return I40E_ERR_PARAM; + } + + switch (settings->pe_cntx_num) { + case I40E_DMA_CNTX_SIZE_512: + case I40E_DMA_CNTX_SIZE_1K: + case I40E_DMA_CNTX_SIZE_2K: + case I40E_DMA_CNTX_SIZE_4K: + case I40E_DMA_CNTX_SIZE_8K: + case I40E_DMA_CNTX_SIZE_16K: + case I40E_DMA_CNTX_SIZE_32K: + case I40E_DMA_CNTX_SIZE_64K: + case I40E_DMA_CNTX_SIZE_128K: + case I40E_DMA_CNTX_SIZE_256K: + pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; + pe_cntx_size <<= (u32)settings->pe_cntx_num; + break; + default: + return I40E_ERR_PARAM; + } + + /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ + val = rd32(hw, I40E_GLHMC_FCOEFMAX); + fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) + >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; + if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) + return I40E_ERR_INVALID_SIZE; + + return I40E_SUCCESS; +} + +/** + * i40e_set_filter_control + * @hw: pointer to the hardware structure + * @settings: Filter control settings + * + * Set the Queue Filters for PE/FCoE and enable filters required + * for a single PF. It is expected that these settings are programmed + * at the driver initialization time. 
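+ *
+ * Minimal usage sketch (editorial, not part of the original driver; hw is
+ * the usual struct i40e_hw pointer and error handling is elided):
+ *
+ *	struct i40e_filter_control_settings settings;
+ *	enum i40e_status_code status;
+ *
+ *	memset(&settings, 0, sizeof(settings));
+ *	settings.pe_filt_num = I40E_HASH_FILTER_SIZE_1K;
+ *	settings.pe_cntx_num = I40E_DMA_CNTX_SIZE_512;
+ *	settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_1K;
+ *	settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_512;
+ *	settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
+ *	settings.enable_fdir = true;
+ *	settings.enable_ethtype = true;
+ *	settings.enable_macvlan = true;
+ *	status = i40e_set_filter_control(hw, &settings);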
+ **/ +enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw, + struct i40e_filter_control_settings *settings) +{ + enum i40e_status_code ret = I40E_SUCCESS; + u32 hash_lut_size = 0; + u32 val; + + if (!settings) + return I40E_ERR_PARAM; + + /* Validate the input settings */ + ret = i40e_validate_filter_settings(hw, settings); + if (ret) + return ret; + + /* Read the PF Queue Filter control register */ + val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); + + /* Program required PE hash buckets for the PF */ + val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; + val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) & + I40E_PFQF_CTL_0_PEHSIZE_MASK; + /* Program required PE contexts for the PF */ + val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; + val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) & + I40E_PFQF_CTL_0_PEDSIZE_MASK; + + /* Program required FCoE hash buckets for the PF */ + val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; + val |= ((u32)settings->fcoe_filt_num << + I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) & + I40E_PFQF_CTL_0_PFFCHSIZE_MASK; + /* Program required FCoE DDP contexts for the PF */ + val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; + val |= ((u32)settings->fcoe_cntx_num << + I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) & + I40E_PFQF_CTL_0_PFFCDSIZE_MASK; + + /* Program Hash LUT size for the PF */ + val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; + if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512) + hash_lut_size = 1; + val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) & + I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; + + /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ + if (settings->enable_fdir) + val |= I40E_PFQF_CTL_0_FD_ENA_MASK; + if (settings->enable_ethtype) + val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK; + if (settings->enable_macvlan) + val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; + + i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); + + return I40E_SUCCESS; +} + +/** + * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter + * @hw: pointer to the hw struct + * @mac_addr: MAC address to use in the filter + * @ethtype: Ethertype to use in the filter + * @flags: Flags that needs to be applied to the filter + * @vsi_seid: seid of the control VSI + * @queue: VSI queue number to send the packet to + * @is_add: Add control packet filter if True else remove + * @stats: Structure to hold information on control filter counts + * @cmd_details: pointer to command details structure or NULL + * + * This command will Add or Remove control packet filter for a control VSI. + * In return it will update the total number of perfect filter count in + * the stats member. 
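+ *
+ * For a concrete in-tree caller, see
+ * i40e_add_filter_to_drop_tx_flow_control_frames() below, which installs a
+ * Tx-side drop filter for Ethertype 0x8808 (flow control frames) with a
+ * NULL MAC address and the IGNORE_MAC flag set.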
+ **/ +enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, + u8 *mac_addr, u16 ethtype, u16 flags, + u16 vsi_seid, u16 queue, bool is_add, + struct i40e_control_filter_stats *stats, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_control_packet_filter *cmd = + (struct i40e_aqc_add_remove_control_packet_filter *) + &desc.params.raw; + struct i40e_aqc_add_remove_control_packet_filter_completion *resp = + (struct i40e_aqc_add_remove_control_packet_filter_completion *) + &desc.params.raw; + enum i40e_status_code status; + + if (vsi_seid == 0) + return I40E_ERR_PARAM; + + if (is_add) { + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_add_control_packet_filter); + cmd->queue = CPU_TO_LE16(queue); + } else { + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_remove_control_packet_filter); + } + + if (mac_addr) + i40e_memcpy(cmd->mac, mac_addr, I40E_ETH_LENGTH_OF_ADDRESS, + I40E_NONDMA_TO_NONDMA); + + cmd->etype = CPU_TO_LE16(ethtype); + cmd->flags = CPU_TO_LE16(flags); + cmd->seid = CPU_TO_LE16(vsi_seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status && stats) { + stats->mac_etype_used = LE16_TO_CPU(resp->mac_etype_used); + stats->etype_used = LE16_TO_CPU(resp->etype_used); + stats->mac_etype_free = LE16_TO_CPU(resp->mac_etype_free); + stats->etype_free = LE16_TO_CPU(resp->etype_free); + } + + return status; +} + +/** + * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control + * @hw: pointer to the hw struct + * @seid: VSI seid to add ethertype filter from + **/ +#define I40E_FLOW_CONTROL_ETHTYPE 0x8808 +void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, + u16 seid) +{ + u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | + I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | + I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; + u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; + enum i40e_status_code status; + + status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, + seid, 0, true, NULL, + NULL); + if (status) + DEBUGOUT("Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); +} + +/** + * i40e_aq_add_cloud_filters + * @hw: pointer to the hardware structure + * @seid: VSI seid to add cloud filters from + * @filters: Buffer which contains the filters to be added + * @filter_count: number of filters contained in the buffer + * + * Set the cloud filters for a given VSI. The contents of the + * i40e_aqc_add_remove_cloud_filters_element_data are filled + * in by the caller of the function. 
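+ *
+ * Minimal usage sketch (editorial; vsi_seid identifies the target VSI and
+ * the element's match fields and flags would be filled in between the
+ * memset() and the call, as described above):
+ *
+ *	struct i40e_aqc_add_remove_cloud_filters_element_data filter;
+ *	enum i40e_status_code status;
+ *
+ *	memset(&filter, 0, sizeof(filter));
+ *	status = i40e_aq_add_cloud_filters(hw, vsi_seid, &filter, 1);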
+ * + **/ +enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_add_remove_cloud_filters_element_data *filters, + u8 filter_count) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_cloud_filters *cmd = + (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; + u16 buff_len; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_add_cloud_filters); + + buff_len = filter_count * sizeof(*filters); + desc.datalen = CPU_TO_LE16(buff_len); + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + cmd->num_filters = filter_count; + cmd->seid = CPU_TO_LE16(seid); + + status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); + + return status; +} + +/** + * i40e_aq_remove_cloud_filters + * @hw: pointer to the hardware structure + * @seid: VSI seid to remove cloud filters from + * @filters: Buffer which contains the filters to be removed + * @filter_count: number of filters contained in the buffer + * + * Remove the cloud filters for a given VSI. The contents of the + * i40e_aqc_add_remove_cloud_filters_element_data are filled + * in by the caller of the function. + * + **/ +enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_add_remove_cloud_filters_element_data *filters, + u8 filter_count) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_cloud_filters *cmd = + (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; + enum i40e_status_code status; + u16 buff_len; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_remove_cloud_filters); + + buff_len = filter_count * sizeof(*filters); + desc.datalen = CPU_TO_LE16(buff_len); + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + cmd->num_filters = filter_count; + cmd->seid = CPU_TO_LE16(seid); + + status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); + + return status; +} + +/** + * i40e_aq_alternate_write + * @hw: pointer to the hardware structure + * @reg_addr0: address of first dword to be read + * @reg_val0: value to be written under 'reg_addr0' + * @reg_addr1: address of second dword to be read + * @reg_val1: value to be written under 'reg_addr1' + * + * Write one or two dwords to alternate structure. Fields are indicated + * by 'reg_addr0' and 'reg_addr1' register numbers. + * + **/ +enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw, + u32 reg_addr0, u32 reg_val0, + u32 reg_addr1, u32 reg_val1) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_alternate_write *cmd_resp = + (struct i40e_aqc_alternate_write *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_write); + cmd_resp->address0 = CPU_TO_LE32(reg_addr0); + cmd_resp->address1 = CPU_TO_LE32(reg_addr1); + cmd_resp->data0 = CPU_TO_LE32(reg_val0); + cmd_resp->data1 = CPU_TO_LE32(reg_val1); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + + return status; +} + +/** + * i40e_aq_alternate_write_indirect + * @hw: pointer to the hardware structure + * @addr: address of a first register to be modified + * @dw_count: number of alternate structure fields to write + * @buffer: pointer to the command buffer + * + * Write 'dw_count' dwords from 'buffer' to alternate structure + * starting at 'addr'. 
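+ *
+ * Minimal usage sketch (editorial; alt_addr is a caller-chosen alternate
+ * structure dword address and the data values are arbitrary):
+ *
+ *	u32 words[2] = { 0x12345678, 0x9abcdef0 };
+ *
+ *	status = i40e_aq_alternate_write_indirect(hw, alt_addr, 2, words);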
+ * + **/ +enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw, + u32 addr, u32 dw_count, void *buffer) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_alternate_ind_write *cmd_resp = + (struct i40e_aqc_alternate_ind_write *)&desc.params.raw; + enum i40e_status_code status; + + if (buffer == NULL) + return I40E_ERR_PARAM; + + /* Indirect command */ + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_alternate_write_indirect); + + desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD); + desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF); + if (dw_count > (I40E_AQ_LARGE_BUF/4)) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + cmd_resp->address = CPU_TO_LE32(addr); + cmd_resp->length = CPU_TO_LE32(dw_count); + + status = i40e_asq_send_command(hw, &desc, buffer, + I40E_LO_DWORD(4*dw_count), NULL); + + return status; +} + +/** + * i40e_aq_alternate_read + * @hw: pointer to the hardware structure + * @reg_addr0: address of first dword to be read + * @reg_val0: pointer for data read from 'reg_addr0' + * @reg_addr1: address of second dword to be read + * @reg_val1: pointer for data read from 'reg_addr1' + * + * Read one or two dwords from alternate structure. Fields are indicated + * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer + * is not passed then only register at 'reg_addr0' is read. + * + **/ +enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw, + u32 reg_addr0, u32 *reg_val0, + u32 reg_addr1, u32 *reg_val1) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_alternate_write *cmd_resp = + (struct i40e_aqc_alternate_write *)&desc.params.raw; + enum i40e_status_code status; + + if (reg_val0 == NULL) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); + cmd_resp->address0 = CPU_TO_LE32(reg_addr0); + cmd_resp->address1 = CPU_TO_LE32(reg_addr1); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + + if (status == I40E_SUCCESS) { + *reg_val0 = LE32_TO_CPU(cmd_resp->data0); + + if (reg_val1 != NULL) + *reg_val1 = LE32_TO_CPU(cmd_resp->data1); + } + + return status; +} + +/** + * i40e_aq_alternate_read_indirect + * @hw: pointer to the hardware structure + * @addr: address of the alternate structure field + * @dw_count: number of alternate structure fields to read + * @buffer: pointer to the command buffer + * + * Read 'dw_count' dwords from alternate structure starting at 'addr' and + * place them in 'buffer'. The buffer should be allocated by caller. + * + **/ +enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw, + u32 addr, u32 dw_count, void *buffer) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_alternate_ind_write *cmd_resp = + (struct i40e_aqc_alternate_ind_write *)&desc.params.raw; + enum i40e_status_code status; + + if (buffer == NULL) + return I40E_ERR_PARAM; + + /* Indirect command */ + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_alternate_read_indirect); + + desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD); + desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF); + if (dw_count > (I40E_AQ_LARGE_BUF/4)) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + cmd_resp->address = CPU_TO_LE32(addr); + cmd_resp->length = CPU_TO_LE32(dw_count); + + status = i40e_asq_send_command(hw, &desc, buffer, + I40E_LO_DWORD(4*dw_count), NULL); + + return status; +} + +/** + * i40e_aq_alternate_clear + * @hw: pointer to the HW structure. + * + * Clear the alternate structures of the port from which the function + * is called. 
+ * + **/ +enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw) +{ + struct i40e_aq_desc desc; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_alternate_clear_port); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + + return status; +} + +/** + * i40e_aq_alternate_write_done + * @hw: pointer to the HW structure. + * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS + * @reset_needed: indicates the SW should trigger GLOBAL reset + * + * Indicates to the FW that alternate structures have been changed. + * + **/ +enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw, + u8 bios_mode, bool *reset_needed) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_alternate_write_done *cmd = + (struct i40e_aqc_alternate_write_done *)&desc.params.raw; + enum i40e_status_code status; + + if (reset_needed == NULL) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_alternate_write_done); + + cmd->cmd_flags = CPU_TO_LE16(bios_mode); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + if (!status && reset_needed) + *reset_needed = ((LE16_TO_CPU(cmd->cmd_flags) & + I40E_AQ_ALTERNATE_RESET_NEEDED) != 0); + + return status; +} + +/** + * i40e_aq_set_oem_mode + * @hw: pointer to the HW structure. + * @oem_mode: the OEM mode to be used + * + * Sets the device to a specific operating mode. Currently the only supported + * mode is no_clp, which causes FW to refrain from using Alternate RAM. + * + **/ +enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw, + u8 oem_mode) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_alternate_write_done *cmd = + (struct i40e_aqc_alternate_write_done *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_alternate_set_mode); + + cmd->cmd_flags = CPU_TO_LE16(oem_mode); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + + return status; +} + +/** + * i40e_aq_resume_port_tx + * @hw: pointer to the hardware structure + * @cmd_details: pointer to command details structure or NULL + * + * Resume port's Tx traffic + **/ +enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_set_pci_config_data - store PCI bus info + * @hw: pointer to hardware structure + * @link_status: the link status word from PCI config space + * + * Stores the PCI bus info (speed, width, type) within the i40e_hw structure + **/ +void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) +{ + hw->bus.type = i40e_bus_type_pci_express; + + switch (link_status & I40E_PCI_LINK_WIDTH) { + case I40E_PCI_LINK_WIDTH_1: + hw->bus.width = i40e_bus_width_pcie_x1; + break; + case I40E_PCI_LINK_WIDTH_2: + hw->bus.width = i40e_bus_width_pcie_x2; + break; + case I40E_PCI_LINK_WIDTH_4: + hw->bus.width = i40e_bus_width_pcie_x4; + break; + case I40E_PCI_LINK_WIDTH_8: + hw->bus.width = i40e_bus_width_pcie_x8; + break; + default: + hw->bus.width = i40e_bus_width_unknown; + break; + } + + switch (link_status & I40E_PCI_LINK_SPEED) { + case I40E_PCI_LINK_SPEED_2500: + hw->bus.speed = i40e_bus_speed_2500; + break; + case I40E_PCI_LINK_SPEED_5000: + hw->bus.speed = i40e_bus_speed_5000; + break; + case 
I40E_PCI_LINK_SPEED_8000: + hw->bus.speed = i40e_bus_speed_8000; + break; + default: + hw->bus.speed = i40e_bus_speed_unknown; + break; + } +} + +/** + * i40e_aq_debug_dump + * @hw: pointer to the hardware structure + * @cluster_id: specific cluster to dump + * @table_id: table id within cluster + * @start_index: index of line in the block to read + * @buff_size: dump buffer size + * @buff: dump buffer + * @ret_buff_size: actual buffer size returned + * @ret_next_table: next block to read + * @ret_next_index: next index to read + * + * Dump internal FW/HW data for debug purposes. + * + **/ +enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, + u8 table_id, u32 start_index, u16 buff_size, + void *buff, u16 *ret_buff_size, + u8 *ret_next_table, u32 *ret_next_index, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_debug_dump_internals *cmd = + (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; + struct i40e_aqc_debug_dump_internals *resp = + (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; + enum i40e_status_code status; + + if (buff_size == 0 || !buff) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_debug_dump_internals); + /* Indirect Command */ + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + cmd->cluster_id = cluster_id; + cmd->table_id = table_id; + cmd->idx = CPU_TO_LE32(start_index); + + desc.datalen = CPU_TO_LE16(buff_size); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + if (!status) { + if (ret_buff_size != NULL) + *ret_buff_size = LE16_TO_CPU(desc.datalen); + if (ret_next_table != NULL) + *ret_next_table = resp->table_id; + if (ret_next_index != NULL) + *ret_next_index = LE32_TO_CPU(resp->idx); + } + + return status; +} + +/** + * i40e_read_bw_from_alt_ram + * @hw: pointer to the hardware structure + * @max_bw: pointer for max_bw read + * @min_bw: pointer for min_bw read + * @min_valid: pointer for bool that is true if min_bw is a valid value + * @max_valid: pointer for bool that is true if max_bw is a valid value + * + * Read bw from the alternate ram for the given pf + **/ +enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw, + u32 *max_bw, u32 *min_bw, + bool *min_valid, bool *max_valid) +{ + enum i40e_status_code status; + u32 max_bw_addr, min_bw_addr; + + /* Calculate the address of the min/max bw registers */ + max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + + I40E_ALT_STRUCT_MAX_BW_OFFSET + + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); + min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + + I40E_ALT_STRUCT_MIN_BW_OFFSET + + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); + + /* Read the bandwidths from alt ram */ + status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, + min_bw_addr, min_bw); + + if (*min_bw & I40E_ALT_BW_VALID_MASK) + *min_valid = true; + else + *min_valid = false; + + if (*max_bw & I40E_ALT_BW_VALID_MASK) + *max_valid = true; + else + *max_valid = false; + + return status; +} + +/** + * i40e_aq_configure_partition_bw + * @hw: pointer to the hardware structure + * @bw_data: Buffer holding valid pfs and bw limits + * @cmd_details: pointer to command details + * + * Configure partitions guaranteed/max bw + **/ +enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw, + struct i40e_aqc_configure_partition_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + enum 
i40e_status_code status; + struct i40e_aq_desc desc; + u16 bwd_size = sizeof(*bw_data); + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_configure_partition_bw); + + /* Indirect command */ + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD); + + if (bwd_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + + desc.datalen = CPU_TO_LE16(bwd_size); + + status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, cmd_details); + + return status; +} + +/** + * i40e_read_phy_register + * @hw: pointer to the HW structure + * @page: registers page number + * @reg: register address in the page + * @phy_adr: PHY address on MDIO interface + * @value: PHY register value + * + * Reads specified PHY register value + **/ +enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw, + u8 page, u16 reg, u8 phy_addr, + u16 *value) +{ + enum i40e_status_code status = I40E_ERR_TIMEOUT; + u32 command = 0; + u16 retry = 1000; + u8 port_num = (u8)hw->func_caps.mdio_port_num; + + command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | + (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | + (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | + (I40E_MDIO_OPCODE_ADDRESS) | + (I40E_MDIO_STCODE) | + (I40E_GLGEN_MSCA_MDICMD_MASK) | + (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); + wr32(hw, I40E_GLGEN_MSCA(port_num), command); + do { + command = rd32(hw, I40E_GLGEN_MSCA(port_num)); + if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { + status = I40E_SUCCESS; + break; + } + i40e_usec_delay(10); + retry--; + } while (retry); + + if (status) { + i40e_debug(hw, I40E_DEBUG_PHY, + "PHY: Can't write command to external PHY.\n"); + goto phy_read_end; + } + + command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | + (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | + (I40E_MDIO_OPCODE_READ) | + (I40E_MDIO_STCODE) | + (I40E_GLGEN_MSCA_MDICMD_MASK) | + (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); + status = I40E_ERR_TIMEOUT; + retry = 1000; + wr32(hw, I40E_GLGEN_MSCA(port_num), command); + do { + command = rd32(hw, I40E_GLGEN_MSCA(port_num)); + if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { + status = I40E_SUCCESS; + break; + } + i40e_usec_delay(10); + retry--; + } while (retry); + + if (!status) { + command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); + *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> + I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; + } else { + i40e_debug(hw, I40E_DEBUG_PHY, + "PHY: Can't read register value from external PHY.\n"); + } + +phy_read_end: + return status; +} + +/** + * i40e_write_phy_register + * @hw: pointer to the HW structure + * @page: registers page number + * @reg: register address in the page + * @phy_adr: PHY address on MDIO interface + * @value: PHY register value + * + * Writes value to specified PHY register + **/ +enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw, + u8 page, u16 reg, u8 phy_addr, + u16 value) +{ + enum i40e_status_code status = I40E_ERR_TIMEOUT; + u32 command = 0; + u16 retry = 1000; + u8 port_num = (u8)hw->func_caps.mdio_port_num; + + command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | + (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | + (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | + (I40E_MDIO_OPCODE_ADDRESS) | + (I40E_MDIO_STCODE) | + (I40E_GLGEN_MSCA_MDICMD_MASK) | + (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); + wr32(hw, I40E_GLGEN_MSCA(port_num), command); + do { + command = rd32(hw, I40E_GLGEN_MSCA(port_num)); + if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { + status = I40E_SUCCESS; + break; + } + i40e_usec_delay(10); + retry--; + } while 
(retry); + if (status) { + i40e_debug(hw, I40E_DEBUG_PHY, + "PHY: Can't write command to external PHY.\n"); + goto phy_write_end; + } + + command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; + wr32(hw, I40E_GLGEN_MSRWD(port_num), command); + + command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | + (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | + (I40E_MDIO_OPCODE_WRITE) | + (I40E_MDIO_STCODE) | + (I40E_GLGEN_MSCA_MDICMD_MASK) | + (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); + status = I40E_ERR_TIMEOUT; + retry = 1000; + wr32(hw, I40E_GLGEN_MSCA(port_num), command); + do { + command = rd32(hw, I40E_GLGEN_MSCA(port_num)); + if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { + status = I40E_SUCCESS; + break; + } + i40e_usec_delay(10); + retry--; + } while (retry); + +phy_write_end: + return status; +} + +/** + * i40e_get_phy_address + * @hw: pointer to the HW structure + * @dev_num: PHY device number whose address we want + * + * Returns the PHY address for the current port + **/ +u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) +{ + u8 port_num = (u8)hw->func_caps.mdio_port_num; + u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num)); + + return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f; +} + +/** + * i40e_blink_phy_link_led + * @hw: pointer to the HW structure + * @time: how long the LED should blink, in seconds + * @interval: gap between LED on and off, in milliseconds + * + * Blinks the PHY link LED + **/ +enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw, + u32 time, u32 interval) +{ + enum i40e_status_code status = I40E_SUCCESS; + u32 i; + u16 led_ctl = 0; + u16 gpio_led_port; + u16 led_reg; + u16 led_addr = I40E_PHY_LED_PROV_REG_1; + u8 phy_addr = 0; + u8 port_num; + + i = rd32(hw, I40E_PFGEN_PORTNUM); + port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); + phy_addr = i40e_get_phy_address(hw, port_num); + + for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, + led_addr++) { + status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, &led_reg); + if (status) + goto phy_blinking_end; + led_ctl = led_reg; + if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { + led_reg = 0; + status = i40e_write_phy_register(hw, + I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, + led_reg); + if (status) + goto phy_blinking_end; + break; + } + } + + if (time > 0 && interval > 0) { + for (i = 0; i < time * 1000; i += interval) { + status = i40e_read_phy_register(hw, + I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, + &led_reg); + if (status) + goto restore_config; + if (led_reg & I40E_PHY_LED_MANUAL_ON) + led_reg = 0; + else + led_reg = I40E_PHY_LED_MANUAL_ON; + status = i40e_write_phy_register(hw, + I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, + led_reg); + if (status) + goto restore_config; + i40e_msec_delay(interval); + } + } + +restore_config: + status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr, + phy_addr, led_ctl); + +phy_blinking_end: + return status; +} + +/** + * i40e_led_get_phy - return current on/off mode + * @hw: pointer to the hw struct + * @led_addr: address of the LED register to use + * @val: original value of the LED register + * + **/ +enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, + u16 *val) +{ + enum i40e_status_code status = I40E_SUCCESS; + u16 gpio_led_port; + u8 phy_addr = 0; + u16 reg_val; + u16 temp_addr; + u8 port_num; + u32 i; + + temp_addr = I40E_PHY_LED_PROV_REG_1; + i = rd32(hw, I40E_PFGEN_PORTNUM); + port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); + phy_addr = i40e_get_phy_address(hw, port_num); + 
+ for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, + temp_addr++) { + status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, + temp_addr, phy_addr, &reg_val); + if (status) + return status; + *val = reg_val; + if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) { + *led_addr = temp_addr; + break; + } + } + return status; +} + +/** + * i40e_led_set_phy + * @hw: pointer to the HW structure + * @on: true to turn the LED on, false to turn it off + * @led_addr: address of the LED register to use + * @mode: original value plus bit for set or ignore + * + * Set the LED on or off when it is controlled by the PHY + * + **/ +enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on, + u16 led_addr, u32 mode) +{ + enum i40e_status_code status = I40E_SUCCESS; + u16 led_ctl = 0; + u16 led_reg = 0; + u8 phy_addr = 0; + u8 port_num; + u32 i; + + i = rd32(hw, I40E_PFGEN_PORTNUM); + port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); + phy_addr = i40e_get_phy_address(hw, port_num); + + status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr, + phy_addr, &led_reg); + if (status) + return status; + led_ctl = led_reg; + if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { + led_reg = 0; + status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, led_reg); + if (status) + return status; + } + status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, &led_reg); + if (status) + goto restore_config; + if (on) + led_reg = I40E_PHY_LED_MANUAL_ON; + else + led_reg = 0; + status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, led_reg); + if (status) + goto restore_config; + if (mode & I40E_PHY_LED_MODE_ORIG) { + led_ctl = (mode & I40E_PHY_LED_MODE_MASK); + status = i40e_write_phy_register(hw, + I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, led_ctl); + } + return status; +restore_config: + status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr, + phy_addr, led_ctl); + return status; +} +#endif /* PF_DRIVER */ + +/** + * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: ptr to register value + * @cmd_details: pointer to command details structure or NULL + * + * Use the firmware to read the Rx control register, + * especially useful if the Rx unit is under heavy pressure + **/ +enum i40e_status_code i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = + (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; + enum i40e_status_code status; + + if (reg_val == NULL) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read); + + cmd_resp->address = CPU_TO_LE32(reg_addr); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (status == I40E_SUCCESS) + *reg_val = LE32_TO_CPU(cmd_resp->value); + + return status; +} + +/** + * i40e_read_rx_ctl - read from an Rx control register + * @hw: pointer to the hw struct + * @reg_addr: register address + **/ +u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) +{ + enum i40e_status_code status = I40E_SUCCESS; + bool use_register; + int retry = 5; + u32 val = 0; + + use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5); + if (!use_register) { +do_retry: + status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL); + if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { + i40e_msec_delay(1); + retry--; + goto do_retry; + } + } + + 
/* if the AQ access failed, try the old-fashioned way */ + if (status || use_register) + val = rd32(hw, reg_addr); + + return val; +} + +/** + * i40e_aq_rx_ctl_write_register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: register value + * @cmd_details: pointer to command details structure or NULL + * + * Use the firmware to write to an Rx control register, + * especially useful if the Rx unit is under heavy pressure + **/ +enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_rx_ctl_reg_read_write *cmd = + (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write); + + cmd->address = CPU_TO_LE32(reg_addr); + cmd->value = CPU_TO_LE32(reg_val); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_write_rx_ctl - write to an Rx control register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: register value + **/ +void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) +{ + enum i40e_status_code status = I40E_SUCCESS; + bool use_register; + int retry = 5; + + use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5); + if (!use_register) { +do_retry: + status = i40e_aq_rx_ctl_write_register(hw, reg_addr, + reg_val, NULL); + if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { + i40e_msec_delay(1); + retry--; + goto do_retry; + } + } + + /* if the AQ access failed, try the old-fashioned way */ + if (status || use_register) + wr32(hw, reg_addr, reg_val); +} +#ifdef VF_DRIVER + +/** + * i40e_aq_send_msg_to_pf + * @hw: pointer to the hardware structure + * @v_opcode: opcodes for VF-PF communication + * @v_retval: return error code + * @msg: pointer to the msg buffer + * @msglen: msg length + * @cmd_details: pointer to command details + * + * Send message to PF driver using admin queue. By default, this message + * is sent asynchronously, i.e. i40e_asq_send_command() does not wait for + * completion before returning. + **/ +enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw, + enum i40e_virtchnl_ops v_opcode, + enum i40e_status_code v_retval, + u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_asq_cmd_details details; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI); + desc.cookie_high = CPU_TO_LE32(v_opcode); + desc.cookie_low = CPU_TO_LE32(v_retval); + if (msglen) { + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF + | I40E_AQ_FLAG_RD)); + if (msglen > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + desc.datalen = CPU_TO_LE16(msglen); + } + if (!cmd_details) { + i40e_memset(&details, 0, sizeof(details), I40E_NONDMA_MEM); + details.async = true; + cmd_details = &details; + } + status = i40e_asq_send_command(hw, (struct i40e_aq_desc *)&desc, msg, + msglen, cmd_details); + return status; +} + +/** + * i40e_vf_parse_hw_config + * @hw: pointer to the hardware structure + * @msg: pointer to the virtual channel VF resource structure + * + * Given a VF resource message from the PF, populate the hw struct + * with appropriate information. 
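+ *
+ * Typical VF-driver flow (editorial sketch; msg is the PF's VF-resource
+ * reply, obtained elsewhere):
+ *
+ *	i40e_vf_parse_hw_config(hw, msg);
+ *
+ * after which hw->dev_caps and hw->mac.addr reflect what the PF granted.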
+ **/ +void i40e_vf_parse_hw_config(struct i40e_hw *hw, + struct i40e_virtchnl_vf_resource *msg) +{ + struct i40e_virtchnl_vsi_resource *vsi_res; + int i; + + vsi_res = &msg->vsi_res[0]; + + hw->dev_caps.num_vsis = msg->num_vsis; + hw->dev_caps.num_rx_qp = msg->num_queue_pairs; + hw->dev_caps.num_tx_qp = msg->num_queue_pairs; + hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; + hw->dev_caps.dcb = msg->vf_offload_flags & + I40E_VIRTCHNL_VF_OFFLOAD_L2; + hw->dev_caps.fcoe = (msg->vf_offload_flags & + I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0; + hw->dev_caps.iwarp = (msg->vf_offload_flags & + I40E_VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0; + for (i = 0; i < msg->num_vsis; i++) { + if (vsi_res->vsi_type == I40E_VSI_SRIOV) { + i40e_memcpy(hw->mac.perm_addr, + vsi_res->default_mac_addr, + I40E_ETH_LENGTH_OF_ADDRESS, + I40E_NONDMA_TO_NONDMA); + i40e_memcpy(hw->mac.addr, vsi_res->default_mac_addr, + I40E_ETH_LENGTH_OF_ADDRESS, + I40E_NONDMA_TO_NONDMA); + } + vsi_res++; + } +} + +/** + * i40e_vf_reset + * @hw: pointer to the hardware structure + * + * Send a VF_RESET message to the PF. Does not wait for response from PF + * as none will be forthcoming. Immediately after calling this function, + * the admin queue should be shut down and (optionally) reinitialized. + **/ +enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw) +{ + return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF, + I40E_SUCCESS, NULL, 0, NULL); +} +#endif /* VF_DRIVER */ +#ifdef X722_SUPPORT + +/** + * i40e_aq_set_arp_proxy_config + * @hw: pointer to the HW structure + * @proxy_config - pointer to proxy config command table struct + * @cmd_details: pointer to command details + * + * Set ARP offload parameters from pre-populated + * i40e_aqc_arp_proxy_data struct + **/ +enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw, + struct i40e_aqc_arp_proxy_data *proxy_config, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + enum i40e_status_code status; + + if (!proxy_config) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_proxy_config); + + desc.params.external.addr_high = + CPU_TO_LE32(I40E_HI_DWORD((u64)proxy_config)); + desc.params.external.addr_low = + CPU_TO_LE32(I40E_LO_DWORD((u64)proxy_config)); + + status = i40e_asq_send_command(hw, &desc, proxy_config, + sizeof(struct i40e_aqc_arp_proxy_data), + cmd_details); + + return status; +} + +/** + * i40e_aq_opc_set_ns_proxy_table_entry + * @hw: pointer to the HW structure + * @ns_proxy_table_entry: pointer to NS table entry command struct + * @cmd_details: pointer to command details + * + * Set IPv6 Neighbor Solicitation (NS) protocol offload parameters + * from pre-populated i40e_aqc_ns_proxy_data struct + **/ +enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw, + struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + enum i40e_status_code status; + + if (!ns_proxy_table_entry) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_ns_proxy_table_entry); + + desc.params.external.addr_high = + CPU_TO_LE32(I40E_HI_DWORD((u64)ns_proxy_table_entry)); + desc.params.external.addr_low = + CPU_TO_LE32(I40E_LO_DWORD((u64)ns_proxy_table_entry)); + + status = i40e_asq_send_command(hw, &desc, ns_proxy_table_entry, + sizeof(struct i40e_aqc_ns_proxy_data), + cmd_details); + + return status; +} + +/** + * i40e_aq_set_clear_wol_filter + * @hw: pointer to the hw struct + * 
@filter_index: index of filter to modify (0-7) + * @filter: buffer containing filter to be set + * @set_filter: true to set filter, false to clear filter + * @no_wol_tco: if true, pass through packets cannot cause wake-up + * if false, pass through packets may cause wake-up + * @filter_valid: true if filter action is valid + * @no_wol_tco_valid: true if no WoL in TCO traffic action valid + * @cmd_details: pointer to command details structure or NULL + * + * Set or clear WoL filter for port attached to the PF + **/ +enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw, + u8 filter_index, + struct i40e_aqc_set_wol_filter_data *filter, + bool set_filter, bool no_wol_tco, + bool filter_valid, bool no_wol_tco_valid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_wol_filter *cmd = + (struct i40e_aqc_set_wol_filter *)&desc.params.raw; + enum i40e_status_code status; + u16 cmd_flags = 0; + u16 valid_flags = 0; + u16 buff_len = 0; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_wol_filter); + + if (filter_index >= I40E_AQC_MAX_NUM_WOL_FILTERS) + return I40E_ERR_PARAM; + cmd->filter_index = CPU_TO_LE16(filter_index); + + if (set_filter) { + if (!filter) + return I40E_ERR_PARAM; + cmd_flags |= I40E_AQC_SET_WOL_FILTER; + buff_len = sizeof(*filter); + } + if (no_wol_tco) + cmd_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL; + cmd->cmd_flags = CPU_TO_LE16(cmd_flags); + + if (filter_valid) + valid_flags |= I40E_AQC_SET_WOL_FILTER_ACTION_VALID; + if (no_wol_tco_valid) + valid_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID; + cmd->valid_flags = CPU_TO_LE16(valid_flags); + + cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)filter)); + cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)filter)); + + status = i40e_asq_send_command(hw, &desc, filter, + buff_len, cmd_details); + + return status; +} + +/** + * i40e_aq_get_wake_event_reason + * @hw: pointer to the hw struct + * @wake_reason: return value, index of matching filter + * @cmd_details: pointer to command details structure or NULL + * + * Get information for the reason of a Wake Up event + **/ +enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw, + u16 *wake_reason, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_get_wake_reason_completion *resp = + (struct i40e_aqc_get_wake_reason_completion *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_wake_reason); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (status == I40E_SUCCESS) + *wake_reason = LE16_TO_CPU(resp->wake_reason); + + return status; +} + +#endif /* X722_SUPPORT */ diff --git a/drivers/net/i40e/base/i40e_dcb.c b/drivers/net/i40e/base/i40e_dcb.c new file mode 100644 index 00000000..26c344fd --- /dev/null +++ b/drivers/net/i40e/base/i40e_dcb.c @@ -0,0 +1,1305 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#include "i40e_adminq.h" +#include "i40e_prototype.h" +#include "i40e_dcb.h" + +/** + * i40e_get_dcbx_status + * @hw: pointer to the hw struct + * @status: Embedded DCBX Engine Status + * + * Get the DCBX status from the Firmware + **/ +enum i40e_status_code i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status) +{ + u32 reg; + + if (!status) + return I40E_ERR_PARAM; + + reg = rd32(hw, I40E_PRTDCB_GENS); + *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >> + I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT); + + return I40E_SUCCESS; +} + +/** + * i40e_parse_ieee_etscfg_tlv + * @tlv: IEEE 802.1Qaz ETS CFG TLV + * @dcbcfg: Local store to update ETS CFG data + * + * Parses IEEE 802.1Qaz ETS CFG TLV + **/ +static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + struct i40e_dcb_ets_config *etscfg; + u8 *buf = tlv->tlvinfo; + u16 offset = 0; + u8 priority; + int i; + + /* First Octet post subtype + * -------------------------- + * |will-|CBS | Re- | Max | + * |ing | |served| TCs | + * -------------------------- + * |1bit | 1bit|3 bits|3bits| + */ + etscfg = &dcbcfg->etscfg; + etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >> + I40E_IEEE_ETS_WILLING_SHIFT); + etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >> + I40E_IEEE_ETS_CBS_SHIFT); + etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >> + I40E_IEEE_ETS_MAXTC_SHIFT); + + /* Move offset to Priority Assignment Table */ + offset++; + + /* Priority Assignment Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >> + I40E_IEEE_ETS_PRIO_1_SHIFT); + etscfg->prioritytable[i * 2] = priority; + priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >> + I40E_IEEE_ETS_PRIO_0_SHIFT); + etscfg->prioritytable[i * 2 + 1] = priority; + offset++; + } + + /* TC Bandwidth Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * 
--------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + etscfg->tcbwtable[i] = buf[offset++]; + + /* TSA Assignment Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + etscfg->tsatable[i] = buf[offset++]; +} + +/** + * i40e_parse_ieee_etsrec_tlv + * @tlv: IEEE 802.1Qaz ETS REC TLV + * @dcbcfg: Local store to update ETS REC data + * + * Parses IEEE 802.1Qaz ETS REC TLV + **/ +static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + u16 offset = 0; + u8 priority; + int i; + + /* Move offset to priority table */ + offset++; + + /* Priority Assignment Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >> + I40E_IEEE_ETS_PRIO_1_SHIFT); + dcbcfg->etsrec.prioritytable[i*2] = priority; + priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >> + I40E_IEEE_ETS_PRIO_0_SHIFT); + dcbcfg->etsrec.prioritytable[i*2 + 1] = priority; + offset++; + } + + /* TC Bandwidth Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etsrec.tcbwtable[i] = buf[offset++]; + + /* TSA Assignment Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etsrec.tsatable[i] = buf[offset++]; +} + +/** + * i40e_parse_ieee_pfccfg_tlv + * @tlv: IEEE 802.1Qaz PFC CFG TLV + * @dcbcfg: Local store to update PFC CFG data + * + * Parses IEEE 802.1Qaz PFC CFG TLV + **/ +static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + + /* ---------------------------------------- + * |will-|MBC | Re- | PFC | PFC Enable | + * |ing | |served| cap | | + * ----------------------------------------- + * |1bit | 1bit|2 bits|4bits| 1 octet | + */ + dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >> + I40E_IEEE_PFC_WILLING_SHIFT); + dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >> + I40E_IEEE_PFC_MBC_SHIFT); + dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >> + I40E_IEEE_PFC_CAP_SHIFT); + dcbcfg->pfc.pfcenable = buf[1]; +} + +/** + * i40e_parse_ieee_app_tlv + * @tlv: IEEE 802.1Qaz APP TLV + * @dcbcfg: Local store to update APP PRIO data + * + * Parses IEEE 802.1Qaz APP PRIO TLV + **/ +static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u16 typelength; + u16 offset = 0; + u16 length; + int i = 0; + u8 *buf; + + typelength = I40E_NTOHS(tlv->typelength); + length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + buf = tlv->tlvinfo; + + /* The App priority table starts 5 octets after TLV header */ + length -= (sizeof(tlv->ouisubtype) + 1); + + /* Move offset to App Priority Table */ + offset++; + + /* 
Application Priority Table (3 octets) + * Octets:| 1 | 2 | 3 | + * ----------------------------------------- + * |Priority|Rsrvd| Sel | Protocol ID | + * ----------------------------------------- + * Bits:|23 21|20 19|18 16|15 0| + * ----------------------------------------- + */ + while (offset < length) { + dcbcfg->app[i].priority = (u8)((buf[offset] & + I40E_IEEE_APP_PRIO_MASK) >> + I40E_IEEE_APP_PRIO_SHIFT); + dcbcfg->app[i].selector = (u8)((buf[offset] & + I40E_IEEE_APP_SEL_MASK) >> + I40E_IEEE_APP_SEL_SHIFT); + dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) | + buf[offset + 2]; + /* Move to next app */ + offset += 3; + i++; + if (i >= I40E_DCBX_MAX_APPS) + break; + } + + dcbcfg->numapps = i; +} + +/** + * i40e_parse_ieee_etsrec_tlv + * @tlv: IEEE 802.1Qaz TLV + * @dcbcfg: Local store to update ETS REC data + * + * Get the TLV subtype and send it to parsing function + * based on the subtype value + **/ +static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u32 ouisubtype; + u8 subtype; + + ouisubtype = I40E_NTOHL(tlv->ouisubtype); + subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >> + I40E_LLDP_TLV_SUBTYPE_SHIFT); + switch (subtype) { + case I40E_IEEE_SUBTYPE_ETS_CFG: + i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_SUBTYPE_ETS_REC: + i40e_parse_ieee_etsrec_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_SUBTYPE_PFC_CFG: + i40e_parse_ieee_pfccfg_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_SUBTYPE_APP_PRI: + i40e_parse_ieee_app_tlv(tlv, dcbcfg); + break; + default: + break; + } +} + +/** + * i40e_parse_cee_pgcfg_tlv + * @tlv: CEE DCBX PG CFG TLV + * @dcbcfg: Local store to update ETS CFG data + * + * Parses CEE DCBX PG CFG TLV + **/ +static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + struct i40e_dcb_ets_config *etscfg; + u8 *buf = tlv->tlvinfo; + u16 offset = 0; + u8 priority; + int i; + + etscfg = &dcbcfg->etscfg; + + if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK) + etscfg->willing = 1; + + etscfg->cbs = 0; + /* Priority Group Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_1_MASK) >> + I40E_CEE_PGID_PRIO_1_SHIFT); + etscfg->prioritytable[i * 2] = priority; + priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_0_MASK) >> + I40E_CEE_PGID_PRIO_0_SHIFT); + etscfg->prioritytable[i * 2 + 1] = priority; + offset++; + } + + /* PG Percentage Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + etscfg->tcbwtable[i] = buf[offset++]; + + /* Number of TCs supported (1 octet) */ + etscfg->maxtcs = buf[offset]; +} + +/** + * i40e_parse_cee_pfccfg_tlv + * @tlv: CEE DCBX PFC CFG TLV + * @dcbcfg: Local store to update PFC CFG data + * + * Parses CEE DCBX PFC CFG TLV + **/ +static void i40e_parse_cee_pfccfg_tlv(struct i40e_cee_feat_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + + if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK) + dcbcfg->pfc.willing = 1; + + /* ------------------------ + * | PFC Enable | PFC TCs | + * ------------------------ + * | 1 
octet | 1 octet | + */ + dcbcfg->pfc.pfcenable = buf[0]; + dcbcfg->pfc.pfccap = buf[1]; +} + +/** + * i40e_parse_cee_app_tlv + * @tlv: CEE DCBX APP TLV + * @dcbcfg: Local store to update APP PRIO data + * + * Parses CEE DCBX APP PRIO TLV + **/ +static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u16 length, typelength, offset = 0; + struct i40e_cee_app_prio *app; + u8 i; + + typelength = I40E_NTOHS(tlv->hdr.typelen); + length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + + dcbcfg->numapps = length / sizeof(*app); + if (!dcbcfg->numapps) + return; + + for (i = 0; i < dcbcfg->numapps; i++) { + u8 up, selector; + + app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset); + for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) { + if (app->prio_map & BIT(up)) + break; + } + dcbcfg->app[i].priority = up; + + /* Get Selector from lower 2 bits, and convert to IEEE */ + selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK); + switch (selector) { + case I40E_CEE_APP_SEL_ETHTYPE: + dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; + break; + case I40E_CEE_APP_SEL_TCPIP: + dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP; + break; + default: + /* Keep selector as it is for unknown types */ + dcbcfg->app[i].selector = selector; + } + + dcbcfg->app[i].protocolid = I40E_NTOHS(app->protocol); + /* Move to next app */ + offset += sizeof(*app); + } +} + +/** + * i40e_parse_cee_tlv + * @tlv: CEE DCBX TLV + * @dcbcfg: Local store to update DCBX config data + * + * Get the TLV subtype and send it to parsing function + * based on the subtype value + **/ +static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u16 len, tlvlen, sublen, typelength; + struct i40e_cee_feat_tlv *sub_tlv; + u8 subtype, feat_tlv_count = 0; + u32 ouisubtype; + + ouisubtype = I40E_NTOHL(tlv->ouisubtype); + subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >> + I40E_LLDP_TLV_SUBTYPE_SHIFT); + /* Return if not CEE DCBX */ + if (subtype != I40E_CEE_DCBX_TYPE) + return; + + typelength = I40E_NTOHS(tlv->typelength); + tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + len = sizeof(tlv->typelength) + sizeof(ouisubtype) + + sizeof(struct i40e_cee_ctrl_tlv); + /* Return if no CEE DCBX Feature TLVs */ + if (tlvlen <= len) + return; + + sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len); + while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) { + typelength = I40E_NTOHS(sub_tlv->hdr.typelen); + sublen = (u16)((typelength & + I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >> + I40E_LLDP_TLV_TYPE_SHIFT); + switch (subtype) { + case I40E_CEE_SUBTYPE_PG_CFG: + i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg); + break; + case I40E_CEE_SUBTYPE_PFC_CFG: + i40e_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg); + break; + case I40E_CEE_SUBTYPE_APP_PRI: + i40e_parse_cee_app_tlv(sub_tlv, dcbcfg); + break; + default: + return; /* Invalid Sub-type return */ + } + feat_tlv_count++; + /* Move to next sub TLV */ + sub_tlv = (struct i40e_cee_feat_tlv *)((char *)sub_tlv + + sizeof(sub_tlv->hdr.typelen) + + sublen); + } +} + +/** + * i40e_parse_org_tlv + * @tlv: Organization specific TLV + * @dcbcfg: Local store to update ETS REC data + * + * Currently only IEEE 802.1Qaz TLV is supported, all others + * will be returned + **/ +static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u32 ouisubtype; + u32 
oui; + + ouisubtype = I40E_NTOHL(tlv->ouisubtype); + oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >> + I40E_LLDP_TLV_OUI_SHIFT); + switch (oui) { + case I40E_IEEE_8021QAZ_OUI: + i40e_parse_ieee_tlv(tlv, dcbcfg); + break; + case I40E_CEE_DCBX_OUI: + i40e_parse_cee_tlv(tlv, dcbcfg); + break; + default: + break; + } +} + +/** + * i40e_lldp_to_dcb_config + * @lldpmib: LLDPDU to be parsed + * @dcbcfg: store for LLDPDU data + * + * Parse DCB configuration from the LLDPDU + **/ +enum i40e_status_code i40e_lldp_to_dcb_config(u8 *lldpmib, + struct i40e_dcbx_config *dcbcfg) +{ + enum i40e_status_code ret = I40E_SUCCESS; + struct i40e_lldp_org_tlv *tlv; + u16 type; + u16 length; + u16 typelength; + u16 offset = 0; + + if (!lldpmib || !dcbcfg) + return I40E_ERR_PARAM; + + /* set to the start of LLDPDU */ + lldpmib += I40E_LLDP_MIB_HLEN; + tlv = (struct i40e_lldp_org_tlv *)lldpmib; + while (1) { + typelength = I40E_NTOHS(tlv->typelength); + type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >> + I40E_LLDP_TLV_TYPE_SHIFT); + length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + offset += sizeof(typelength) + length; + + /* END TLV or beyond LLDPDU size */ + if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE)) + break; + + switch (type) { + case I40E_TLV_TYPE_ORG: + i40e_parse_org_tlv(tlv, dcbcfg); + break; + default: + break; + } + + /* Move to next TLV */ + tlv = (struct i40e_lldp_org_tlv *)((char *)tlv + + sizeof(tlv->typelength) + + length); + } + + return ret; +} + +/** + * i40e_aq_get_dcb_config + * @hw: pointer to the hw struct + * @mib_type: mib type for the query + * @bridgetype: bridge type for the query (remote) + * @dcbcfg: store for LLDPDU data + * + * Query DCB configuration from the Firmware + **/ +enum i40e_status_code i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, + u8 bridgetype, + struct i40e_dcbx_config *dcbcfg) +{ + enum i40e_status_code ret = I40E_SUCCESS; + struct i40e_virt_mem mem; + u8 *lldpmib; + + /* Allocate the LLDPDU */ + ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE); + if (ret) + return ret; + + lldpmib = (u8 *)mem.va; + ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type, + (void *)lldpmib, I40E_LLDPDU_SIZE, + NULL, NULL, NULL); + if (ret) + goto free_mem; + + /* Parse LLDP MIB to get dcb configuration */ + ret = i40e_lldp_to_dcb_config(lldpmib, dcbcfg); + +free_mem: + i40e_free_virt_mem(hw, &mem); + return ret; +} + +/** + * i40e_cee_to_dcb_v1_config + * @cee_cfg: pointer to CEE v1 response configuration struct + * @dcbcfg: DCB configuration struct + * + * Convert CEE v1 configuration from firmware to DCB configuration + **/ +static void i40e_cee_to_dcb_v1_config( + struct i40e_aqc_get_cee_dcb_cfg_v1_resp *cee_cfg, + struct i40e_dcbx_config *dcbcfg) +{ + u16 status, tlv_status = LE16_TO_CPU(cee_cfg->tlv_status); + u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio); + u8 i, tc, err; + + /* CEE PG data to ETS config */ + dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; + + /* Note that the FW creates the oper_prio_tc nibbles reversed + * from those in the CEE Priority Group sub-TLV. 
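+ * As an illustrative example (value assumed, not taken from hardware):
+ * if oper_prio_tc[0] were 0x21, the loop below would take the low
+ * nibble (0x1) as the TC for user priority 0 and the high nibble (0x2)
+ * as the TC for user priority 1, i.e. prioritytable[0] = 1 and
+ * prioritytable[1] = 2.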
+ */ + for (i = 0; i < 4; i++) { + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_0_MASK) >> + I40E_CEE_PGID_PRIO_0_SHIFT); + dcbcfg->etscfg.prioritytable[i*2] = tc; + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_1_MASK) >> + I40E_CEE_PGID_PRIO_1_SHIFT); + dcbcfg->etscfg.prioritytable[i*2 + 1] = tc; + } + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i]; + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) { + /* Map it to next empty TC */ + dcbcfg->etscfg.prioritytable[i] = + cee_cfg->oper_num_tc - 1; + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT; + } else { + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS; + } + } + + /* CEE PFC data to ETS config */ + dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en; + dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; + + status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >> + I40E_AQC_CEE_APP_STATUS_SHIFT; + err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; + /* Add APPs if Error is False */ + if (!err) { + /* CEE operating configuration supports FCoE/iSCSI/FIP only */ + dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS; + + /* FCoE APP */ + dcbcfg->app[0].priority = + (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >> + I40E_AQC_CEE_APP_FCOE_SHIFT; + dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE; + + /* iSCSI APP */ + dcbcfg->app[1].priority = + (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >> + I40E_AQC_CEE_APP_ISCSI_SHIFT; + dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP; + dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI; + + /* FIP APP */ + dcbcfg->app[2].priority = + (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >> + I40E_AQC_CEE_APP_FIP_SHIFT; + dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP; + } +} + +/** + * i40e_cee_to_dcb_config + * @cee_cfg: pointer to CEE configuration struct + * @dcbcfg: DCB configuration struct + * + * Convert CEE configuration from firmware to DCB configuration + **/ +static void i40e_cee_to_dcb_config( + struct i40e_aqc_get_cee_dcb_cfg_resp *cee_cfg, + struct i40e_dcbx_config *dcbcfg) +{ + u32 status, tlv_status = LE32_TO_CPU(cee_cfg->tlv_status); + u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio); + u8 i, tc, err, sync, oper; + + /* CEE PG data to ETS config */ + dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; + + /* Note that the FW creates the oper_prio_tc nibbles reversed + * from those in the CEE Priority Group sub-TLV. 
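+ * The priority-to-TC and bandwidth decode below mirrors
+ * i40e_cee_to_dcb_v1_config() above; the difference is in the APP
+ * handling, where per-feature status bits (FCoE/iSCSI/FIP) are read
+ * from the 32-bit tlv_status and an APP entry is added only when the
+ * feature reports sync and oper without error.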
+ */ + for (i = 0; i < 4; i++) { + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_0_MASK) >> + I40E_CEE_PGID_PRIO_0_SHIFT); + dcbcfg->etscfg.prioritytable[i*2] = tc; + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_1_MASK) >> + I40E_CEE_PGID_PRIO_1_SHIFT); + dcbcfg->etscfg.prioritytable[i*2 + 1] = tc; + } + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i]; + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) { + /* Map it to next empty TC */ + dcbcfg->etscfg.prioritytable[i] = + cee_cfg->oper_num_tc - 1; + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT; + } else { + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS; + } + } + + /* CEE PFC data to ETS config */ + dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en; + dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; + + i = 0; + status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >> + I40E_AQC_CEE_FCOE_STATUS_SHIFT; + err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; + sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; + oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0; + /* Add FCoE APP if Error is False and Oper/Sync is True */ + if (!err && sync && oper) { + /* FCoE APP */ + dcbcfg->app[i].priority = + (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >> + I40E_AQC_CEE_APP_FCOE_SHIFT; + dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FCOE; + i++; + } + + status = (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >> + I40E_AQC_CEE_ISCSI_STATUS_SHIFT; + err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; + sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; + oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0; + /* Add iSCSI APP if Error is False and Oper/Sync is True */ + if (!err && sync && oper) { + /* iSCSI APP */ + dcbcfg->app[i].priority = + (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >> + I40E_AQC_CEE_APP_ISCSI_SHIFT; + dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP; + dcbcfg->app[i].protocolid = I40E_APP_PROTOID_ISCSI; + i++; + } + + status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >> + I40E_AQC_CEE_FIP_STATUS_SHIFT; + err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; + sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; + oper = (status & I40E_TLV_STATUS_OPER) ? 
1 : 0; + /* Add FIP APP if Error is False and Oper/Sync is True */ + if (!err && sync && oper) { + /* FIP APP */ + dcbcfg->app[i].priority = + (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >> + I40E_AQC_CEE_APP_FIP_SHIFT; + dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FIP; + i++; + } + dcbcfg->numapps = i; +} + +/** + * i40e_get_ieee_dcb_config + * @hw: pointer to the hw struct + * + * Get IEEE mode DCB configuration from the Firmware + **/ +STATIC enum i40e_status_code i40e_get_ieee_dcb_config(struct i40e_hw *hw) +{ + enum i40e_status_code ret = I40E_SUCCESS; + + /* IEEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE; + /* Get Local DCB Config */ + ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0, + &hw->local_dcbx_config); + if (ret) + goto out; + + /* Get Remote DCB Config */ + ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, + I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, + &hw->remote_dcbx_config); + /* Don't treat ENOENT as an error for Remote MIBs */ + if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) + ret = I40E_SUCCESS; + +out: + return ret; +} + +/** + * i40e_get_dcb_config + * @hw: pointer to the hw struct + * + * Get DCB configuration from the Firmware + **/ +enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw) +{ + enum i40e_status_code ret = I40E_SUCCESS; + struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg; + struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg; + + /* If Firmware version < v4.33 on X710/XL710, IEEE only */ + if ((hw->mac.type == I40E_MAC_XL710) && + (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || + (hw->aq.fw_maj_ver < 4))) + return i40e_get_ieee_dcb_config(hw); + + /* If Firmware version == v4.33 on X710/XL710, use old CEE struct */ + if ((hw->mac.type == I40E_MAC_XL710) && + ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) { + ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg, + sizeof(cee_v1_cfg), NULL); + if (ret == I40E_SUCCESS) { + /* CEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; + hw->local_dcbx_config.tlv_status = + LE16_TO_CPU(cee_v1_cfg.tlv_status); + i40e_cee_to_dcb_v1_config(&cee_v1_cfg, + &hw->local_dcbx_config); + } + } else { + ret = i40e_aq_get_cee_dcb_config(hw, &cee_cfg, + sizeof(cee_cfg), NULL); + if (ret == I40E_SUCCESS) { + /* CEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; + hw->local_dcbx_config.tlv_status = + LE32_TO_CPU(cee_cfg.tlv_status); + i40e_cee_to_dcb_config(&cee_cfg, + &hw->local_dcbx_config); + } + } + + /* CEE mode not enabled try querying IEEE data */ + if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) + return i40e_get_ieee_dcb_config(hw); + + if (ret != I40E_SUCCESS) + goto out; + + /* Get CEE DCB Desired Config */ + ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0, + &hw->desired_dcbx_config); + if (ret) + goto out; + + /* Get Remote DCB Config */ + ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, + I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, + &hw->remote_dcbx_config); + /* Don't treat ENOENT as an error for Remote MIBs */ + if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) + ret = I40E_SUCCESS; + +out: + return ret; +} + +/** + * i40e_init_dcb + * @hw: pointer to the hw struct + * + * Update DCB configuration from the Firmware + **/ +enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw) +{ + enum i40e_status_code ret = I40E_SUCCESS; + struct i40e_lldp_variables lldp_cfg; + u8 adminstatus = 0; + + if (!hw->func_caps.dcb) + return ret; + + /* Read LLDP NVM area */ + ret 
= i40e_read_lldp_cfg(hw, &lldp_cfg); + if (ret) + return ret; + + /* Get the LLDP AdminStatus for the current port */ + adminstatus = lldp_cfg.adminstatus >> (hw->port * 4); + adminstatus &= 0xF; + + /* LLDP agent disabled */ + if (!adminstatus) { + hw->dcbx_status = I40E_DCBX_STATUS_DISABLED; + return ret; + } + + /* Get DCBX status */ + ret = i40e_get_dcbx_status(hw, &hw->dcbx_status); + if (ret) + return ret; + + /* Check the DCBX Status */ + switch (hw->dcbx_status) { + case I40E_DCBX_STATUS_DONE: + case I40E_DCBX_STATUS_IN_PROGRESS: + /* Get current DCBX configuration */ + ret = i40e_get_dcb_config(hw); + if (ret) + return ret; + break; + case I40E_DCBX_STATUS_DISABLED: + return ret; + case I40E_DCBX_STATUS_NOT_STARTED: + case I40E_DCBX_STATUS_MULTIPLE_PEERS: + default: + break; + } + + /* Configure the LLDP MIB change event */ + ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL); + if (ret) + return ret; + + return ret; +} + +/** + * i40e_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format + * @tlv: Fill the ETS config data in IEEE format + * @dcbcfg: Local store which holds the DCB Config + * + * Prepare IEEE 802.1Qaz ETS CFG TLV + **/ +static void i40e_add_ieee_ets_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u8 priority0, priority1, maxtcwilling = 0; + struct i40e_dcb_ets_config *etscfg; + u16 offset = 0, typelength, i; + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + + typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) | + I40E_IEEE_ETS_TLV_LENGTH); + tlv->typelength = I40E_HTONS(typelength); + + ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) | + I40E_IEEE_SUBTYPE_ETS_CFG); + tlv->ouisubtype = I40E_HTONL(ouisubtype); + + /* First Octet post subtype + * -------------------------- + * |will-|CBS | Re- | Max | + * |ing | |served| TCs | + * -------------------------- + * |1bit | 1bit|3 bits|3bits| + */ + etscfg = &dcbcfg->etscfg; + if (etscfg->willing) + maxtcwilling = BIT(I40E_IEEE_ETS_WILLING_SHIFT); + maxtcwilling |= etscfg->maxtcs & I40E_IEEE_ETS_MAXTC_MASK; + buf[offset] = maxtcwilling; + + /* Move offset to Priority Assignment Table */ + offset++; + + /* Priority Assignment Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + priority0 = etscfg->prioritytable[i * 2] & 0xF; + priority1 = etscfg->prioritytable[i * 2 + 1] & 0xF; + buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) | + priority1; + offset++; + } + + /* TC Bandwidth Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + buf[offset++] = etscfg->tcbwtable[i]; + + /* TSA Assignment Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + buf[offset++] = etscfg->tsatable[i]; +} + +/** + * i40e_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format + * @tlv: Fill ETS Recommended TLV in IEEE format + * @dcbcfg: Local store which holds the DCB Config + * + * Prepare IEEE 802.1Qaz ETS REC TLV + **/ +static void i40e_add_ieee_etsrec_tlv(struct 
i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + struct i40e_dcb_ets_config *etsrec; + u16 offset = 0, typelength, i; + u8 priority0, priority1; + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + + typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) | + I40E_IEEE_ETS_TLV_LENGTH); + tlv->typelength = I40E_HTONS(typelength); + + ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) | + I40E_IEEE_SUBTYPE_ETS_REC); + tlv->ouisubtype = I40E_HTONL(ouisubtype); + + etsrec = &dcbcfg->etsrec; + /* First Octet is reserved */ + /* Move offset to Priority Assignment Table */ + offset++; + + /* Priority Assignment Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + priority0 = etsrec->prioritytable[i * 2] & 0xF; + priority1 = etsrec->prioritytable[i * 2 + 1] & 0xF; + buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) | + priority1; + offset++; + } + + /* TC Bandwidth Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + buf[offset++] = etsrec->tcbwtable[i]; + + /* TSA Assignment Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + buf[offset++] = etsrec->tsatable[i]; +} + + /** + * i40e_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format + * @tlv: Fill PFC TLV in IEEE format + * @dcbcfg: Local store to get PFC CFG data + * + * Prepare IEEE 802.1Qaz PFC CFG TLV + **/ +static void i40e_add_ieee_pfc_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + u16 typelength; + + typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) | + I40E_IEEE_PFC_TLV_LENGTH); + tlv->typelength = I40E_HTONS(typelength); + + ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) | + I40E_IEEE_SUBTYPE_PFC_CFG); + tlv->ouisubtype = I40E_HTONL(ouisubtype); + + /* ---------------------------------------- + * |will-|MBC | Re- | PFC | PFC Enable | + * |ing | |served| cap | | + * ----------------------------------------- + * |1bit | 1bit|2 bits|4bits| 1 octet | + */ + if (dcbcfg->pfc.willing) + buf[0] = BIT(I40E_IEEE_PFC_WILLING_SHIFT); + + if (dcbcfg->pfc.mbc) + buf[0] |= BIT(I40E_IEEE_PFC_MBC_SHIFT); + + buf[0] |= dcbcfg->pfc.pfccap & 0xF; + buf[1] = dcbcfg->pfc.pfcenable; +} + +/** + * i40e_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format + * @tlv: Fill APP TLV in IEEE format + * @dcbcfg: Local store to get APP CFG data + * + * Prepare IEEE 802.1Qaz APP CFG TLV + **/ +static void i40e_add_ieee_app_pri_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u16 typelength, length, offset = 0; + u8 priority, selector, i = 0; + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + + /* No APP TLVs then just return */ + if (dcbcfg->numapps == 0) + return; + ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) | + I40E_IEEE_SUBTYPE_APP_PRI); + tlv->ouisubtype = I40E_HTONL(ouisubtype); + + /* Move offset to App Priority Table */ + offset++; + /* Application Priority Table (3 
octets) + * Octets:| 1 | 2 | 3 | + * ----------------------------------------- + * |Priority|Rsrvd| Sel | Protocol ID | + * ----------------------------------------- + * Bits:|23 21|20 19|18 16|15 0| + * ----------------------------------------- + */ + while (i < dcbcfg->numapps) { + priority = dcbcfg->app[i].priority & 0x7; + selector = dcbcfg->app[i].selector & 0x7; + buf[offset] = (priority << I40E_IEEE_APP_PRIO_SHIFT) | selector; + buf[offset + 1] = (dcbcfg->app[i].protocolid >> 0x8) & 0xFF; + buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF; + /* Move to next app */ + offset += 3; + i++; + if (i >= I40E_DCBX_MAX_APPS) + break; + } + /* length includes size of ouisubtype + 1 reserved + 3*numapps */ + length = sizeof(tlv->ouisubtype) + 1 + (i*3); + typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) | + (length & 0x1FF)); + tlv->typelength = I40E_HTONS(typelength); +} + + /** + * i40e_add_dcb_tlv - Add all IEEE TLVs + * @tlv: pointer to org tlv + * + * add tlv information + **/ +static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg, + u16 tlvid) +{ + switch (tlvid) { + case I40E_IEEE_TLV_ID_ETS_CFG: + i40e_add_ieee_ets_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_TLV_ID_ETS_REC: + i40e_add_ieee_etsrec_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_TLV_ID_PFC_CFG: + i40e_add_ieee_pfc_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_TLV_ID_APP_PRI: + i40e_add_ieee_app_pri_tlv(tlv, dcbcfg); + break; + default: + break; + } +} + + /** + * i40e_set_dcb_config - Set the local LLDP MIB to FW + * @hw: pointer to the hw struct + * + * Set DCB configuration to the Firmware + **/ +enum i40e_status_code i40e_set_dcb_config(struct i40e_hw *hw) +{ + enum i40e_status_code ret = I40E_SUCCESS; + struct i40e_dcbx_config *dcbcfg; + struct i40e_virt_mem mem; + u8 mib_type, *lldpmib; + u16 miblen; + + /* update the hw local config */ + dcbcfg = &hw->local_dcbx_config; + /* Allocate the LLDPDU */ + ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE); + if (ret) + return ret; + + mib_type = SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB; + if (dcbcfg->app_mode == I40E_DCBX_APPS_NON_WILLING) { + mib_type |= SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS << + SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT; + } + lldpmib = (u8 *)mem.va; + ret = i40e_dcb_config_to_lldp(lldpmib, &miblen, dcbcfg); + ret = i40e_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen, NULL); + + i40e_free_virt_mem(hw, &mem); + return ret; +} + +/** + * i40e_dcb_config_to_lldp - Convert Dcbconfig to MIB format + * @hw: pointer to the hw struct + * @dcbcfg: store for LLDPDU data + * + * send DCB configuration to FW + **/ +enum i40e_status_code i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen, + struct i40e_dcbx_config *dcbcfg) +{ + u16 length, offset = 0, tlvid = I40E_TLV_ID_START; + enum i40e_status_code ret = I40E_SUCCESS; + struct i40e_lldp_org_tlv *tlv; + u16 typelength; + + tlv = (struct i40e_lldp_org_tlv *)lldpmib; + while (1) { + i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++); + typelength = I40E_NTOHS(tlv->typelength); + length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + if (length) + offset += length + 2; + /* END TLV or beyond LLDPDU size */ + if ((tlvid >= I40E_TLV_ID_END_OF_LLDPPDU) || + (offset > I40E_LLDPDU_SIZE)) + break; + /* Move to next TLV */ + if (length) + tlv = (struct i40e_lldp_org_tlv *)((char *)tlv + + sizeof(tlv->typelength) + length); + } + *miblen = offset; + return ret; +} + + +/** + * i40e_read_lldp_cfg - read LLDP Configuration data from NVM + * @hw: 
pointer to the HW structure + * @lldp_cfg: pointer to hold lldp configuration variables + * + * Reads the LLDP configuration data from NVM + **/ +enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw, + struct i40e_lldp_variables *lldp_cfg) +{ + enum i40e_status_code ret = I40E_SUCCESS; + u32 offset = (2 * I40E_NVM_LLDP_CFG_PTR); + + if (!lldp_cfg) + return I40E_ERR_PARAM; + + ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret != I40E_SUCCESS) + goto err_lldp_cfg; + + ret = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, offset, + sizeof(struct i40e_lldp_variables), + (u8 *)lldp_cfg, + true, NULL); + i40e_release_nvm(hw); + +err_lldp_cfg: + return ret; +} diff --git a/drivers/net/i40e/base/i40e_dcb.h b/drivers/net/i40e/base/i40e_dcb.h new file mode 100644 index 00000000..3b709efd --- /dev/null +++ b/drivers/net/i40e/base/i40e_dcb.h @@ -0,0 +1,223 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#ifndef _I40E_DCB_H_ +#define _I40E_DCB_H_ + +#include "i40e_type.h" + +#define I40E_DCBX_OFFLOAD_DISABLED 0 +#define I40E_DCBX_OFFLOAD_ENABLED 1 + +#define I40E_DCBX_STATUS_NOT_STARTED 0 +#define I40E_DCBX_STATUS_IN_PROGRESS 1 +#define I40E_DCBX_STATUS_DONE 2 +#define I40E_DCBX_STATUS_MULTIPLE_PEERS 3 +#define I40E_DCBX_STATUS_DISABLED 7 + +#define I40E_TLV_TYPE_END 0 +#define I40E_TLV_TYPE_ORG 127 + +#define I40E_IEEE_8021QAZ_OUI 0x0080C2 +#define I40E_IEEE_SUBTYPE_ETS_CFG 9 +#define I40E_IEEE_SUBTYPE_ETS_REC 10 +#define I40E_IEEE_SUBTYPE_PFC_CFG 11 +#define I40E_IEEE_SUBTYPE_APP_PRI 12 + +#define I40E_CEE_DCBX_OUI 0x001b21 +#define I40E_CEE_DCBX_TYPE 2 + +#define I40E_CEE_SUBTYPE_CTRL 1 +#define I40E_CEE_SUBTYPE_PG_CFG 2 +#define I40E_CEE_SUBTYPE_PFC_CFG 3 +#define I40E_CEE_SUBTYPE_APP_PRI 4 + +#define I40E_CEE_MAX_FEAT_TYPE 3 +#define I40E_LLDP_ADMINSTATUS_DISABLED 0 +#define I40E_LLDP_ADMINSTATUS_ENABLED_RX 1 +#define I40E_LLDP_ADMINSTATUS_ENABLED_TX 2 +#define I40E_LLDP_ADMINSTATUS_ENABLED_RXTX 3 + +/* Defines for LLDP TLV header */ +#define I40E_LLDP_MIB_HLEN 14 +#define I40E_LLDP_TLV_LEN_SHIFT 0 +#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT) +#define I40E_LLDP_TLV_TYPE_SHIFT 9 +#define I40E_LLDP_TLV_TYPE_MASK (0x7F << I40E_LLDP_TLV_TYPE_SHIFT) +#define I40E_LLDP_TLV_SUBTYPE_SHIFT 0 +#define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT) +#define I40E_LLDP_TLV_OUI_SHIFT 8 +#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT) + +/* Defines for IEEE ETS TLV */ +#define I40E_IEEE_ETS_MAXTC_SHIFT 0 +#define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT) +#define I40E_IEEE_ETS_CBS_SHIFT 6 +#define I40E_IEEE_ETS_CBS_MASK BIT(I40E_IEEE_ETS_CBS_SHIFT) +#define I40E_IEEE_ETS_WILLING_SHIFT 7 +#define I40E_IEEE_ETS_WILLING_MASK BIT(I40E_IEEE_ETS_WILLING_SHIFT) +#define I40E_IEEE_ETS_PRIO_0_SHIFT 0 +#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT) +#define I40E_IEEE_ETS_PRIO_1_SHIFT 4 +#define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT) +#define I40E_CEE_PGID_PRIO_0_SHIFT 0 +#define I40E_CEE_PGID_PRIO_0_MASK (0xF << I40E_CEE_PGID_PRIO_0_SHIFT) +#define I40E_CEE_PGID_PRIO_1_SHIFT 4 +#define I40E_CEE_PGID_PRIO_1_MASK (0xF << I40E_CEE_PGID_PRIO_1_SHIFT) +#define I40E_CEE_PGID_STRICT 15 + +/* Defines for IEEE TSA types */ +#define I40E_IEEE_TSA_STRICT 0 +#define I40E_IEEE_TSA_CBS 1 +#define I40E_IEEE_TSA_ETS 2 +#define I40E_IEEE_TSA_VENDOR 255 + +/* Defines for IEEE PFC TLV */ +#define I40E_IEEE_PFC_CAP_SHIFT 0 +#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT) +#define I40E_IEEE_PFC_MBC_SHIFT 6 +#define I40E_IEEE_PFC_MBC_MASK BIT(I40E_IEEE_PFC_MBC_SHIFT) +#define I40E_IEEE_PFC_WILLING_SHIFT 7 +#define I40E_IEEE_PFC_WILLING_MASK BIT(I40E_IEEE_PFC_WILLING_SHIFT) + +/* Defines for IEEE APP TLV */ +#define I40E_IEEE_APP_SEL_SHIFT 0 +#define I40E_IEEE_APP_SEL_MASK (0x7 << I40E_IEEE_APP_SEL_SHIFT) +#define I40E_IEEE_APP_PRIO_SHIFT 5 +#define I40E_IEEE_APP_PRIO_MASK (0x7 << I40E_IEEE_APP_PRIO_SHIFT) + +/* TLV definitions for preparing MIB */ +#define I40E_TLV_ID_CHASSIS_ID 0 +#define I40E_TLV_ID_PORT_ID 1 +#define I40E_TLV_ID_TIME_TO_LIVE 2 +#define I40E_IEEE_TLV_ID_ETS_CFG 3 +#define I40E_IEEE_TLV_ID_ETS_REC 4 +#define I40E_IEEE_TLV_ID_PFC_CFG 5 +#define I40E_IEEE_TLV_ID_APP_PRI 6 +#define I40E_TLV_ID_END_OF_LLDPPDU 7 +#define I40E_TLV_ID_START I40E_IEEE_TLV_ID_ETS_CFG + +#define 
I40E_IEEE_ETS_TLV_LENGTH 25 +#define I40E_IEEE_PFC_TLV_LENGTH 6 +#define I40E_IEEE_APP_TLV_LENGTH 11 + +#pragma pack(1) + +/* IEEE 802.1AB LLDP TLV structure */ +struct i40e_lldp_generic_tlv { + __be16 typelength; + u8 tlvinfo[1]; +}; + +/* IEEE 802.1AB LLDP Organization specific TLV */ +struct i40e_lldp_org_tlv { + __be16 typelength; + __be32 ouisubtype; + u8 tlvinfo[1]; +}; + +struct i40e_cee_tlv_hdr { + __be16 typelen; + u8 operver; + u8 maxver; +}; + +struct i40e_cee_ctrl_tlv { + struct i40e_cee_tlv_hdr hdr; + __be32 seqno; + __be32 ackno; +}; + +struct i40e_cee_feat_tlv { + struct i40e_cee_tlv_hdr hdr; + u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */ +#define I40E_CEE_FEAT_TLV_ENABLE_MASK 0x80 +#define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40 +#define I40E_CEE_FEAT_TLV_ERR_MASK 0x20 + u8 subtype; + u8 tlvinfo[1]; +}; + +struct i40e_cee_app_prio { + __be16 protocol; + u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */ +#define I40E_CEE_APP_SELECTOR_MASK 0x03 + __be16 lower_oui; + u8 prio_map; +}; +#pragma pack() + +/* + * TODO: The below structures related LLDP/DCBX variables + * and statistics are defined but need to find how to get + * the required information from the Firmware to use them + */ + +/* IEEE 802.1AB LLDP Agent Statistics */ +struct i40e_lldp_stats { + u64 remtablelastchangetime; + u64 remtableinserts; + u64 remtabledeletes; + u64 remtabledrops; + u64 remtableageouts; + u64 txframestotal; + u64 rxframesdiscarded; + u64 rxportframeerrors; + u64 rxportframestotal; + u64 rxporttlvsdiscardedtotal; + u64 rxporttlvsunrecognizedtotal; + u64 remtoomanyneighbors; +}; + +/* IEEE 802.1Qaz DCBX variables */ +struct i40e_dcbx_variables { + u32 defmaxtrafficclasses; + u32 defprioritytcmapping; + u32 deftcbandwidth; + u32 deftsaassignment; +}; + +enum i40e_status_code i40e_get_dcbx_status(struct i40e_hw *hw, + u16 *status); +enum i40e_status_code i40e_lldp_to_dcb_config(u8 *lldpmib, + struct i40e_dcbx_config *dcbcfg); +enum i40e_status_code i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, + u8 bridgetype, + struct i40e_dcbx_config *dcbcfg); +enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw); +enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw); +enum i40e_status_code i40e_set_dcb_config(struct i40e_hw *hw); +enum i40e_status_code i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen, + struct i40e_dcbx_config *dcbcfg); + +#endif /* _I40E_DCB_H_ */ diff --git a/drivers/net/i40e/base/i40e_devids.h b/drivers/net/i40e/base/i40e_devids.h new file mode 100644 index 00000000..f8443405 --- /dev/null +++ b/drivers/net/i40e/base/i40e_devids.h @@ -0,0 +1,78 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _I40E_DEVIDS_H_ +#define _I40E_DEVIDS_H_ + +/* Vendor ID */ +#define I40E_INTEL_VENDOR_ID 0x8086 + +/* Device IDs */ +#define I40E_DEV_ID_SFP_XL710 0x1572 +#define I40E_DEV_ID_QEMU 0x1574 +#define I40E_DEV_ID_KX_B 0x1580 +#define I40E_DEV_ID_KX_C 0x1581 +#define I40E_DEV_ID_QSFP_A 0x1583 +#define I40E_DEV_ID_QSFP_B 0x1584 +#define I40E_DEV_ID_QSFP_C 0x1585 +#define I40E_DEV_ID_10G_BASE_T 0x1586 +#define I40E_DEV_ID_20G_KR2 0x1587 +#define I40E_DEV_ID_20G_KR2_A 0x1588 +#define I40E_DEV_ID_10G_BASE_T4 0x1589 +#if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT) +#define I40E_DEV_ID_VF 0x154C +#define I40E_DEV_ID_VF_HV 0x1571 +#endif /* VF_DRIVER */ +#ifdef X722_SUPPORT +#ifdef X722_A0_SUPPORT +#define I40E_DEV_ID_X722_A0 0x374C +#if defined(INTEGRATED_VF) || defined(VF_DRIVER) +#define I40E_DEV_ID_X722_A0_VF 0x374D +#endif +#endif +#define I40E_DEV_ID_KX_X722 0x37CE +#define I40E_DEV_ID_QSFP_X722 0x37CF +#define I40E_DEV_ID_SFP_X722 0x37D0 +#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 +#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 +#if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT) +#define I40E_DEV_ID_X722_VF 0x37CD +#define I40E_DEV_ID_X722_VF_HV 0x37D9 +#endif /* VF_DRIVER */ +#endif /* X722_SUPPORT */ + +#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ + (d) == I40E_DEV_ID_QSFP_B || \ + (d) == I40E_DEV_ID_QSFP_C) + +#endif /* _I40E_DEVIDS_H_ */ diff --git a/drivers/net/i40e/base/i40e_diag.c b/drivers/net/i40e/base/i40e_diag.c new file mode 100644 index 00000000..c3c76a0c --- /dev/null +++ b/drivers/net/i40e/base/i40e_diag.c @@ -0,0 +1,175 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#include "i40e_diag.h" +#include "i40e_prototype.h" + +/** + * i40e_diag_set_loopback + * @hw: pointer to the hw struct + * @mode: loopback mode + * + * Set chosen loopback mode + **/ +enum i40e_status_code i40e_diag_set_loopback(struct i40e_hw *hw, + enum i40e_lb_mode mode) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + + if (i40e_aq_set_lb_modes(hw, mode, NULL)) + ret_code = I40E_ERR_DIAG_TEST_FAILED; + + return ret_code; +} + +/** + * i40e_diag_reg_pattern_test + * @hw: pointer to the hw struct + * @reg: reg to be tested + * @mask: bits to be touched + **/ +static enum i40e_status_code i40e_diag_reg_pattern_test(struct i40e_hw *hw, + u32 reg, u32 mask) +{ + const u32 patterns[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + u32 pat, val, orig_val; + int i; + + orig_val = rd32(hw, reg); + for (i = 0; i < ARRAY_SIZE(patterns); i++) { + pat = patterns[i]; + wr32(hw, reg, (pat & mask)); + val = rd32(hw, reg); + if ((val & mask) != (pat & mask)) { + return I40E_ERR_DIAG_TEST_FAILED; + } + } + + wr32(hw, reg, orig_val); + val = rd32(hw, reg); + if (val != orig_val) { + return I40E_ERR_DIAG_TEST_FAILED; + } + + return I40E_SUCCESS; +} + +struct i40e_diag_reg_test_info i40e_reg_list[] = { + /* offset mask elements stride */ + {I40E_QTX_CTL(0), 0x0000FFBF, 1, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)}, + {I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)}, + {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)}, + {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)}, + {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)}, + {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0}, + {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0}, + {I40E_PFINT_LNKLSTN(0), 0x000007FF, 1, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)}, + {I40E_QINT_TQCTL(0), 0x000000FF, 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)}, + {I40E_QINT_RQCTL(0), 0x000000FF, 1, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)}, + {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0}, + { 0 } +}; + +/** + * i40e_diag_reg_test + * @hw: pointer to the hw struct + * + * Perform registers diagnostic test + **/ +enum i40e_status_code i40e_diag_reg_test(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + u32 reg, mask; + u32 i, j; + + for (i = 0; i40e_reg_list[i].offset != 0 && + ret_code == I40E_SUCCESS; i++) { + + /* set actual reg range for dynamically allocated resources */ + if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) && + hw->func_caps.num_tx_qp != 0) + i40e_reg_list[i].elements = hw->func_caps.num_tx_qp; + if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) || + i40e_reg_list[i].offset == 
I40E_PFINT_ITRN(1, 0) || + i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) || + i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) || + i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) && + hw->func_caps.num_msix_vectors != 0) + i40e_reg_list[i].elements = + hw->func_caps.num_msix_vectors - 1; + + /* test register access */ + mask = i40e_reg_list[i].mask; + for (j = 0; j < i40e_reg_list[i].elements && + ret_code == I40E_SUCCESS; j++) { + reg = i40e_reg_list[i].offset + + (j * i40e_reg_list[i].stride); + ret_code = i40e_diag_reg_pattern_test(hw, reg, mask); + } + } + + return ret_code; +} + +/** + * i40e_diag_eeprom_test + * @hw: pointer to the hw struct + * + * Perform EEPROM diagnostic test + **/ +enum i40e_status_code i40e_diag_eeprom_test(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code; + u16 reg_val; + + /* read NVM control word and if NVM valid, validate EEPROM checksum*/ + ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, ®_val); + if ((ret_code == I40E_SUCCESS) && + ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) == + BIT(I40E_SR_CONTROL_WORD_1_SHIFT))) + return i40e_validate_nvm_checksum(hw, NULL); + else + return I40E_ERR_DIAG_TEST_FAILED; +} + +/** + * i40e_diag_fw_alive_test + * @hw: pointer to the hw struct + * + * Perform FW alive diagnostic test + **/ +enum i40e_status_code i40e_diag_fw_alive_test(struct i40e_hw *hw) +{ + UNREFERENCED_1PARAMETER(hw); + return I40E_SUCCESS; +} diff --git a/drivers/net/i40e/base/i40e_diag.h b/drivers/net/i40e/base/i40e_diag.h new file mode 100644 index 00000000..105b1191 --- /dev/null +++ b/drivers/net/i40e/base/i40e_diag.h @@ -0,0 +1,61 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#ifndef _I40E_DIAG_H_ +#define _I40E_DIAG_H_ + +#include "i40e_type.h" + +enum i40e_lb_mode { + I40E_LB_MODE_NONE = 0x0, + I40E_LB_MODE_PHY_LOCAL = I40E_AQ_LB_PHY_LOCAL, + I40E_LB_MODE_PHY_REMOTE = I40E_AQ_LB_PHY_REMOTE, + I40E_LB_MODE_MAC_LOCAL = I40E_AQ_LB_MAC_LOCAL, +}; + +struct i40e_diag_reg_test_info { + u32 offset; /* the base register */ + u32 mask; /* bits that can be tested */ + u32 elements; /* number of elements if array */ + u32 stride; /* bytes between each element */ +}; + +extern struct i40e_diag_reg_test_info i40e_reg_list[]; + +enum i40e_status_code i40e_diag_set_loopback(struct i40e_hw *hw, + enum i40e_lb_mode mode); +enum i40e_status_code i40e_diag_fw_alive_test(struct i40e_hw *hw); +enum i40e_status_code i40e_diag_reg_test(struct i40e_hw *hw); +enum i40e_status_code i40e_diag_eeprom_test(struct i40e_hw *hw); + +#endif /* _I40E_DIAG_H_ */ diff --git a/drivers/net/i40e/base/i40e_hmc.c b/drivers/net/i40e/base/i40e_hmc.c new file mode 100644 index 00000000..75d38412 --- /dev/null +++ b/drivers/net/i40e/base/i40e_hmc.c @@ -0,0 +1,370 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#include "i40e_osdep.h" +#include "i40e_register.h" +#include "i40e_status.h" +#include "i40e_alloc.h" +#include "i40e_hmc.h" +#include "i40e_type.h" + +/** + * i40e_add_sd_table_entry - Adds a segment descriptor to the table + * @hw: pointer to our hw struct + * @hmc_info: pointer to the HMC configuration information struct + * @sd_index: segment descriptor index to manipulate + * @type: what type of segment descriptor we're manipulating + * @direct_mode_sz: size to alloc in direct mode + **/ +enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 sd_index, + enum i40e_sd_entry_type type, + u64 direct_mode_sz) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + struct i40e_hmc_sd_entry *sd_entry; + enum i40e_memory_type mem_type; + bool dma_mem_alloc_done = false; + struct i40e_dma_mem mem; + u64 alloc_len; + + if (NULL == hmc_info->sd_table.sd_entry) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_add_sd_table_entry: bad sd_entry\n"); + goto exit; + } + + if (sd_index >= hmc_info->sd_table.sd_cnt) { + ret_code = I40E_ERR_INVALID_SD_INDEX; + DEBUGOUT("i40e_add_sd_table_entry: bad sd_index\n"); + goto exit; + } + + sd_entry = &hmc_info->sd_table.sd_entry[sd_index]; + if (!sd_entry->valid) { + if (I40E_SD_TYPE_PAGED == type) { + mem_type = i40e_mem_pd; + alloc_len = I40E_HMC_PAGED_BP_SIZE; + } else { + mem_type = i40e_mem_bp_jumbo; + alloc_len = direct_mode_sz; + } + + /* allocate a 4K pd page or 2M backing page */ + ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len, + I40E_HMC_PD_BP_BUF_ALIGNMENT); + if (ret_code) + goto exit; + dma_mem_alloc_done = true; + if (I40E_SD_TYPE_PAGED == type) { + ret_code = i40e_allocate_virt_mem(hw, + &sd_entry->u.pd_table.pd_entry_virt_mem, + sizeof(struct i40e_hmc_pd_entry) * 512); + if (ret_code) + goto exit; + sd_entry->u.pd_table.pd_entry = + (struct i40e_hmc_pd_entry *) + sd_entry->u.pd_table.pd_entry_virt_mem.va; + i40e_memcpy(&sd_entry->u.pd_table.pd_page_addr, + &mem, sizeof(struct i40e_dma_mem), + I40E_NONDMA_TO_NONDMA); + } else { + i40e_memcpy(&sd_entry->u.bp.addr, + &mem, sizeof(struct i40e_dma_mem), + I40E_NONDMA_TO_NONDMA); + sd_entry->u.bp.sd_pd_index = sd_index; + } + /* initialize the sd entry */ + hmc_info->sd_table.sd_entry[sd_index].entry_type = type; + + /* increment the ref count */ + I40E_INC_SD_REFCNT(&hmc_info->sd_table); + } + /* Increment backing page reference count */ + if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type) + I40E_INC_BP_REFCNT(&sd_entry->u.bp); +exit: + if (I40E_SUCCESS != ret_code) + if (dma_mem_alloc_done) + i40e_free_dma_mem(hw, &mem); + + return ret_code; +} + +/** + * i40e_add_pd_table_entry - Adds page descriptor to the specified table + * @hw: pointer to our HW structure + * @hmc_info: pointer to the HMC configuration information structure + * @pd_index: which page descriptor index to manipulate + * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one. + * + * This function: + * 1. Initializes the pd entry + * 2. Adds pd_entry in the pd_table + * 3. Mark the entry valid in i40e_hmc_pd_entry structure + * 4. Initializes the pd_entry's ref count to 1 + * assumptions: + * 1. The memory for pd should be pinned down, physically contiguous and + * aligned on 4K boundary and zeroed memory. + * 2. It should be 4K in size. 
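+ * Note (informative): the SD/PD split computed below is
+ * pd_index / I40E_HMC_PD_CNT_IN_SD and pd_index % I40E_HMC_PD_CNT_IN_SD,
+ * so with 512 PDs per SD a pd_index of, say, 1030 lands in sd_idx 2 at
+ * rel_pd_idx 6.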
+ **/ +enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 pd_index, + struct i40e_dma_mem *rsrc_pg) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + struct i40e_hmc_pd_table *pd_table; + struct i40e_hmc_pd_entry *pd_entry; + struct i40e_dma_mem mem; + struct i40e_dma_mem *page = &mem; + u32 sd_idx, rel_pd_idx; + u64 *pd_addr; + u64 page_desc; + + if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) { + ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX; + DEBUGOUT("i40e_add_pd_table_entry: bad pd_index\n"); + goto exit; + } + + /* find corresponding sd */ + sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD); + if (I40E_SD_TYPE_PAGED != + hmc_info->sd_table.sd_entry[sd_idx].entry_type) + goto exit; + + rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD); + pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; + pd_entry = &pd_table->pd_entry[rel_pd_idx]; + if (!pd_entry->valid) { + if (rsrc_pg) { + pd_entry->rsrc_pg = true; + page = rsrc_pg; + } else { + /* allocate a 4K backing page */ + ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp, + I40E_HMC_PAGED_BP_SIZE, + I40E_HMC_PD_BP_BUF_ALIGNMENT); + if (ret_code) + goto exit; + pd_entry->rsrc_pg = false; + } + + i40e_memcpy(&pd_entry->bp.addr, page, + sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA); + pd_entry->bp.sd_pd_index = pd_index; + pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED; + /* Set page address and valid bit */ + page_desc = page->pa | 0x1; + + pd_addr = (u64 *)pd_table->pd_page_addr.va; + pd_addr += rel_pd_idx; + + /* Add the backing page physical address in the pd entry */ + i40e_memcpy(pd_addr, &page_desc, sizeof(u64), + I40E_NONDMA_TO_DMA); + + pd_entry->sd_index = sd_idx; + pd_entry->valid = true; + I40E_INC_PD_REFCNT(pd_table); + } + I40E_INC_BP_REFCNT(&pd_entry->bp); +exit: + return ret_code; +} + +/** + * i40e_remove_pd_bp - remove a backing page from a page descriptor + * @hw: pointer to our HW structure + * @hmc_info: pointer to the HMC configuration information structure + * @idx: the page index + * @is_pf: distinguishes a VF from a PF + * + * This function: + * 1. Marks the entry in pd tabe (for paged address mode) or in sd table + * (for direct address mode) invalid. + * 2. Write to register PMPDINV to invalidate the backing page in FV cache + * 3. Decrement the ref count for the pd _entry + * assumptions: + * 1. Caller can deallocate the memory used by backing storage after this + * function returns. 
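+ * Note (from the code below): the PD entry is only invalidated, and its
+ * backing page freed (unless it was a caller-supplied rsrc page), once
+ * the entry's ref count drops to zero; the PD table's pd_entry_virt_mem
+ * is freed only after the table's own ref count reaches zero.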
+ **/ +enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + struct i40e_hmc_pd_entry *pd_entry; + struct i40e_hmc_pd_table *pd_table; + struct i40e_hmc_sd_entry *sd_entry; + u32 sd_idx, rel_pd_idx; + u64 *pd_addr; + + /* calculate index */ + sd_idx = idx / I40E_HMC_PD_CNT_IN_SD; + rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD; + if (sd_idx >= hmc_info->sd_table.sd_cnt) { + ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX; + DEBUGOUT("i40e_remove_pd_bp: bad idx\n"); + goto exit; + } + sd_entry = &hmc_info->sd_table.sd_entry[sd_idx]; + if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) { + ret_code = I40E_ERR_INVALID_SD_TYPE; + DEBUGOUT("i40e_remove_pd_bp: wrong sd_entry type\n"); + goto exit; + } + /* get the entry and decrease its ref counter */ + pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; + pd_entry = &pd_table->pd_entry[rel_pd_idx]; + I40E_DEC_BP_REFCNT(&pd_entry->bp); + if (pd_entry->bp.ref_cnt) + goto exit; + + /* mark the entry invalid */ + pd_entry->valid = false; + I40E_DEC_PD_REFCNT(pd_table); + pd_addr = (u64 *)pd_table->pd_page_addr.va; + pd_addr += rel_pd_idx; + i40e_memset(pd_addr, 0, sizeof(u64), I40E_DMA_MEM); + I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx); + + /* free memory here */ + if (!pd_entry->rsrc_pg) + ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr)); + if (I40E_SUCCESS != ret_code) + goto exit; + if (!pd_table->ref_cnt) + i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem); +exit: + return ret_code; +} + +/** + * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry + * @hmc_info: pointer to the HMC configuration information structure + * @idx: the page index + **/ +enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, + u32 idx) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + struct i40e_hmc_sd_entry *sd_entry; + + /* get the entry and decrease its ref counter */ + sd_entry = &hmc_info->sd_table.sd_entry[idx]; + I40E_DEC_BP_REFCNT(&sd_entry->u.bp); + if (sd_entry->u.bp.ref_cnt) { + ret_code = I40E_ERR_NOT_READY; + goto exit; + } + I40E_DEC_SD_REFCNT(&hmc_info->sd_table); + + /* mark the entry invalid */ + sd_entry->valid = false; +exit: + return ret_code; +} + +/** + * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor + * @hw: pointer to our hw struct + * @hmc_info: pointer to the HMC configuration information structure + * @idx: the page index + * @is_pf: used to distinguish between VF and PF + **/ +enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf) +{ + struct i40e_hmc_sd_entry *sd_entry; + + if (!is_pf) + return I40E_NOT_SUPPORTED; + + /* get the entry and decrease its ref counter */ + sd_entry = &hmc_info->sd_table.sd_entry[idx]; + I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT); + + return i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr)); +} + +/** + * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry. 
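+ * (Counterpart of i40e_prep_remove_sd_bp() above, but gated on the PD
+ * table's ref count; the actual teardown is expected to be done by a
+ * follow-up call such as i40e_remove_pd_page_new() below.)
+ *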
+ * @hmc_info: pointer to the HMC configuration information structure + * @idx: segment descriptor index to find the relevant page descriptor + **/ +enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, + u32 idx) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + struct i40e_hmc_sd_entry *sd_entry; + + sd_entry = &hmc_info->sd_table.sd_entry[idx]; + + if (sd_entry->u.pd_table.ref_cnt) { + ret_code = I40E_ERR_NOT_READY; + goto exit; + } + + /* mark the entry invalid */ + sd_entry->valid = false; + + I40E_DEC_SD_REFCNT(&hmc_info->sd_table); +exit: + return ret_code; +} + +/** + * i40e_remove_pd_page_new - Removes a PD page from sd entry. + * @hw: pointer to our hw struct + * @hmc_info: pointer to the HMC configuration information structure + * @idx: segment descriptor index to find the relevant page descriptor + * @is_pf: used to distinguish between VF and PF + **/ +enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf) +{ + struct i40e_hmc_sd_entry *sd_entry; + + if (!is_pf) + return I40E_NOT_SUPPORTED; + + sd_entry = &hmc_info->sd_table.sd_entry[idx]; + I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED); + + return i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr)); +} diff --git a/drivers/net/i40e/base/i40e_hmc.h b/drivers/net/i40e/base/i40e_hmc.h new file mode 100644 index 00000000..343b251f --- /dev/null +++ b/drivers/net/i40e/base/i40e_hmc.h @@ -0,0 +1,245 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#ifndef _I40E_HMC_H_ +#define _I40E_HMC_H_ + +#define I40E_HMC_MAX_BP_COUNT 512 + +/* forward-declare the HW struct for the compiler */ +struct i40e_hw; + +#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */ +#define I40E_HMC_PD_CNT_IN_SD 512 +#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */ +#define I40E_HMC_PAGED_BP_SIZE 4096 +#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096 +#define I40E_FIRST_VF_FPM_ID 16 + +struct i40e_hmc_obj_info { + u64 base; /* base addr in FPM */ + u32 max_cnt; /* max count available for this hmc func */ + u32 cnt; /* count of objects driver actually wants to create */ + u64 size; /* size in bytes of one object */ +}; + +enum i40e_sd_entry_type { + I40E_SD_TYPE_INVALID = 0, + I40E_SD_TYPE_PAGED = 1, + I40E_SD_TYPE_DIRECT = 2 +}; + +struct i40e_hmc_bp { + enum i40e_sd_entry_type entry_type; + struct i40e_dma_mem addr; /* populate to be used by hw */ + u32 sd_pd_index; + u32 ref_cnt; +}; + +struct i40e_hmc_pd_entry { + struct i40e_hmc_bp bp; + u32 sd_index; + bool rsrc_pg; + bool valid; +}; + +struct i40e_hmc_pd_table { + struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */ + struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */ + struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */ + + u32 ref_cnt; + u32 sd_index; +}; + +struct i40e_hmc_sd_entry { + enum i40e_sd_entry_type entry_type; + bool valid; + + union { + struct i40e_hmc_pd_table pd_table; + struct i40e_hmc_bp bp; + } u; +}; + +struct i40e_hmc_sd_table { + struct i40e_virt_mem addr; /* used to track sd_entry allocations */ + u32 sd_cnt; + u32 ref_cnt; + struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */ +}; + +struct i40e_hmc_info { + u32 signature; + /* equals to pci func num for PF and dynamically allocated for VFs */ + u8 hmc_fn_id; + u16 first_sd_index; /* index of the first available SD */ + + /* hmc objects */ + struct i40e_hmc_obj_info *hmc_obj; + struct i40e_virt_mem hmc_obj_virt_mem; + struct i40e_hmc_sd_table sd_table; +}; + +#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++) +#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++) +#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++) + +#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--) +#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--) +#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--) + +/** + * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware + * @hw: pointer to our hw struct + * @pa: pointer to physical address + * @sd_index: segment descriptor index + * @type: if sd entry is direct or paged + **/ +#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \ +{ \ + u32 val1, val2, val3; \ + val1 = (u32)(I40E_HI_DWORD(pa)); \ + val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \ + I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ + ((((type) == I40E_SD_TYPE_PAGED) ? 
0 : 1) << \ + I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \ + BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ + val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \ + wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ + wr32((hw), I40E_PFHMC_SDCMD, val3); \ +} + +/** + * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware + * @hw: pointer to our hw struct + * @sd_index: segment descriptor index + * @type: if sd entry is direct or paged + **/ +#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \ +{ \ + u32 val2, val3; \ + val2 = (I40E_HMC_MAX_BP_COUNT << \ + I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ + ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ + I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \ + val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \ + wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ + wr32((hw), I40E_PFHMC_SDCMD, val3); \ +} + +/** + * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware + * @hw: pointer to our hw struct + * @sd_idx: segment descriptor index + * @pd_idx: page descriptor index + **/ +#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \ + wr32((hw), I40E_PFHMC_PDINV, \ + (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \ + ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT))) + +/** + * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit + * @hmc_info: pointer to the HMC configuration information structure + * @type: type of HMC resources we're searching + * @index: starting index for the object + * @cnt: number of objects we're trying to create + * @sd_idx: pointer to return index of the segment descriptor in question + * @sd_limit: pointer to return the maximum number of segment descriptors + * + * This function calculates the segment descriptor index and index limit + * for the resource defined by i40e_hmc_rsrc_type. + **/ +#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\ +{ \ + u64 fpm_addr, fpm_limit; \ + fpm_addr = (hmc_info)->hmc_obj[(type)].base + \ + (hmc_info)->hmc_obj[(type)].size * (index); \ + fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\ + *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \ + *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \ + /* add one more to the limit to correct our range */ \ + *(sd_limit) += 1; \ +} + +/** + * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit + * @hmc_info: pointer to the HMC configuration information struct + * @type: HMC resource type we're examining + * @idx: starting index for the object + * @cnt: number of objects we're trying to create + * @pd_index: pointer to return page descriptor index + * @pd_limit: pointer to return page descriptor index limit + * + * Calculates the page descriptor index and index limit for the resource + * defined by i40e_hmc_rsrc_type. 
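+ *
+ * For illustration (arbitrary numbers, not read from hardware): with a
+ * base of 0, an object size of 128 bytes, idx 0 and cnt 64, fpm_adr is
+ * 0 and fpm_limit is 8192, so *(pd_index) is 0 and *(pd_limit) is 2.
+ * The limit is exclusive, i.e. the range spans two 4 KB backing pages.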
+ **/ +#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\ +{ \ + u64 fpm_adr, fpm_limit; \ + fpm_adr = (hmc_info)->hmc_obj[(type)].base + \ + (hmc_info)->hmc_obj[(type)].size * (idx); \ + fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \ + *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \ + *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \ + /* add one more to the limit to correct our range */ \ + *(pd_limit) += 1; \ +} +enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 sd_index, + enum i40e_sd_entry_type type, + u64 direct_mode_sz); + +enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 pd_index, + struct i40e_dma_mem *rsrc_pg); +enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx); +enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, + u32 idx); +enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf); +enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, + u32 idx); +enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf); + +#endif /* _I40E_HMC_H_ */ diff --git a/drivers/net/i40e/base/i40e_lan_hmc.c b/drivers/net/i40e/base/i40e_lan_hmc.c new file mode 100644 index 00000000..22606484 --- /dev/null +++ b/drivers/net/i40e/base/i40e_lan_hmc.c @@ -0,0 +1,1411 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#include "i40e_osdep.h" +#include "i40e_register.h" +#include "i40e_type.h" +#include "i40e_hmc.h" +#include "i40e_lan_hmc.h" +#include "i40e_prototype.h" + +/* lan specific interface functions */ + +/** + * i40e_align_l2obj_base - aligns base object pointer to 512 bytes + * @offset: base address offset needing alignment + * + * Aligns the layer 2 function private memory so it's 512-byte aligned. + **/ +STATIC u64 i40e_align_l2obj_base(u64 offset) +{ + u64 aligned_offset = offset; + + if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0) + aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT - + (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT)); + + return aligned_offset; +} + +/** + * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size + * @txq_num: number of Tx queues needing backing context + * @rxq_num: number of Rx queues needing backing context + * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context + * @fcoe_filt_num: number of FCoE filters needing backing context + * + * Calculates the maximum amount of memory for the function required, based + * on the number of resources it must provide context for. + **/ +u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num, + u32 fcoe_cntx_num, u32 fcoe_filt_num) +{ + u64 fpm_size = 0; + + fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ; + fpm_size = i40e_align_l2obj_base(fpm_size); + + fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ); + fpm_size = i40e_align_l2obj_base(fpm_size); + + fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX); + fpm_size = i40e_align_l2obj_base(fpm_size); + + fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT); + fpm_size = i40e_align_l2obj_base(fpm_size); + + return fpm_size; +} + +/** + * i40e_init_lan_hmc - initialize i40e_hmc_info struct + * @hw: pointer to the HW structure + * @txq_num: number of Tx queues needing backing context + * @rxq_num: number of Rx queues needing backing context + * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context + * @fcoe_filt_num: number of FCoE filters needing backing context + * + * This function will be called once per physical function initialization. + * It will fill out the i40e_hmc_obj_info structure for LAN objects based on + * the driver's provided input, as well as information from the HMC itself + * loaded from NVRAM. + * + * Assumptions: + * - HMC Resource Profile has been selected before calling this function. 
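+ *
+ * Note: the LAN object types are laid out back to back in FPM address
+ * space (Tx, Rx, FCoE context, FCoE filter), each base aligned to 512
+ * bytes by i40e_align_l2obj_base(), and each object size is read from
+ * the corresponding GLHMC_*OBJSZ register as a power-of-two exponent.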
+ **/ +enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, + u32 rxq_num, u32 fcoe_cntx_num, + u32 fcoe_filt_num) +{ + struct i40e_hmc_obj_info *obj, *full_obj; + enum i40e_status_code ret_code = I40E_SUCCESS; + u64 l2fpm_size; + u32 size_exp; + + hw->hmc.signature = I40E_HMC_INFO_SIGNATURE; + hw->hmc.hmc_fn_id = hw->pf_id; + + /* allocate memory for hmc_obj */ + ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem, + sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX); + if (ret_code) + goto init_lan_hmc_out; + hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *) + hw->hmc.hmc_obj_virt_mem.va; + + /* The full object will be used to create the LAN HMC SD */ + full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL]; + full_obj->max_cnt = 0; + full_obj->cnt = 0; + full_obj->base = 0; + full_obj->size = 0; + + /* Tx queue context information */ + obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX]; + obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX); + obj->cnt = txq_num; + obj->base = 0; + size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ); + obj->size = BIT_ULL(size_exp); + + /* validate values requested by driver don't exceed HMC capacity */ + if (txq_num > obj->max_cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", + txq_num, obj->max_cnt, ret_code); + goto init_lan_hmc_out; + } + + /* aggregate values into the full LAN object for later */ + full_obj->max_cnt += obj->max_cnt; + full_obj->cnt += obj->cnt; + + /* Rx queue context information */ + obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX]; + obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX); + obj->cnt = rxq_num; + obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base + + (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt * + hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size); + obj->base = i40e_align_l2obj_base(obj->base); + size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ); + obj->size = BIT_ULL(size_exp); + + /* validate values requested by driver don't exceed HMC capacity */ + if (rxq_num > obj->max_cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", + rxq_num, obj->max_cnt, ret_code); + goto init_lan_hmc_out; + } + + /* aggregate values into the full LAN object for later */ + full_obj->max_cnt += obj->max_cnt; + full_obj->cnt += obj->cnt; + + /* FCoE context information */ + obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX]; + obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX); + obj->cnt = fcoe_cntx_num; + obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base + + (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt * + hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size); + obj->base = i40e_align_l2obj_base(obj->base); + size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ); + obj->size = BIT_ULL(size_exp); + + /* validate values requested by driver don't exceed HMC capacity */ + if (fcoe_cntx_num > obj->max_cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", + fcoe_cntx_num, obj->max_cnt, ret_code); + goto init_lan_hmc_out; + } + + /* aggregate values into the full LAN object for later */ + full_obj->max_cnt += obj->max_cnt; + full_obj->cnt += obj->cnt; + + /* FCoE filter information */ + obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT]; + obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX); + obj->cnt = fcoe_filt_num; + obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base + + (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt * + 
hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size); + obj->base = i40e_align_l2obj_base(obj->base); + size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ); + obj->size = BIT_ULL(size_exp); + + /* validate values requested by driver don't exceed HMC capacity */ + if (fcoe_filt_num > obj->max_cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n", + fcoe_filt_num, obj->max_cnt, ret_code); + goto init_lan_hmc_out; + } + + /* aggregate values into the full LAN object for later */ + full_obj->max_cnt += obj->max_cnt; + full_obj->cnt += obj->cnt; + + hw->hmc.first_sd_index = 0; + hw->hmc.sd_table.ref_cnt = 0; + l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num, + fcoe_filt_num); + if (NULL == hw->hmc.sd_table.sd_entry) { + hw->hmc.sd_table.sd_cnt = (u32) + (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) / + I40E_HMC_DIRECT_BP_SIZE; + + /* allocate the sd_entry members in the sd_table */ + ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr, + (sizeof(struct i40e_hmc_sd_entry) * + hw->hmc.sd_table.sd_cnt)); + if (ret_code) + goto init_lan_hmc_out; + hw->hmc.sd_table.sd_entry = + (struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va; + } + /* store in the LAN full object for later */ + full_obj->size = l2fpm_size; + +init_lan_hmc_out: + return ret_code; +} + +/** + * i40e_remove_pd_page - Remove a page from the page descriptor table + * @hw: pointer to the HW structure + * @hmc_info: pointer to the HMC configuration information structure + * @idx: segment descriptor index to find the relevant page descriptor + * + * This function: + * 1. Marks the entry in pd table (for paged address mode) invalid + * 2. write to register PMPDINV to invalidate the backing page in FV cache + * 3. Decrement the ref count for pd_entry + * assumptions: + * 1. caller can deallocate the memory used by pd after this function + * returns. + **/ +STATIC enum i40e_status_code i40e_remove_pd_page(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + + if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS) + ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true); + + return ret_code; +} + +/** + * i40e_remove_sd_bp - remove a backing page from a segment descriptor + * @hw: pointer to our HW structure + * @hmc_info: pointer to the HMC configuration information structure + * @idx: the page index + * + * This function: + * 1. Marks the entry in sd table (for direct address mode) invalid + * 2. write to register PMSDCMD, PMSDDATALOW(PMSDDATALOW.PMSDVALID set + * to 0) and PMSDDATAHIGH to invalidate the sd page + * 3. Decrement the ref count for the sd_entry + * assumptions: + * 1. caller can deallocate the memory used by backing storage after this + * function returns. + **/ +STATIC enum i40e_status_code i40e_remove_sd_bp(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + + if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS) + ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true); + + return ret_code; +} + +/** + * i40e_create_lan_hmc_object - allocate backing store for hmc objects + * @hw: pointer to the HW structure + * @info: pointer to i40e_hmc_create_obj_info struct + * + * This will allocate memory for PDs and backing pages and populate + * the sd and pd entries. 
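+ *
+ * Each SD covers 2 MB of FPM space, either as a single direct backing
+ * page or as a table of up to 512 4 KB backing pages (paged mode). If
+ * a PD allocation fails, the pages added so far for that SD are rolled
+ * back, and exit_sd_error tears down any SD entries already created.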
+ **/ +enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw, + struct i40e_hmc_lan_create_obj_info *info) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + struct i40e_hmc_sd_entry *sd_entry; + u32 pd_idx1 = 0, pd_lmt1 = 0; + u32 pd_idx = 0, pd_lmt = 0; + bool pd_error = false; + u32 sd_idx, sd_lmt; + u64 sd_size; + u32 i, j; + + if (NULL == info) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_create_lan_hmc_object: bad info ptr\n"); + goto exit; + } + if (NULL == info->hmc_info) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_create_lan_hmc_object: bad hmc_info ptr\n"); + goto exit; + } + if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_create_lan_hmc_object: bad signature\n"); + goto exit; + } + + if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX; + DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n", + ret_code); + goto exit; + } + if ((info->start_idx + info->count) > + info->hmc_info->hmc_obj[info->rsrc_type].cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n", + ret_code); + goto exit; + } + + /* find sd index and limit */ + I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, + &sd_idx, &sd_lmt); + if (sd_idx >= info->hmc_info->sd_table.sd_cnt || + sd_lmt > info->hmc_info->sd_table.sd_cnt) { + ret_code = I40E_ERR_INVALID_SD_INDEX; + goto exit; + } + /* find pd index */ + I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, &pd_idx, + &pd_lmt); + + /* This is to cover for cases where you may not want to have an SD with + * the full 2M memory but something smaller. By not filling out any + * size, the function will default the SD size to be 2M. + */ + if (info->direct_mode_sz == 0) + sd_size = I40E_HMC_DIRECT_BP_SIZE; + else + sd_size = info->direct_mode_sz; + + /* check if all the sds are valid. If not, allocate a page and + * initialize it. + */ + for (j = sd_idx; j < sd_lmt; j++) { + /* update the sd table entry */ + ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j, + info->entry_type, + sd_size); + if (I40E_SUCCESS != ret_code) + goto exit_sd_error; + sd_entry = &info->hmc_info->sd_table.sd_entry[j]; + if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) { + /* check if all the pds in this sd are valid. If not, + * allocate a page and initialize it. 
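+ * Each SD owns I40E_HMC_MAX_BP_COUNT (512) page descriptors, so
+ * pd_idx1/pd_lmt1 below clamp the overall PD range to the slice
+ * that belongs to segment descriptor j.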
+ */ + + /* find pd_idx and pd_lmt in this sd */ + pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT)); + pd_lmt1 = min(pd_lmt, + ((j + 1) * I40E_HMC_MAX_BP_COUNT)); + for (i = pd_idx1; i < pd_lmt1; i++) { + /* update the pd table entry */ + ret_code = i40e_add_pd_table_entry(hw, + info->hmc_info, + i, NULL); + if (I40E_SUCCESS != ret_code) { + pd_error = true; + break; + } + } + if (pd_error) { + /* remove the backing pages from pd_idx1 to i */ + while (i && (i > pd_idx1)) { + i40e_remove_pd_bp(hw, info->hmc_info, + (i - 1)); + i--; + } + } + } + if (!sd_entry->valid) { + sd_entry->valid = true; + switch (sd_entry->entry_type) { + case I40E_SD_TYPE_PAGED: + I40E_SET_PF_SD_ENTRY(hw, + sd_entry->u.pd_table.pd_page_addr.pa, + j, sd_entry->entry_type); + break; + case I40E_SD_TYPE_DIRECT: + I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa, + j, sd_entry->entry_type); + break; + default: + ret_code = I40E_ERR_INVALID_SD_TYPE; + goto exit; + } + } + } + goto exit; + +exit_sd_error: + /* cleanup for sd entries from j to sd_idx */ + while (j && (j > sd_idx)) { + sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1]; + switch (sd_entry->entry_type) { + case I40E_SD_TYPE_PAGED: + pd_idx1 = max(pd_idx, + ((j - 1) * I40E_HMC_MAX_BP_COUNT)); + pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT)); + for (i = pd_idx1; i < pd_lmt1; i++) + i40e_remove_pd_bp(hw, info->hmc_info, i); + i40e_remove_pd_page(hw, info->hmc_info, (j - 1)); + break; + case I40E_SD_TYPE_DIRECT: + i40e_remove_sd_bp(hw, info->hmc_info, (j - 1)); + break; + default: + ret_code = I40E_ERR_INVALID_SD_TYPE; + break; + } + j--; + } +exit: + return ret_code; +} + +/** + * i40e_configure_lan_hmc - prepare the HMC backing store + * @hw: pointer to the hw structure + * @model: the model for the layout of the SD/PD tables + * + * - This function will be called once per physical function initialization. + * - This function will be called after i40e_init_lan_hmc() and before + * any LAN/FCoE HMC objects can be created. 
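+ *
+ * With I40E_HMC_MODEL_DIRECT_PREFERRED a single direct SD is tried
+ * first and the code falls back to paged mode if that allocation
+ * fails; the GLHMC_*BASE registers are then programmed with each
+ * object base expressed in 512-byte units.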
+ **/ +enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw, + enum i40e_hmc_model model) +{ + struct i40e_hmc_lan_create_obj_info info; + u8 hmc_fn_id = hw->hmc.hmc_fn_id; + struct i40e_hmc_obj_info *obj; + enum i40e_status_code ret_code = I40E_SUCCESS; + + /* Initialize part of the create object info struct */ + info.hmc_info = &hw->hmc; + info.rsrc_type = I40E_HMC_LAN_FULL; + info.start_idx = 0; + info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size; + + /* Build the SD entry for the LAN objects */ + switch (model) { + case I40E_HMC_MODEL_DIRECT_PREFERRED: + case I40E_HMC_MODEL_DIRECT_ONLY: + info.entry_type = I40E_SD_TYPE_DIRECT; + /* Make one big object, a single SD */ + info.count = 1; + ret_code = i40e_create_lan_hmc_object(hw, &info); + if ((ret_code != I40E_SUCCESS) && (model == I40E_HMC_MODEL_DIRECT_PREFERRED)) + goto try_type_paged; + else if (ret_code != I40E_SUCCESS) + goto configure_lan_hmc_out; + /* else clause falls through the break */ + break; + case I40E_HMC_MODEL_PAGED_ONLY: +try_type_paged: + info.entry_type = I40E_SD_TYPE_PAGED; + /* Make one big object in the PD table */ + info.count = 1; + ret_code = i40e_create_lan_hmc_object(hw, &info); + if (ret_code != I40E_SUCCESS) + goto configure_lan_hmc_out; + break; + default: + /* unsupported type */ + ret_code = I40E_ERR_INVALID_SD_TYPE; + DEBUGOUT1("i40e_configure_lan_hmc: Unknown SD type: %d\n", + ret_code); + goto configure_lan_hmc_out; + } + + /* Configure and program the FPM registers so objects can be created */ + + /* Tx contexts */ + obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX]; + wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id), + (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512)); + wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt); + + /* Rx contexts */ + obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX]; + wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id), + (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512)); + wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt); + + /* FCoE contexts */ + obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX]; + wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id), + (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512)); + wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt); + + /* FCoE filters */ + obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT]; + wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id), + (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512)); + wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt); + +configure_lan_hmc_out: + return ret_code; +} + +/** + * i40e_delete_hmc_object - remove hmc objects + * @hw: pointer to the HW structure + * @info: pointer to i40e_hmc_delete_obj_info struct + * + * This will de-populate the SDs and PDs. It frees + * the memory for PDS and backing storage. After this function is returned, + * caller should deallocate memory allocated previously for + * book-keeping information about PDs and backing storage. 
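+ *
+ * Removal is done in two passes: first the valid 4 KB backing pages of
+ * any paged SDs in the range are removed, then the SD entries
+ * themselves (direct SDs via i40e_remove_sd_bp(), paged SDs via
+ * i40e_remove_pd_page()).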
+ **/ +enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw, + struct i40e_hmc_lan_delete_obj_info *info) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + struct i40e_hmc_pd_table *pd_table; + u32 pd_idx, pd_lmt, rel_pd_idx; + u32 sd_idx, sd_lmt; + u32 i, j; + + if (NULL == info) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_delete_hmc_object: bad info ptr\n"); + goto exit; + } + if (NULL == info->hmc_info) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_delete_hmc_object: bad info->hmc_info ptr\n"); + goto exit; + } + if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->signature\n"); + goto exit; + } + + if (NULL == info->hmc_info->sd_table.sd_entry) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_delete_hmc_object: bad sd_entry\n"); + goto exit; + } + + if (NULL == info->hmc_info->hmc_obj) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->hmc_obj\n"); + goto exit; + } + if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX; + DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n", + ret_code); + goto exit; + } + + if ((info->start_idx + info->count) > + info->hmc_info->hmc_obj[info->rsrc_type].cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n", + ret_code); + goto exit; + } + + I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, &pd_idx, + &pd_lmt); + + for (j = pd_idx; j < pd_lmt; j++) { + sd_idx = j / I40E_HMC_PD_CNT_IN_SD; + + if (I40E_SD_TYPE_PAGED != + info->hmc_info->sd_table.sd_entry[sd_idx].entry_type) + continue; + + rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD; + + pd_table = + &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; + if (pd_table->pd_entry[rel_pd_idx].valid) { + ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j); + if (I40E_SUCCESS != ret_code) + goto exit; + } + } + + /* find sd index and limit */ + I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, + &sd_idx, &sd_lmt); + if (sd_idx >= info->hmc_info->sd_table.sd_cnt || + sd_lmt > info->hmc_info->sd_table.sd_cnt) { + ret_code = I40E_ERR_INVALID_SD_INDEX; + goto exit; + } + + for (i = sd_idx; i < sd_lmt; i++) { + if (!info->hmc_info->sd_table.sd_entry[i].valid) + continue; + switch (info->hmc_info->sd_table.sd_entry[i].entry_type) { + case I40E_SD_TYPE_DIRECT: + ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i); + if (I40E_SUCCESS != ret_code) + goto exit; + break; + case I40E_SD_TYPE_PAGED: + ret_code = i40e_remove_pd_page(hw, info->hmc_info, i); + if (I40E_SUCCESS != ret_code) + goto exit; + break; + default: + break; + } + } +exit: + return ret_code; +} + +/** + * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory + * @hw: pointer to the hw structure + * + * This must be called by drivers as they are shutting down and being + * removed from the OS. 
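+ *
+ * It deletes the single I40E_HMC_LAN_FULL object set up by
+ * i40e_configure_lan_hmc() and then frees the SD table and hmc_obj
+ * book-keeping memory.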
+ **/ +enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw) +{ + struct i40e_hmc_lan_delete_obj_info info; + enum i40e_status_code ret_code; + + info.hmc_info = &hw->hmc; + info.rsrc_type = I40E_HMC_LAN_FULL; + info.start_idx = 0; + info.count = 1; + + /* delete the object */ + ret_code = i40e_delete_lan_hmc_object(hw, &info); + + /* free the SD table entry for LAN */ + i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr); + hw->hmc.sd_table.sd_cnt = 0; + hw->hmc.sd_table.sd_entry = NULL; + + /* free memory used for hmc_obj */ + i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem); + hw->hmc.hmc_obj = NULL; + + return ret_code; +} + +#define I40E_HMC_STORE(_struct, _ele) \ + offsetof(struct _struct, _ele), \ + FIELD_SIZEOF(struct _struct, _ele) + +struct i40e_context_ele { + u16 offset; + u16 size_of; + u16 width; + u16 lsb; +}; + +/* LAN Tx Queue Context */ +static struct i40e_context_ele i40e_hmc_txq_ce_info[] = { + /* Field Width LSB */ + {I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, new_context), 1, 30 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, base), 57, 32 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena), 1, 89 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena), 1, 90 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena), 1, 91 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena), 1, 92 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid), 8, 96 }, +/* line 1 */ + {I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb), 13, 0 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena), 1, 32 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, qlen), 13, 33 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena), 1, 46 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena), 1, 47 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena), 1, 48 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr), 64, 64 + 128 }, +/* line 7 */ + {I40E_HMC_STORE(i40e_hmc_obj_txq, crc), 32, 0 + (7 * 128) }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist), 10, 84 + (7 * 128) }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act), 1, 94 + (7 * 128) }, + { 0 } +}; + +/* LAN Rx Queue Context */ +static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = { + /* Field Width LSB */ + { I40E_HMC_STORE(i40e_hmc_obj_rxq, head), 13, 0 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid), 8, 13 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, base), 57, 32 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen), 13, 89 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff), 7, 102 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff), 5, 109 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype), 2, 114 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize), 1, 116 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip), 1, 117 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena), 1, 118 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel), 1, 119 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0), 4, 120 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1), 2, 124 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv), 1, 127 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax), 14, 174 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1, 193 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1, 194 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena), 1, 201 }, + { 0 } +}; + +/** + * i40e_write_byte - replace HMC context byte + * @hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct 
to be read from + * @src: the struct to be read from + **/ +static void i40e_write_byte(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *src) +{ + u8 src_byte, dest_byte, mask; + u8 *from, *dest; + u16 shift_width; + + /* copy from the next struct field */ + from = src + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + mask = (u8)(BIT(ce_info->width) - 1); + + src_byte = *from; + src_byte &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_byte <<= shift_width; + + /* get the current bits from the target bit string */ + dest = hmc_bits + (ce_info->lsb / 8); + + i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA); + + dest_byte &= ~mask; /* get the bits not changing */ + dest_byte |= src_byte; /* add in the new bits */ + + /* put it all back */ + i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA); +} + +/** + * i40e_write_word - replace HMC context word + * @hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct to be read from + * @src: the struct to be read from + **/ +static void i40e_write_word(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *src) +{ + u16 src_word, mask; + u8 *from, *dest; + u16 shift_width; + __le16 dest_word; + + /* copy from the next struct field */ + from = src + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + mask = BIT(ce_info->width) - 1; + + /* don't swizzle the bits until after the mask because the mask bits + * will be in a different bit position on big endian machines + */ + src_word = *(u16 *)from; + src_word &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_word <<= shift_width; + + /* get the current bits from the target bit string */ + dest = hmc_bits + (ce_info->lsb / 8); + + i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA); + + dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */ + dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */ + + /* put it all back */ + i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA); +} + +/** + * i40e_write_dword - replace HMC context dword + * @hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct to be read from + * @src: the struct to be read from + **/ +static void i40e_write_dword(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *src) +{ + u32 src_dword, mask; + u8 *from, *dest; + u16 shift_width; + __le32 dest_dword; + + /* copy from the next struct field */ + from = src + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + + /* if the field width is exactly 32 on an x86 machine, then the shift + * operation will not work because the SHL instructions count is masked + * to 5 bits so the shift will do nothing + */ + if (ce_info->width < 32) + mask = BIT(ce_info->width) - 1; + else + mask = ~(u32)0; + + /* don't swizzle the bits until after the mask because the mask bits + * will be in a different bit position on big endian machines + */ + src_dword = *(u32 *)from; + src_dword &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_dword <<= shift_width; + + /* get the current bits from the target bit string */ + dest = hmc_bits + (ce_info->lsb / 8); + + i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA); + + dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */ + dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */ + + /* put it 
all back */ + i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA); +} + +/** + * i40e_write_qword - replace HMC context qword + * @hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct to be read from + * @src: the struct to be read from + **/ +static void i40e_write_qword(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *src) +{ + u64 src_qword, mask; + u8 *from, *dest; + u16 shift_width; + __le64 dest_qword; + + /* copy from the next struct field */ + from = src + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + + /* if the field width is exactly 64 on an x86 machine, then the shift + * operation will not work because the SHL instructions count is masked + * to 6 bits so the shift will do nothing + */ + if (ce_info->width < 64) + mask = BIT_ULL(ce_info->width) - 1; + else + mask = ~(u64)0; + + /* don't swizzle the bits until after the mask because the mask bits + * will be in a different bit position on big endian machines + */ + src_qword = *(u64 *)from; + src_qword &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_qword <<= shift_width; + + /* get the current bits from the target bit string */ + dest = hmc_bits + (ce_info->lsb / 8); + + i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA); + + dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */ + dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */ + + /* put it all back */ + i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA); +} + +/** + * i40e_read_byte - read HMC context byte into struct + * @hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct to be filled + * @dest: the struct to be filled + **/ +static void i40e_read_byte(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *dest) +{ + u8 dest_byte, mask; + u8 *src, *target; + u16 shift_width; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + mask = (u8)(BIT(ce_info->width) - 1); + + /* shift to correct alignment */ + mask <<= shift_width; + + /* get the current bits from the src bit string */ + src = hmc_bits + (ce_info->lsb / 8); + + i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA); + + dest_byte &= ~(mask); + + dest_byte >>= shift_width; + + /* get the address from the struct field */ + target = dest + ce_info->offset; + + /* put it back in the struct */ + i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA); +} + +/** + * i40e_read_word - read HMC context word into struct + * @hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct to be filled + * @dest: the struct to be filled + **/ +static void i40e_read_word(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *dest) +{ + u16 dest_word, mask; + u8 *src, *target; + u16 shift_width; + __le16 src_word; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + mask = BIT(ce_info->width) - 1; + + /* shift to correct alignment */ + mask <<= shift_width; + + /* get the current bits from the src bit string */ + src = hmc_bits + (ce_info->lsb / 8); + + i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA); + + /* the data in the memory is stored as little endian so mask it + * correctly + */ + src_word &= ~(CPU_TO_LE16(mask)); + + /* get the data back into host order before shifting */ + dest_word = LE16_TO_CPU(src_word); + + dest_word >>= shift_width; + + /* get the address from the struct field */ + target = dest + 
ce_info->offset; + + /* put it back in the struct */ + i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA); +} + +/** + * i40e_read_dword - read HMC context dword into struct + * @hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct to be filled + * @dest: the struct to be filled + **/ +static void i40e_read_dword(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *dest) +{ + u32 dest_dword, mask; + u8 *src, *target; + u16 shift_width; + __le32 src_dword; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + + /* if the field width is exactly 32 on an x86 machine, then the shift + * operation will not work because the SHL instructions count is masked + * to 5 bits so the shift will do nothing + */ + if (ce_info->width < 32) + mask = BIT(ce_info->width) - 1; + else + mask = ~(u32)0; + + /* shift to correct alignment */ + mask <<= shift_width; + + /* get the current bits from the src bit string */ + src = hmc_bits + (ce_info->lsb / 8); + + i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA); + + /* the data in the memory is stored as little endian so mask it + * correctly + */ + src_dword &= ~(CPU_TO_LE32(mask)); + + /* get the data back into host order before shifting */ + dest_dword = LE32_TO_CPU(src_dword); + + dest_dword >>= shift_width; + + /* get the address from the struct field */ + target = dest + ce_info->offset; + + /* put it back in the struct */ + i40e_memcpy(target, &dest_dword, sizeof(dest_dword), + I40E_NONDMA_TO_DMA); +} + +/** + * i40e_read_qword - read HMC context qword into struct + * @hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct to be filled + * @dest: the struct to be filled + **/ +static void i40e_read_qword(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *dest) +{ + u64 dest_qword, mask; + u8 *src, *target; + u16 shift_width; + __le64 src_qword; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + + /* if the field width is exactly 64 on an x86 machine, then the shift + * operation will not work because the SHL instructions count is masked + * to 6 bits so the shift will do nothing + */ + if (ce_info->width < 64) + mask = BIT_ULL(ce_info->width) - 1; + else + mask = ~(u64)0; + + /* shift to correct alignment */ + mask <<= shift_width; + + /* get the current bits from the src bit string */ + src = hmc_bits + (ce_info->lsb / 8); + + i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA); + + /* the data in the memory is stored as little endian so mask it + * correctly + */ + src_qword &= ~(CPU_TO_LE64(mask)); + + /* get the data back into host order before shifting */ + dest_qword = LE64_TO_CPU(src_qword); + + dest_qword >>= shift_width; + + /* get the address from the struct field */ + target = dest + ce_info->offset; + + /* put it back in the struct */ + i40e_memcpy(target, &dest_qword, sizeof(dest_qword), + I40E_NONDMA_TO_DMA); +} + +/** + * i40e_get_hmc_context - extract HMC context bits + * @context_bytes: pointer to the context bit array + * @ce_info: a description of the struct to be filled + * @dest: the struct to be filled + **/ +static enum i40e_status_code i40e_get_hmc_context(u8 *context_bytes, + struct i40e_context_ele *ce_info, + u8 *dest) +{ + int f; + + for (f = 0; ce_info[f].width != 0; f++) { + switch (ce_info[f].size_of) { + case 1: + i40e_read_byte(context_bytes, &ce_info[f], dest); + break; + case 2: + i40e_read_word(context_bytes, &ce_info[f], dest); + break; + case 4: + 
i40e_read_dword(context_bytes, &ce_info[f], dest); + break; + case 8: + i40e_read_qword(context_bytes, &ce_info[f], dest); + break; + default: + /* nothing to do, just keep going */ + break; + } + } + + return I40E_SUCCESS; +} + +/** + * i40e_clear_hmc_context - zero out the HMC context bits + * @hw: the hardware struct + * @context_bytes: pointer to the context bit array (DMA memory) + * @hmc_type: the type of HMC resource + **/ +static enum i40e_status_code i40e_clear_hmc_context(struct i40e_hw *hw, + u8 *context_bytes, + enum i40e_hmc_lan_rsrc_type hmc_type) +{ + /* clean the bit array */ + i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size, + I40E_DMA_MEM); + + return I40E_SUCCESS; +} + +/** + * i40e_set_hmc_context - replace HMC context bits + * @context_bytes: pointer to the context bit array + * @ce_info: a description of the struct to be filled + * @dest: the struct to be filled + **/ +static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes, + struct i40e_context_ele *ce_info, + u8 *dest) +{ + int f; + + for (f = 0; ce_info[f].width != 0; f++) { + + /* we have to deal with each element of the HMC using the + * correct size so that we are correct regardless of the + * endianness of the machine + */ + switch (ce_info[f].size_of) { + case 1: + i40e_write_byte(context_bytes, &ce_info[f], dest); + break; + case 2: + i40e_write_word(context_bytes, &ce_info[f], dest); + break; + case 4: + i40e_write_dword(context_bytes, &ce_info[f], dest); + break; + case 8: + i40e_write_qword(context_bytes, &ce_info[f], dest); + break; + } + } + + return I40E_SUCCESS; +} + +/** + * i40e_hmc_get_object_va - retrieves an object's virtual address + * @hw: pointer to the hw structure + * @object_base: pointer to u64 to get the va + * @rsrc_type: the hmc resource type + * @obj_idx: hmc object index + * + * This function retrieves the object's virtual address from the object + * base pointer. This function is used for LAN Queue contexts. 
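+ *
+ * The object's FPM offset is base + size * obj_idx; for a paged SD the
+ * returned address is the owning 4 KB PD page plus (offset modulo
+ * 4 KB), and for a direct SD it is the 2 MB backing page plus (offset
+ * modulo 2 MB).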
+ **/ +STATIC +enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw, + u8 **object_base, + enum i40e_hmc_lan_rsrc_type rsrc_type, + u32 obj_idx) +{ + u32 obj_offset_in_sd, obj_offset_in_pd; + struct i40e_hmc_info *hmc_info = &hw->hmc; + struct i40e_hmc_sd_entry *sd_entry; + struct i40e_hmc_pd_entry *pd_entry; + u32 pd_idx, pd_lmt, rel_pd_idx; + enum i40e_status_code ret_code = I40E_SUCCESS; + u64 obj_offset_in_fpm; + u32 sd_idx, sd_lmt; + + if (NULL == hmc_info) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info ptr\n"); + goto exit; + } + if (NULL == hmc_info->hmc_obj) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n"); + goto exit; + } + if (NULL == object_base) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_hmc_get_object_va: bad object_base ptr\n"); + goto exit; + } + if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) { + ret_code = I40E_ERR_BAD_PTR; + DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->signature\n"); + goto exit; + } + if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) { + DEBUGOUT1("i40e_hmc_get_object_va: returns error %d\n", + ret_code); + ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX; + goto exit; + } + /* find sd index and limit */ + I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1, + &sd_idx, &sd_lmt); + + sd_entry = &hmc_info->sd_table.sd_entry[sd_idx]; + obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base + + hmc_info->hmc_obj[rsrc_type].size * obj_idx; + + if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) { + I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1, + &pd_idx, &pd_lmt); + rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD; + pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx]; + obj_offset_in_pd = (u32)(obj_offset_in_fpm % + I40E_HMC_PAGED_BP_SIZE); + *object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd; + } else { + obj_offset_in_sd = (u32)(obj_offset_in_fpm % + I40E_HMC_DIRECT_BP_SIZE); + *object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd; + } +exit: + return ret_code; +} + +/** + * i40e_get_lan_tx_queue_context - return the HMC context for the queue + * @hw: the hardware struct + * @queue: the queue we care about + * @s: the struct to be filled + **/ +enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_txq *s) +{ + enum i40e_status_code err; + u8 *context_bytes; + + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue); + if (err < 0) + return err; + + return i40e_get_hmc_context(context_bytes, + i40e_hmc_txq_ce_info, (u8 *)s); +} + +/** + * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue + * @hw: the hardware struct + * @queue: the queue we care about + **/ +enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue) +{ + enum i40e_status_code err; + u8 *context_bytes; + + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue); + if (err < 0) + return err; + + return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX); +} + +/** + * i40e_set_lan_tx_queue_context - set the HMC context for the queue + * @hw: the hardware struct + * @queue: the queue we care about + * @s: the struct to be filled + **/ +enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_txq *s) +{ + enum i40e_status_code err; + u8 *context_bytes; + + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue); + if (err < 0) + return err; + + return 
i40e_set_hmc_context(context_bytes, + i40e_hmc_txq_ce_info, (u8 *)s); +} + +/** + * i40e_get_lan_rx_queue_context - return the HMC context for the queue + * @hw: the hardware struct + * @queue: the queue we care about + * @s: the struct to be filled + **/ +enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_rxq *s) +{ + enum i40e_status_code err; + u8 *context_bytes; + + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue); + if (err < 0) + return err; + + return i40e_get_hmc_context(context_bytes, + i40e_hmc_rxq_ce_info, (u8 *)s); +} + +/** + * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue + * @hw: the hardware struct + * @queue: the queue we care about + **/ +enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue) +{ + enum i40e_status_code err; + u8 *context_bytes; + + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue); + if (err < 0) + return err; + + return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX); +} + +/** + * i40e_set_lan_rx_queue_context - set the HMC context for the queue + * @hw: the hardware struct + * @queue: the queue we care about + * @s: the struct to be filled + **/ +enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_rxq *s) +{ + enum i40e_status_code err; + u8 *context_bytes; + + err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue); + if (err < 0) + return err; + + return i40e_set_hmc_context(context_bytes, + i40e_hmc_rxq_ce_info, (u8 *)s); +} diff --git a/drivers/net/i40e/base/i40e_lan_hmc.h b/drivers/net/i40e/base/i40e_lan_hmc.h new file mode 100644 index 00000000..b2a43104 --- /dev/null +++ b/drivers/net/i40e/base/i40e_lan_hmc.h @@ -0,0 +1,200 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#ifndef _I40E_LAN_HMC_H_ +#define _I40E_LAN_HMC_H_ + +/* forward-declare the HW struct for the compiler */ +struct i40e_hw; + +/* HMC element context information */ + +/* Rx queue context data + * + * The sizes of the variables may be larger than needed due to crossing byte + * boundaries. If we do not have the width of the variable set to the correct + * size then we could end up shifting bits off the top of the variable when the + * variable is at the top of a byte and crosses over into the next byte. + */ +struct i40e_hmc_obj_rxq { + u16 head; + u16 cpuid; /* bigger than needed, see above for reason */ + u64 base; + u16 qlen; +#define I40E_RXQ_CTX_DBUFF_SHIFT 7 + u16 dbuff; /* bigger than needed, see above for reason */ +#define I40E_RXQ_CTX_HBUFF_SHIFT 6 + u16 hbuff; /* bigger than needed, see above for reason */ + u8 dtype; + u8 dsize; + u8 crcstrip; + u8 fc_ena; + u8 l2tsel; + u8 hsplit_0; + u8 hsplit_1; + u8 showiv; + u32 rxmax; /* bigger than needed, see above for reason */ + u8 tphrdesc_ena; + u8 tphwdesc_ena; + u8 tphdata_ena; + u8 tphhead_ena; + u16 lrxqthresh; /* bigger than needed, see above for reason */ + u8 prefena; /* NOTE: normally must be set to 1 at init */ +}; + +/* Tx queue context data +* +* The sizes of the variables may be larger than needed due to crossing byte +* boundaries. If we do not have the width of the variable set to the correct +* size then we could end up shifting bits off the top of the variable when the +* variable is at the top of a byte and crosses over into the next byte. +*/ +struct i40e_hmc_obj_txq { + u16 head; + u8 new_context; + u64 base; + u8 fc_ena; + u8 timesync_ena; + u8 fd_ena; + u8 alt_vlan_ena; + u16 thead_wb; + u8 cpuid; + u8 head_wb_ena; + u16 qlen; + u8 tphrdesc_ena; + u8 tphrpacket_ena; + u8 tphwdesc_ena; + u64 head_wb_addr; + u32 crc; + u16 rdylist; + u8 rdylist_act; +}; + +/* for hsplit_0 field of Rx HMC context */ +enum i40e_hmc_obj_rx_hsplit_0 { + I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8, +}; + +/* fcoe_cntx and fcoe_filt are for debugging purpose only */ +struct i40e_hmc_obj_fcoe_cntx { + u32 rsv[32]; +}; + +struct i40e_hmc_obj_fcoe_filt { + u32 rsv[8]; +}; + +/* Context sizes for LAN objects */ +enum i40e_hmc_lan_object_size { + I40E_HMC_LAN_OBJ_SZ_8 = 0x3, + I40E_HMC_LAN_OBJ_SZ_16 = 0x4, + I40E_HMC_LAN_OBJ_SZ_32 = 0x5, + I40E_HMC_LAN_OBJ_SZ_64 = 0x6, + I40E_HMC_LAN_OBJ_SZ_128 = 0x7, + I40E_HMC_LAN_OBJ_SZ_256 = 0x8, + I40E_HMC_LAN_OBJ_SZ_512 = 0x9, +}; + +#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512 +#define I40E_HMC_OBJ_SIZE_TXQ 128 +#define I40E_HMC_OBJ_SIZE_RXQ 32 +#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 64 +#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64 + +enum i40e_hmc_lan_rsrc_type { + I40E_HMC_LAN_FULL = 0, + I40E_HMC_LAN_TX = 1, + I40E_HMC_LAN_RX = 2, + I40E_HMC_FCOE_CTX = 3, + I40E_HMC_FCOE_FILT = 4, + I40E_HMC_LAN_MAX = 5 +}; + +enum i40e_hmc_model { + I40E_HMC_MODEL_DIRECT_PREFERRED = 0, + I40E_HMC_MODEL_DIRECT_ONLY = 1, + I40E_HMC_MODEL_PAGED_ONLY = 2, + I40E_HMC_MODEL_UNKNOWN, +}; + +struct i40e_hmc_lan_create_obj_info { + struct i40e_hmc_info *hmc_info; + u32 rsrc_type; + u32 start_idx; + u32 count; + enum i40e_sd_entry_type entry_type; + u64 direct_mode_sz; +}; + +struct i40e_hmc_lan_delete_obj_info { + struct i40e_hmc_info *hmc_info; + u32 rsrc_type; + u32 start_idx; + u32 count; 
+}; + +enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, + u32 rxq_num, u32 fcoe_cntx_num, + u32 fcoe_filt_num); +enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw, + enum i40e_hmc_model model); +enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw); + +u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num, + u32 fcoe_cntx_num, u32 fcoe_filt_num); +enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_txq *s); +enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue); +enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_txq *s); +enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_rxq *s); +enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue); +enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_rxq *s); +enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw, + struct i40e_hmc_lan_create_obj_info *info); +enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw, + struct i40e_hmc_lan_delete_obj_info *info); + +#endif /* _I40E_LAN_HMC_H_ */ diff --git a/drivers/net/i40e/base/i40e_nvm.c b/drivers/net/i40e/base/i40e_nvm.c new file mode 100644 index 00000000..f4e4eaa4 --- /dev/null +++ b/drivers/net/i40e/base/i40e_nvm.c @@ -0,0 +1,1571 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#include "i40e_prototype.h" + +enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, + u16 *data); +enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, + u16 *data); +enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data); +enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data); +enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 words, void *data, + bool last_command); + +/** + * i40e_init_nvm_ops - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * Setup the function pointers and the NVM info structure. Should be called + * once per NVM initialization, e.g. inside the i40e_init_shared_code(). + * Please notice that the NVM term is used here (& in all methods covered + * in this file) as an equivalent of the FLASH part mapped into the SR. + * We are accessing FLASH always through the Shadow RAM. + **/ +enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw) +{ + struct i40e_nvm_info *nvm = &hw->nvm; + enum i40e_status_code ret_code = I40E_SUCCESS; + u32 fla, gens; + u8 sr_size; + + DEBUGFUNC("i40e_init_nvm"); + + /* The SR size is stored regardless of the nvm programming mode + * as the blank mode may be used in the factory line. + */ + gens = rd32(hw, I40E_GLNVM_GENS); + sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >> + I40E_GLNVM_GENS_SR_SIZE_SHIFT); + /* Switching to words (sr_size contains power of 2KB) */ + nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB; + + /* Check if we are in the normal or blank NVM programming mode */ + fla = rd32(hw, I40E_GLNVM_FLA); + if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */ + /* Max NVM timeout */ + nvm->timeout = I40E_MAX_NVM_TIMEOUT; + nvm->blank_nvm_mode = false; + } else { /* Blank programming mode */ + nvm->blank_nvm_mode = true; + ret_code = I40E_ERR_NVM_BLANK_MODE; + i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n"); + } + + return ret_code; +} + +/** + * i40e_acquire_nvm - Generic request for acquiring the NVM ownership + * @hw: pointer to the HW structure + * @access: NVM access type (read or write) + * + * This function will request NVM ownership for reading + * via the proper Admin Command. 
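+ *
+ * If the request fails while another owner still holds the resource,
+ * the function polls the global timer and retries the request every
+ * 10 ms until the current owner's timeout window expires.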
+ **/ +enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw, + enum i40e_aq_resource_access_type access) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + u64 gtime, timeout; + u64 time_left = 0; + + DEBUGFUNC("i40e_acquire_nvm"); + + if (hw->nvm.blank_nvm_mode) + goto i40e_i40e_acquire_nvm_exit; + + ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access, + 0, &time_left, NULL); + /* Reading the Global Device Timer */ + gtime = rd32(hw, I40E_GLVFGEN_TIMER); + + /* Store the timeout */ + hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime; + + if (ret_code) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n", + access, time_left, ret_code, hw->aq.asq_last_status); + + if (ret_code && time_left) { + /* Poll until the current NVM owner timeouts */ + timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime; + while ((gtime < timeout) && time_left) { + i40e_msec_delay(10); + gtime = rd32(hw, I40E_GLVFGEN_TIMER); + ret_code = i40e_aq_request_resource(hw, + I40E_NVM_RESOURCE_ID, + access, 0, &time_left, + NULL); + if (ret_code == I40E_SUCCESS) { + hw->nvm.hw_semaphore_timeout = + I40E_MS_TO_GTIME(time_left) + gtime; + break; + } + } + if (ret_code != I40E_SUCCESS) { + hw->nvm.hw_semaphore_timeout = 0; + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n", + time_left, ret_code, hw->aq.asq_last_status); + } + } + +i40e_i40e_acquire_nvm_exit: + return ret_code; +} + +/** + * i40e_release_nvm - Generic request for releasing the NVM ownership + * @hw: pointer to the HW structure + * + * This function will release NVM resource via the proper Admin Command. + **/ +void i40e_release_nvm(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + u32 total_delay = 0; + + DEBUGFUNC("i40e_release_nvm"); + + if (hw->nvm.blank_nvm_mode) + return; + + ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); + + /* there are some rare cases when trying to release the resource + * results in an admin Q timeout, so handle them correctly + */ + while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) && + (total_delay < hw->aq.asq_cmd_timeout)) { + i40e_msec_delay(1); + ret_code = i40e_aq_release_resource(hw, + I40E_NVM_RESOURCE_ID, 0, NULL); + total_delay++; + } +} + +/** + * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit + * @hw: pointer to the HW structure + * + * Polls the SRCTL Shadow RAM register done bit. + **/ +static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code = I40E_ERR_TIMEOUT; + u32 srctl, wait_cnt; + + DEBUGFUNC("i40e_poll_sr_srctl_done_bit"); + + /* Poll the I40E_GLNVM_SRCTL until the done bit is set */ + for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) { + srctl = rd32(hw, I40E_GLNVM_SRCTL); + if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) { + ret_code = I40E_SUCCESS; + break; + } + i40e_usec_delay(5); + } + if (ret_code == I40E_ERR_TIMEOUT) + i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set"); + return ret_code; +} + +/** + * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. 
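+ *
+ * Sketch of a typical call (hw is assumed to be fully initialised; the
+ * software checksum word is used only as an example offset):
+ *
+ *   u16 sr_checksum;
+ *
+ *   if (i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &sr_checksum))
+ *       return I40E_ERR_NVM;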
+ **/ +enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, + u16 *data) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + +#ifdef X722_SUPPORT + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (!ret_code) { + ret_code = i40e_read_nvm_word_aq(hw, offset, data); + i40e_release_nvm(hw); + } + } else { + ret_code = i40e_read_nvm_word_srctl(hw, offset, data); + } +#else + ret_code = i40e_read_nvm_word_srctl(hw, offset, data); +#endif + return ret_code; +} + +/** + * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. + **/ +enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw, + u16 offset, + u16 *data) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + +#ifdef X722_SUPPORT + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) + ret_code = i40e_read_nvm_word_aq(hw, offset, data); + else + ret_code = i40e_read_nvm_word_srctl(hw, offset, data); +#else + ret_code = i40e_read_nvm_word_srctl(hw, offset, data); +#endif + return ret_code; +} + +/** + * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. + **/ +enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, + u16 *data) +{ + enum i40e_status_code ret_code = I40E_ERR_TIMEOUT; + u32 sr_reg; + + DEBUGFUNC("i40e_read_nvm_word_srctl"); + + if (offset >= hw->nvm.sr_size) { + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM read error: Offset %d beyond Shadow RAM limit %d\n", + offset, hw->nvm.sr_size); + ret_code = I40E_ERR_PARAM; + goto read_nvm_exit; + } + + /* Poll the done bit first */ + ret_code = i40e_poll_sr_srctl_done_bit(hw); + if (ret_code == I40E_SUCCESS) { + /* Write the address and start reading */ + sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) | + BIT(I40E_GLNVM_SRCTL_START_SHIFT); + wr32(hw, I40E_GLNVM_SRCTL, sr_reg); + + /* Poll I40E_GLNVM_SRCTL until the done bit is set */ + ret_code = i40e_poll_sr_srctl_done_bit(hw); + if (ret_code == I40E_SUCCESS) { + sr_reg = rd32(hw, I40E_GLNVM_SRDATA); + *data = (u16)((sr_reg & + I40E_GLNVM_SRDATA_RDDATA_MASK) + >> I40E_GLNVM_SRDATA_RDDATA_SHIFT); + } + } + if (ret_code != I40E_SUCCESS) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM read error: Couldn't access Shadow RAM address: 0x%x\n", + offset); + +read_nvm_exit: + return ret_code; +} + +/** + * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. 
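+ * This AQ-based variant wraps i40e_read_nvm_aq() for a single word and
+ * converts the result from little-endian to host order; callers select it
+ * when I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE is set.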
+ **/ +enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, + u16 *data) +{ + enum i40e_status_code ret_code = I40E_ERR_TIMEOUT; + + DEBUGFUNC("i40e_read_nvm_word_aq"); + + ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true); + *data = LE16_TO_CPU(*(__le16 *)data); + + return ret_code; +} + +/** + * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release. + **/ +enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw, + u16 offset, + u16 *words, u16 *data) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + +#ifdef X722_SUPPORT + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) + ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data); + else + ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data); +#else + ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data); +#endif + return ret_code; +} + +/** + * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acuire lock if necessary + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release. + **/ +enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + +#ifdef X722_SUPPORT + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (!ret_code) { + ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, + data); + i40e_release_nvm(hw); + } + } else { + ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data); + } +#else + ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data); +#endif + return ret_code; +} + +/** + * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release. 
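+ *
+ * Internally this loops over i40e_read_nvm_word_srctl() one word at a
+ * time and, on the first failure, trims the word count down to what was
+ * actually read. Sketch of the in/out semantics of @words (buffer size
+ * chosen arbitrarily for illustration):
+ *
+ *   u16 buf[64];
+ *   u16 nwords = 64;
+ *
+ *   ret_code = i40e_read_nvm_buffer_srctl(hw, offset, &nwords, buf);
+ *   (on return, nwords holds the number of words copied into buf)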
+ **/ +enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + u16 index, word; + + DEBUGFUNC("i40e_read_nvm_buffer_srctl"); + + /* Loop through the selected region */ + for (word = 0; word < *words; word++) { + index = offset + word; + ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]); + if (ret_code != I40E_SUCCESS) + break; + } + + /* Update the number of words read from the Shadow RAM */ + *words = word; + + return ret_code; +} + +/** + * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release. + **/ +enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) +{ + enum i40e_status_code ret_code; + u16 read_size = *words; + bool last_cmd = false; + u16 words_read = 0; + u16 i = 0; + + DEBUGFUNC("i40e_read_nvm_buffer_aq"); + + do { + /* Calculate number of bytes we should read in this step. + * FVL AQ do not allow to read more than one page at a time or + * to cross page boundaries. + */ + if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS) + read_size = min(*words, + (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS - + (offset % I40E_SR_SECTOR_SIZE_IN_WORDS))); + else + read_size = min((*words - words_read), + I40E_SR_SECTOR_SIZE_IN_WORDS); + + /* Check if this is last command, if so set proper flag */ + if ((words_read + read_size) >= *words) + last_cmd = true; + + ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size, + data + words_read, last_cmd); + if (ret_code != I40E_SUCCESS) + goto read_nvm_buffer_aq_exit; + + /* Increment counter for words already read and move offset to + * new read location + */ + words_read += read_size; + offset += read_size; + } while (words_read < *words); + + for (i = 0; i < *words; i++) + data[i] = LE16_TO_CPU(((__le16 *)data)[i]); + +read_nvm_buffer_aq_exit: + *words = words_read; + return ret_code; +} + +/** + * i40e_read_nvm_aq - Read Shadow RAM. + * @hw: pointer to the HW structure. + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: offset in words from module start + * @words: number of words to write + * @data: buffer with words to write to the Shadow RAM + * @last_command: tells the AdminQ that this is the last command + * + * Writes a 16 bit words buffer to the Shadow RAM using the admin command. + **/ +enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 words, void *data, + bool last_command) +{ + enum i40e_status_code ret_code = I40E_ERR_NVM; + struct i40e_asq_cmd_details cmd_details; + + DEBUGFUNC("i40e_read_nvm_aq"); + + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + + /* Here we are checking the SR limit only for the flat memory model. + * We cannot do it for the module-based model, as we did not acquire + * the NVM resource yet (we cannot get the module pointer value). + * Firmware will check the module-based model. 
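+	 * Note that the offset and word count are supplied in Shadow RAM
+	 * words and are converted to byte units (2 * offset, 2 * words) for
+	 * the AQ read issued below.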
+ */ + if ((offset + words) > hw->nvm.sr_size) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write error: offset %d beyond Shadow RAM limit %d\n", + (offset + words), hw->nvm.sr_size); + else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) + /* We can write only up to 4KB (one sector), in one AQ write */ + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write fail error: tried to write %d words, limit is %d.\n", + words, I40E_SR_SECTOR_SIZE_IN_WORDS); + else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) + != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) + /* A single write cannot spread over two sectors */ + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n", + offset, words); + else + ret_code = i40e_aq_read_nvm(hw, module_pointer, + 2 * offset, /*bytes*/ + 2 * words, /*bytes*/ + data, last_command, &cmd_details); + + return ret_code; +} + +/** + * i40e_write_nvm_aq - Writes Shadow RAM. + * @hw: pointer to the HW structure. + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: offset in words from module start + * @words: number of words to write + * @data: buffer with words to write to the Shadow RAM + * @last_command: tells the AdminQ that this is the last command + * + * Writes a 16 bit words buffer to the Shadow RAM using the admin command. + **/ +enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 words, void *data, + bool last_command) +{ + enum i40e_status_code ret_code = I40E_ERR_NVM; + struct i40e_asq_cmd_details cmd_details; + + DEBUGFUNC("i40e_write_nvm_aq"); + + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + + /* Here we are checking the SR limit only for the flat memory model. + * We cannot do it for the module-based model, as we did not acquire + * the NVM resource yet (we cannot get the module pointer value). + * Firmware will check the module-based model. + */ + if ((offset + words) > hw->nvm.sr_size) + DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n"); + else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) + /* We can write only up to 4KB (one sector), in one AQ write */ + DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n"); + else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) + != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) + /* A single write cannot spread over two sectors */ + DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n"); + else + ret_code = i40e_aq_update_nvm(hw, module_pointer, + 2 * offset, /*bytes*/ + 2 * words, /*bytes*/ + data, last_command, &cmd_details); + + return ret_code; +} + +/** + * __i40e_write_nvm_word - Writes Shadow RAM word + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to write + * @data: word to write to the Shadow RAM + * + * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method. + * NVM ownership have to be acquired and released (on ARQ completion event + * reception) by caller. To commit SR to NVM update checksum function + * should be called. 
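+ *
+ * Hedged outline of a single-word update ('word' is assumed to be a u16
+ * holding the value to store; the release normally happens on the ARQ
+ * completion event, as noted above, so it is not shown here):
+ *
+ *   if (i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE) == I40E_SUCCESS) {
+ *       __i40e_write_nvm_word(hw, offset, &word);
+ *       i40e_update_nvm_checksum(hw);
+ *   }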
+ **/ +enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset, + void *data) +{ + DEBUGFUNC("i40e_write_nvm_word"); + + *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data)); + + /* Value 0x00 below means that we treat SR as a flat mem */ + return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, false); +} + +/** + * __i40e_write_nvm_buffer - Writes Shadow RAM buffer + * @hw: pointer to the HW structure + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: offset of the Shadow RAM buffer to write + * @words: number of words to write + * @data: words to write to the Shadow RAM + * + * Writes a 16 bit words buffer to the Shadow RAM using the admin command. + * NVM ownership must be acquired before calling this function and released + * on ARQ completion event reception by caller. To commit SR to NVM update + * checksum function should be called. + **/ +enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw, + u8 module_pointer, u32 offset, + u16 words, void *data) +{ + __le16 *le_word_ptr = (__le16 *)data; + u16 *word_ptr = (u16 *)data; + u32 i = 0; + + DEBUGFUNC("i40e_write_nvm_buffer"); + + for (i = 0; i < words; i++) + le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]); + + /* Here we will only write one buffer as the size of the modules + * mirrored in the Shadow RAM is always less than 4K. + */ + return i40e_write_nvm_aq(hw, module_pointer, offset, words, + data, false); +} + +/** + * i40e_calc_nvm_checksum - Calculates and returns the checksum + * @hw: pointer to hardware structure + * @checksum: pointer to the checksum + * + * This function calculates SW Checksum that covers the whole 64kB shadow RAM + * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD + * is customer specific and unknown. Therefore, this function skips all maximum + * possible size of VPD (1kB). 
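+ *
+ * In other words, with S being the 16-bit sum of every Shadow RAM word
+ * except the checksum word itself, the VPD module and the PCIe ALT
+ * auto-load module, the value reported is:
+ *
+ *   checksum = I40E_SR_SW_CHECKSUM_BASE - S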
+ **/ +enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + struct i40e_virt_mem vmem; + u16 pcie_alt_module = 0; + u16 checksum_local = 0; + u16 vpd_module = 0; + u16 *data; + u16 i = 0; + + DEBUGFUNC("i40e_calc_nvm_checksum"); + + ret_code = i40e_allocate_virt_mem(hw, &vmem, + I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16)); + if (ret_code) + goto i40e_calc_nvm_checksum_exit; + data = (u16 *)vmem.va; + + /* read pointer to VPD area */ + ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, + &vpd_module); + if (ret_code != I40E_SUCCESS) { + ret_code = I40E_ERR_NVM_CHECKSUM; + goto i40e_calc_nvm_checksum_exit; + } + + /* read pointer to PCIe Alt Auto-load module */ + ret_code = __i40e_read_nvm_word(hw, + I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, + &pcie_alt_module); + if (ret_code != I40E_SUCCESS) { + ret_code = I40E_ERR_NVM_CHECKSUM; + goto i40e_calc_nvm_checksum_exit; + } + + /* Calculate SW checksum that covers the whole 64kB shadow RAM + * except the VPD and PCIe ALT Auto-load modules + */ + for (i = 0; i < hw->nvm.sr_size; i++) { + /* Read SR page */ + if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) { + u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS; + + ret_code = __i40e_read_nvm_buffer(hw, i, &words, data); + if (ret_code != I40E_SUCCESS) { + ret_code = I40E_ERR_NVM_CHECKSUM; + goto i40e_calc_nvm_checksum_exit; + } + } + + /* Skip Checksum word */ + if (i == I40E_SR_SW_CHECKSUM_WORD) + continue; + /* Skip VPD module (convert byte size to word count) */ + if ((i >= (u32)vpd_module) && + (i < ((u32)vpd_module + + (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) { + continue; + } + /* Skip PCIe ALT module (convert byte size to word count) */ + if ((i >= (u32)pcie_alt_module) && + (i < ((u32)pcie_alt_module + + (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) { + continue; + } + + checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS]; + } + + *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local; + +i40e_calc_nvm_checksum_exit: + i40e_free_virt_mem(hw, &vmem); + return ret_code; +} + +/** + * i40e_update_nvm_checksum - Updates the NVM checksum + * @hw: pointer to hardware structure + * + * NVM ownership must be acquired before calling this function and released + * on ARQ completion event reception by caller. + * This function will commit SR to NVM. + **/ +enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + u16 checksum; + __le16 le_sum; + + DEBUGFUNC("i40e_update_nvm_checksum"); + + ret_code = i40e_calc_nvm_checksum(hw, &checksum); + le_sum = CPU_TO_LE16(checksum); + if (ret_code == I40E_SUCCESS) + ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD, + 1, &le_sum, true); + + return ret_code; +} + +/** + * i40e_validate_nvm_checksum - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum: calculated checksum + * + * Performs checksum calculation and validates the NVM SW checksum. If the + * caller does not need checksum, the value can be NULL. 
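+ *
+ * Typical call (sketch only; a non-zero return means the stored and the
+ * recalculated checksums differ, or the read itself failed):
+ *
+ *   if (i40e_validate_nvm_checksum(hw, NULL) != I40E_SUCCESS)
+ *       (treat the NVM image as suspect)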
+ **/ +enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw, + u16 *checksum) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; + u16 checksum_sr = 0; + u16 checksum_local = 0; + + DEBUGFUNC("i40e_validate_nvm_checksum"); + + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (!ret_code) { + ret_code = i40e_calc_nvm_checksum(hw, &checksum_local); + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) + i40e_release_nvm(hw); + if (ret_code != I40E_SUCCESS) + goto i40e_validate_nvm_checksum_exit; + } else { + goto i40e_validate_nvm_checksum_exit; + } + + i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr); + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (checksum_local != checksum_sr) + ret_code = I40E_ERR_NVM_CHECKSUM; + + /* If the user cares, return the calculated checksum */ + if (checksum) + *checksum = checksum_local; + +i40e_validate_nvm_checksum_exit: + return ret_code; +} + +STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + int *perrno); +STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + int *perrno); +STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); +STATIC INLINE u8 i40e_nvmupd_get_module(u32 val) +{ + return (u8)(val & I40E_NVM_MOD_PNT_MASK); +} +STATIC INLINE u8 i40e_nvmupd_get_transaction(u32 val) +{ + return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT); +} + +STATIC const char *i40e_nvm_update_state_str[] = { + "I40E_NVMUPD_INVALID", + "I40E_NVMUPD_READ_CON", + "I40E_NVMUPD_READ_SNT", + "I40E_NVMUPD_READ_LCB", + "I40E_NVMUPD_READ_SA", + "I40E_NVMUPD_WRITE_ERA", + "I40E_NVMUPD_WRITE_CON", + "I40E_NVMUPD_WRITE_SNT", + "I40E_NVMUPD_WRITE_LCB", + "I40E_NVMUPD_WRITE_SA", + "I40E_NVMUPD_CSUM_CON", + "I40E_NVMUPD_CSUM_SA", + "I40E_NVMUPD_CSUM_LCB", + "I40E_NVMUPD_STATUS", + "I40E_NVMUPD_EXEC_AQ", + "I40E_NVMUPD_GET_AQ_RESULT", +}; + +/** + * i40e_nvmupd_command - Process an NVM update command + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * Dispatches command depending on what update state is current + **/ +enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + enum i40e_status_code status; + enum i40e_nvmupd_cmd upd_cmd; + + DEBUGFUNC("i40e_nvmupd_command"); + + /* assume success */ + *perrno = 0; + + /* early check for status command and debug msgs */ + upd_cmd = 
i40e_nvmupd_validate_command(hw, cmd, perrno); + + i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n", + i40e_nvm_update_state_str[upd_cmd], + hw->nvmupd_state, + hw->aq.nvm_release_on_done, + cmd->command, cmd->config, cmd->offset, cmd->data_size); + + if (upd_cmd == I40E_NVMUPD_INVALID) { + *perrno = -EFAULT; + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_validate_command returns %d errno %d\n", + upd_cmd, *perrno); + } + + /* a status request returns immediately rather than + * going into the state machine + */ + if (upd_cmd == I40E_NVMUPD_STATUS) { + bytes[0] = hw->nvmupd_state; + return I40E_SUCCESS; + } + + switch (hw->nvmupd_state) { + case I40E_NVMUPD_STATE_INIT: + status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno); + break; + + case I40E_NVMUPD_STATE_READING: + status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno); + break; + + case I40E_NVMUPD_STATE_WRITING: + status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno); + break; + + case I40E_NVMUPD_STATE_INIT_WAIT: + case I40E_NVMUPD_STATE_WRITE_WAIT: + status = I40E_ERR_NOT_READY; + *perrno = -EBUSY; + break; + + default: + /* invalid state, should never happen */ + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: no such state %d\n", hw->nvmupd_state); + status = I40E_NOT_SUPPORTED; + *perrno = -ESRCH; + break; + } + return status; +} + +/** + * i40e_nvmupd_state_init - Handle NVM update state Init + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * Process legitimate commands of the Init state and conditionally set next + * state. Reject all other commands. + **/ +STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + enum i40e_status_code status = I40E_SUCCESS; + enum i40e_nvmupd_cmd upd_cmd; + + DEBUGFUNC("i40e_nvmupd_state_init"); + + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); + + switch (upd_cmd) { + case I40E_NVMUPD_READ_SA: + status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (status) { + *perrno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); + i40e_release_nvm(hw); + } + break; + + case I40E_NVMUPD_READ_SNT: + status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (status) { + *perrno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); + if (status) + i40e_release_nvm(hw); + else + hw->nvmupd_state = I40E_NVMUPD_STATE_READING; + } + break; + + case I40E_NVMUPD_WRITE_ERA: + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); + if (status) { + *perrno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_nvmupd_nvm_erase(hw, cmd, perrno); + if (status) { + i40e_release_nvm(hw); + } else { + hw->aq.nvm_release_on_done = true; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; + } + } + break; + + case I40E_NVMUPD_WRITE_SA: + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); + if (status) { + *perrno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); + if (status) { + i40e_release_nvm(hw); + } else { + hw->aq.nvm_release_on_done = true; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; + } + } + break; + + case I40E_NVMUPD_WRITE_SNT: + status = i40e_acquire_nvm(hw, 
I40E_RESOURCE_WRITE); + if (status) { + *perrno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); + if (status) + i40e_release_nvm(hw); + else + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; + } + break; + + case I40E_NVMUPD_CSUM_SA: + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); + if (status) { + *perrno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_update_nvm_checksum(hw); + if (status) { + *perrno = hw->aq.asq_last_status ? + i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : + -EIO; + i40e_release_nvm(hw); + } else { + hw->aq.nvm_release_on_done = true; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; + } + } + break; + + case I40E_NVMUPD_EXEC_AQ: + status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno); + break; + + case I40E_NVMUPD_GET_AQ_RESULT: + status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno); + break; + + default: + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: bad cmd %s in init state\n", + i40e_nvm_update_state_str[upd_cmd]); + status = I40E_ERR_NVM; + *perrno = -ESRCH; + break; + } + return status; +} + +/** + * i40e_nvmupd_state_reading - Handle NVM update state Reading + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * NVM ownership is already held. Process legitimate commands and set any + * change in state; reject all other commands. + **/ +STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + enum i40e_status_code status = I40E_SUCCESS; + enum i40e_nvmupd_cmd upd_cmd; + + DEBUGFUNC("i40e_nvmupd_state_reading"); + + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); + + switch (upd_cmd) { + case I40E_NVMUPD_READ_SA: + case I40E_NVMUPD_READ_CON: + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); + break; + + case I40E_NVMUPD_READ_LCB: + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); + i40e_release_nvm(hw); + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + break; + + default: + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: bad cmd %s in reading state.\n", + i40e_nvm_update_state_str[upd_cmd]); + status = I40E_NOT_SUPPORTED; + *perrno = -ESRCH; + break; + } + return status; +} + +/** + * i40e_nvmupd_state_writing - Handle NVM update state Writing + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * NVM ownership is already held. Process legitimate commands and set any + * change in state; reject all other commands + **/ +STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + enum i40e_status_code status = I40E_SUCCESS; + enum i40e_nvmupd_cmd upd_cmd; + bool retry_attempt = false; + + DEBUGFUNC("i40e_nvmupd_state_writing"); + + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); + +retry: + switch (upd_cmd) { + case I40E_NVMUPD_WRITE_CON: + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); + if (!status) + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; + break; + + case I40E_NVMUPD_WRITE_LCB: + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); + if (status) { + *perrno = hw->aq.asq_last_status ? 
+ i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : + -EIO; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + } else { + hw->aq.nvm_release_on_done = true; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; + } + break; + + case I40E_NVMUPD_CSUM_CON: + /* Assumes the caller has acquired the nvm */ + status = i40e_update_nvm_checksum(hw); + if (status) { + *perrno = hw->aq.asq_last_status ? + i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : + -EIO; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + } else { + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; + } + break; + + case I40E_NVMUPD_CSUM_LCB: + /* Assumes the caller has acquired the nvm */ + status = i40e_update_nvm_checksum(hw); + if (status) { + *perrno = hw->aq.asq_last_status ? + i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : + -EIO; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + } else { + hw->aq.nvm_release_on_done = true; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; + } + break; + + default: + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: bad cmd %s in writing state.\n", + i40e_nvm_update_state_str[upd_cmd]); + status = I40E_NOT_SUPPORTED; + *perrno = -ESRCH; + break; + } + + /* In some circumstances, a multi-write transaction takes longer + * than the default 3 minute timeout on the write semaphore. If + * the write failed with an EBUSY status, this is likely the problem, + * so here we try to reacquire the semaphore then retry the write. + * We only do one retry, then give up. + */ + if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) && + !retry_attempt) { + enum i40e_status_code old_status = status; + u32 old_asq_status = hw->aq.asq_last_status; + u32 gtime; + + gtime = rd32(hw, I40E_GLVFGEN_TIMER); + if (gtime >= hw->nvm.hw_semaphore_timeout) { + i40e_debug(hw, I40E_DEBUG_ALL, + "NVMUPD: write semaphore expired (%d >= %lld), retrying\n", + gtime, hw->nvm.hw_semaphore_timeout); + i40e_release_nvm(hw); + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); + if (status) { + i40e_debug(hw, I40E_DEBUG_ALL, + "NVMUPD: write semaphore reacquire failed aq_err = %d\n", + hw->aq.asq_last_status); + status = old_status; + hw->aq.asq_last_status = old_asq_status; + } else { + retry_attempt = true; + goto retry; + } + } + } + + return status; +} + +/** + * i40e_nvmupd_validate_command - Validate given command + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @perrno: pointer to return error code + * + * Return one of the valid command types or I40E_NVMUPD_INVALID + **/ +STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + int *perrno) +{ + enum i40e_nvmupd_cmd upd_cmd; + u8 module, transaction; + + DEBUGFUNC("i40e_nvmupd_validate_command\n"); + + /* anything that doesn't match a recognized case is an error */ + upd_cmd = I40E_NVMUPD_INVALID; + + transaction = i40e_nvmupd_get_transaction(cmd->config); + module = i40e_nvmupd_get_module(cmd->config); + + /* limits on data size */ + if ((cmd->data_size < 1) || + (cmd->data_size > I40E_NVMUPD_MAX_DATA)) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_validate_command data_size %d\n", + cmd->data_size); + *perrno = -EFAULT; + return I40E_NVMUPD_INVALID; + } + + switch (cmd->command) { + case I40E_NVM_READ: + switch (transaction) { + case I40E_NVM_CON: + upd_cmd = I40E_NVMUPD_READ_CON; + break; + case I40E_NVM_SNT: + upd_cmd = I40E_NVMUPD_READ_SNT; + break; + case I40E_NVM_LCB: + upd_cmd = I40E_NVMUPD_READ_LCB; + break; + case I40E_NVM_SA: + upd_cmd = I40E_NVMUPD_READ_SA; 
+ break; + case I40E_NVM_EXEC: + if (module == 0xf) + upd_cmd = I40E_NVMUPD_STATUS; + else if (module == 0) + upd_cmd = I40E_NVMUPD_GET_AQ_RESULT; + break; + } + break; + + case I40E_NVM_WRITE: + switch (transaction) { + case I40E_NVM_CON: + upd_cmd = I40E_NVMUPD_WRITE_CON; + break; + case I40E_NVM_SNT: + upd_cmd = I40E_NVMUPD_WRITE_SNT; + break; + case I40E_NVM_LCB: + upd_cmd = I40E_NVMUPD_WRITE_LCB; + break; + case I40E_NVM_SA: + upd_cmd = I40E_NVMUPD_WRITE_SA; + break; + case I40E_NVM_ERA: + upd_cmd = I40E_NVMUPD_WRITE_ERA; + break; + case I40E_NVM_CSUM: + upd_cmd = I40E_NVMUPD_CSUM_CON; + break; + case (I40E_NVM_CSUM|I40E_NVM_SA): + upd_cmd = I40E_NVMUPD_CSUM_SA; + break; + case (I40E_NVM_CSUM|I40E_NVM_LCB): + upd_cmd = I40E_NVMUPD_CSUM_LCB; + break; + case I40E_NVM_EXEC: + if (module == 0) + upd_cmd = I40E_NVMUPD_EXEC_AQ; + break; + } + break; + } + + return upd_cmd; +} + +/** + * i40e_nvmupd_exec_aq - Run an AQ command + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * cmd structure contains identifiers and data buffer + **/ +STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + struct i40e_asq_cmd_details cmd_details; + enum i40e_status_code status; + struct i40e_aq_desc *aq_desc; + u32 buff_size = 0; + u8 *buff = NULL; + u32 aq_desc_len; + u32 aq_data_len; + + i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + + aq_desc_len = sizeof(struct i40e_aq_desc); + memset(&hw->nvm_wb_desc, 0, aq_desc_len); + + /* get the aq descriptor */ + if (cmd->data_size < aq_desc_len) { + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n", + cmd->data_size, aq_desc_len); + *perrno = -EINVAL; + return I40E_ERR_PARAM; + } + aq_desc = (struct i40e_aq_desc *)bytes; + + /* if data buffer needed, make sure it's ready */ + aq_data_len = cmd->data_size - aq_desc_len; + buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen)); + if (buff_size) { + if (!hw->nvm_buff.va) { + status = i40e_allocate_virt_mem(hw, &hw->nvm_buff, + hw->aq.asq_buf_size); + if (status) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n", + status); + } + + if (hw->nvm_buff.va) { + buff = hw->nvm_buff.va; + memcpy(buff, &bytes[aq_desc_len], aq_data_len); + } + } + + /* and away we go! 
*/ + status = i40e_asq_send_command(hw, aq_desc, buff, + buff_size, &cmd_details); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_exec_aq err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } + + return status; +} + +/** + * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * cmd structure contains identifiers and data buffer + **/ +STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + u32 aq_total_len; + u32 aq_desc_len; + int remainder; + u8 *buff; + + i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); + + aq_desc_len = sizeof(struct i40e_aq_desc); + aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen); + + /* check offset range */ + if (cmd->offset > aq_total_len) { + i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n", + __func__, cmd->offset, aq_total_len); + *perrno = -EINVAL; + return I40E_ERR_PARAM; + } + + /* check copylength range */ + if (cmd->data_size > (aq_total_len - cmd->offset)) { + int new_len = aq_total_len - cmd->offset; + + i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n", + __func__, cmd->data_size, new_len); + cmd->data_size = new_len; + } + + remainder = cmd->data_size; + if (cmd->offset < aq_desc_len) { + u32 len = aq_desc_len - cmd->offset; + + len = min(len, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n", + __func__, cmd->offset, cmd->offset + len); + + buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset; + memcpy(bytes, buff, len); + + bytes += len; + remainder -= len; + buff = hw->nvm_buff.va; + } else { + buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len); + } + + if (remainder > 0) { + int start_byte = buff - (u8 *)hw->nvm_buff.va; + + i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n", + __func__, start_byte, start_byte + remainder); + memcpy(bytes, buff, remainder); + } + + return I40E_SUCCESS; +} + +/** + * i40e_nvmupd_nvm_read - Read NVM + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * cmd structure contains identifiers and data buffer + **/ +STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + struct i40e_asq_cmd_details cmd_details; + enum i40e_status_code status; + u8 module, transaction; + bool last; + + transaction = i40e_nvmupd_get_transaction(cmd->config); + module = i40e_nvmupd_get_module(cmd->config); + last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA); + + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + + status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size, + bytes, last, &cmd_details); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n", + module, cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_read status %d aq %d\n", + status, hw->aq.asq_last_status); + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } + + return status; +} + +/** + * i40e_nvmupd_nvm_erase - Erase 
an NVM module + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @perrno: pointer to return error code + * + * module, offset, data_size and data are in cmd structure + **/ +STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + int *perrno) +{ + enum i40e_status_code status = I40E_SUCCESS; + struct i40e_asq_cmd_details cmd_details; + u8 module, transaction; + bool last; + + transaction = i40e_nvmupd_get_transaction(cmd->config); + module = i40e_nvmupd_get_module(cmd->config); + last = (transaction & I40E_NVM_LCB); + + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + + status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size, + last, &cmd_details); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n", + module, cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_erase status %d aq %d\n", + status, hw->aq.asq_last_status); + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } + + return status; +} + +/** + * i40e_nvmupd_nvm_write - Write NVM + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * module, offset, data_size and data are in cmd structure + **/ +STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + enum i40e_status_code status = I40E_SUCCESS; + struct i40e_asq_cmd_details cmd_details; + u8 module, transaction; + bool last; + + transaction = i40e_nvmupd_get_transaction(cmd->config); + module = i40e_nvmupd_get_module(cmd->config); + last = (transaction & I40E_NVM_LCB); + + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + + status = i40e_aq_update_nvm(hw, module, cmd->offset, + (u16)cmd->data_size, bytes, last, + &cmd_details); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n", + module, cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_write status %d aq %d\n", + status, hw->aq.asq_last_status); + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } + + return status; +} diff --git a/drivers/net/i40e/base/i40e_osdep.h b/drivers/net/i40e/base/i40e_osdep.h new file mode 100644 index 00000000..8c84ed82 --- /dev/null +++ b/drivers/net/i40e/base/i40e_osdep.h @@ -0,0 +1,233 @@ +/****************************************************************************** + + Copyright (c) 2001-2015, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. +******************************************************************************/ + +#ifndef _I40E_OSDEP_H_ +#define _I40E_OSDEP_H_ + +#include <string.h> +#include <stdint.h> +#include <stdio.h> +#include <stdarg.h> + +#include <rte_common.h> +#include <rte_memcpy.h> +#include <rte_byteorder.h> +#include <rte_cycles.h> +#include <rte_spinlock.h> +#include <rte_log.h> + +#include "../i40e_logs.h" + +#define INLINE inline +#define STATIC static + +typedef uint8_t u8; +typedef int8_t s8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef int32_t s32; +typedef uint64_t u64; +typedef int bool; + +typedef enum i40e_status_code i40e_status; +#define __iomem +#define hw_dbg(hw, S, A...) do {} while (0) +#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) +#define lower_32_bits(n) ((u32)(n)) +#define low_16_bits(x) ((x) & 0xFFFF) +#define high_16_bits(x) (((x) & 0xFFFF0000) >> 16) + +#ifndef ETH_ADDR_LEN +#define ETH_ADDR_LEN 6 +#endif + +#ifndef __le16 +#define __le16 uint16_t +#endif +#ifndef __le32 +#define __le32 uint32_t +#endif +#ifndef __le64 +#define __le64 uint64_t +#endif +#ifndef __be16 +#define __be16 uint16_t +#endif +#ifndef __be32 +#define __be32 uint32_t +#endif +#ifndef __be64 +#define __be64 uint64_t +#endif + +#define FALSE 0 +#define TRUE 1 +#define false 0 +#define true 1 + +#define min(a,b) RTE_MIN(a,b) +#define max(a,b) RTE_MAX(a,b) + +#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) +#define ASSERT(x) if(!(x)) rte_panic("IXGBE: x") + +#define DEBUGOUT(S) PMD_DRV_LOG_RAW(DEBUG, S) +#define DEBUGOUT1(S, A...) PMD_DRV_LOG_RAW(DEBUG, S, ##A) + +#define DEBUGFUNC(F) DEBUGOUT(F "\n") +#define DEBUGOUT2 DEBUGOUT1 +#define DEBUGOUT3 DEBUGOUT2 +#define DEBUGOUT6 DEBUGOUT3 +#define DEBUGOUT7 DEBUGOUT6 + +#define i40e_debug(h, m, s, ...) \ +do { \ + if (((m) & (h)->debug_mask)) \ + PMD_DRV_LOG_RAW(DEBUG, "i40e %02x.%x " s, \ + (h)->bus.device, (h)->bus.func, \ + ##__VA_ARGS__); \ +} while (0) + +/* AQ commands based interfaces of i40e_read_rx_ctl() and i40e_write_rx_ctl() + * are required for reading/writing below registers, as reading/writing it + * directly may not function correctly if the device is under heavy small + * packet traffic. Note that those interfaces are available from FVL5 and not + * suitable before the AdminQ is ready during initialization. 
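+ * As an illustration (sketch only; 'key_dw' is assumed to hold the RSS key
+ * as 32-bit words), the hash key registers listed below would be
+ * programmed with i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), key_dw[i])
+ * rather than with a direct wr32().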
+ * + * I40E_PFQF_CTL_0 + * I40E_PFQF_HENA + * I40E_PFQF_FDALLOC + * I40E_PFQF_HREGION + * I40E_PFLAN_QALLOC + * I40E_VPQF_CTL + * I40E_VFQF_HENA + * I40E_VFQF_HREGION + * I40E_VSIQF_CTL + * I40E_VSILAN_QBASE + * I40E_VSILAN_QTABLE + * I40E_VSIQF_TCREGION + * I40E_PFQF_HKEY + * I40E_VFQF_HKEY + * I40E_PRTQF_CTL_0 + * I40E_GLFCOE_RCTL + * I40E_GLFCOE_RSOF + * I40E_GLQF_CTL + * I40E_GLQF_SWAP + * I40E_GLQF_HASH_MSK + * I40E_GLQF_HASH_INSET + * I40E_GLQF_HSYM + * I40E_GLQF_FC_MSK + * I40E_GLQF_FC_INSET + * I40E_GLQF_FD_MSK + * I40E_PRTQF_FD_INSET + * I40E_PRTQF_FD_FLXINSET + * I40E_PRTQF_FD_MSK + */ + +#define I40E_PCI_REG(reg) (*((volatile uint32_t *)(reg))) +#define I40E_PCI_REG_ADDR(a, reg) \ + ((volatile uint32_t *)((char *)(a)->hw_addr + (reg))) +static inline uint32_t i40e_read_addr(volatile void *addr) +{ + return rte_le_to_cpu_32(I40E_PCI_REG(addr)); +} +#define I40E_PCI_REG_WRITE(reg, value) \ + do { I40E_PCI_REG((reg)) = rte_cpu_to_le_32(value); } while (0) + +#define I40E_WRITE_FLUSH(a) I40E_READ_REG(a, I40E_GLGEN_STAT) +#define I40EVF_WRITE_FLUSH(a) I40E_READ_REG(a, I40E_VFGEN_RSTAT) + +#define I40E_READ_REG(hw, reg) i40e_read_addr(I40E_PCI_REG_ADDR((hw), (reg))) +#define I40E_WRITE_REG(hw, reg, value) \ + I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((hw), (reg)), (value)) + +#define rd32(a, reg) i40e_read_addr(I40E_PCI_REG_ADDR((a), (reg))) +#define wr32(a, reg, value) \ + I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((a), (reg)), (value)) +#define flush(a) i40e_read_addr(I40E_PCI_REG_ADDR((a), (I40E_GLGEN_STAT))) + +#define ARRAY_SIZE(arr) (sizeof(arr)/sizeof(arr[0])) + +/* memory allocation tracking */ +struct i40e_dma_mem { + void *va; + u64 pa; + u32 size; + const void *zone; +} __attribute__((packed)); + +#define i40e_allocate_dma_mem(h, m, unused, s, a) \ + i40e_allocate_dma_mem_d(h, m, s, a) +#define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m) + +struct i40e_virt_mem { + void *va; + u32 size; +} __attribute__((packed)); + +#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s) +#define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m) + +#define CPU_TO_LE16(o) rte_cpu_to_le_16(o) +#define CPU_TO_LE32(s) rte_cpu_to_le_32(s) +#define CPU_TO_LE64(h) rte_cpu_to_le_64(h) +#define LE16_TO_CPU(a) rte_le_to_cpu_16(a) +#define LE32_TO_CPU(c) rte_le_to_cpu_32(c) +#define LE64_TO_CPU(k) rte_le_to_cpu_64(k) + +/* SW spinlock */ +struct i40e_spinlock { + rte_spinlock_t spinlock; +}; + +#define i40e_init_spinlock(_sp) i40e_init_spinlock_d(_sp) +#define i40e_acquire_spinlock(_sp) i40e_acquire_spinlock_d(_sp) +#define i40e_release_spinlock(_sp) i40e_release_spinlock_d(_sp) +#define i40e_destroy_spinlock(_sp) i40e_destroy_spinlock_d(_sp) + +#define I40E_NTOHS(a) rte_be_to_cpu_16(a) +#define I40E_NTOHL(a) rte_be_to_cpu_32(a) +#define I40E_HTONS(a) rte_cpu_to_be_16(a) +#define I40E_HTONL(a) rte_cpu_to_be_32(a) + +#define i40e_memset(a, b, c, d) memset((a), (b), (c)) +#define i40e_memcpy(a, b, c, d) rte_memcpy((a), (b), (c)) + +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) +#define DELAY(x) rte_delay_us(x) +#define i40e_usec_delay(x) rte_delay_us(x) +#define i40e_msec_delay(x) rte_delay_us(1000*(x)) +#define udelay(x) DELAY(x) +#define msleep(x) DELAY(1000*(x)) +#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) + +#endif /* _I40E_OSDEP_H_ */ diff --git a/drivers/net/i40e/base/i40e_prototype.h b/drivers/net/i40e/base/i40e_prototype.h new file mode 100644 index 00000000..674430de --- /dev/null +++ b/drivers/net/i40e/base/i40e_prototype.h @@ -0,0 +1,551 @@ 
+/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _I40E_PROTOTYPE_H_ +#define _I40E_PROTOTYPE_H_ + +#include "i40e_type.h" +#include "i40e_alloc.h" +#include "i40e_virtchnl.h" + +/* Prototypes for shared code functions that are not in + * the standard function pointer structures. These are + * mostly because they are needed even before the init + * has happened and will assist in the early SW and FW + * setup. 
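+ * For example, i40e_init_adminq() below has to be callable on its own,
+ * before any of the admin queue command wrappers later in this header can
+ * be used.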
+ */ + +/* adminq functions */ +enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw); +enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw); +enum i40e_status_code i40e_init_asq(struct i40e_hw *hw); +enum i40e_status_code i40e_init_arq(struct i40e_hw *hw); +enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw); +enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw); +enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw); +enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw); +u16 i40e_clean_asq(struct i40e_hw *hw); +void i40e_free_adminq_asq(struct i40e_hw *hw); +void i40e_free_adminq_arq(struct i40e_hw *hw); +enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr); +void i40e_adminq_init_ring_data(struct i40e_hw *hw); +enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw, + struct i40e_arq_event_info *e, + u16 *events_pending); +enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw, + struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); +bool i40e_asq_done(struct i40e_hw *hw); + +/* debug function for adminq */ +void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, + void *desc, void *buffer, u16 buf_len); + +void i40e_idle_aq(struct i40e_hw *hw); +void i40e_resume_aq(struct i40e_hw *hw); +bool i40e_check_asq_alive(struct i40e_hw *hw); +enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); +#ifdef X722_SUPPORT + +enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); +enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); +#endif +#ifndef I40E_NDIS_SUPPORT +const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); +const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err); +#endif /* I40E_NDIS_SUPPORT */ + +#ifdef PF_DRIVER + +u32 i40e_led_get(struct i40e_hw *hw); +void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink); +enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on, + u16 led_addr, u32 mode); +enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, + u16 *val); +enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw, + u32 time, u32 interval); + +/* admin send queue commands */ + +enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw, + u16 *fw_major_version, u16 *fw_minor_version, + u32 *fw_build, + u16 *api_major_version, u16 *api_minor_version, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw, + u32 reg_addr, u64 reg_val, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_debug_read_register(struct i40e_hw *hw, + u32 reg_addr, u64 *reg_val, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw, + bool qualified_modules, bool report_init, + struct 
i40e_aq_get_phy_abilities_resp *abilities, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, + struct i40e_aq_set_phy_config *config, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, + bool atomic_reset); +enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw, + u16 max_frame_size, bool crc_en, u16 pacing, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw, + u64 *advt_reg, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw, + u64 *advt_reg, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw, u16 lb_modes, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw, + bool enable_link, struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw, + bool enable_lse, struct i40e_link_status *link, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw, + u64 advt_reg, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw, + struct i40e_driver_version *dv, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, + u16 vsi_id, bool set_filter, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, + u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, + u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, u16 vid, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, u16 vid, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, + u16 seid, bool enable, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, + u16 downlink_seid, u8 enabled_tc, + bool default_port, u16 *pveb_seid, + bool enable_stats, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw, + u16 veb_seid, u16 *switch_id, bool *floating, + u16 *statistic_index, u16 *vebs_used, + u16 *vebs_free, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id, + struct 
i40e_aqc_add_macvlan_element_data *mv_list, + u16 count, struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id, + struct i40e_aqc_remove_macvlan_element_data *mv_list, + u16 count, struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rule_id, u16 *rules_used, u16 *rules_free); +enum i40e_status_code i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rules_used, u16 *rules_free); + +enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id, + struct i40e_aqc_add_remove_vlan_element_data *v_list, + u8 count, struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id, + struct i40e_aqc_add_remove_vlan_element_data *v_list, + u8 count, struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, + u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw, + struct i40e_aqc_get_switch_config_resp *buf, + u16 buf_size, u16 *start_seid, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, + u16 flags, u16 valid_flags, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw, + enum i40e_aq_resources_ids resource, + enum i40e_aq_resource_access_type access, + u8 sdp_number, u64 *timeout, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_release_resource(struct i40e_hw *hw, + enum i40e_aq_resources_ids resource, + u8 sdp_number, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, void *data, + bool last_command, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, bool last_command, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw, + u8 cmd_flags, u32 field_id, void *data, + u16 buf_size, u16 *element_count, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw, + u8 cmd_flags, void *data, u16 buf_size, + u16 element_count, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw, + void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw, + void *buff, u16 buff_size, u16 *data_size, + enum i40e_admin_queue_opc list_type_opc, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, void *data, + bool last_command, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, + u8 mib_type, void *buff, u16 buff_size, + u16 *local_len, u16 *remote_len, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw, + u8 mib_type, 
void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, + bool enable_update, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type, + void *buff, u16 buff_size, u16 tlv_len, + u16 *mib_len, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw, + u8 bridge_type, void *buff, u16 buff_size, + u16 old_len, u16 new_len, u16 offset, + u16 *mib_len, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw, + u8 bridge_type, void *buff, u16 buff_size, + u16 tlv_len, u16 *mib_len, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, + void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw, + bool start_agent, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw, + u16 udp_port, u8 protocol_index, + u8 *filter_index, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw, + u8 *num_entries, + struct i40e_aqc_switch_resource_alloc_element_resp *buf, + u16 count, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags, + u16 mac_seid, u16 vsi_seid, + u16 *ret_seid); +enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue, + u16 vsi_seid, u16 tag, u16 queue_num, + u16 *tags_used, u16 *tags_free, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid, + u16 tag, u16 *tags_used, u16 *tags_free, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pe_seid, + u16 etag, u8 num_tags_in_buf, void *buf, + u16 *tags_used, u16 *tags_free, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pe_seid, + u16 etag, u16 *tags_used, u16 *tags_free, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid, + u16 old_tag, u16 new_tag, u16 *tags_used, + u16 *tags_free, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid, + u16 vlan_id, u16 *stat_index, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid, + u16 vlan_id, u16 stat_index, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_port_parameters(struct i40e_hw *hw, + u16 bad_frame_vsi, bool save_bad_pac, + bool pad_short_pac, bool double_vlan, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw, + u16 flags, 
u8 *mac_addr, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, + u16 seid, u16 credit, u8 max_credit, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw, + u8 tcmap, bool request, u8 *tcmap_ret, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw, + enum i40e_aq_hmc_profile *profile, + u8 *pe_vf_enabled_count, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit( + struct i40e_hw *hw, u16 seid, + struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw, + enum i40e_aq_hmc_profile profile, + u8 pe_vf_enabled_count, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw, + u16 seid, u16 credit, u8 max_bw, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_switching_comp_ets_data *ets_data, + enum i40e_admin_queue_opc opcode, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_vsi_bw_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_query_port_ets_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_port_ets_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw, + struct i40e_lldp_variables *lldp_cfg); +enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw, + u16 vsi, + struct i40e_aqc_add_remove_cloud_filters_element_data *filters, + u8 filter_count); + +enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw, + u16 vsi, + struct i40e_aqc_add_remove_cloud_filters_element_data *filters, + u8 filter_count); + +enum i40e_status_code 
i40e_aq_alternate_read(struct i40e_hw *hw, + u32 reg_addr0, u32 *reg_val0, + u32 reg_addr1, u32 *reg_val1); +enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw, + u32 addr, u32 dw_count, void *buffer); +enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw, + u32 reg_addr0, u32 reg_val0, + u32 reg_addr1, u32 reg_val1); +enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw, + u32 addr, u32 dw_count, void *buffer); +enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw); +enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw, + u8 bios_mode, bool *reset_needed); +enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw, + u8 oem_mode); + +/* i40e_common */ +enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw); +enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw); +void i40e_clear_hw(struct i40e_hw *hw); +void i40e_clear_pxe_mode(struct i40e_hw *hw); +enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up); +enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw); +enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr); +enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw, + u32 *max_bw, u32 *min_bw, bool *min_valid, bool *max_valid); +enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw, + struct i40e_aqc_configure_partition_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr); +enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, + u32 pba_num_size); +void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable); +enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw); +/* prototype for functions used for NVM access */ +enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw); +enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw, + enum i40e_aq_resource_access_type access); +void i40e_release_nvm(struct i40e_hw *hw); +enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, + u16 *data); +enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data); +enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module, + u32 offset, u16 words, void *data, + bool last_command); +enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, + u16 *data); +enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data); +enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset, + void *data); +enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw, u8 module, + u32 offset, u16 words, void *data); +enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum); +enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw); +enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw, + u16 *checksum); +enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *); +void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); +#endif /* PF_DRIVER */ + +#if defined(I40E_QV) || defined(VF_DRIVER) +enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw); + +#endif +extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[]; + +STATIC INLINE struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) +{ + return i40e_ptype_lookup[ptype]; +} + +/* prototype for 
functions used for SW spinlocks */ +void i40e_init_spinlock(struct i40e_spinlock *sp); +void i40e_acquire_spinlock(struct i40e_spinlock *sp); +void i40e_release_spinlock(struct i40e_spinlock *sp); +void i40e_destroy_spinlock(struct i40e_spinlock *sp); + +/* i40e_common for VF drivers*/ +void i40e_vf_parse_hw_config(struct i40e_hw *hw, + struct i40e_virtchnl_vf_resource *msg); +enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw); +enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw, + enum i40e_virtchnl_ops v_opcode, + enum i40e_status_code v_retval, + u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw, + struct i40e_filter_control_settings *settings); +enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, + u8 *mac_addr, u16 ethtype, u16 flags, + u16 vsi_seid, u16 queue, bool is_add, + struct i40e_control_filter_stats *stats, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, + u8 table_id, u32 start_index, u16 buff_size, + void *buff, u16 *ret_buff_size, + u8 *ret_next_table, u32 *ret_next_index, + struct i40e_asq_cmd_details *cmd_details); +void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, + u16 vsi_seid); +enum i40e_status_code i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details); +u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr); +enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details); +void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val); +#ifdef X722_SUPPORT +enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw, + struct i40e_aqc_arp_proxy_data *proxy_config, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw, + struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw, + u8 filter_index, + struct i40e_aqc_set_wol_filter_data *filter, + bool set_filter, bool no_wol_tco, + bool filter_valid, bool no_wol_tco_valid, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw, + u16 *wake_reason, + struct i40e_asq_cmd_details *cmd_details); +#endif +enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw, u8 page, + u16 reg, u8 phy_addr, u16 *value); +enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw, u8 page, + u16 reg, u8 phy_addr, u16 value); +u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); +enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw, + u32 time, u32 interval); +#endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/i40e/base/i40e_register.h b/drivers/net/i40e/base/i40e_register.h new file mode 100644 index 00000000..fd0a7230 --- /dev/null +++ b/drivers/net/i40e/base/i40e_register.h @@ -0,0 +1,5370 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. 
Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _I40E_REGISTER_H_ +#define _I40E_REGISTER_H_ + + +#ifdef PF_DRIVER +#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */ +#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0 +#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT) +#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */ +#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0 +#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT) +#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */ +#define I40E_GL_ARQH_ARQH_SHIFT 0 +#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT) +#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */ +#define I40E_GL_ARQT_ARQT_SHIFT 0 +#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT) +#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */ +#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0 +#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT) +#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */ +#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0 +#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT) +#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */ +#define I40E_GL_ATQH_ATQH_SHIFT 0 +#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT) +#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */ +#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0 +#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT) +#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28 +#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT) +#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29 +#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT) +#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30 +#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT) +#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31 +#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT) +#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */ +#define I40E_GL_ATQT_ATQT_SHIFT 0 +#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT) +#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */ +#define 
I40E_PF_ARQBAH_ARQBAH_SHIFT 0 +#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT) +#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */ +#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0 +#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT) +#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */ +#define I40E_PF_ARQH_ARQH_SHIFT 0 +#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT) +#define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */ +#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0 +#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT) +#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28 +#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT) +#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29 +#define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT) +#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30 +#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT) +#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31 +#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT) +#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */ +#define I40E_PF_ARQT_ARQT_SHIFT 0 +#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT) +#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */ +#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0 +#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT) +#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */ +#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0 +#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT) +#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */ +#define I40E_PF_ATQH_ATQH_SHIFT 0 +#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT) +#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */ +#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0 +#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT) +#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28 +#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT) +#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29 +#define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT) +#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30 +#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT) +#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31 +#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */ +#define I40E_PF_ATQT_ATQT_SHIFT 0 +#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT) +#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQBAH_MAX_INDEX 127 +#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0 +#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT) +#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQBAL_MAX_INDEX 127 +#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0 +#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT) +#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQH_MAX_INDEX 127 +#define I40E_VF_ARQH_ARQH_SHIFT 0 +#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT) +#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQLEN_MAX_INDEX 127 +#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0 +#define 
I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT) +#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28 +#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT) +#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29 +#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT) +#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30 +#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT) +#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31 +#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT) +#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQT_MAX_INDEX 127 +#define I40E_VF_ARQT_ARQT_SHIFT 0 +#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT) +#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQBAH_MAX_INDEX 127 +#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0 +#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT) +#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQBAL_MAX_INDEX 127 +#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0 +#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT) +#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQH_MAX_INDEX 127 +#define I40E_VF_ATQH_ATQH_SHIFT 0 +#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT) +#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQLEN_MAX_INDEX 127 +#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0 +#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT) +#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28 +#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT) +#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29 +#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT) +#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30 +#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT) +#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31 +#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQT_MAX_INDEX 127 +#define I40E_VF_ATQT_ATQT_SHIFT 0 +#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT) +#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */ +#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0 +#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT) +#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */ +#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0 +#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT) +#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4 +#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT) +#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8 +#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT) +#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */ +#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0 +#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4 +#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, 
I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8 +#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16 +#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24 +#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT) +#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */ +#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0 +#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT) +#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12 +#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT) +#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15 +#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT) +#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17 +#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT) +#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3 +#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0 +#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT) +#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */ +#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0 +#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT) +#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1 +#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT) +#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127 +#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0 +#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT) +#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4 +#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT) +#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8 +#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT) +#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127 +#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0 +#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4 +#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8 +#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16 +#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24 +#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT) +#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */ +#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0 +#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT) +#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */ +#define 
I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0 +#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT) +#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */ +#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3 +#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT) +#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */ +#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0 +#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT) +#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */ +#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3 +#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0 +#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT) +#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16 +#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT) +#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */ +#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0 +#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT) +#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2 +#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT) +#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6 +#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT) +#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9 +#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT) +#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16 +#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT) +#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */ +#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0 +#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT) +#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */ +#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0 +#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT) +#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1 +#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT) +#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2 +#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT) +#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3 +#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT) +#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4 +#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT) +#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */ +#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0 +#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT) +#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1 +#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT) +#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2 +#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT) +#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8 +#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT) +#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7 +#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0 +#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) +#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30 +#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) 
+#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31 +#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) +#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */ +#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0 +#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT) +#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8 +#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT) +#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16 +#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT) +#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */ +#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0 +#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT) +#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */ +#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0 +#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3 +#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6 +#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9 +#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12 +#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15 +#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18 +#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 +#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) +#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7 +#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0 +#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT) +#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */ +#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 +#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) +#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7 +#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0 +#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT) +#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */ +#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0 +#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT) +#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13 +#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT) +#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30 +#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT) +#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7 +#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0 +#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT) +#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */ +#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0 +#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT) +#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30 +#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK 
I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT) +#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */ +#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0 +#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT) +#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8 +#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT) +#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */ +#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0 +#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT) +#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8 +#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT) +#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */ +#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0 +#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8 +#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9 +#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10 +#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11 +#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12 +#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13 +#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14 +#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15 +#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT) +#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */ +#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7 +#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0 +#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT) +#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */ +#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0 +#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT) +#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4 +#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT) +#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5 +#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT) +#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16 +#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT) +#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */ +#define I40E_GL_FWSTS_FWS0B_SHIFT 0 +#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT) +#define I40E_GL_FWSTS_FWRI_SHIFT 9 +#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT) +#define I40E_GL_FWSTS_FWS1B_SHIFT 16 +#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */ +#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0 +#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT) +#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4 +#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, 
I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT) +#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8 +#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT) +#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12 +#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT) +#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16 +#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT) +#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20 +#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT) +#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */ +#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29 +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0 +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3 +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4 +#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT) +#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5 +#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT) +#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6 +#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7 +#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) +#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10 +#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT) +#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11 +#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT) +#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12 +#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) +#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17 +#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT) +#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19 +#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 +#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26 +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT) +#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ +#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 +#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) +#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5 +#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT) +#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6 +#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT) +#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */ +#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0 +#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT) +#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */ +#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0 +#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, 
I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT) +#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_I2CCMD_MAX_INDEX 3 +#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0 +#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT) +#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16 +#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT) +#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24 +#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT) +#define I40E_GLGEN_I2CCMD_OP_SHIFT 27 +#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT) +#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28 +#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT) +#define I40E_GLGEN_I2CCMD_R_SHIFT 29 +#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT) +#define I40E_GLGEN_I2CCMD_E_SHIFT 31 +#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT) +#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3 +#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0 +#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT) +#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5 +#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT) +#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8 +#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9 +#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT) +#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10 +#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT) +#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11 +#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT) +#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12 +#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13 +#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14 +#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15 +#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT) +#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31 +#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT) +#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */ +#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0 +#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT) +#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 +#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, 
I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25 +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31 +#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT) +#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_MSCA_MAX_INDEX 3 +#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0 +#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT) +#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16 +#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT) +#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21 +#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT) +#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26 +#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT) +#define I40E_GLGEN_MSCA_STCODE_SHIFT 28 +#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT) +#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30 +#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT) +#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31 +#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) +#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_MSRWD_MAX_INDEX 3 +#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0 +#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT) +#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16 +#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT) +#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */ +#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0 +#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT) +#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16 +#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT) +#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */ +#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0 +#define I40E_GLGEN_RSTAT_DEVSTATE_MASK 
I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT) +#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2 +#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT) +#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4 +#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT) +#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6 +#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT) +#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8 +#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT) +#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10 +#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT) +#define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */ +#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0 +#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) +#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 +#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) +#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */ +#define I40E_GLGEN_RTRIG_CORER_SHIFT 0 +#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) +#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1 +#define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT) +#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2 +#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT) +#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */ +#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0 +#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT) +#define I40E_GLGEN_STAT_DCBEN_SHIFT 2 +#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT) +#define I40E_GLGEN_STAT_VTEN_SHIFT 3 +#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT) +#define I40E_GLGEN_STAT_FCOEN_SHIFT 4 +#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT) +#define I40E_GLGEN_STAT_EVBEN_SHIFT 5 +#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT) +#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6 +#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT) +#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3 +#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0 +#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT) +#define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */ +#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0 +#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT) +#define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */ +#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0 +#define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT) +#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */ +#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0 +#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT) +#define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */ +#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0 +#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT) +#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */ +#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0 +#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT) +#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1 +#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, 
I40E_PFGEN_STATE_PFFCEN_SHIFT) +#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2 +#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT) +#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3 +#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT) +#define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */ +#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0 +#define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT) +#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1 +#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT) +#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2 +#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT) +#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */ +#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0 +#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT) +#define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */ +#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0 +#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT) +#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1 +#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT) +#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFGEN_RSTAT1_MAX_INDEX 127 +#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0 +#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT) +#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127 +#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0 +#define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT) +#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127 +#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0 +#define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT) +#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_VSIGEN_RSTAT_MAX_INDEX 383 +#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0 +#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT) +#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_VSIGEN_RTRIG_MAX_INDEX 383 +#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0 +#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT) +#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15 +#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0 +#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT) +#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15 +#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0 +#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT) +#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */ +#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0 +#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT) +#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* 
_i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15 +#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0 +#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT) +#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15 +#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0 +#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT) +#define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */ +#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0 +#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT) +#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */ +#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0 +#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT) +#define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */ +#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0 +#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT) +#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15 +#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0 +#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT) +#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15 +#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0 +#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT) +#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29 +#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT) +#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */ +#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0 +#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT) +#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */ +#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0 +#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT) +#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15 +#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0 +#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT) +#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15 +#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0 +#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT) +#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */ +#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0 +#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT) +#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */ +#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0 +#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT) +#define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */ +#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0 +#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT) +#define I40E_GLHMC_LANRXBASE(_i) 
(0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15 +#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0 +#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT) +#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15 +#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0 +#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT) +#define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */ +#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0 +#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT) +#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15 +#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0 +#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT) +#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24 +#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT) +#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15 +#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0 +#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT) +#define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */ +#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0 +#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT) +#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15 +#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0 +#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT) +#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_SDPART_MAX_INDEX 15 +#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0 +#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT) +#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16 +#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT) +#define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */ +#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0 +#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT) +#define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */ +#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0 +#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT) +#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7 +#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT) +#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8 +#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT) +#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16 +#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT) +#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31 +#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT) +#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */ +#define 
I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0 +#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT) +#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16 +#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT) +#define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */ +#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0 +#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT) +#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31 +#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT) +#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */ +#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0 +#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT) +#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */ +#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0 +#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT) +#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1 +#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) +#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2 +#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) +#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12 +#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT) +#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */ +#define I40E_GL_GP_FUSE_MAX_INDEX 28 +#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0 +#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT) +#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */ +#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1 +#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT) +#define I40E_GL_UFUSE_NIC_ID_SHIFT 2 +#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT) +#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10 +#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT) +#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11 +#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT) +#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */ +#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0 +#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1 +#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2 +#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3 +#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4 +#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5 +#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6 +#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7 +#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT) +#define 
I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8 +#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9 +#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10 +#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11 +#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12 +#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13 +#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14 +#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15 +#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16 +#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17 +#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18 +#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19 +#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20 +#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21 +#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22 +#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23 +#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24 +#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25 +#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26 +#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27 +#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28 +#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29 +#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT) +#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */ +#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0 +#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT) +#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4 
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT) +#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */ +#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) +#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11 +#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT) +#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT) +#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT) +#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31 +#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT) +#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */ +#define I40E_PFINT_CEQCTL_MAX_INDEX 511 +#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) +#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11 +#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) +#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT) +#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT) +#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT) +#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31 +#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT) +#define I40E_GLINT_CTL 0x0003F800 /* Reset: CORER */ +#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT 0 +#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT) +#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT 1 +#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT) +#define I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT 2 +#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT) +#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */ +#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0 +#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT) +#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1 +#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT) +#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2 +#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT) +#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3 +#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) +#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5 +#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT) +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT) +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25 +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT) +#define 
I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31 +#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT) +#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ +#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511 +#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0 +#define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT) +#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1 +#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT) +#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2 +#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT) +#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3 +#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) +#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5 +#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT) +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) +#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31 +#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT) +#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */ +#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0 +#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1 +#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2 +#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3 +#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4 +#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5 +#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6 +#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7 +#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8 +#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9 +#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10 +#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11 +#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12 +#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13 +#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT) +#define 
I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14 +#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15 +#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16 +#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17 +#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18 +#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19 +#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20 +#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21 +#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22 +#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23 +#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24 +#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25 +#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26 +#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27 +#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28 +#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29 +#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT) +#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */ +#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0 +#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1 +#define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2 +#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3 +#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4 +#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5 +#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6 +#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7 +#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8 +#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT) +#define 
I40E_PFINT_ICR0_ECC_ERR_SHIFT 16 +#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT) +#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19 +#define I40E_PFINT_ICR0_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT) +#define I40E_PFINT_ICR0_GRST_SHIFT 20 +#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT) +#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21 +#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT) +#define I40E_PFINT_ICR0_GPIO_SHIFT 22 +#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT) +#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23 +#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT) +#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24 +#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT) +#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT) +#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26 +#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT) +#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28 +#define I40E_PFINT_ICR0_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT) +#define I40E_PFINT_ICR0_VFLR_SHIFT 29 +#define I40E_PFINT_ICR0_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT) +#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30 +#define I40E_PFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT) +#define I40E_PFINT_ICR0_SWINT_SHIFT 31 +#define I40E_PFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_SWINT_SHIFT) +#define I40E_PFINT_ICR0_ENA 0x00038800 /* Reset: CORER */ +#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16 +#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT) +#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19 +#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT) +#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20 +#define I40E_PFINT_ICR0_ENA_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT) +#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21 +#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT) +#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22 +#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT) +#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23 +#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT) +#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24 +#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT) +#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) +#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26 +#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT) +#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28 +#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT) +#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29 +#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT) +#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30 +#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT) +#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31 +#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, 
I40E_PFINT_ICR0_ENA_RSVD_SHIFT) +#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */ +#define I40E_PFINT_ITR0_MAX_INDEX 2 +#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0 +#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT) +#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */ +#define I40E_PFINT_ITRN_MAX_INDEX 2 +#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0 +#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT) +#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */ +#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0 +#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) +#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11 +#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT) +#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ +#define I40E_PFINT_LNKLSTN_MAX_INDEX 511 +#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0 +#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) +#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11 +#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) +#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */ +#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0 +#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT) +#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6 +#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT) +#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ +#define I40E_PFINT_RATEN_MAX_INDEX 511 +#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0 +#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) +#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 +#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) +#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */ +#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 +#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) +#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QINT_RQCTL_MAX_INDEX 1535 +#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0 +#define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT) +#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11 +#define I40E_QINT_RQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT) +#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_QINT_RQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) +#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) +#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) +#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31 +#define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT) +#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QINT_TQCTL_MAX_INDEX 1535 +#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0 
+#define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT) +#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11 +#define I40E_QINT_TQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT) +#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_QINT_TQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) +#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) +#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT) +#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31 +#define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT) +#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127 +#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT) +#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ +#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511 +#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT) +#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* 
_i=0...127 */ /* Reset: CORER */ +#define I40E_VFINT_ICR0_MAX_INDEX 127 +#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0 +#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1 +#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2 +#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3 +#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4 +#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT) +#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT) +#define I40E_VFINT_ICR0_SWINT_SHIFT 31 +#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT) +#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127 +#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT) +#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31 +#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT) +#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */ +#define I40E_VFINT_ITR0_MAX_INDEX 2 +#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT) +#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */ +#define I40E_VFINT_ITRN_MAX_INDEX 2 +#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) +#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 +#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 +#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) +#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VPINT_AEQCTL_MAX_INDEX 127 +#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) +#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11 +#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT) +#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT) +#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT) +#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31 +#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT) +#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */ +#define I40E_VPINT_CEQCTL_MAX_INDEX 511 +#define 
I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11 +#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT) +#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31 +#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT) +#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPINT_LNKLST0_MAX_INDEX 127 +#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0 +#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) +#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11 +#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT) +#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ +#define I40E_VPINT_LNKLSTN_MAX_INDEX 511 +#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0 +#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) +#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11 +#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) +#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPINT_RATE0_MAX_INDEX 127 +#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0 +#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT) +#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6 +#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT) +#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ +#define I40E_VPINT_RATEN_MAX_INDEX 511 +#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0 +#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT) +#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6 +#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT) +#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */ +#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0 +#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT) +#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1 +#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT) +#define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */ +#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0 +#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT) +#define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */ +#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0 +#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT) +#define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */ +#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0 +#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT) +#define 
I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */ +#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0 +#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */ +#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11 +#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0 +#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16 +#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30 +#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31 +#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT) +#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */ +#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0 +#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) +#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16 +#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT) +#define I40E_PFLAN_QALLOC_VALID_SHIFT 31 +#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT) +#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ +#define I40E_QRX_ENA_MAX_INDEX 1535 +#define I40E_QRX_ENA_QENA_REQ_SHIFT 0 +#define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT) +#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1 +#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT) +#define I40E_QRX_ENA_QENA_STAT_SHIFT 2 +#define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT) +#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QRX_TAIL_MAX_INDEX 1535 +#define I40E_QRX_TAIL_TAIL_SHIFT 0 +#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT) +#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QTX_CTL_MAX_INDEX 1535 +#define I40E_QTX_CTL_PFVF_Q_SHIFT 0 +#define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT) +#define I40E_QTX_CTL_PF_INDX_SHIFT 2 +#define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT) +#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7 +#define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT) +#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ +#define I40E_QTX_ENA_MAX_INDEX 1535 +#define I40E_QTX_ENA_QENA_REQ_SHIFT 0 +#define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT) +#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1 +#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT) +#define I40E_QTX_ENA_QENA_STAT_SHIFT 2 +#define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT) +#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QTX_HEAD_MAX_INDEX 1535 +#define I40E_QTX_HEAD_HEAD_SHIFT 0 +#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT) +#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16 +#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT) +#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ +#define I40E_QTX_TAIL_MAX_INDEX 1535 
+#define I40E_QTX_TAIL_TAIL_SHIFT 0 +#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT) +#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPLAN_MAPENA_MAX_INDEX 127 +#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0 +#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT) +#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */ +#define I40E_VPLAN_QTABLE_MAX_INDEX 15 +#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0 +#define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT) +#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */ +#define I40E_VSILAN_QBASE_MAX_INDEX 383 +#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0 +#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT) +#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11 +#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT) +#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */ +#define I40E_VSILAN_QTABLE_MAX_INDEX 7 +#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0 +#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT) +#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16 +#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) +#define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */ +#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0 +#define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT) +#define I40E_PRTGL_SAH_MFS_SHIFT 16 +#define I40E_PRTGL_SAH_MFS_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT) +#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */ +#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0 +#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, 
I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */ +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2 +#define 
I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */ +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT) +#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */ +#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0 +#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT) +#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */ +#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0 +#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT) +#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10 +#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT) +#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11 +#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT) +#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15 +#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT) +#define 
I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16 +#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT) +#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19 +#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26 +#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27 +#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28 +#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29 +#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT) +#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */ +#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0 +#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT) +#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */ +#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31 +#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0 +#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT) +#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */ +#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0 +#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT) +#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ +#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7 +#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0 +#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT) +#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */ +#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0 +#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT) +#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1 +#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT) +#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17 +#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT) +#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19 +#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT) +#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25 +#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT) +#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26 +#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT) +#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28 +#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT) +#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29 +#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT) +#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ +#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7 +#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0 +#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT) +#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ +#define I40E_PRT_MNG_MDEF_MAX_INDEX 
7 +#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0 +#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4 +#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5 +#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13 +#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17 +#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21 +#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25 +#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26 +#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27 +#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28 +#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29 +#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30 +#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31 +#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ +#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7 +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0 +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4 +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8 +#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24 +#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25 +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26 +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27 +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28 +#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 
29 +#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30 +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31 +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT) +#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3 +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0 +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT) +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16 +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT) +#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_METF_MAX_INDEX 3 +#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0 +#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT) +#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30 +#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT) +#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */ +#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15 +#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0 +#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT) +#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16 +#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT) +#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17 +#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT) +#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18 +#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT) +#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3 +#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0 +#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT) +#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */ +#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15 +#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0 +#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT) +#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_MMAH_MAX_INDEX 3 +#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0 +#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT) +#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_MMAL_MAX_INDEX 3 +#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0 +#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT) +#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */ +#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0 +#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT) +#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */ +#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0 +#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT) +#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1 
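+/*
+ * Illustrative note, not part of the original header: each field above is
+ * described by a *_SHIFT bit position plus a *_MASK value that I40E_MASK()
+ * has already shifted into place, so reading a field is a mask-and-shift.
+ * A minimal sketch, assuming an rd32()-style MMIO read helper:
+ *
+ *     u32 manc = rd32(hw, I40E_PRT_MNG_MANC);
+ *     u32 rcv_all = (manc & I40E_PRT_MNG_MANC_RCV_ALL_MASK) >>
+ *                   I40E_PRT_MNG_MANC_RCV_ALL_SHIFT;
+ */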
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT) +#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2 +#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT) +#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3 +#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4 +#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5 +#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6 +#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7 +#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT) +#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */ +#define I40E_MSIX_PBA_MAX_INDEX 5 +#define I40E_MSIX_PBA_PENBIT_SHIFT 0 +#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT) +#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ +#define I40E_MSIX_TADD_MAX_INDEX 128 +#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0 +#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT) +#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2 +#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT) +#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ +#define I40E_MSIX_TMSG_MAX_INDEX 128 +#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0 +#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT) +#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ +#define I40E_MSIX_TUADD_MAX_INDEX 128 +#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0 +#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT) +#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ +#define I40E_MSIX_TVCTRL_MAX_INDEX 128 +#define I40E_MSIX_TVCTRL_MASK_SHIFT 0 +#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT) +#endif /* PF_DRIVER */ +#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */ +#define I40E_VFMSIX_PBA1_MAX_INDEX 19 +#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0 +#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT) +#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TADD1_MAX_INDEX 639 +#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0 +#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT) +#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2 +#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT) +#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TMSG1_MAX_INDEX 639 +#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0 +#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT) +#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TUADD1_MAX_INDEX 639 +#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0 +#define 
I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT) +#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639 +#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0 +#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT) +#ifdef PF_DRIVER +#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */ +#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0 +#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT) +#define I40E_GLNVM_FLA_FL_CE_SHIFT 1 +#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT) +#define I40E_GLNVM_FLA_FL_SI_SHIFT 2 +#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT) +#define I40E_GLNVM_FLA_FL_SO_SHIFT 3 +#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT) +#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4 +#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT) +#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5 +#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT) +#define I40E_GLNVM_FLA_LOCKED_SHIFT 6 +#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT) +#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18 +#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT) +#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30 +#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT) +#define I40E_GLNVM_FLA_FL_DER_SHIFT 31 +#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT) +#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */ +#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0 +#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT) +#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31 +#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT) +#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */ +#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0 +#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT) +#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5 +#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT) +#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8 +#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT) +#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23 +#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT) +#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25 +#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT) +#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */ +#define I40E_GLNVM_PROTCSR_MAX_INDEX 59 +#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0 +#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT) +#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */ +#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0 +#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT) +#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14 +#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT) +#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29 +#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT) +#define I40E_GLNVM_SRCTL_START_SHIFT 30 +#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT) +#define I40E_GLNVM_SRCTL_DONE_SHIFT 31 +#define 
I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT) +#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */ +#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0 +#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT) +#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16 +#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT) +#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */ +#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0 +#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1 +#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2 +#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3 +#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4 +#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5 +#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6 +#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7 +#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8 +#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9 +#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT) +#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */ +#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0 +#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT) +#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */ +#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0 +#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT) +#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */ +#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0 +#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT) +#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */ +#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0 +#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT) +#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2 +#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3 +#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4 +#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5 +#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6 +#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7 +#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16 +#define 
I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17 +#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18 +#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19 +#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT) +#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20 +#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30 +#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT) +#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31 +#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT) +#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */ +#define I40E_GLPCI_CNF_FLEX10_SHIFT 1 +#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT) +#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2 +#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT) +#define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */ +#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0 +#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT) +#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1 +#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT) +#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2 +#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT) +#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13 +#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT) +#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */ +#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0 +#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT) +#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */ +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK 
I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28 +#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT) +#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */ +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT) +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT) +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT) +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT) +#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */ +#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3 +#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0 +#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT) +#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16 +#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT) +#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */ +#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 +#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 +#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) +#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ +#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 +#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) +#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1 +#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT) +#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3 +#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT) +#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4 +#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT) +#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6 +#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT) +#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10 +#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT) +#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11 +#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT) +#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */ +#define 
I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0 +#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT) +#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6 +#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT) +#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9 +#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT) +#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */ +#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0 +#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT) +#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */ +#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0 +#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT) +#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */ +#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0 +#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT) +#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16 +#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT) +#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */ +#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0 +#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT) +#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16 +#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT) +#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */ +#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0 +#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT) +#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2 +#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5 +#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8 +#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11 +#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14 +#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT) +#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15 +#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT) +#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */ +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0 +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT) +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8 +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT) +#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */ +#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0 +#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT) +#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8 +#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT) +#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16 +#define 
I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT) +#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24 +#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT) +#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */ +#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0 +#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT) +#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */ +#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0 +#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT) +#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */ +#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0 +#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT) +#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */ +#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0 +#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT) +#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */ +#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0 +#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT) +#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */ +#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0 +#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT) +#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */ +#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1 +#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT) +#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */ +#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0 +#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT) +#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */ +#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0 +#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT) +#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1 +#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT) +#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */ +#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9 +#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT) +#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11 +#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT) +#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */ +#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0 +#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT) +#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3 +#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT) +#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8 +#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT) +#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */ +#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0 +#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT) +#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12 +#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT) +#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */ +#define I40E_PF_PCI_CIAD_DATA_SHIFT 0 +#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT) +#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */ +#define 
I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0 +#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT) +#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1 +#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT) +#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2 +#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT) +#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */ +#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2 +#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT) +#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3 +#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT) +#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4 +#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT) +#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5 +#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT) +#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */ +#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0 +#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT) +#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16 +#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT) +#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */ +#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0 +#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT) +#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3 +#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT) +#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */ +#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0 +#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT) +#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1 +#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT) +#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2 +#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT) +#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */ +#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0 +#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT) +#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */ +#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0 +#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT) +#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */ +#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0 +#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT) +#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */ +#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */ +#define I40E_PFPCI_PM_PME_EN_SHIFT 0 +#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT) +#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */ +#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0 +#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT) +#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */ +#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0 +#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT) +#define 
I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16 +#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT) +#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */ +#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: PCIR */ +#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127 +#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */ +#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */ +#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0 +#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT) +#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */ +#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0 +#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT) +#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */ +#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29 +#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT) +#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30 +#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT) +#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31 +#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT) +#define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */ +#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16 +#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT) +#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24 +#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT) +#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26 +#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT) +#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */ +#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31 +#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT) +#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */ +#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0 +#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT) +#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16 +#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT) +#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */ +#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0 +#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT) +#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */ +#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0 +#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT) +#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1 +#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT) +#define I40E_PRTPM_GC_RATD_SHIFT 2 +#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT) +#define I40E_PRTPM_GC_LCDMP_SHIFT 3 +#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT) +#define 
I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31 +#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT) +#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */ +#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0 +#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT) +#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */ +#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0 +#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT) +#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GL_PRS_FVBM_MAX_INDEX 3 +#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT 0 +#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_MASK I40E_MASK(0x7F, I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT) +#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT 8 +#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_MASK I40E_MASK(0x3F, I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT) +#define I40E_GL_PRS_FVBM_MSK_ENA_SHIFT 31 +#define I40E_GL_PRS_FVBM_MSK_ENA_MASK I40E_MASK(0x1, I40E_GL_PRS_FVBM_MSK_ENA_SHIFT) +#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */ +#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0 +#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT) +#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */ +#define I40E_GLRPB_GHW_GHW_SHIFT 0 +#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT) +#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */ +#define I40E_GLRPB_GLW_GLW_SHIFT 0 +#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT) +#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */ +#define I40E_GLRPB_PHW_PHW_SHIFT 0 +#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT) +#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */ +#define I40E_GLRPB_PLW_PLW_SHIFT 0 +#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT) +#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_DHW_MAX_INDEX 7 +#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0 +#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT) +#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_DLW_MAX_INDEX 7 +#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0 +#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT) +#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_DPS_MAX_INDEX 7 +#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0 +#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT) +#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_SHT_MAX_INDEX 7 +#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0 +#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT) +#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */ +#define I40E_PRTRPB_SHW_SHW_SHIFT 0 +#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT) +#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_SLT_MAX_INDEX 7 +#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0 +#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT) +#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */ +#define I40E_PRTRPB_SLW_SLW_SHIFT 0 +#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT) +#define 
I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */ +#define I40E_PRTRPB_SPS_SPS_SHIFT 0 +#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT) +#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */ +#define I40E_GLQF_CTL_HTOEP_SHIFT 1 +#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT) +#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2 +#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT) +#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3 +#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT) +#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6 +#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT) +#define I40E_GLQF_CTL_RSVD_SHIFT 7 +#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT) +#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8 +#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT) +#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11 +#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT) +#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14 +#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT) +#define I40E_GLQF_CTL_FDBEST_SHIFT 17 +#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT) +#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25 +#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT) +#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26 +#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT) +#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27 +#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT) +#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */ +#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0 +#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT) +#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13 +#define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT) +#define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ +#define I40E_GLQF_HKEY_MAX_INDEX 12 +#define I40E_GLQF_HKEY_KEY_0_SHIFT 0 +#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT) +#define I40E_GLQF_HKEY_KEY_1_SHIFT 8 +#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT) +#define I40E_GLQF_HKEY_KEY_2_SHIFT 16 +#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT) +#define I40E_GLQF_HKEY_KEY_3_SHIFT 24 +#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT) +#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_HSYM_MAX_INDEX 63 +#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0 +#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT) +#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */ +#define I40E_GLQF_PCNT_MAX_INDEX 511 +#define I40E_GLQF_PCNT_PCNT_SHIFT 0 +#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT) +#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_SWAP_MAX_INDEX 1 +#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0 +#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT) +#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6 +#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT) +#define 
I40E_GLQF_SWAP_FLEN0_SHIFT 12 +#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT) +#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16 +#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT) +#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22 +#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT) +#define I40E_GLQF_SWAP_FLEN1_SHIFT 28 +#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT) +#define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */ +#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0 +#define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT) +#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5 +#define I40E_PFQF_CTL_0_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT) +#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10 +#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) +#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14 +#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) +#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16 +#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) +#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17 +#define I40E_PFQF_CTL_0_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT) +#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18 +#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT) +#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19 +#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT) +#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20 +#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT) +#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24 +#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT) +#define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */ +#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0 +#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT) +#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */ +#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0 +#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT) +#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8 +#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT) +#define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */ +#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0 +#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT) +#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16 +#define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT) +#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */ +#define I40E_PFQF_HENA_MAX_INDEX 1 +#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0 +#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT) +#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */ +#define I40E_PFQF_HKEY_MAX_INDEX 12 +#define I40E_PFQF_HKEY_KEY_0_SHIFT 0 +#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT) +#define I40E_PFQF_HKEY_KEY_1_SHIFT 8 +#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT) +#define I40E_PFQF_HKEY_KEY_2_SHIFT 16 +#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT) +#define I40E_PFQF_HKEY_KEY_3_SHIFT 24 +#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, 
I40E_PFQF_HKEY_KEY_3_SHIFT) +#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_PFQF_HLUT_MAX_INDEX 127 +#define I40E_PFQF_HLUT_LUT0_SHIFT 0 +#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT) +#define I40E_PFQF_HLUT_LUT1_SHIFT 8 +#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT) +#define I40E_PFQF_HLUT_LUT2_SHIFT 16 +#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT) +#define I40E_PFQF_HLUT_LUT3_SHIFT 24 +#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT) +#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */ +#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0 +#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT) +#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */ +#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63 +#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0 +#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) +#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */ +#define I40E_PRTQF_FD_INSET_MAX_INDEX 63 +#define I40E_PRTQF_FD_INSET_INSET_SHIFT 0 +#define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT) +#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */ +#define I40E_PRTQF_FD_INSET_MAX_INDEX 63 +#define I40E_PRTQF_FD_INSET_INSET_SHIFT 0 +#define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT) +#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */ +#define I40E_PRTQF_FD_MSK_MAX_INDEX 63 +#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0 +#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT) +#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16 +#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT) +#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */ +#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8 +#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0 +#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) +#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5 +#define I40E_PRTQF_FLX_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) +#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10 +#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) +#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */ +#define I40E_VFQF_HENA1_MAX_INDEX 1 +#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0 +#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT) +#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */ +#define I40E_VFQF_HKEY1_MAX_INDEX 12 +#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0 +#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT) +#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8 +#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT) +#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16 +#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT) +#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24 +#define I40E_VFQF_HKEY1_KEY_3_MASK 
I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT) +#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */ +#define I40E_VFQF_HLUT1_MAX_INDEX 15 +#define I40E_VFQF_HLUT1_LUT0_SHIFT 0 +#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT) +#define I40E_VFQF_HLUT1_LUT1_SHIFT 8 +#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT) +#define I40E_VFQF_HLUT1_LUT2_SHIFT 16 +#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT) +#define I40E_VFQF_HLUT1_LUT3_SHIFT 24 +#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT) +#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */ +#define I40E_VFQF_HREGION1_MAX_INDEX 7 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT) +#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1 +#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT) +#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5 +#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT) +#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9 +#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT) +#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13 +#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT) +#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17 +#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT) +#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21 +#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT) +#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25 +#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT) +#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29 +#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT) +#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPQF_CTL_MAX_INDEX 127 +#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0 +#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT) +#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5 +#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT) 
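+/*
+ * Illustrative note, not part of the original header: per-VF registers such
+ * as I40E_VPQF_CTL(_VF) are arrays indexed by function, and a field is
+ * updated read-modify-write style by clearing its *_MASK and OR-ing in the
+ * new value shifted by *_SHIFT. A minimal sketch, assuming rd32()/wr32()-style
+ * MMIO helpers and hypothetical vf_id/pehsize values:
+ *
+ *     u32 ctl = rd32(hw, I40E_VPQF_CTL(vf_id));
+ *     ctl &= ~I40E_VPQF_CTL_PEHSIZE_MASK;
+ *     ctl |= (pehsize << I40E_VPQF_CTL_PEHSIZE_SHIFT) &
+ *            I40E_VPQF_CTL_PEHSIZE_MASK;
+ *     wr32(hw, I40E_VPQF_CTL(vf_id), ctl);
+ */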
+#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10 +#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT) +#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14 +#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT) +#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */ +#define I40E_VSIQF_CTL_MAX_INDEX 383 +#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0 +#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT) +#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1 +#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2 +#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3 +#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4 +#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5 +#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT) +#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */ +#define I40E_VSIQF_TCREGION_MAX_INDEX 3 +#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0 +#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT) +#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9 +#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT) +#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16 +#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT) +#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25 +#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT) +#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOECRC_MAX_INDEX 143 +#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0 +#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT) +#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDDPC_MAX_INDEX 143 +#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0 +#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT) +#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDIFEC_MAX_INDEX 143 +#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0 +#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT) +#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143 +#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0 +#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT) +#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDIXEC_MAX_INDEX 143 +#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0 +#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT) +#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDIXVC_MAX_INDEX 143 +#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0 +#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT) +#define 
I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDWRCH_MAX_INDEX 143 +#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0 +#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT) +#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDWRCL_MAX_INDEX 143 +#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0 +#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT) +#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDWTCH_MAX_INDEX 143 +#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0 +#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT) +#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDWTCL_MAX_INDEX 143 +#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0 +#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT) +#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOELAST_MAX_INDEX 143 +#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0 +#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT) +#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEPRC_MAX_INDEX 143 +#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0 +#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT) +#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEPTC_MAX_INDEX 143 +#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0 +#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT) +#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOERPDC_MAX_INDEX 143 +#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0 +#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT) +#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_RXERR1_L_MAX_INDEX 143 +#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0 +#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT) +#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_RXERR2_L_MAX_INDEX 143 +#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0 +#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT) +#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_BPRCH_MAX_INDEX 3 +#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT) +#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_BPRCL_MAX_INDEX 3 +#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT) +#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_BPTCH_MAX_INDEX 3 +#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT) +#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER 
*/ +#define I40E_GLPRT_BPTCL_MAX_INDEX 3 +#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT) +#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_CRCERRS_MAX_INDEX 3 +#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0 +#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT) +#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_GORCH_MAX_INDEX 3 +#define I40E_GLPRT_GORCH_GORCH_SHIFT 0 +#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT) +#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_GORCL_MAX_INDEX 3 +#define I40E_GLPRT_GORCL_GORCL_SHIFT 0 +#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT) +#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_GOTCH_MAX_INDEX 3 +#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0 +#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT) +#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_GOTCL_MAX_INDEX 3 +#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0 +#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT) +#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_ILLERRC_MAX_INDEX 3 +#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0 +#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT) +#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LDPC_MAX_INDEX 3 +#define I40E_GLPRT_LDPC_LDPC_SHIFT 0 +#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT) +#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3 +#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0 +#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT) +#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3 +#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0 +#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT) +#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LXONRXC_MAX_INDEX 3 +#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0 +#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT) +#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LXONTXC_MAX_INDEX 3 +#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0 +#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT) +#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MLFC_MAX_INDEX 3 +#define I40E_GLPRT_MLFC_MLFC_SHIFT 0 +#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT) +#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MPRCH_MAX_INDEX 3 +#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0 +#define 
I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT) +#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MPRCL_MAX_INDEX 3 +#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0 +#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT) +#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MPTCH_MAX_INDEX 3 +#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0 +#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT) +#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MPTCL_MAX_INDEX 3 +#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0 +#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT) +#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MRFC_MAX_INDEX 3 +#define I40E_GLPRT_MRFC_MRFC_SHIFT 0 +#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT) +#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC1023H_MAX_INDEX 3 +#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0 +#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT) +#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC1023L_MAX_INDEX 3 +#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0 +#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT) +#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC127H_MAX_INDEX 3 +#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0 +#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT) +#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC127L_MAX_INDEX 3 +#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0 +#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT) +#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC1522H_MAX_INDEX 3 +#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0 +#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT) +#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC1522L_MAX_INDEX 3 +#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0 +#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT) +#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC255H_MAX_INDEX 3 +#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0 +#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT) +#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC255L_MAX_INDEX 3 +#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0 +#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT) +#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC511H_MAX_INDEX 3 +#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0 +#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, 
I40E_GLPRT_PRC511H_PRC511H_SHIFT) +#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC511L_MAX_INDEX 3 +#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0 +#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT) +#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC64H_MAX_INDEX 3 +#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0 +#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT) +#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC64L_MAX_INDEX 3 +#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0 +#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT) +#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC9522H_MAX_INDEX 3 +#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0 +#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT) +#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC9522L_MAX_INDEX 3 +#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0 +#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT) +#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC1023H_MAX_INDEX 3 +#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0 +#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT) +#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC1023L_MAX_INDEX 3 +#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0 +#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT) +#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC127H_MAX_INDEX 3 +#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0 +#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT) +#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC127L_MAX_INDEX 3 +#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0 +#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT) +#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC1522H_MAX_INDEX 3 +#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0 +#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT) +#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC1522L_MAX_INDEX 3 +#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0 +#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT) +#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC255H_MAX_INDEX 3 +#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0 +#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT) +#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC255L_MAX_INDEX 3 +#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0 +#define I40E_GLPRT_PTC255L_PTC255L_MASK 
I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT) +#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC511H_MAX_INDEX 3 +#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0 +#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT) +#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC511L_MAX_INDEX 3 +#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0 +#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT) +#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC64H_MAX_INDEX 3 +#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0 +#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT) +#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC64L_MAX_INDEX 3 +#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0 +#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT) +#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC9522H_MAX_INDEX 3 +#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0 +#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT) +#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC9522L_MAX_INDEX 3 +#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0 +#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT) +#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3 +#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0 +#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT) +#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3 +#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0 +#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT) +#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_PXONRXC_MAX_INDEX 3 +#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0 +#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT) +#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_PXONTXC_MAX_INDEX 3 +#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0 +#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT) +#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RDPC_MAX_INDEX 3 +#define I40E_GLPRT_RDPC_RDPC_SHIFT 0 +#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT) +#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RFC_MAX_INDEX 3 +#define I40E_GLPRT_RFC_RFC_SHIFT 0 +#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT) +#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RJC_MAX_INDEX 3 
+#define I40E_GLPRT_RJC_RJC_SHIFT 0 +#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT) +#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RLEC_MAX_INDEX 3 +#define I40E_GLPRT_RLEC_RLEC_SHIFT 0 +#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT) +#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_ROC_MAX_INDEX 3 +#define I40E_GLPRT_ROC_ROC_SHIFT 0 +#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT) +#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RUC_MAX_INDEX 3 +#define I40E_GLPRT_RUC_RUC_SHIFT 0 +#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT) +#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RUPP_MAX_INDEX 3 +#define I40E_GLPRT_RUPP_RUPP_SHIFT 0 +#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT) +#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3 +#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0 +#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT) +#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_TDOLD_MAX_INDEX 3 +#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0 +#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT) +#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_UPRCH_MAX_INDEX 3 +#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0 +#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT) +#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_UPRCL_MAX_INDEX 3 +#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0 +#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT) +#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_UPTCH_MAX_INDEX 3 +#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0 +#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT) +#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_UPTCL_MAX_INDEX 3 +#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0 +#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT) +#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_BPRCH_MAX_INDEX 15 +#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT) +#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_BPRCL_MAX_INDEX 15 +#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT) +#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_BPTCH_MAX_INDEX 15 +#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT) +#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 
*/ /* Reset: CORER */ +#define I40E_GLSW_BPTCL_MAX_INDEX 15 +#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT) +#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_GORCH_MAX_INDEX 15 +#define I40E_GLSW_GORCH_GORCH_SHIFT 0 +#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT) +#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_GORCL_MAX_INDEX 15 +#define I40E_GLSW_GORCL_GORCL_SHIFT 0 +#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT) +#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_GOTCH_MAX_INDEX 15 +#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0 +#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT) +#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_GOTCL_MAX_INDEX 15 +#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0 +#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT) +#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_MPRCH_MAX_INDEX 15 +#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0 +#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT) +#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_MPRCL_MAX_INDEX 15 +#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0 +#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT) +#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_MPTCH_MAX_INDEX 15 +#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0 +#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT) +#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_MPTCL_MAX_INDEX 15 +#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0 +#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT) +#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_RUPP_MAX_INDEX 15 +#define I40E_GLSW_RUPP_RUPP_SHIFT 0 +#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT) +#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_TDPC_MAX_INDEX 15 +#define I40E_GLSW_TDPC_TDPC_SHIFT 0 +#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT) +#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_UPRCH_MAX_INDEX 15 +#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0 +#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT) +#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_UPRCL_MAX_INDEX 15 +#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0 +#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT) +#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_UPTCH_MAX_INDEX 15 +#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0 +#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT) +#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define 
I40E_GLSW_UPTCL_MAX_INDEX 15 +#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0 +#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT) +#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_BPRCH_MAX_INDEX 383 +#define I40E_GLV_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT) +#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_BPRCL_MAX_INDEX 383 +#define I40E_GLV_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT) +#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_BPTCH_MAX_INDEX 383 +#define I40E_GLV_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT) +#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_BPTCL_MAX_INDEX 383 +#define I40E_GLV_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT) +#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_GORCH_MAX_INDEX 383 +#define I40E_GLV_GORCH_GORCH_SHIFT 0 +#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT) +#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_GORCL_MAX_INDEX 383 +#define I40E_GLV_GORCL_GORCL_SHIFT 0 +#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT) +#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_GOTCH_MAX_INDEX 383 +#define I40E_GLV_GOTCH_GOTCH_SHIFT 0 +#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT) +#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_GOTCL_MAX_INDEX 383 +#define I40E_GLV_GOTCL_GOTCL_SHIFT 0 +#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT) +#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_MPRCH_MAX_INDEX 383 +#define I40E_GLV_MPRCH_MPRCH_SHIFT 0 +#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT) +#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_MPRCL_MAX_INDEX 383 +#define I40E_GLV_MPRCL_MPRCL_SHIFT 0 +#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT) +#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_MPTCH_MAX_INDEX 383 +#define I40E_GLV_MPTCH_MPTCH_SHIFT 0 +#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT) +#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_MPTCL_MAX_INDEX 383 +#define I40E_GLV_MPTCL_MPTCL_SHIFT 0 +#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT) +#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_RDPC_MAX_INDEX 383 +#define I40E_GLV_RDPC_RDPC_SHIFT 0 +#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT) +#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_RUPP_MAX_INDEX 383 +#define I40E_GLV_RUPP_RUPP_SHIFT 0 
+#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT) +#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_TEPC_MAX_INDEX 383 +#define I40E_GLV_TEPC_TEPC_SHIFT 0 +#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT) +#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_UPRCH_MAX_INDEX 383 +#define I40E_GLV_UPRCH_UPRCH_SHIFT 0 +#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT) +#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_UPRCL_MAX_INDEX 383 +#define I40E_GLV_UPRCL_UPRCL_SHIFT 0 +#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT) +#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_UPTCH_MAX_INDEX 383 +#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0 +#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT) +#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_UPTCL_MAX_INDEX 383 +#define I40E_GLV_UPTCL_UPTCL_SHIFT 0 +#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT) +#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_RBCH_MAX_INDEX 7 +#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0 +#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT) +#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_RBCL_MAX_INDEX 7 +#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0 +#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT) +#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_RPCH_MAX_INDEX 7 +#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0 +#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT) +#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_RPCL_MAX_INDEX 7 +#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0 +#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT) +#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_TBCH_MAX_INDEX 7 +#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0 +#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT) +#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_TBCL_MAX_INDEX 7 +#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0 +#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT) +#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_TPCH_MAX_INDEX 7 +#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0 +#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT) +#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_TPCL_MAX_INDEX 7 +#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0 +#define 
I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT) +#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_BPCH_MAX_INDEX 127 +#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0 +#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT) +#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_BPCL_MAX_INDEX 127 +#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0 +#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT) +#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_GORCH_MAX_INDEX 127 +#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0 +#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT) +#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_GORCL_MAX_INDEX 127 +#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0 +#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT) +#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127 +#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0 +#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT) +#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127 +#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0 +#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT) +#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_MPCH_MAX_INDEX 127 +#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0 +#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT) +#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_MPCL_MAX_INDEX 127 +#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0 +#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT) +#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_UPCH_MAX_INDEX 127 +#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0 +#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT) +#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_UPCL_MAX_INDEX 127 +#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0 +#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT) +#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */ +#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0 +#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT) +#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */ +#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35 +#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0 +#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT) +#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ +#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1 +#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0 +#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, 
I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT) +#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */ +#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0 +#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT) +#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31 +#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT) +#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1 +#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0 +#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT) +#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1 +#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT) +#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3 +#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT) +#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8 +#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT) +#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16 +#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT) +#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1 +#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0 +#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT) +#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1 +#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT) +#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_CLKO_MAX_INDEX 1 +#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0 +#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT) +#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */ +#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0 +#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT) +#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1 +#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT) +#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2 +#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT) +#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3 +#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT) +#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8 +#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT) +#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12 +#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT) +#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31 +#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT) +#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */ +#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0 +#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT) +#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8 +#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT) +#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16 +#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT) +#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20 +#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT) +#define 
I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24 +#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) +#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26 +#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT) +#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31 +#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT) +#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1 +#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0 +#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT) +#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1 +#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0 +#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT) +#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */ +#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0 +#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT) +#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */ +#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0 +#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT) +#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3 +#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0 +#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT) +#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3 +#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0 +#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT) +#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */ +#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0 +#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT) +#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1 +#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT) +#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2 +#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT) +#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3 +#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT) +#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4 +#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT) +#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */ +#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0 +#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT) +#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1 +#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT) +#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2 +#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT) +#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3 +#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT) +#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1 +#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0 +#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT) +#define 
I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1 +#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0 +#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT) +#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */ +#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0 +#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT) +#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */ +#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0 +#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT) +#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */ +#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0 +#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT) +#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ +#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 +#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) +#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ +#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 +#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) +#define I40E_GL_MDET_RX_EVENT_SHIFT 8 +#define I40E_GL_MDET_RX_EVENT_MASK I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT) +#define I40E_GL_MDET_RX_QUEUE_SHIFT 17 +#define I40E_GL_MDET_RX_QUEUE_MASK I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT) +#define I40E_GL_MDET_RX_VALID_SHIFT 31 +#define I40E_GL_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT) +#define I40E_GL_MDET_TX 0x000E6480 /* Reset: CORER */ +#define I40E_GL_MDET_TX_QUEUE_SHIFT 0 +#define I40E_GL_MDET_TX_QUEUE_MASK I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT) +#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12 +#define I40E_GL_MDET_TX_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT) +#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21 +#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT) +#define I40E_GL_MDET_TX_EVENT_SHIFT 25 +#define I40E_GL_MDET_TX_EVENT_MASK I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT) +#define I40E_GL_MDET_TX_VALID_SHIFT 31 +#define I40E_GL_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT) +#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */ +#define I40E_PF_MDET_RX_VALID_SHIFT 0 +#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT) +#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */ +#define I40E_PF_MDET_TX_VALID_SHIFT 0 +#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT) +#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */ +#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0 +#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT) +#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8 +#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT) +#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31 +#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT) +#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VP_MDET_RX_MAX_INDEX 127 +#define I40E_VP_MDET_RX_VALID_SHIFT 0 +#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT) +#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VP_MDET_TX_MAX_INDEX 127 +#define 
I40E_VP_MDET_TX_VALID_SHIFT 0 +#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT) +#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */ +#define I40E_GLPM_WUMC_NOTCO_SHIFT 0 +#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT) +#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1 +#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT) +#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2 +#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT) +#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3 +#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT) +#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16 +#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT) +#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */ +#define I40E_PFPM_APM_APME_SHIFT 0 +#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT) +#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */ +#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7 +#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0 +#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT) +#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */ +#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5 +#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT) +#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */ +#define I40E_PFPM_WUFC_LNKC_SHIFT 0 +#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT) +#define I40E_PFPM_WUFC_MAG_SHIFT 1 +#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT) +#define I40E_PFPM_WUFC_MNG_SHIFT 3 +#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT) +#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4 +#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5 +#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6 +#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7 +#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8 +#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9 +#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10 +#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11 +#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX0_SHIFT 16 +#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT) +#define I40E_PFPM_WUFC_FLX1_SHIFT 17 +#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT) +#define I40E_PFPM_WUFC_FLX2_SHIFT 18 +#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT) +#define I40E_PFPM_WUFC_FLX3_SHIFT 19 +#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT) +#define I40E_PFPM_WUFC_FLX4_SHIFT 20 +#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT) +#define I40E_PFPM_WUFC_FLX5_SHIFT 21 +#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT) +#define I40E_PFPM_WUFC_FLX6_SHIFT 22 +#define 
I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT) +#define I40E_PFPM_WUFC_FLX7_SHIFT 23 +#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT) +#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31 +#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT) +#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */ +#define I40E_PFPM_WUS_LNKC_SHIFT 0 +#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT) +#define I40E_PFPM_WUS_MAG_SHIFT 1 +#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT) +#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2 +#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT) +#define I40E_PFPM_WUS_MNG_SHIFT 3 +#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT) +#define I40E_PFPM_WUS_FLX0_SHIFT 16 +#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT) +#define I40E_PFPM_WUS_FLX1_SHIFT 17 +#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT) +#define I40E_PFPM_WUS_FLX2_SHIFT 18 +#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT) +#define I40E_PFPM_WUS_FLX3_SHIFT 19 +#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT) +#define I40E_PFPM_WUS_FLX4_SHIFT 20 +#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT) +#define I40E_PFPM_WUS_FLX5_SHIFT 21 +#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT) +#define I40E_PFPM_WUS_FLX6_SHIFT 22 +#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT) +#define I40E_PFPM_WUS_FLX7_SHIFT 23 +#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT) +#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31 +#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT) +#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */ +#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0 +#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT) +#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1 +#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT) +#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */ +#define I40E_PRTPM_SAH_MAX_INDEX 3 +#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0 +#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT) +#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26 +#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT) +#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30 +#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT) +#define I40E_PRTPM_SAH_AV_SHIFT 31 +#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT) +#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */ +#define I40E_PRTPM_SAL_MAX_INDEX 3 +#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0 +#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT) +#endif /* PF_DRIVER */ +#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */ +#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0 +#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT) +#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */ +#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0 +#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT) +#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */ +#define I40E_VF_ARQH1_ARQH_SHIFT 0 +#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, 
I40E_VF_ARQH1_ARQH_SHIFT) +#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */ +#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0 +#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT) +#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28 +#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT) +#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29 +#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT) +#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30 +#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT) +#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31 +#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) +#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ +#define I40E_VF_ARQT1_ARQT_SHIFT 0 +#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT) +#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */ +#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0 +#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT) +#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */ +#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0 +#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT) +#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */ +#define I40E_VF_ATQH1_ATQH_SHIFT 0 +#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT) +#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */ +#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0 +#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT) +#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28 +#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT) +#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29 +#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT) +#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30 +#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT) +#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31 +#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) +#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ +#define I40E_VF_ATQT1_ATQT_SHIFT 0 +#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT) +#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */ +#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0 +#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT) +#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */ +#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT) +#define 
I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT) +#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */ +#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15 +#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT) +#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */ +#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT) +#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31 +#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT) +#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */ +#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0 +#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1 +#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2 +#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3 +#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4 +#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT) +#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT) +#define I40E_VFINT_ICR01_SWINT_SHIFT 31 +#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT) +#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */ +#define I40E_VFINT_ITR01_MAX_INDEX 2 +#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT) +#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */ +#define I40E_VFINT_ITRN1_MAX_INDEX 2 +#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 +#define 
I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) +#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ +#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 +#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) +#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_QRX_TAIL1_MAX_INDEX 15 +#define I40E_QRX_TAIL1_TAIL_SHIFT 0 +#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT) +#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */ +#define I40E_QTX_TAIL1_MAX_INDEX 15 +#define I40E_QTX_TAIL1_TAIL_SHIFT 0 +#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT) +#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */ +#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0 +#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT) +#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TADD_MAX_INDEX 16 +#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0 +#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT) +#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2 +#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT) +#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TMSG_MAX_INDEX 16 +#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0 +#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT) +#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TUADD_MAX_INDEX 16 +#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0 +#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT) +#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16 +#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0 +#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT) +#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */ +#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 +#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT) +#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 +#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT) +#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8 +#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT) +#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */ +#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 +#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT) +#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 +#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT) +#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 +#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 +#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 +#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) +#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) 
/* _i=0...1 */ /* Reset: CORER */ +#define I40E_VFQF_HENA_MAX_INDEX 1 +#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0 +#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT) +#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ +#define I40E_VFQF_HKEY_MAX_INDEX 12 +#define I40E_VFQF_HKEY_KEY_0_SHIFT 0 +#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT) +#define I40E_VFQF_HKEY_KEY_1_SHIFT 8 +#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT) +#define I40E_VFQF_HKEY_KEY_2_SHIFT 16 +#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT) +#define I40E_VFQF_HKEY_KEY_3_SHIFT 24 +#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT) +#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_VFQF_HLUT_MAX_INDEX 15 +#define I40E_VFQF_HLUT_LUT0_SHIFT 0 +#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT) +#define I40E_VFQF_HLUT_LUT1_SHIFT 8 +#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT) +#define I40E_VFQF_HLUT_LUT2_SHIFT 16 +#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT) +#define I40E_VFQF_HLUT_LUT3_SHIFT 24 +#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT) +#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_VFQF_HREGION_MAX_INDEX 7 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT) +#define I40E_VFQF_HREGION_REGION_0_SHIFT 1 +#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT) +#define I40E_VFQF_HREGION_REGION_1_SHIFT 5 +#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT) +#define I40E_VFQF_HREGION_REGION_2_SHIFT 9 +#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT) +#define I40E_VFQF_HREGION_REGION_3_SHIFT 13 +#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT) +#define I40E_VFQF_HREGION_REGION_4_SHIFT 17 +#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT) +#define I40E_VFQF_HREGION_REGION_5_SHIFT 21 +#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT) +#define I40E_VFQF_HREGION_REGION_6_SHIFT 25 +#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT) +#define 
I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT) +#define I40E_VFQF_HREGION_REGION_7_SHIFT 29 +#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT) +#ifdef X722_SUPPORT + +#ifdef PF_DRIVER +#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */ +#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0 +#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT) +#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */ +#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2 +#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT) +#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3 +#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT) +#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4 +#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT) +#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8 +#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT) +#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */ +#define I40E_MNGSB_FDS_START_BC_SHIFT 0 +#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT) +#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16 +#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT) + +#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127 +#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0 +#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT) +#define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127 +#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0 +#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT) + +#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */ +#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12 +#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT) +#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16 +#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT) +#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */ +#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12 +#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT) +#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16 +#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT) +#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */ +#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 +#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT) +#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 +#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT) +#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8 +#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT) +#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */ +#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 +#define 
I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT) +#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 +#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT) +#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 +#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) +#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 +#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) +#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 +#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) + +#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7 +#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0 +#define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT) +#define I40E_GL_FWSTS_FWROWD_SHIFT 8 +#define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT) +#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */ +#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8 +#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT) +#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20 +#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT) +#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15 +#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0 +#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT) +#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_CEQPART_MAX_INDEX 15 +#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0 +#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT) +#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16 +#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT) +#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */ +#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0 +#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT) +#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_DBCQPART_MAX_INDEX 15 +#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0 +#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT) +#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16 +#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT) +#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */ +#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0 +#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT) +#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_DBQPPART_MAX_INDEX 15 +#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0 +#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT) +#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16 +#define 
I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT) +#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0 +#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT) +#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0 +#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT) +#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */ +#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0 +#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT) +#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */ +#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT) +#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PECQBASE_MAX_INDEX 15 +#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0 +#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT) +#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PECQCNT_MAX_INDEX 15 +#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0 +#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT) +#define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */ +#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0 +#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT) +#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0 +#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT) +#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0 +#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT) +#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */ +#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT) +#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */ +#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0 +#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT) +#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0 +#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT) +#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0 +#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT) +#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */ +#define 
I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0 +#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT) +#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */ +#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0 +#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT) +#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0 +#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT) +#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0 +#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT) +#define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */ +#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0 +#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT) +#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */ +#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0 +#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT) +#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15 +#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0 +#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT) +#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15 +#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0 +#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT) +#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0 +#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT) +#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */ +#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0 +#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT) +#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */ +#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0 +#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT) +#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */ +#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0 +#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT) +#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0 +#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT) +#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0 +#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT) +#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */ +#define 
I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT) +#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15 +#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0 +#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT) +#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15 +#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0 +#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT) +#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */ +#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0 +#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT) +#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */ +#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0 +#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT) +#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15 +#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0 +#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT) +#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15 +#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0 +#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT) +#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */ +#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0 +#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT) +#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */ +#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0 +#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT) +#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0 +#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT) +#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0 +#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT) +#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0 +#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT) +#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */ +#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0 +#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT) +#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */ +#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0 +#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT) +#define 
I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */ +#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT) +#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15 +#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0 +#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT) +#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16 +#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT) +#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0 +#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT) +#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31 +#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0 +#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT) +#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16 +#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT) +#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31 +#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0 +#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT) +#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16 +#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT) +#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31 +#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0 +#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT) +#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16 +#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT) +#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0 +#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT) +#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0 +#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT) +#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPDINV_MAX_INDEX 31 +#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0 +#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT) +#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15 +#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT) +#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16 +#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT) +#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* 
_i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0 +#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT) +#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0 +#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT) +#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0 +#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT) +#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0 +#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT) +#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0 +#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT) +#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0 +#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT) +#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0 +#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT) +#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0 +#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT) +#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0 +#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT) +#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0 +#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT) +#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0 +#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT) +#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0 +#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT) +#define 
I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0 +#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT) +#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0 +#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT) +#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0 +#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT) +#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0 +#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT) +#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0 +#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT) +#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0 +#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT) +#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0 +#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT) +#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0 +#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT) +#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0 +#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT) +#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0 +#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT) +#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFSDPART_MAX_INDEX 31 +#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0 +#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT) +#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16 +#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT) +#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */ 
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8 +#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT) +#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20 +#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT) +#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */ +#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8 +#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT) +#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20 +#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT) +#define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */ +#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8 +#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT) +#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20 +#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT) +#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15 +#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT) +#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15 +#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT) +#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */ +#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0 +#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT) +#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */ +#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0 +#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT) +#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */ +#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0 +#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT) +#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */ +#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0 +#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2 +#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4 +#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6 +#define 
I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9 +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10 +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11 +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12 +#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13 +#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14 +#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT) +#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */ +#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0 +#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT) +#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */ +#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0 +#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT) +#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2 +#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT) +#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3 +#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT) +#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30 +#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT) +#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30 +#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT) +#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT) +#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT) +#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPLAN_QBASE_MAX_INDEX 127 +#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0 +#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT) +#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11 +#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT) +#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31 +#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT) +#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */ +#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0 +#define 
I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT) +#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */ +#define I40E_GLNVM_AL_REQ_POR_SHIFT 0 +#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT) +#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1 +#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT) +#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2 +#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT) +#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3 +#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT) +#define I40E_GLNVM_AL_REQ_PE_SHIFT 4 +#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT) +#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5 +#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT) +#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */ +#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0 +#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT) +#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12 +#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT) +#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */ +#define I40E_GLNVM_FLA_LOCKED_SHIFT 6 +#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT) + +#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */ +#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0 +#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT) +#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1 +#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT) +#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3 +#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT) +#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4 +#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT) +#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5 +#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT) +#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8 +#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT) +#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9 +#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT) +#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10 +#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT) +#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */ +#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0 +#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT) +#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1 +#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT) +#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2 +#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT) +#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3 +#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT) +#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4 +#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT) +#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5 +#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT) +#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6 +#define I40E_GLNVM_ULT_RESERVED_2_MASK 
I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT) +#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7 +#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT) +#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8 +#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT) +#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9 +#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT) +#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10 +#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT) +#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */ +#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0 +#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1 +#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2 +#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3 +#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4 +#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5 +#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6 +#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7 +#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8 +#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9 +#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10 +#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11 +#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12 +#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13 +#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14 +#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT) +#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15 +#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT) 
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16 +#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT) +#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */ +#define I40E_MNGSB_DADD_ADDR_SHIFT 0 +#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT) +#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */ +#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0 +#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT) +#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */ +#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0 +#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT) +#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8 +#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT) +#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26 +#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT) +#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28 +#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT) +#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30 +#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT) +#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31 +#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT) +#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */ +#define I40E_MNGSB_RDATA_DATA_SHIFT 0 +#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT) +#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */ +#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0 +#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT) +#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8 +#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT) +#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16 +#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT) +#define I40E_MNGSB_RHDR0_TAG_SHIFT 24 +#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT) +#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27 +#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT) +#define I40E_MNGSB_RHDR0_EH_SHIFT 31 +#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT) +#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */ +#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0 +#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT) +#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26 +#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT) +#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30 +#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT) +#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31 +#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT) +#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */ +#define I40E_MNGSB_WDATA_DATA_SHIFT 0 +#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT) +#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */ +#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0 +#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT) +#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12 +#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT) +#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16 +#define 
I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT) +#define I40E_MNGSB_WHDR0_TAG_SHIFT 24 +#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT) +#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */ +#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0 +#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT) +#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */ +#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0 +#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT) + +#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21 +#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT) + +#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT) +#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT) +#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT) +#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT) +#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT) +#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT) +#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT) +#define 
I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT) +#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT) +#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT) +#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT) +#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT) +#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT) +#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */ +#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT) +#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */ +#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT) +#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */ +#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT) +#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16 +#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT) + +#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4 +#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT) +#define 
I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10 +#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT) +#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */ +#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0 +#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT) +#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1 +#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT) +#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2 +#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT) +#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6 +#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT) +#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16 +#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT) +#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */ +#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */ +#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT) +#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */ +#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT) +#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */ +#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT) +#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */ +#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT) +#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */ +#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT) +#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */ +#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT) +#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */ +#define 
I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0 +#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT) +#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16 +#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT) +#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */ +#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0 +#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT) +#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */ +#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0 +#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT) +#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */ +#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0 +#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT) +#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */ +#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0 +#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT) +#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17 +#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT) +#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18 +#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT) +#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */ +#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0 +#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT) +#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15 +#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0 +#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT) +#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15 +#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0 +#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT) +#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15 +#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0 +#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT) +#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0 +#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT) +#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0 +#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT) +#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0 +#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26 +#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27 +#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, 
I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28 +#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29 +#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30 +#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31 +#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT) +#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0 +#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT) +#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0 +#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT) +#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0 +#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT) +#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31 +#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0 +#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT) +#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31 +#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0 +#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT) +#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31 +#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0 +#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT) +#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31 +#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0 +#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT) +#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8 +#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT) +#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31 +#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT) +#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31 +#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT) +#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31 +#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0 +#define 
I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1 +#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2 +#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3 +#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4 +#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT) +#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31 +#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0 +#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT) +#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31 +#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT) +#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */ +#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0 +#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT) +#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */ +#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0 +#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT) +#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */ +#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0 +#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT) +#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */ +#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0 +#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT) +#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4 +#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT) +#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16 +#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT) +#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31 +#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT) +#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */ +#define I40E_PFPE_CQACK_PECQID_SHIFT 0 +#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT) +#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */ +#define I40E_PFPE_CQARM_PECQID_SHIFT 0 +#define I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT) +#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */ +#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0 +#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT) +#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */ +#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0 +#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT) +#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16 +#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT) +#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */ +#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0 +#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT) +#define 
I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31 +#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT) +#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */ +#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT) +#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */ +#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT) +#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */ +#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0 +#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT) +#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16 +#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT) +#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */ +#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0 +#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT) +#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */ +#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0 +#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT) +#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */ +#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0 +#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT) +#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */ +#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0 +#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT) +#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1 +#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT) +#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2 +#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT) +#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3 +#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT) +#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4 +#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT) +#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */ +#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0 +#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT) +#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31 +#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT) +#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */ +#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0 +#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT) +#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20 +#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) +#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */ +#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0 +#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT) +#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */ +#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7 +#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0 +#define 
I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT) +#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */ +#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0 +#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT) +#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13 +#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT) +#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30 +#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT) +#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */ +#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0 +#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT) +#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8 +#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT) +#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16 +#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT) +#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24 +#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT) +#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0 +#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT) +#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13 +#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT) +#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30 +#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT) +#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0 +#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT) +#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0 +#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT) +#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0 +#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT) +#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16 +#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT) +#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31 +#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT) +#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0 +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT) +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8 +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT) +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16 +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT) +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24 +#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT) +#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */ +#define 
I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0 +#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT) +#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8 +#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT) +#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16 +#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT) +#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24 +#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT) +#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0 +#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT) +#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8 +#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT) +#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16 +#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT) +#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24 +#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT) +#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */ +#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0 +#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT) +#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8 +#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT) +#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16 +#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT) +#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_AEQALLOC_MAX_INDEX 127 +#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0 +#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT) +#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127 +#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0 +#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT) +#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CCQPLOW_MAX_INDEX 127 +#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0 +#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT) +#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127 +#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0 +#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT) +#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4 +#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT) +#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16 +#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT) +#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31 +#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT) +#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ 
+#define I40E_VFPE_CQACK_MAX_INDEX 127 +#define I40E_VFPE_CQACK_PECQID_SHIFT 0 +#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT) +#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQARM_MAX_INDEX 127 +#define I40E_VFPE_CQARM_PECQID_SHIFT 0 +#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT) +#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQPDB_MAX_INDEX 127 +#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0 +#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT) +#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127 +#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0 +#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT) +#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16 +#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT) +#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQPTAIL_MAX_INDEX 127 +#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0 +#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT) +#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31 +#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT) +#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127 +#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0 +#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT) +#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16 +#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT) +#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127 +#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0 +#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT) +#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127 +#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0 +#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT) +#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127 +#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0 +#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT) +#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_WQEALLOC_MAX_INDEX 127 +#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0 +#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT) +#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20 +#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) +#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15 +#define 
I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0 +#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT) +#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define 
I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0 +#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT) +#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0 +#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT) +#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* 
Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0 +#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT) +#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 
+ ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0 +#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT) +#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0 +#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT) +#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT) +#define 
I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT) +#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_PFRDMATXSNDSLO(_i) 
(0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0 +#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT) +#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0 +#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT) +#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0 +#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT) +#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0 +#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT) +#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15 +#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0 +#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT) +#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0 +#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT) +#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0 +#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT) +#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0 +#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT) +#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define 
I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0 +#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT) +#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0 +#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT) +#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15 +#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0 +#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT) +#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15 +#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0 +#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT) +#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT) +#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT) +#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT) +#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT) +#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0 +#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT) +#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0 +#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT) +#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0 +#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT) +#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0 +#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT) +#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */ +#define 
I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0 +#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT) +#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0 +#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT) +#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT) +#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0 +#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT) +#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT) +#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0 +#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT) +#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0 +#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT) +#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0 +#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT) +#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT) +#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0 +#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT) +#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT) +#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0 +#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT) +#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0 +#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT) +#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0 +#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT) +#define 
I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT) +#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT) +#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT) +#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0 +#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT) +#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0 +#define 
I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0 +#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT) +#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31 +#define 
I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0 +#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT) +#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0 +#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT) +#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define 
I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0 +#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT) +#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: 
PE_CORER */ +#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0 +#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT) +#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT) +#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ 
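The per-VF RDMA statistics registers above follow the same pattern as the rest of the GLPES block: each counter is exposed as a 32-bit LO register plus a narrower HI register, indexed per VF (_i = 0...31), with the SHIFT/MASK pairs describing the valid field in each half. A minimal usage sketch follows, showing how a consumer of this header might fold one such LO/HI pair (the "RDMA RX sends" counter) back into a single value; it is illustrative only and not part of the patch. The rd_reg32() helper, the bar0 mapping argument, and the assumption that HI:LO form one wider counter (as the naming suggests) are all assumptions made for the example.

#include <stdint.h>

/* Hypothetical raw 32-bit MMIO read from the device's mapped BAR0. */
static inline uint32_t rd_reg32(volatile uint8_t *bar0, uint32_t off)
{
	return *(volatile uint32_t *)(bar0 + off);
}

/* Combine the LO/HI halves of the per-VF RDMA RX send counter (sketch). */
static inline uint64_t vf_rdma_rx_snds(volatile uint8_t *bar0, unsigned int vf)
{
	uint64_t lo = rd_reg32(bar0, I40E_GLPES_VFRDMARXSNDSLO(vf)) &
		      I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK;
	uint64_t hi = rd_reg32(bar0, I40E_GLPES_VFRDMARXSNDSHI(vf)) &
		      I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK;

	/* HI is masked to 16 bits here, giving a 48-bit combined count. */
	return (hi << 32) | lo;
}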
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0 +#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT) +#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0 +#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT) +#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0 +#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT) +#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0 +#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT) +#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31 +#define 
I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0 +#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT) +#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0 +#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT) +#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0 +#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT) +#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0 +#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT) +#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0 +#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT) +#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0 +#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT) +#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31 +#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0 +#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT) +#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31 +#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0 +#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT) +#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT) +#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT) +#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT) +#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0 +#define 
I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT) +#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */ +#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0 +#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT) +#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */ +#define I40E_GLQF_APBVT_MAX_INDEX 2047 +#define I40E_GLQF_APBVT_APBVT_SHIFT 0 +#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT) +#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */ +#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63 +#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0 +#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT) +#define I40E_GLQF_FD_MSK(_i, _j) (0x00267200 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_FD_MSK_MAX_INDEX 1 +#define I40E_GLQF_FD_MSK_MASK_SHIFT 0 +#define I40E_GLQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_FD_MSK_MASK_SHIFT) +#define I40E_GLQF_FD_MSK_OFFSET_SHIFT 16 +#define I40E_GLQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_FD_MSK_OFFSET_SHIFT) +#define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_HASH_INSET_MAX_INDEX 1 +#define I40E_GLQF_HASH_INSET_INSET_SHIFT 0 +#define I40E_GLQF_HASH_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_HASH_INSET_INSET_SHIFT) +#define I40E_GLQF_HASH_MSK(_i, _j) (0x00267A00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_HASH_MSK_MAX_INDEX 1 +#define I40E_GLQF_HASH_MSK_MASK_SHIFT 0 +#define I40E_GLQF_HASH_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_HASH_MSK_MASK_SHIFT) +#define I40E_GLQF_HASH_MSK_OFFSET_SHIFT 16 +#define I40E_GLQF_HASH_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_HASH_MSK_OFFSET_SHIFT) +#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_ORT_MAX_INDEX 63 +#define I40E_GLQF_ORT_PIT_INDX_SHIFT 0 +#define I40E_GLQF_ORT_PIT_INDX_MASK I40E_MASK(0x1F, I40E_GLQF_ORT_PIT_INDX_SHIFT) +#define I40E_GLQF_ORT_FIELD_CNT_SHIFT 5 +#define I40E_GLQF_ORT_FIELD_CNT_MASK I40E_MASK(0x3, I40E_GLQF_ORT_FIELD_CNT_SHIFT) +#define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7 +#define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) +#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4)) /* _i=0...23 */ /* Reset: CORER */ +#define I40E_GLQF_PIT_MAX_INDEX 23 +#define I40E_GLQF_PIT_SOURCE_OFF_SHIFT 0 +#define I40E_GLQF_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_SOURCE_OFF_SHIFT) +#define I40E_GLQF_PIT_FSIZE_SHIFT 5 +#define I40E_GLQF_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_FSIZE_SHIFT) +#define I40E_GLQF_PIT_DEST_OFF_SHIFT 10 +#define I40E_GLQF_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_GLQF_PIT_DEST_OFF_SHIFT) +#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ +#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1 +#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0 +#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT) +#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */ +#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0 +#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT) +#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8 +#define 
I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT) +#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */ +#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0 +#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT) +#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5 +#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT) +/* Redefined for X722 family */ +#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_X722_PFQF_HLUT_MAX_INDEX 127 +#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0 +#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT) +#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8 +#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT) +#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16 +#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT) +#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24 +#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT) +#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PFQF_HREGION_MAX_INDEX 7 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT) +#define I40E_PFQF_HREGION_REGION_0_SHIFT 1 +#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT) +#define I40E_PFQF_HREGION_REGION_1_SHIFT 5 +#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT) +#define I40E_PFQF_HREGION_REGION_2_SHIFT 9 +#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT) +#define I40E_PFQF_HREGION_REGION_3_SHIFT 13 +#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT) +#define I40E_PFQF_HREGION_REGION_4_SHIFT 17 +#define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT) +#define I40E_PFQF_HREGION_REGION_5_SHIFT 21 +#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT) +#define I40E_PFQF_HREGION_REGION_6_SHIFT 25 +#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT) +#define I40E_PFQF_HREGION_REGION_7_SHIFT 29 +#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT) +#define 
I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8 +#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT) +#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */ +#define I40E_VSIQF_HKEY_MAX_INDEX 12 +#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0 +#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT) +#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8 +#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT) +#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16 +#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT) +#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24 +#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT) +#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */ +#define I40E_VSIQF_HLUT_MAX_INDEX 15 +#define I40E_VSIQF_HLUT_LUT0_SHIFT 0 +#define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT) +#define I40E_VSIQF_HLUT_LUT1_SHIFT 8 +#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT) +#define I40E_VSIQF_HLUT_LUT2_SHIFT 16 +#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT) +#define I40E_VSIQF_HLUT_LUT3_SHIFT 24 +#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT) +#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */ +#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0 +#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT) +#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */ +#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0 +#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT) +#endif /* PF_DRIVER */ +#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT) +#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT) +#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */ +#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0 +#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT) +#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */ +#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0 +#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT) +#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */ +#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0 +#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT) +#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */ +#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0 +#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4 +#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16 +#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31 +#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT) +#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */ +#define I40E_VFPE_CQACK1_PECQID_SHIFT 0 +#define 
I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT) +#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */ +#define I40E_VFPE_CQARM1_PECQID_SHIFT 0 +#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT) +#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */ +#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0 +#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT) +#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */ +#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0 +#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT) +#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16 +#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT) +#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */ +#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0 +#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT) +#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31 +#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT) +#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */ +#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0 +#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT) +#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16 +#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT) +#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */ +#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0 +#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT) +#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */ +#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0 +#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT) +#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */ +#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0 +#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT) +#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */ +#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0 +#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT) +#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20 +#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT) + +#endif /* X722_SUPPORT */ +#endif /* _I40E_REGISTER_H_ */ diff --git a/drivers/net/i40e/base/i40e_status.h b/drivers/net/i40e/base/i40e_status.h new file mode 100644 index 00000000..5632ff2b --- /dev/null +++ b/drivers/net/i40e/base/i40e_status.h @@ -0,0 +1,107 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. 
Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _I40E_STATUS_H_ +#define _I40E_STATUS_H_ + +/* Error Codes */ +enum i40e_status_code { + I40E_SUCCESS = 0, + I40E_ERR_NVM = -1, + I40E_ERR_NVM_CHECKSUM = -2, + I40E_ERR_PHY = -3, + I40E_ERR_CONFIG = -4, + I40E_ERR_PARAM = -5, + I40E_ERR_MAC_TYPE = -6, + I40E_ERR_UNKNOWN_PHY = -7, + I40E_ERR_LINK_SETUP = -8, + I40E_ERR_ADAPTER_STOPPED = -9, + I40E_ERR_INVALID_MAC_ADDR = -10, + I40E_ERR_DEVICE_NOT_SUPPORTED = -11, + I40E_ERR_MASTER_REQUESTS_PENDING = -12, + I40E_ERR_INVALID_LINK_SETTINGS = -13, + I40E_ERR_AUTONEG_NOT_COMPLETE = -14, + I40E_ERR_RESET_FAILED = -15, + I40E_ERR_SWFW_SYNC = -16, + I40E_ERR_NO_AVAILABLE_VSI = -17, + I40E_ERR_NO_MEMORY = -18, + I40E_ERR_BAD_PTR = -19, + I40E_ERR_RING_FULL = -20, + I40E_ERR_INVALID_PD_ID = -21, + I40E_ERR_INVALID_QP_ID = -22, + I40E_ERR_INVALID_CQ_ID = -23, + I40E_ERR_INVALID_CEQ_ID = -24, + I40E_ERR_INVALID_AEQ_ID = -25, + I40E_ERR_INVALID_SIZE = -26, + I40E_ERR_INVALID_ARP_INDEX = -27, + I40E_ERR_INVALID_FPM_FUNC_ID = -28, + I40E_ERR_QP_INVALID_MSG_SIZE = -29, + I40E_ERR_QP_TOOMANY_WRS_POSTED = -30, + I40E_ERR_INVALID_FRAG_COUNT = -31, + I40E_ERR_QUEUE_EMPTY = -32, + I40E_ERR_INVALID_ALIGNMENT = -33, + I40E_ERR_FLUSHED_QUEUE = -34, + I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35, + I40E_ERR_INVALID_IMM_DATA_SIZE = -36, + I40E_ERR_TIMEOUT = -37, + I40E_ERR_OPCODE_MISMATCH = -38, + I40E_ERR_CQP_COMPL_ERROR = -39, + I40E_ERR_INVALID_VF_ID = -40, + I40E_ERR_INVALID_HMCFN_ID = -41, + I40E_ERR_BACKING_PAGE_ERROR = -42, + I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43, + I40E_ERR_INVALID_PBLE_INDEX = -44, + I40E_ERR_INVALID_SD_INDEX = -45, + I40E_ERR_INVALID_PAGE_DESC_INDEX = -46, + I40E_ERR_INVALID_SD_TYPE = -47, + I40E_ERR_MEMCPY_FAILED = -48, + I40E_ERR_INVALID_HMC_OBJ_INDEX = -49, + I40E_ERR_INVALID_HMC_OBJ_COUNT = -50, + I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51, + I40E_ERR_SRQ_ENABLED = -52, + I40E_ERR_ADMIN_QUEUE_ERROR = -53, + I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54, + I40E_ERR_BUF_TOO_SHORT = -55, + I40E_ERR_ADMIN_QUEUE_FULL = -56, + I40E_ERR_ADMIN_QUEUE_NO_WORK = -57, + I40E_ERR_BAD_IWARP_CQE = -58, + I40E_ERR_NVM_BLANK_MODE = -59, + I40E_ERR_NOT_IMPLEMENTED = -60, + I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61, + I40E_ERR_DIAG_TEST_FAILED = -62, + I40E_ERR_NOT_READY = -63, + I40E_NOT_SUPPORTED = -64, + I40E_ERR_FIRMWARE_API_VERSION = -65, +}; + +#endif /* _I40E_STATUS_H_ */ diff --git a/drivers/net/i40e/base/i40e_type.h b/drivers/net/i40e/base/i40e_type.h new file mode 100644 index 00000000..d5ca67af --- /dev/null +++ 
b/drivers/net/i40e/base/i40e_type.h @@ -0,0 +1,1634 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _I40E_TYPE_H_ +#define _I40E_TYPE_H_ + +#include "i40e_status.h" +#include "i40e_osdep.h" +#include "i40e_register.h" +#include "i40e_adminq.h" +#include "i40e_hmc.h" +#include "i40e_lan_hmc.h" +#include "i40e_devids.h" + +#define UNREFERENCED_XPARAMETER +#define UNREFERENCED_1PARAMETER(_p) (_p); +#define UNREFERENCED_2PARAMETER(_p, _q) (_p); (_q); +#define UNREFERENCED_3PARAMETER(_p, _q, _r) (_p); (_q); (_r); +#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) (_p); (_q); (_r); (_s); +#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t) (_p); (_q); (_r); (_s); (_t); + +#ifndef LINUX_MACROS +#ifndef BIT +#define BIT(a) (1UL << (a)) +#endif /* BIT */ +#ifndef BIT_ULL +#define BIT_ULL(a) (1ULL << (a)) +#endif /* BIT_ULL */ +#endif /* LINUX_MACROS */ + +#ifndef I40E_MASK +/* I40E_MASK is a macro used on 32 bit registers */ +#define I40E_MASK(mask, shift) (mask << shift) +#endif + +#define I40E_MAX_PF 16 +#define I40E_MAX_PF_VSI 64 +#define I40E_MAX_PF_QP 128 +#define I40E_MAX_VSI_QP 16 +#define I40E_MAX_VF_VSI 3 +#define I40E_MAX_CHAINED_RX_BUFFERS 5 +#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16 + +/* something less than 1 minute */ +#define I40E_HEARTBEAT_TIMEOUT (HZ * 50) + +/* Max default timeout in ms, */ +#define I40E_MAX_NVM_TIMEOUT 18000 + +/* Check whether address is multicast. */ +#define I40E_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01)) + +/* Check whether an address is broadcast. 
*/ +#define I40E_IS_BROADCAST(address) \ + ((((u8 *)(address))[0] == ((u8)0xff)) && \ + (((u8 *)(address))[1] == ((u8)0xff))) + +/* Switch from ms to the 1usec global time (this is the GTIME resolution) */ +#define I40E_MS_TO_GTIME(time) ((time) * 1000) + +/* forward declaration */ +struct i40e_hw; +typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); + +#define I40E_ETH_LENGTH_OF_ADDRESS 6 +/* Data type manipulation macros. */ +#define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF)) +#define I40E_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF)) + +#define I40E_HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF)) +#define I40E_LO_WORD(x) ((u16)((x) & 0xFFFF)) + +#define I40E_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF)) +#define I40E_LO_BYTE(x) ((u8)((x) & 0xFF)) + +/* Number of Transmit Descriptors must be a multiple of 8. */ +#define I40E_REQ_TX_DESCRIPTOR_MULTIPLE 8 +/* Number of Receive Descriptors must be a multiple of 32 if + * the number of descriptors is greater than 32. + */ +#define I40E_REQ_RX_DESCRIPTOR_MULTIPLE 32 + +#define I40E_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +/* bitfields for Tx queue mapping in QTX_CTL */ +#define I40E_QTX_CTL_VF_QUEUE 0x0 +#define I40E_QTX_CTL_VM_QUEUE 0x1 +#define I40E_QTX_CTL_PF_QUEUE 0x2 + +/* debug masks - set these bits in hw->debug_mask to control output */ +enum i40e_debug_mask { + I40E_DEBUG_INIT = 0x00000001, + I40E_DEBUG_RELEASE = 0x00000002, + + I40E_DEBUG_LINK = 0x00000010, + I40E_DEBUG_PHY = 0x00000020, + I40E_DEBUG_HMC = 0x00000040, + I40E_DEBUG_NVM = 0x00000080, + I40E_DEBUG_LAN = 0x00000100, + I40E_DEBUG_FLOW = 0x00000200, + I40E_DEBUG_DCB = 0x00000400, + I40E_DEBUG_DIAG = 0x00000800, + I40E_DEBUG_FD = 0x00001000, + + I40E_DEBUG_AQ_MESSAGE = 0x01000000, + I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, + I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, + I40E_DEBUG_AQ_COMMAND = 0x06000000, + I40E_DEBUG_AQ = 0x0F000000, + + I40E_DEBUG_USER = 0xF0000000, + + I40E_DEBUG_ALL = 0xFFFFFFFF +}; + +/* PCI Bus Info */ +#define I40E_PCI_LINK_STATUS 0xB2 +#define I40E_PCI_LINK_WIDTH 0x3F0 +#define I40E_PCI_LINK_WIDTH_1 0x10 +#define I40E_PCI_LINK_WIDTH_2 0x20 +#define I40E_PCI_LINK_WIDTH_4 0x40 +#define I40E_PCI_LINK_WIDTH_8 0x80 +#define I40E_PCI_LINK_SPEED 0xF +#define I40E_PCI_LINK_SPEED_2500 0x1 +#define I40E_PCI_LINK_SPEED_5000 0x2 +#define I40E_PCI_LINK_SPEED_8000 0x3 + +#define I40E_MDIO_STCODE 0 +#define I40E_MDIO_OPCODE_ADDRESS 0 +#define I40E_MDIO_OPCODE_WRITE I40E_MASK(1, \ + I40E_GLGEN_MSCA_OPCODE_SHIFT) +#define I40E_MDIO_OPCODE_READ_INC_ADDR I40E_MASK(2, \ + I40E_GLGEN_MSCA_OPCODE_SHIFT) +#define I40E_MDIO_OPCODE_READ I40E_MASK(3, \ + I40E_GLGEN_MSCA_OPCODE_SHIFT) + +#define I40E_PHY_COM_REG_PAGE 0x1E +#define I40E_PHY_LED_LINK_MODE_MASK 0xF0 +#define I40E_PHY_LED_MANUAL_ON 0x100 +#define I40E_PHY_LED_PROV_REG_1 0xC430 +#define I40E_PHY_LED_MODE_MASK 0xFFFF +#define I40E_PHY_LED_MODE_ORIG 0x80000000 + +/* Memory types */ +enum i40e_memset_type { + I40E_NONDMA_MEM = 0, + I40E_DMA_MEM +}; + +/* Memcpy types */ +enum i40e_memcpy_type { + I40E_NONDMA_TO_NONDMA = 0, + I40E_NONDMA_TO_DMA, + I40E_DMA_TO_DMA, + I40E_DMA_TO_NONDMA +}; + +#ifdef X722_SUPPORT +#define I40E_FW_API_VERSION_MINOR_X722 0x0004 +#endif +#define I40E_FW_API_VERSION_MINOR_X710 0x0005 + + +/* These are structs for managing the hardware information and the operations. 
+ * The structures of function pointers are filled out at init time when we + * know for sure exactly which hardware we're working with. This gives us the + * flexibility of using the same main driver code but adapting to slightly + * different hardware needs as new parts are developed. For this architecture, + * the Firmware and AdminQ are intended to insulate the driver from most of the + * future changes, but these structures will also do part of the job. + */ +enum i40e_mac_type { + I40E_MAC_UNKNOWN = 0, + I40E_MAC_X710, + I40E_MAC_XL710, + I40E_MAC_VF, +#ifdef X722_SUPPORT + I40E_MAC_X722, + I40E_MAC_X722_VF, +#endif + I40E_MAC_GENERIC, +}; + +enum i40e_media_type { + I40E_MEDIA_TYPE_UNKNOWN = 0, + I40E_MEDIA_TYPE_FIBER, + I40E_MEDIA_TYPE_BASET, + I40E_MEDIA_TYPE_BACKPLANE, + I40E_MEDIA_TYPE_CX4, + I40E_MEDIA_TYPE_DA, + I40E_MEDIA_TYPE_VIRTUAL +}; + +enum i40e_fc_mode { + I40E_FC_NONE = 0, + I40E_FC_RX_PAUSE, + I40E_FC_TX_PAUSE, + I40E_FC_FULL, + I40E_FC_PFC, + I40E_FC_DEFAULT +}; + +enum i40e_set_fc_aq_failures { + I40E_SET_FC_AQ_FAIL_NONE = 0, + I40E_SET_FC_AQ_FAIL_GET = 1, + I40E_SET_FC_AQ_FAIL_SET = 2, + I40E_SET_FC_AQ_FAIL_UPDATE = 4, + I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6 +}; + +enum i40e_vsi_type { + I40E_VSI_MAIN = 0, + I40E_VSI_VMDQ1 = 1, + I40E_VSI_VMDQ2 = 2, + I40E_VSI_CTRL = 3, + I40E_VSI_FCOE = 4, + I40E_VSI_MIRROR = 5, + I40E_VSI_SRIOV = 6, + I40E_VSI_FDIR = 7, + I40E_VSI_TYPE_UNKNOWN +}; + +enum i40e_queue_type { + I40E_QUEUE_TYPE_RX = 0, + I40E_QUEUE_TYPE_TX, + I40E_QUEUE_TYPE_PE_CEQ, + I40E_QUEUE_TYPE_UNKNOWN +}; + +struct i40e_link_status { + enum i40e_aq_phy_type phy_type; + enum i40e_aq_link_speed link_speed; + u8 link_info; + u8 an_info; + u8 ext_info; + u8 loopback; + /* is Link Status Event notification to SW enabled */ + bool lse_enable; + u16 max_frame_size; + bool crc_enable; + u8 pacing; + u8 requested_speeds; + u8 module_type[3]; + /* 1st byte: module identifier */ +#define I40E_MODULE_TYPE_SFP 0x03 +#define I40E_MODULE_TYPE_QSFP 0x0D + /* 2nd byte: ethernet compliance codes for 10/40G */ +#define I40E_MODULE_TYPE_40G_ACTIVE 0x01 +#define I40E_MODULE_TYPE_40G_LR4 0x02 +#define I40E_MODULE_TYPE_40G_SR4 0x04 +#define I40E_MODULE_TYPE_40G_CR4 0x08 +#define I40E_MODULE_TYPE_10G_BASE_SR 0x10 +#define I40E_MODULE_TYPE_10G_BASE_LR 0x20 +#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40 +#define I40E_MODULE_TYPE_10G_BASE_ER 0x80 + /* 3rd byte: ethernet compliance codes for 1G */ +#define I40E_MODULE_TYPE_1000BASE_SX 0x01 +#define I40E_MODULE_TYPE_1000BASE_LX 0x02 +#define I40E_MODULE_TYPE_1000BASE_CX 0x04 +#define I40E_MODULE_TYPE_1000BASE_T 0x08 +}; + +enum i40e_aq_capabilities_phy_type { + I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII), + I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX), + I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4), + I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR), + I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4), + I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI), + I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI), + I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI), + I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI), + I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI), + I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU), + I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU), + I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC), + I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC), + I40E_CAP_PHY_TYPE_100BASE_TX = 
BIT(I40E_PHY_TYPE_100BASE_TX), + I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T), + I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T), + I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR), + I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR), + I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU), + I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1), + I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4), + I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4), + I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4), + I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX), + I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX), + I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL = BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL), + I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2) +}; + +struct i40e_phy_info { + struct i40e_link_status link_info; + struct i40e_link_status link_info_old; + bool get_link_info; + enum i40e_media_type media_type; + /* all the phy types the NVM is capable of */ + u32 phy_types; +}; + +#define I40E_HW_CAP_MAX_GPIO 30 +#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0 +#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1 + +#ifdef X722_SUPPORT +enum i40e_acpi_programming_method { + I40E_ACPI_PROGRAMMING_METHOD_HW_FVL = 0, + I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1 +}; + +#define I40E_WOL_SUPPORT_MASK 1 +#define I40E_ACPI_PROGRAMMING_METHOD_MASK (1 << 1) +#define I40E_PROXY_SUPPORT_MASK (1 << 2) + +#endif +/* Capabilities of a PF or a VF or the whole device */ +struct i40e_hw_capabilities { + u32 switch_mode; +#define I40E_NVM_IMAGE_TYPE_EVB 0x0 +#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2 +#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3 + + u32 management_mode; + u32 npar_enable; + u32 os2bmc; + u32 valid_functions; + bool sr_iov_1_1; + bool vmdq; + bool evb_802_1_qbg; /* Edge Virtual Bridging */ + bool evb_802_1_qbh; /* Bridge Port Extension */ + bool dcb; + bool fcoe; + bool iscsi; /* Indicates iSCSI enabled */ + bool flex10_enable; + bool flex10_capable; + u32 flex10_mode; +#define I40E_FLEX10_MODE_UNKNOWN 0x0 +#define I40E_FLEX10_MODE_DCC 0x1 +#define I40E_FLEX10_MODE_DCI 0x2 + + u32 flex10_status; +#define I40E_FLEX10_STATUS_DCC_ERROR 0x1 +#define I40E_FLEX10_STATUS_VC_MODE 0x2 + + bool mgmt_cem; + bool ieee_1588; + bool iwarp; + bool fd; + u32 fd_filters_guaranteed; + u32 fd_filters_best_effort; + bool rss; + u32 rss_table_size; + u32 rss_table_entry_width; + bool led[I40E_HW_CAP_MAX_GPIO]; + bool sdp[I40E_HW_CAP_MAX_GPIO]; + u32 nvm_image_type; + u32 num_flow_director_filters; + u32 num_vfs; + u32 vf_base_id; + u32 num_vsis; + u32 num_rx_qp; + u32 num_tx_qp; + u32 base_queue; + u32 num_msix_vectors; + u32 num_msix_vectors_vf; + u32 led_pin_num; + u32 sdp_pin_num; + u32 mdio_port_num; + u32 mdio_port_mode; + u8 rx_buf_chain_len; + u32 enabled_tcmap; + u32 maxtc; + u64 wr_csr_prot; +#ifdef X722_SUPPORT + bool apm_wol_support; + enum i40e_acpi_programming_method acpi_prog_method; + bool proxy_support; +#endif +}; + +struct i40e_mac_info { + enum i40e_mac_type type; + u8 addr[I40E_ETH_LENGTH_OF_ADDRESS]; + u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS]; + u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS]; + u8 port_addr[I40E_ETH_LENGTH_OF_ADDRESS]; + u16 max_fcoeq; +}; + +enum i40e_aq_resources_ids { + I40E_NVM_RESOURCE_ID = 1 +}; + +enum i40e_aq_resource_access_type { + I40E_RESOURCE_READ = 1, + I40E_RESOURCE_WRITE +}; + +struct i40e_nvm_info { + u64 hw_semaphore_timeout; /* usec global time (GTIME 
resolution) */ + u32 timeout; /* [ms] */ + u16 sr_size; /* Shadow RAM size in words */ + bool blank_nvm_mode; /* is NVM empty (no FW present)*/ + u16 version; /* NVM package version */ + u32 eetrack; /* NVM data version */ + u32 oem_ver; /* OEM version info */ +}; + +/* definitions used in NVM update support */ + +enum i40e_nvmupd_cmd { + I40E_NVMUPD_INVALID, + I40E_NVMUPD_READ_CON, + I40E_NVMUPD_READ_SNT, + I40E_NVMUPD_READ_LCB, + I40E_NVMUPD_READ_SA, + I40E_NVMUPD_WRITE_ERA, + I40E_NVMUPD_WRITE_CON, + I40E_NVMUPD_WRITE_SNT, + I40E_NVMUPD_WRITE_LCB, + I40E_NVMUPD_WRITE_SA, + I40E_NVMUPD_CSUM_CON, + I40E_NVMUPD_CSUM_SA, + I40E_NVMUPD_CSUM_LCB, + I40E_NVMUPD_STATUS, + I40E_NVMUPD_EXEC_AQ, + I40E_NVMUPD_GET_AQ_RESULT, +}; + +enum i40e_nvmupd_state { + I40E_NVMUPD_STATE_INIT, + I40E_NVMUPD_STATE_READING, + I40E_NVMUPD_STATE_WRITING, + I40E_NVMUPD_STATE_INIT_WAIT, + I40E_NVMUPD_STATE_WRITE_WAIT, +}; + +/* nvm_access definition and its masks/shifts need to be accessible to + * application, core driver, and shared code. Where is the right file? + */ +#define I40E_NVM_READ 0xB +#define I40E_NVM_WRITE 0xC + +#define I40E_NVM_MOD_PNT_MASK 0xFF + +#define I40E_NVM_TRANS_SHIFT 8 +#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) +#define I40E_NVM_CON 0x0 +#define I40E_NVM_SNT 0x1 +#define I40E_NVM_LCB 0x2 +#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) +#define I40E_NVM_ERA 0x4 +#define I40E_NVM_CSUM 0x8 +#define I40E_NVM_EXEC 0xf + +#define I40E_NVM_ADAPT_SHIFT 16 +#define I40E_NVM_ADAPT_MASK (0xffffULL << I40E_NVM_ADAPT_SHIFT) + +#define I40E_NVMUPD_MAX_DATA 4096 +#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */ + +struct i40e_nvm_access { + u32 command; + u32 config; + u32 offset; /* in bytes */ + u32 data_size; /* in bytes */ + u8 data[1]; +}; + +/* PCI bus types */ +enum i40e_bus_type { + i40e_bus_type_unknown = 0, + i40e_bus_type_pci, + i40e_bus_type_pcix, + i40e_bus_type_pci_express, + i40e_bus_type_reserved +}; + +/* PCI bus speeds */ +enum i40e_bus_speed { + i40e_bus_speed_unknown = 0, + i40e_bus_speed_33 = 33, + i40e_bus_speed_66 = 66, + i40e_bus_speed_100 = 100, + i40e_bus_speed_120 = 120, + i40e_bus_speed_133 = 133, + i40e_bus_speed_2500 = 2500, + i40e_bus_speed_5000 = 5000, + i40e_bus_speed_8000 = 8000, + i40e_bus_speed_reserved +}; + +/* PCI bus widths */ +enum i40e_bus_width { + i40e_bus_width_unknown = 0, + i40e_bus_width_pcie_x1 = 1, + i40e_bus_width_pcie_x2 = 2, + i40e_bus_width_pcie_x4 = 4, + i40e_bus_width_pcie_x8 = 8, + i40e_bus_width_32 = 32, + i40e_bus_width_64 = 64, + i40e_bus_width_reserved +}; + +/* Bus parameters */ +struct i40e_bus_info { + enum i40e_bus_speed speed; + enum i40e_bus_width width; + enum i40e_bus_type type; + + u16 func; + u16 device; + u16 lan_id; +}; + +/* Flow control (FC) parameters */ +struct i40e_fc_info { + enum i40e_fc_mode current_mode; /* FC mode in effect */ + enum i40e_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +#define I40E_MAX_TRAFFIC_CLASS 8 +#define I40E_MAX_USER_PRIORITY 8 +#define I40E_DCBX_MAX_APPS 32 +#define I40E_LLDPDU_SIZE 1500 +#define I40E_TLV_STATUS_OPER 0x1 +#define I40E_TLV_STATUS_SYNC 0x2 +#define I40E_TLV_STATUS_ERR 0x4 +#define I40E_CEE_OPER_MAX_APPS 3 +#define I40E_APP_PROTOID_FCOE 0x8906 +#define I40E_APP_PROTOID_ISCSI 0x0cbc +#define I40E_APP_PROTOID_FIP 0x8914 +#define I40E_APP_SEL_ETHTYPE 0x1 +#define I40E_APP_SEL_TCPIP 0x2 +#define I40E_CEE_APP_SEL_ETHTYPE 0x0 +#define I40E_CEE_APP_SEL_TCPIP 0x1 + +/* CEE or IEEE 802.1Qaz ETS Configuration data */ +struct i40e_dcb_ets_config { + u8 
willing; + u8 cbs; + u8 maxtcs; + u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; + u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; + u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; +}; + +/* CEE or IEEE 802.1Qaz PFC Configuration data */ +struct i40e_dcb_pfc_config { + u8 willing; + u8 mbc; + u8 pfccap; + u8 pfcenable; +}; + +/* CEE or IEEE 802.1Qaz Application Priority data */ +struct i40e_dcb_app_priority_table { + u8 priority; + u8 selector; + u16 protocolid; +}; + +struct i40e_dcbx_config { + u8 dcbx_mode; +#define I40E_DCBX_MODE_CEE 0x1 +#define I40E_DCBX_MODE_IEEE 0x2 + u8 app_mode; +#define I40E_DCBX_APPS_NON_WILLING 0x1 + u32 numapps; + u32 tlv_status; /* CEE mode TLV status */ + struct i40e_dcb_ets_config etscfg; + struct i40e_dcb_ets_config etsrec; + struct i40e_dcb_pfc_config pfc; + struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS]; +}; + +/* Port hardware description */ +struct i40e_hw { + u8 *hw_addr; + void *back; + + /* subsystem structs */ + struct i40e_phy_info phy; + struct i40e_mac_info mac; + struct i40e_bus_info bus; + struct i40e_nvm_info nvm; + struct i40e_fc_info fc; + + /* pci info */ + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 port; + bool adapter_stopped; + + /* capabilities for entire device and PCI func */ + struct i40e_hw_capabilities dev_caps; + struct i40e_hw_capabilities func_caps; + + /* Flow Director shared filter space */ + u16 fdir_shared_filter_count; + + /* device profile info */ + u8 pf_id; + u16 main_vsi_seid; + + /* for multi-function MACs */ + u16 partition_id; + u16 num_partitions; + u16 num_ports; + + /* Closest numa node to the device */ + u16 numa_node; + + /* Admin Queue info */ + struct i40e_adminq_info aq; + + /* state of nvm update process */ + enum i40e_nvmupd_state nvmupd_state; + struct i40e_aq_desc nvm_wb_desc; + struct i40e_virt_mem nvm_buff; + + /* HMC info */ + struct i40e_hmc_info hmc; /* HMC info struct */ + + /* LLDP/DCBX Status */ + u16 dcbx_status; + + /* DCBX info */ + struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */ + struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */ + struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ + +#ifdef X722_SUPPORT + /* WoL and proxy support */ + u16 num_wol_proxy_filters; + u16 wol_proxy_vsi_seid; + +#endif +#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0) + u64 flags; + + /* debug mask */ + u32 debug_mask; +#ifndef I40E_NDIS_SUPPORT + char err_str[16]; +#endif /* I40E_NDIS_SUPPORT */ +}; + +STATIC INLINE bool i40e_is_vf(struct i40e_hw *hw) +{ +#ifdef X722_SUPPORT + return (hw->mac.type == I40E_MAC_VF || + hw->mac.type == I40E_MAC_X722_VF); +#else + return hw->mac.type == I40E_MAC_VF; +#endif +} + +struct i40e_driver_version { + u8 major_version; + u8 minor_version; + u8 build_version; + u8 subbuild_version; + u8 driver_string[32]; +}; + +/* RX Descriptors */ +union i40e_16byte_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + struct { + union { + __le16 mirroring_status; + __le16 fcoe_ctx_id; + } mirr_fcoe; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 fd_id; /* Flow director filter id */ + __le32 fcoe_param; /* FCoE DDP Context id */ + } hi_dword; + } qword0; + struct { + /* ext status/error/pktype/length */ + __le64 status_error_len; + } qword1; + } wb; /* writeback */ +}; + +union i40e_32byte_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 
hdr_addr; /* Header buffer address */ + /* bit 0 of hdr_buffer_addr is DD bit */ + __le64 rsvd1; + __le64 rsvd2; + } read; + struct { + struct { + struct { + union { + __le16 mirroring_status; + __le16 fcoe_ctx_id; + } mirr_fcoe; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 fcoe_param; /* FCoE DDP Context id */ + /* Flow director filter id in case of + * Programming status desc WB + */ + __le32 fd_id; + } hi_dword; + } qword0; + struct { + /* status/error/pktype/length */ + __le64 status_error_len; + } qword1; + struct { + __le16 ext_status; /* extended status */ + __le16 rsvd; + __le16 l2tag2_1; + __le16 l2tag2_2; + } qword2; + struct { + union { + __le32 flex_bytes_lo; + __le32 pe_status; + } lo_dword; + union { + __le32 flex_bytes_hi; + __le32 fd_id; + } hi_dword; + } qword3; + } wb; /* writeback */ +}; + +#define I40E_RXD_QW0_MIRROR_STATUS_SHIFT 8 +#define I40E_RXD_QW0_MIRROR_STATUS_MASK (0x3FUL << \ + I40E_RXD_QW0_MIRROR_STATUS_SHIFT) +#define I40E_RXD_QW0_FCOEINDX_SHIFT 0 +#define I40E_RXD_QW0_FCOEINDX_MASK (0xFFFUL << \ + I40E_RXD_QW0_FCOEINDX_SHIFT) + +enum i40e_rx_desc_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_STATUS_DD_SHIFT = 0, + I40E_RX_DESC_STATUS_EOF_SHIFT = 1, + I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2, + I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3, + I40E_RX_DESC_STATUS_CRCP_SHIFT = 4, + I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ + I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, +#ifdef X722_SUPPORT + I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8, +#else + I40E_RX_DESC_STATUS_RESERVED1_SHIFT = 8, +#endif + + I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ + I40E_RX_DESC_STATUS_FLM_SHIFT = 11, + I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ + I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, + I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, + I40E_RX_DESC_STATUS_RESERVED2_SHIFT = 16, /* 2 BITS */ +#ifdef X722_SUPPORT + I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18, +#else + I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18, +#endif + I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */ +}; + +#define I40E_RXD_QW1_STATUS_SHIFT 0 +#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) << \ + I40E_RXD_QW1_STATUS_SHIFT) + +#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT +#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ + I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) + +#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT +#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) + +#define I40E_RXD_QW1_STATUS_UMBCAST_SHIFT I40E_RX_DESC_STATUS_UMBCAST +#define I40E_RXD_QW1_STATUS_UMBCAST_MASK (0x3UL << \ + I40E_RXD_QW1_STATUS_UMBCAST_SHIFT) + +enum i40e_rx_desc_fltstat_values { + I40E_RX_DESC_FLTSTAT_NO_DATA = 0, + I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? 
FD_ID : RSV */ + I40E_RX_DESC_FLTSTAT_RSV = 2, + I40E_RX_DESC_FLTSTAT_RSS_HASH = 3, +}; + +#define I40E_RXD_PACKET_TYPE_UNICAST 0 +#define I40E_RXD_PACKET_TYPE_MULTICAST 1 +#define I40E_RXD_PACKET_TYPE_BROADCAST 2 +#define I40E_RXD_PACKET_TYPE_MIRRORED 3 + +#define I40E_RXD_QW1_ERROR_SHIFT 19 +#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT) + +enum i40e_rx_desc_error_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_ERROR_RXE_SHIFT = 0, + I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1, + I40E_RX_DESC_ERROR_HBO_SHIFT = 2, + I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */ + I40E_RX_DESC_ERROR_IPE_SHIFT = 3, + I40E_RX_DESC_ERROR_L4E_SHIFT = 4, + I40E_RX_DESC_ERROR_EIPE_SHIFT = 5, + I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6, + I40E_RX_DESC_ERROR_PPRS_SHIFT = 7 +}; + +enum i40e_rx_desc_error_l3l4e_fcoe_masks { + I40E_RX_DESC_ERROR_L3L4E_NONE = 0, + I40E_RX_DESC_ERROR_L3L4E_PROT = 1, + I40E_RX_DESC_ERROR_L3L4E_FC = 2, + I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3, + I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4 +}; + +#define I40E_RXD_QW1_PTYPE_SHIFT 30 +#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT) + +/* Packet type non-ip values */ +enum i40e_rx_l2_ptype { + I40E_RX_PTYPE_L2_RESERVED = 0, + I40E_RX_PTYPE_L2_MAC_PAY2 = 1, + I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, + I40E_RX_PTYPE_L2_FIP_PAY2 = 3, + I40E_RX_PTYPE_L2_OUI_PAY2 = 4, + I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, + I40E_RX_PTYPE_L2_LLDP_PAY2 = 6, + I40E_RX_PTYPE_L2_ECP_PAY2 = 7, + I40E_RX_PTYPE_L2_EVB_PAY2 = 8, + I40E_RX_PTYPE_L2_QCN_PAY2 = 9, + I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10, + I40E_RX_PTYPE_L2_ARP = 11, + I40E_RX_PTYPE_L2_FCOE_PAY3 = 12, + I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, + I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, + I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, + I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, + I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, + I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, + I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, + I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, + I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, + I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, + I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, + I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, + I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 +}; + +struct i40e_rx_ptype_decoded { + u32 ptype:8; + u32 known:1; + u32 outer_ip:1; + u32 outer_ip_ver:1; + u32 outer_frag:1; + u32 tunnel_type:3; + u32 tunnel_end_prot:2; + u32 tunnel_end_frag:1; + u32 inner_prot:4; + u32 payload_layer:3; +}; + +enum i40e_rx_ptype_outer_ip { + I40E_RX_PTYPE_OUTER_L2 = 0, + I40E_RX_PTYPE_OUTER_IP = 1 +}; + +enum i40e_rx_ptype_outer_ip_ver { + I40E_RX_PTYPE_OUTER_NONE = 0, + I40E_RX_PTYPE_OUTER_IPV4 = 0, + I40E_RX_PTYPE_OUTER_IPV6 = 1 +}; + +enum i40e_rx_ptype_outer_fragmented { + I40E_RX_PTYPE_NOT_FRAG = 0, + I40E_RX_PTYPE_FRAG = 1 +}; + +enum i40e_rx_ptype_tunnel_type { + I40E_RX_PTYPE_TUNNEL_NONE = 0, + I40E_RX_PTYPE_TUNNEL_IP_IP = 1, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, +}; + +enum i40e_rx_ptype_tunnel_end_prot { + I40E_RX_PTYPE_TUNNEL_END_NONE = 0, + I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1, + I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2, +}; + +enum i40e_rx_ptype_inner_prot { + I40E_RX_PTYPE_INNER_PROT_NONE = 0, + I40E_RX_PTYPE_INNER_PROT_UDP = 1, + I40E_RX_PTYPE_INNER_PROT_TCP = 2, + I40E_RX_PTYPE_INNER_PROT_SCTP = 3, + I40E_RX_PTYPE_INNER_PROT_ICMP = 4, + I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5 +}; + +enum i40e_rx_ptype_payload_layer { + I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, + I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, + 
I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, + I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, +}; + +#define I40E_RX_PTYPE_BIT_MASK 0x0FFFFFFF +#define I40E_RX_PTYPE_SHIFT 56 + +#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38 +#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \ + I40E_RXD_QW1_LENGTH_PBUF_SHIFT) + +#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52 +#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \ + I40E_RXD_QW1_LENGTH_HBUF_SHIFT) + +#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 +#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT) + +#define I40E_RXD_QW1_NEXTP_SHIFT 38 +#define I40E_RXD_QW1_NEXTP_MASK (0x1FFFULL << I40E_RXD_QW1_NEXTP_SHIFT) + +#define I40E_RXD_QW2_EXT_STATUS_SHIFT 0 +#define I40E_RXD_QW2_EXT_STATUS_MASK (0xFFFFFUL << \ + I40E_RXD_QW2_EXT_STATUS_SHIFT) + +enum i40e_rx_desc_ext_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0, + I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1, + I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */ + I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */ + I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9, + I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10, + I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, +}; + +#define I40E_RXD_QW2_L2TAG2_SHIFT 0 +#define I40E_RXD_QW2_L2TAG2_MASK (0xFFFFUL << I40E_RXD_QW2_L2TAG2_SHIFT) + +#define I40E_RXD_QW2_L2TAG3_SHIFT 16 +#define I40E_RXD_QW2_L2TAG3_MASK (0xFFFFUL << I40E_RXD_QW2_L2TAG3_SHIFT) + +enum i40e_rx_desc_pe_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */ + I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */ + I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */ + I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24, + I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25, + I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26, + I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27, + I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28, + I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29 +}; + +#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38 +#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000 + +#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2 +#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \ + I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT) + +#define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT 0 +#define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_MASK (0x7FFFUL << \ + I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT) + +#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19 +#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \ + I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT) + +enum i40e_rx_prog_status_desc_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0, + I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */ +}; + +enum i40e_rx_prog_status_desc_prog_id_masks { + I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1, + I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2, + I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4, +}; + +enum i40e_rx_prog_status_desc_error_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, + I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1, + I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, + I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 +}; + +#define I40E_TWO_BIT_MASK 0x3 +#define I40E_THREE_BIT_MASK 0x7 +#define I40E_FOUR_BIT_MASK 0xF +#define I40E_EIGHTEEN_BIT_MASK 0x3FFFF + +/* TX Descriptor */ +struct i40e_tx_desc { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le64 cmd_type_offset_bsz; +}; + 
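The Rx descriptor QW1 shift/mask definitions above (status, PTYPE and packet-buffer length) all operate on the single 64-bit writeback word status_error_len. As a minimal sketch of how they combine — not part of this patch; the helper name is illustrative and it assumes i40e_type.h plus DPDK's rte_le_to_cpu_64() from rte_byteorder.h — a receive path would decode a written-back descriptor roughly like this:

/* Illustrative only: decode the writeback qword of a 32-byte Rx descriptor
 * using the macros defined above. Returns 1 and fills pkt_len/ptype once
 * the DD (descriptor done) bit is set, 0 otherwise.
 */
static inline int
example_decode_rx_qword1(volatile union i40e_32byte_rx_desc *rxd,
			 uint16_t *pkt_len, uint8_t *ptype)
{
	uint64_t qword1 = rte_le_to_cpu_64(rxd->wb.qword1.status_error_len);
	uint64_t status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
			  I40E_RXD_QW1_STATUS_SHIFT;

	/* hardware has not written this descriptor back yet */
	if (!(status & (1ULL << I40E_RX_DESC_STATUS_DD_SHIFT)))
		return 0;

	*pkt_len = (uint16_t)((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
			      I40E_RXD_QW1_LENGTH_PBUF_SHIFT);
	*ptype = (uint8_t)((qword1 & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT);
	return 1;
}

The driver's own receive routines in i40e_rxtx.c follow essentially the same pattern, additionally extracting the error field (I40E_RXD_QW1_ERROR_MASK) to build offload flags.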
+#define I40E_TXD_QW1_DTYPE_SHIFT 0 +#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT) + +enum i40e_tx_desc_dtype_value { + I40E_TX_DESC_DTYPE_DATA = 0x0, + I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */ + I40E_TX_DESC_DTYPE_CONTEXT = 0x1, + I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2, + I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8, + I40E_TX_DESC_DTYPE_DDP_CTX = 0x9, + I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB, + I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC, + I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD, + I40E_TX_DESC_DTYPE_DESC_DONE = 0xF +}; + +#define I40E_TXD_QW1_CMD_SHIFT 4 +#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT) + +enum i40e_tx_desc_cmd_bits { + I40E_TX_DESC_CMD_EOP = 0x0001, + I40E_TX_DESC_CMD_RS = 0x0002, + I40E_TX_DESC_CMD_ICRC = 0x0004, + I40E_TX_DESC_CMD_IL2TAG1 = 0x0008, + I40E_TX_DESC_CMD_DUMMY = 0x0010, + I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */ + I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */ + I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */ + I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */ + I40E_TX_DESC_CMD_FCOET = 0x0080, + I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */ +}; + +#define I40E_TXD_QW1_OFFSET_SHIFT 16 +#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \ + I40E_TXD_QW1_OFFSET_SHIFT) + +enum i40e_tx_desc_length_fields { + /* Note: These are predefined bit offsets */ + I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */ + I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */ + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */ +}; + +#define I40E_TXD_QW1_MACLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_MACLEN_SHIFT) +#define I40E_TXD_QW1_IPLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_IPLEN_SHIFT) +#define I40E_TXD_QW1_L4LEN_MASK (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT) +#define I40E_TXD_QW1_FCLEN_MASK (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT) + +#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34 +#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \ + I40E_TXD_QW1_TX_BUF_SZ_SHIFT) + +#define I40E_TXD_QW1_L2TAG1_SHIFT 48 +#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT) + +/* Context descriptors */ +struct i40e_tx_context_desc { + __le32 tunneling_params; + __le16 l2tag2; + __le16 rsvd; + __le64 type_cmd_tso_mss; +}; + +#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0 +#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT) + +#define I40E_TXD_CTX_QW1_CMD_SHIFT 4 +#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT) + +enum i40e_tx_ctx_desc_cmd_bits { + I40E_TX_CTX_DESC_TSO = 0x01, + I40E_TX_CTX_DESC_TSYN = 0x02, + I40E_TX_CTX_DESC_IL2TAG2 = 0x04, + I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08, + I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00, + I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10, + I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20, + I40E_TX_CTX_DESC_SWTCH_VSI = 0x30, + I40E_TX_CTX_DESC_SWPE = 0x40 +}; + +#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30 +#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \ + I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) + +#define I40E_TXD_CTX_QW1_MSS_SHIFT 50 +#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \ + I40E_TXD_CTX_QW1_MSS_SHIFT) + +#define I40E_TXD_CTX_QW1_VSI_SHIFT 50 +#define 
I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT) + +#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0 +#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \ + I40E_TXD_CTX_QW0_EXT_IP_SHIFT) + +enum i40e_tx_ctx_desc_eipt_offload { + I40E_TX_CTX_EXT_IP_NONE = 0x0, + I40E_TX_CTX_EXT_IP_IPV6 = 0x1, + I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2, + I40E_TX_CTX_EXT_IP_IPV4 = 0x3 +}; + +#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2 +#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \ + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT) + +#define I40E_TXD_CTX_QW0_NATT_SHIFT 9 +#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) + +#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT) +#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) + +#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 +#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) + +#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK + +#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12 +#define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \ + I40E_TXD_CTX_QW0_NATLEN_SHIFT) + +#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19 +#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ + I40E_TXD_CTX_QW0_DECTTL_SHIFT) + +#ifdef X722_SUPPORT +#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23 +#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT) +#endif +struct i40e_nop_desc { + __le64 rsvd; + __le64 dtype_cmd; +}; + +#define I40E_TXD_NOP_QW1_DTYPE_SHIFT 0 +#define I40E_TXD_NOP_QW1_DTYPE_MASK (0xFUL << I40E_TXD_NOP_QW1_DTYPE_SHIFT) + +#define I40E_TXD_NOP_QW1_CMD_SHIFT 4 +#define I40E_TXD_NOP_QW1_CMD_MASK (0x7FUL << I40E_TXD_NOP_QW1_CMD_SHIFT) + +enum i40e_tx_nop_desc_cmd_bits { + /* Note: These are predefined bit offsets */ + I40E_TX_NOP_DESC_EOP_SHIFT = 0, + I40E_TX_NOP_DESC_RS_SHIFT = 1, + I40E_TX_NOP_DESC_RSV_SHIFT = 2 /* 5 bits */ +}; + +struct i40e_filter_program_desc { + __le32 qindex_flex_ptype_vsi; + __le32 rsvd; + __le32 dtype_cmd_cntindex; + __le32 fd_id; +}; +#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0 +#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \ + I40E_TXD_FLTR_QW0_QINDEX_SHIFT) +#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11 +#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \ + I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) +#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17 +#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \ + I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) + +/* Packet Classifier Types for filters */ +enum i40e_filter_pctype { +#ifdef X722_SUPPORT + /* Note: Values 0-28 are reserved for future use. + * Value 29, 30, 32 are not supported on XL710 and X710. + */ + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29, + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30, +#else + /* Note: Values 0-30 are reserved for future use */ +#endif + I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, +#ifdef X722_SUPPORT + I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32, +#else + /* Note: Value 32 is reserved for future use */ +#endif + I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, + I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, + I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, + I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, +#ifdef X722_SUPPORT + /* Note: Values 37-38 are reserved for future use. + * Value 39, 40, 42 are not supported on XL710 and X710. 
+ */ + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39, + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40, +#else + /* Note: Values 37-40 are reserved for future use */ +#endif + I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, +#ifdef X722_SUPPORT + I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42, +#endif + I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, + I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, + I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, + I40E_FILTER_PCTYPE_FRAG_IPV6 = 46, + /* Note: Value 47 is reserved for future use */ + I40E_FILTER_PCTYPE_FCOE_OX = 48, + I40E_FILTER_PCTYPE_FCOE_RX = 49, + I40E_FILTER_PCTYPE_FCOE_OTHER = 50, + /* Note: Values 51-62 are reserved for future use */ + I40E_FILTER_PCTYPE_L2_PAYLOAD = 63, +}; + +enum i40e_filter_program_desc_dest { + I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0, + I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1, + I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2, +}; + +enum i40e_filter_program_desc_fd_status { + I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0, + I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1, + I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2, + I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3, +}; + +#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 +#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) + +#define I40E_TXD_FLTR_QW1_DTYPE_SHIFT 0 +#define I40E_TXD_FLTR_QW1_DTYPE_MASK (0xFUL << I40E_TXD_FLTR_QW1_DTYPE_SHIFT) + +#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 +#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ + I40E_TXD_FLTR_QW1_CMD_SHIFT) + +#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT) + +enum i40e_filter_program_desc_pcmd { + I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1, + I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2, +}; + +#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT) + +#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) + +#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \ + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \ + I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) +#ifdef X722_SUPPORT + +#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \ + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT) +#endif + +#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20 +#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \ + I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) + +enum i40e_filter_type { + I40E_FLOW_DIRECTOR_FLTR = 0, + I40E_PE_QUAD_HASH_FLTR = 1, + I40E_ETHERTYPE_FLTR, + I40E_FCOE_CTX_FLTR, + I40E_MAC_VLAN_FLTR, + I40E_HASH_FLTR +}; + +struct i40e_vsi_context { + u16 seid; + u16 uplink_seid; + u16 vsi_number; + u16 vsis_allocated; + u16 vsis_unallocated; + u16 flags; + u8 pf_num; + u8 vf_num; + u8 connection_type; + struct i40e_aqc_vsi_properties_data info; +}; + +struct i40e_veb_context { + u16 seid; + u16 uplink_seid; + u16 veb_number; + u16 vebs_allocated; + u16 vebs_unallocated; + u16 flags; + struct i40e_aqc_get_veb_parameters_completion info; +}; + +/* Statistics collected by each port, VSI, VEB, and S-channel */ +struct i40e_eth_stats { + u64 rx_bytes; /* gorc */ + u64 rx_unicast; /* uprc */ + u64 rx_multicast; /* mprc */ + u64 rx_broadcast; /* bprc */ + u64 rx_discards; /* rdpc */ + u64 
rx_unknown_protocol; /* rupp */ + u64 tx_bytes; /* gotc */ + u64 tx_unicast; /* uptc */ + u64 tx_multicast; /* mptc */ + u64 tx_broadcast; /* bptc */ + u64 tx_discards; /* tdpc */ + u64 tx_errors; /* tepc */ +}; + +/* Statistics collected per VEB per TC */ +struct i40e_veb_tc_stats { + u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS]; + u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS]; + u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS]; + u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS]; +}; + +/* Statistics collected by the MAC */ +struct i40e_hw_port_stats { + /* eth stats collected by the port */ + struct i40e_eth_stats eth; + + /* additional port specific stats */ + u64 tx_dropped_link_down; /* tdold */ + u64 crc_errors; /* crcerrs */ + u64 illegal_bytes; /* illerrc */ + u64 error_bytes; /* errbc */ + u64 mac_local_faults; /* mlfc */ + u64 mac_remote_faults; /* mrfc */ + u64 rx_length_errors; /* rlec */ + u64 link_xon_rx; /* lxonrxc */ + u64 link_xoff_rx; /* lxoffrxc */ + u64 priority_xon_rx[8]; /* pxonrxc[8] */ + u64 priority_xoff_rx[8]; /* pxoffrxc[8] */ + u64 link_xon_tx; /* lxontxc */ + u64 link_xoff_tx; /* lxofftxc */ + u64 priority_xon_tx[8]; /* pxontxc[8] */ + u64 priority_xoff_tx[8]; /* pxofftxc[8] */ + u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */ + u64 rx_size_64; /* prc64 */ + u64 rx_size_127; /* prc127 */ + u64 rx_size_255; /* prc255 */ + u64 rx_size_511; /* prc511 */ + u64 rx_size_1023; /* prc1023 */ + u64 rx_size_1522; /* prc1522 */ + u64 rx_size_big; /* prc9522 */ + u64 rx_undersize; /* ruc */ + u64 rx_fragments; /* rfc */ + u64 rx_oversize; /* roc */ + u64 rx_jabber; /* rjc */ + u64 tx_size_64; /* ptc64 */ + u64 tx_size_127; /* ptc127 */ + u64 tx_size_255; /* ptc255 */ + u64 tx_size_511; /* ptc511 */ + u64 tx_size_1023; /* ptc1023 */ + u64 tx_size_1522; /* ptc1522 */ + u64 tx_size_big; /* ptc9522 */ + u64 mac_short_packet_dropped; /* mspdc */ + u64 checksum_error; /* xec */ + /* flow director stats */ + u64 fd_atr_match; + u64 fd_sb_match; + u64 fd_atr_tunnel_match; + u32 fd_atr_status; + u32 fd_sb_status; + /* EEE LPI */ + u32 tx_lpi_status; + u32 rx_lpi_status; + u64 tx_lpi_count; /* etlpic */ + u64 rx_lpi_count; /* erlpic */ +}; + +/* Checksum and Shadow RAM pointers */ +#define I40E_SR_NVM_CONTROL_WORD 0x00 +#define I40E_SR_PCIE_ANALOG_CONFIG_PTR 0x03 +#define I40E_SR_PHY_ANALOG_CONFIG_PTR 0x04 +#define I40E_SR_OPTION_ROM_PTR 0x05 +#define I40E_SR_RO_PCIR_REGS_AUTO_LOAD_PTR 0x06 +#define I40E_SR_AUTO_GENERATED_POINTERS_PTR 0x07 +#define I40E_SR_PCIR_REGS_AUTO_LOAD_PTR 0x08 +#define I40E_SR_EMP_GLOBAL_MODULE_PTR 0x09 +#define I40E_SR_RO_PCIE_LCB_PTR 0x0A +#define I40E_SR_EMP_IMAGE_PTR 0x0B +#define I40E_SR_PE_IMAGE_PTR 0x0C +#define I40E_SR_CSR_PROTECTED_LIST_PTR 0x0D +#define I40E_SR_MNG_CONFIG_PTR 0x0E +#define I40E_SR_EMP_MODULE_PTR 0x0F +#define I40E_SR_PBA_FLAGS 0x15 +#define I40E_SR_PBA_BLOCK_PTR 0x16 +#define I40E_SR_BOOT_CONFIG_PTR 0x17 +#define I40E_NVM_OEM_VER_OFF 0x83 +#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 +#define I40E_SR_NVM_WAKE_ON_LAN 0x19 +#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 +#define I40E_SR_PERMANENT_SAN_MAC_ADDRESS_PTR 0x28 +#define I40E_SR_NVM_MAP_VERSION 0x29 +#define I40E_SR_NVM_IMAGE_VERSION 0x2A +#define I40E_SR_NVM_STRUCTURE_VERSION 0x2B +#define I40E_SR_NVM_EETRACK_LO 0x2D +#define I40E_SR_NVM_EETRACK_HI 0x2E +#define I40E_SR_VPD_PTR 0x2F +#define I40E_SR_PXE_SETUP_PTR 0x30 +#define I40E_SR_PXE_CONFIG_CUST_OPTIONS_PTR 0x31 +#define I40E_SR_NVM_ORIGINAL_EETRACK_LO 0x34 +#define I40E_SR_NVM_ORIGINAL_EETRACK_HI 0x35 +#define 
I40E_SR_SW_ETHERNET_MAC_ADDRESS_PTR 0x37 +#define I40E_SR_POR_REGS_AUTO_LOAD_PTR 0x38 +#define I40E_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A +#define I40E_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B +#define I40E_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C +#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E +#define I40E_SR_SW_CHECKSUM_WORD 0x3F +#define I40E_SR_1ST_FREE_PROVISION_AREA_PTR 0x40 +#define I40E_SR_4TH_FREE_PROVISION_AREA_PTR 0x42 +#define I40E_SR_3RD_FREE_PROVISION_AREA_PTR 0x44 +#define I40E_SR_2ND_FREE_PROVISION_AREA_PTR 0x46 +#define I40E_SR_EMP_SR_SETTINGS_PTR 0x48 +#define I40E_SR_FEATURE_CONFIGURATION_PTR 0x49 +#define I40E_SR_CONFIGURATION_METADATA_PTR 0x4D +#define I40E_SR_IMMEDIATE_VALUES_PTR 0x4E + +/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */ +#define I40E_SR_VPD_MODULE_MAX_SIZE 1024 +#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 +#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 +#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) + +/* Shadow RAM related */ +#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800 +#define I40E_SR_BUF_ALIGNMENT 4096 +#define I40E_SR_WORDS_IN_1KB 512 +/* Checksum should be calculated such that after adding all the words, + * including the checksum word itself, the sum should be 0xBABA. + */ +#define I40E_SR_SW_CHECKSUM_BASE 0xBABA + +#define I40E_SRRD_SRCTL_ATTEMPTS 100000 + +enum i40e_switch_element_types { + I40E_SWITCH_ELEMENT_TYPE_MAC = 1, + I40E_SWITCH_ELEMENT_TYPE_PF = 2, + I40E_SWITCH_ELEMENT_TYPE_VF = 3, + I40E_SWITCH_ELEMENT_TYPE_EMP = 4, + I40E_SWITCH_ELEMENT_TYPE_BMC = 6, + I40E_SWITCH_ELEMENT_TYPE_PE = 16, + I40E_SWITCH_ELEMENT_TYPE_VEB = 17, + I40E_SWITCH_ELEMENT_TYPE_PA = 18, + I40E_SWITCH_ELEMENT_TYPE_VSI = 19, +}; + +/* Supported EtherType filters */ +enum i40e_ether_type_index { + I40E_ETHER_TYPE_1588 = 0, + I40E_ETHER_TYPE_FIP = 1, + I40E_ETHER_TYPE_OUI_EXTENDED = 2, + I40E_ETHER_TYPE_MAC_CONTROL = 3, + I40E_ETHER_TYPE_LLDP = 4, + I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5, + I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6, + I40E_ETHER_TYPE_QCN_CNM = 7, + I40E_ETHER_TYPE_8021X = 8, + I40E_ETHER_TYPE_ARP = 9, + I40E_ETHER_TYPE_RSV1 = 10, + I40E_ETHER_TYPE_RSV2 = 11, +}; + +/* Filter context base size is 1K */ +#define I40E_HASH_FILTER_BASE_SIZE 1024 +/* Supported Hash filter values */ +enum i40e_hash_filter_size { + I40E_HASH_FILTER_SIZE_1K = 0, + I40E_HASH_FILTER_SIZE_2K = 1, + I40E_HASH_FILTER_SIZE_4K = 2, + I40E_HASH_FILTER_SIZE_8K = 3, + I40E_HASH_FILTER_SIZE_16K = 4, + I40E_HASH_FILTER_SIZE_32K = 5, + I40E_HASH_FILTER_SIZE_64K = 6, + I40E_HASH_FILTER_SIZE_128K = 7, + I40E_HASH_FILTER_SIZE_256K = 8, + I40E_HASH_FILTER_SIZE_512K = 9, + I40E_HASH_FILTER_SIZE_1M = 10, +}; + +/* DMA context base size is 0.5K */ +#define I40E_DMA_CNTX_BASE_SIZE 512 +/* Supported DMA context values */ +enum i40e_dma_cntx_size { + I40E_DMA_CNTX_SIZE_512 = 0, + I40E_DMA_CNTX_SIZE_1K = 1, + I40E_DMA_CNTX_SIZE_2K = 2, + I40E_DMA_CNTX_SIZE_4K = 3, + I40E_DMA_CNTX_SIZE_8K = 4, + I40E_DMA_CNTX_SIZE_16K = 5, + I40E_DMA_CNTX_SIZE_32K = 6, + I40E_DMA_CNTX_SIZE_64K = 7, + I40E_DMA_CNTX_SIZE_128K = 8, + I40E_DMA_CNTX_SIZE_256K = 9, +}; + +/* Supported Hash look up table (LUT) sizes */ +enum i40e_hash_lut_size { + I40E_HASH_LUT_SIZE_128 = 0, + I40E_HASH_LUT_SIZE_512 = 1, +}; + +/* Structure to hold a per PF filter control settings */ +struct i40e_filter_control_settings { + /* number of PE Quad Hash filter buckets */ + enum i40e_hash_filter_size pe_filt_num; + /* number of PE Quad Hash contexts */ + enum i40e_dma_cntx_size pe_cntx_num; + /* number of FCoE filter buckets 
*/ + enum i40e_hash_filter_size fcoe_filt_num; + /* number of FCoE DDP contexts */ + enum i40e_dma_cntx_size fcoe_cntx_num; + /* size of the Hash LUT */ + enum i40e_hash_lut_size hash_lut_size; + /* enable FDIR filters for PF and its VFs */ + bool enable_fdir; + /* enable Ethertype filters for PF and its VFs */ + bool enable_ethtype; + /* enable MAC/VLAN filters for PF and its VFs */ + bool enable_macvlan; +}; + +/* Structure to hold device level control filter counts */ +struct i40e_control_filter_stats { + u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */ + u16 etype_used; /* Used perfect EtherType filters */ + u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */ + u16 etype_free; /* Un-used perfect EtherType filters */ +}; + +enum i40e_reset_type { + I40E_RESET_POR = 0, + I40E_RESET_CORER = 1, + I40E_RESET_GLOBR = 2, + I40E_RESET_EMPR = 3, +}; + +/* IEEE 802.1AB LLDP Agent Variables from NVM */ +#define I40E_NVM_LLDP_CFG_PTR 0xD +struct i40e_lldp_variables { + u16 length; + u16 adminstatus; + u16 msgfasttx; + u16 msgtxinterval; + u16 txparams; + u16 timers; + u16 crc8; +}; + +/* Offsets into Alternate Ram */ +#define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */ +#define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */ +#define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */ +#define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */ +#define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */ +#define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */ + +/* Alternate Ram Bandwidth Masks */ +#define I40E_ALT_BW_VALUE_MASK 0xFF +#define I40E_ALT_BW_RELATIVE_MASK 0x40000000 +#define I40E_ALT_BW_VALID_MASK 0x80000000 + +/* RSS Hash Table Size */ +#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 +#endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/i40e/base/i40e_virtchnl.h b/drivers/net/i40e/base/i40e_virtchnl.h new file mode 100644 index 00000000..26208f3f --- /dev/null +++ b/drivers/net/i40e/base/i40e_virtchnl.h @@ -0,0 +1,399 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _I40E_VIRTCHNL_H_ +#define _I40E_VIRTCHNL_H_ + +#include "i40e_type.h" + +/* Description: + * This header file describes the VF-PF communication protocol used + * by the various i40e drivers. + * + * Admin queue buffer usage: + * desc->opcode is always i40e_aqc_opc_send_msg_to_pf + * flags, retval, datalen, and data addr are all used normally. + * Firmware copies the cookie fields when sending messages between the PF and + * VF, but uses all other fields internally. Due to this limitation, we + * must send all messages as "indirect", i.e. using an external buffer. + * + * All the vsi indexes are relative to the VF. Each VF can have maximum of + * three VSIs. All the queue indexes are relative to the VSI. Each VF can + * have a maximum of sixteen queues for all of its VSIs. + * + * The PF is required to return a status code in v_retval for all messages + * except RESET_VF, which does not require any response. The return value is of + * i40e_status_code type, defined in the i40e_type.h. + * + * In general, VF driver initialization should roughly follow the order of these + * opcodes. The VF driver must first validate the API version of the PF driver, + * then request a reset, then get resources, then configure queues and + * interrupts. After these operations are complete, the VF driver may start + * its queues, optionally add MAC and VLAN filters, and process traffic. + */ + +/* Opcodes for VF-PF communication. These are placed in the v_opcode field + * of the virtchnl_msg structure. + */ +enum i40e_virtchnl_ops { +/* The PF sends status change events to VFs using + * the I40E_VIRTCHNL_OP_EVENT opcode. + * VFs send requests to the PF using the other ops. + */ + I40E_VIRTCHNL_OP_UNKNOWN = 0, + I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ + I40E_VIRTCHNL_OP_RESET_VF = 2, + I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3, + I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, + I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8, + I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9, + I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10, + I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11, + I40E_VIRTCHNL_OP_ADD_VLAN = 12, + I40E_VIRTCHNL_OP_DEL_VLAN = 13, + I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + I40E_VIRTCHNL_OP_GET_STATS = 15, + I40E_VIRTCHNL_OP_FCOE = 16, + I40E_VIRTCHNL_OP_EVENT = 17, +#ifdef I40E_SOL_VF_SUPPORT + I40E_VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG = 19, +#endif +}; + +/* Virtual channel message descriptor. This overlays the admin queue + * descriptor. All other data is passed in external buffers. 
+ */ + +struct i40e_virtchnl_msg { + u8 pad[8]; /* AQ flags/opcode/len/retval fields */ + enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */ + enum i40e_status_code v_retval; /* ditto for desc->retval */ + u32 vfid; /* used by PF when sending to VF */ +}; + +/* Message descriptions and data structures.*/ + +/* I40E_VIRTCHNL_OP_VERSION + * VF posts its version number to the PF. PF responds with its version number + * in the same format, along with a return code. + * Reply from PF has its major/minor versions also in param0 and param1. + * If there is a major version mismatch, then the VF cannot operate. + * If there is a minor version mismatch, then the VF can operate but should + * add a warning to the system log. + * + * This enum element MUST always be specified as == 1, regardless of other + * changes in the API. The PF must always respond to this message without + * error regardless of version mismatch. + */ +#define I40E_VIRTCHNL_VERSION_MAJOR 1 +#define I40E_VIRTCHNL_VERSION_MINOR 1 +#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 + +struct i40e_virtchnl_version_info { + u32 major; + u32 minor; +}; + +/* I40E_VIRTCHNL_OP_RESET_VF + * VF sends this request to PF with no parameters + * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register + * until reset completion is indicated. The admin queue must be reinitialized + * after this operation. + * + * When reset is complete, PF must ensure that all queues in all VSIs associated + * with the VF are stopped, all queue configurations in the HMC are set to 0, + * and all MAC and VLAN filters (except the default MAC address) on all VSIs + * are cleared. + */ + +/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES + * Version 1.0 VF sends this request to PF with no parameters + * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities + * PF responds with an indirect message containing + * i40e_virtchnl_vf_resource and one or more + * i40e_virtchnl_vsi_resource structures. + */ + +struct i40e_virtchnl_vsi_resource { + u16 vsi_id; + u16 num_queue_pairs; + enum i40e_vsi_type vsi_type; + u16 qset_handle; + u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS]; +}; +/* VF offload flags */ +#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001 +#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 +#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 +#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 +#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 +#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 + +struct i40e_virtchnl_vf_resource { + u16 num_vsis; + u16 num_queue_pairs; + u16 max_vectors; + u16 max_mtu; + + u32 vf_offload_flags; + u32 max_fcoe_contexts; + u32 max_fcoe_filters; + + struct i40e_virtchnl_vsi_resource vsi_res[1]; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE + * VF sends this message to set up parameters for one TX queue. + * External data buffer contains one instance of i40e_virtchnl_txq_info. + * PF configures requested queue and returns a status code. + */ + +/* Tx queue config info */ +struct i40e_virtchnl_txq_info { + u16 vsi_id; + u16 queue_id; + u16 ring_len; /* number of descriptors, multiple of 8 */ + u16 headwb_enabled; + u64 dma_ring_addr; + u64 dma_headwb_addr; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE + * VF sends this message to set up parameters for one RX queue. 
+ * External data buffer contains one instance of i40e_virtchnl_rxq_info. + * PF configures requested queue and returns a status code. + */ + +/* Rx queue config info */ +struct i40e_virtchnl_rxq_info { + u16 vsi_id; + u16 queue_id; + u32 ring_len; /* number of descriptors, multiple of 32 */ + u16 hdr_size; + u16 splithdr_enabled; + u32 databuffer_size; + u32 max_pkt_size; + u64 dma_ring_addr; + enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES + * VF sends this message to set parameters for all active TX and RX queues + * associated with the specified VSI. + * PF configures queues and returns status. + * If the number of queues specified is greater than the number of queues + * associated with the VSI, an error is returned and no queues are configured. + */ +struct i40e_virtchnl_queue_pair_info { + /* NOTE: vsi_id and queue_id should be identical for both queues. */ + struct i40e_virtchnl_txq_info txq; + struct i40e_virtchnl_rxq_info rxq; +}; + +struct i40e_virtchnl_vsi_queue_config_info { + u16 vsi_id; + u16 num_queue_pairs; + struct i40e_virtchnl_queue_pair_info qpair[1]; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP + * VF uses this message to map vectors to queues. + * The rxq_map and txq_map fields are bitmaps used to indicate which queues + * are to be associated with the specified vector. + * The "other" causes are always mapped to vector 0. + * PF configures interrupt mapping and returns status. + */ +struct i40e_virtchnl_vector_map { + u16 vsi_id; + u16 vector_id; + u16 rxq_map; + u16 txq_map; + u16 rxitr_idx; + u16 txitr_idx; +}; + +struct i40e_virtchnl_irq_map_info { + u16 num_vectors; + struct i40e_virtchnl_vector_map vecmap[1]; +}; + +/* I40E_VIRTCHNL_OP_ENABLE_QUEUES + * I40E_VIRTCHNL_OP_DISABLE_QUEUES + * VF sends these message to enable or disable TX/RX queue pairs. + * The queues fields are bitmaps indicating which queues to act upon. + * (Currently, we only support 16 queues per VF, but we make the field + * u32 to allow for expansion.) + * PF performs requested action and returns status. + */ +struct i40e_virtchnl_queue_select { + u16 vsi_id; + u16 pad; + u32 rx_queues; + u32 tx_queues; +}; + +/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS + * VF sends this message in order to add one or more unicast or multicast + * address filters for the specified VSI. + * PF adds the filters and returns status. + */ + +/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS + * VF sends this message in order to remove one or more unicast or multicast + * filters for the specified VSI. + * PF removes the filters and returns status. + */ + +struct i40e_virtchnl_ether_addr { + u8 addr[I40E_ETH_LENGTH_OF_ADDRESS]; + u8 pad[2]; +}; + +struct i40e_virtchnl_ether_addr_list { + u16 vsi_id; + u16 num_elements; + struct i40e_virtchnl_ether_addr list[1]; +}; + +#ifdef I40E_SOL_VF_SUPPORT +/* I40E_VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG + * VF sends this message to get the default MTU and list of additional ethernet + * addresses it is allowed to use. + * PF responds with an indirect message containing + * i40e_virtchnl_addnl_solaris_config with zero or more + * i40e_virtchnl_ether_addr structures. + * + * It is expected that this operation will only ever be needed for Solaris VFs + * running under a Solaris PF. + */ +struct i40e_virtchnl_addnl_solaris_config { + u16 default_mtu; + struct i40e_virtchnl_ether_addr_list al; +}; + +#endif +/* I40E_VIRTCHNL_OP_ADD_VLAN + * VF sends this message to add one or more VLAN tag filters for receives. + * PF adds the filters and returns status. 
+ * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +/* I40E_VIRTCHNL_OP_DEL_VLAN + * VF sends this message to remove one or more VLAN tag filters for receives. + * PF removes the filters and returns status. + * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +struct i40e_virtchnl_vlan_filter_list { + u16 vsi_id; + u16 num_elements; + u16 vlan_id[1]; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE + * VF sends VSI id and flags. + * PF returns status code in retval. + * Note: we assume that broadcast accept mode is always enabled. + */ +struct i40e_virtchnl_promisc_info { + u16 vsi_id; + u16 flags; +}; + +#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001 +#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002 + +/* I40E_VIRTCHNL_OP_GET_STATS + * VF sends this message to request stats for the selected VSI. VF uses + * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id + * field is ignored by the PF. + * + * PF replies with struct i40e_eth_stats in an external buffer. + */ + +/* I40E_VIRTCHNL_OP_EVENT + * PF sends this message to inform the VF driver of events that may affect it. + * No direct response is expected from the VF, though it may generate other + * messages in response to this one. + */ +enum i40e_virtchnl_event_codes { + I40E_VIRTCHNL_EVENT_UNKNOWN = 0, + I40E_VIRTCHNL_EVENT_LINK_CHANGE, + I40E_VIRTCHNL_EVENT_RESET_IMPENDING, + I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE, +}; +#define I40E_PF_EVENT_SEVERITY_INFO 0 +#define I40E_PF_EVENT_SEVERITY_ATTENTION 1 +#define I40E_PF_EVENT_SEVERITY_ACTION_REQUIRED 2 +#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255 + +struct i40e_virtchnl_pf_event { + enum i40e_virtchnl_event_codes event; + union { + struct { + enum i40e_aq_link_speed link_speed; + bool link_status; + } link_event; + } event_data; + + int severity; +}; + +/* VF reset states - these are written into the RSTAT register: + * I40E_VFGEN_RSTAT1 on the PF + * I40E_VFGEN_RSTAT on the VF + * When the PF initiates a reset, it writes 0 + * When the reset is complete, it writes 1 + * When the PF detects that the VF has recovered, it writes 2 + * VF checks this register periodically to determine if a reset has occurred, + * then polls it to know when the reset is complete. + * If either the PF or VF reads the register while the hardware + * is in a reset state, it will return DEADBEEF, which, when masked + * will result in 3. + */ +enum i40e_vfr_states { + I40E_VFR_INPROGRESS = 0, + I40E_VFR_COMPLETED, + I40E_VFR_VFACTIVE, + I40E_VFR_UNKNOWN, +}; + +#endif /* _I40E_VIRTCHNL_H_ */ diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c new file mode 100644 index 00000000..bc28d3c3 --- /dev/null +++ b/drivers/net/i40e/i40e_ethdev.c @@ -0,0 +1,9106 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/queue.h> +#include <stdio.h> +#include <errno.h> +#include <stdint.h> +#include <string.h> +#include <unistd.h> +#include <stdarg.h> +#include <inttypes.h> +#include <assert.h> + +#include <rte_string_fns.h> +#include <rte_pci.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_memzone.h> +#include <rte_malloc.h> +#include <rte_memcpy.h> +#include <rte_alarm.h> +#include <rte_dev.h> +#include <rte_eth_ctrl.h> + +#include "i40e_logs.h" +#include "base/i40e_prototype.h" +#include "base/i40e_adminq_cmd.h" +#include "base/i40e_type.h" +#include "base/i40e_register.h" +#include "base/i40e_dcb.h" +#include "i40e_ethdev.h" +#include "i40e_rxtx.h" +#include "i40e_pf.h" +#include "i40e_regs.h" + +#define I40E_CLEAR_PXE_WAIT_MS 200 + +/* Maximum number of capability elements */ +#define I40E_MAX_CAP_ELE_NUM 128 + +/* Wait count and interval */ +#define I40E_CHK_Q_ENA_COUNT 1000 +#define I40E_CHK_Q_ENA_INTERVAL_US 1000 + +/* Maximum number of VSIs */ +#define I40E_MAX_NUM_VSIS (384UL) + +#define I40E_PRE_TX_Q_CFG_WAIT_US 10 /* 10 us */ + +/* Flow control default timer */ +#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU + +/* Flow control default high water */ +#define I40E_DEFAULT_HIGH_WATER (0x1C40/1024) + +/* Flow control default low water */ +#define I40E_DEFAULT_LOW_WATER (0x1A40/1024) + +/* Flow control enable fwd bit */ +#define I40E_PRTMAC_FWD_CTRL 0x00000001 + +/* Receive Packet Buffer size */ +#define I40E_RXPBSIZE (968 * 1024) + +/* Kilobytes shift */ +#define I40E_KILOSHIFT 10 + +/* Receive Average Packet Size in Bytes */ +#define I40E_PACKET_AVERAGE_SIZE 128 + +/* Mask of PF interrupt causes */ +#define I40E_PFINT_ICR0_ENA_MASK ( \ + I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \ + I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \ + I40E_PFINT_ICR0_ENA_GRST_MASK | \ + I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \ + I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \ + I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | \ + I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \ + I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \ + I40E_PFINT_ICR0_ENA_VFLR_MASK | \ + I40E_PFINT_ICR0_ENA_ADMINQ_MASK) + +#define I40E_FLOW_TYPES ( \ + (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \ + (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \ + (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \ + (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \ + (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \ + (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \ + (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \ + (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \ + (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \ + (1UL << 
RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \ + (1UL << RTE_ETH_FLOW_L2_PAYLOAD)) + +/* Additional timesync values. */ +#define I40E_PTP_40GB_INCVAL 0x0199999999ULL +#define I40E_PTP_10GB_INCVAL 0x0333333333ULL +#define I40E_PTP_1GB_INCVAL 0x2000000000ULL +#define I40E_PRTTSYN_TSYNENA 0x80000000 +#define I40E_PRTTSYN_TSYNTYPE 0x0e000000 +#define I40E_CYCLECOUNTER_MASK 0xffffffffffffffffULL + +#define I40E_MAX_PERCENT 100 +#define I40E_DEFAULT_DCB_APP_NUM 1 +#define I40E_DEFAULT_DCB_APP_PRIO 3 + +#define I40E_INSET_NONE 0x00000000000000000ULL + +/* bit0 ~ bit 7 */ +#define I40E_INSET_DMAC 0x0000000000000001ULL +#define I40E_INSET_SMAC 0x0000000000000002ULL +#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL +#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL +#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL + +/* bit 8 ~ bit 15 */ +#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL +#define I40E_INSET_IPV4_DST 0x0000000000000200ULL +#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL +#define I40E_INSET_IPV6_DST 0x0000000000000800ULL +#define I40E_INSET_SRC_PORT 0x0000000000001000ULL +#define I40E_INSET_DST_PORT 0x0000000000002000ULL +#define I40E_INSET_SCTP_VT 0x0000000000004000ULL + +/* bit 16 ~ bit 31 */ +#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL +#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL +#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL +#define I40E_INSET_IPV6_TC 0x0000000000080000ULL +#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL +#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL +#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL +#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL + +/* bit 32 ~ bit 47, tunnel fields */ +#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL +#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL +#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL +#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL +#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL +#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL + +/* bit 48 ~ bit 55 */ +#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL + +/* bit 56 ~ bit 63, Flex Payload */ +#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL +#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL +#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL +#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL +#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL +#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL +#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL +#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL +#define I40E_INSET_FLEX_PAYLOAD \ + (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \ + I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \ + I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \ + I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8) + +/** + * Below are values for writing un-exposed registers suggested + * by silicon experts + */ +/* Destination MAC address */ +#define I40E_REG_INSET_L2_DMAC 0xE000000000000000ULL +/* Source MAC address */ +#define I40E_REG_INSET_L2_SMAC 0x1C00000000000000ULL +/* Outer (S-Tag) VLAN tag in the outer L2 header */ +#define I40E_REG_INSET_L2_OUTER_VLAN 0x0200000000000000ULL +/* Inner (C-Tag) or single VLAN tag in the outer L2 header */ +#define I40E_REG_INSET_L2_INNER_VLAN 0x0080000000000000ULL +/* Single VLAN tag in the inner L2 header */ +#define I40E_REG_INSET_TUNNEL_VLAN 0x0100000000000000ULL +/* Source IPv4 address */ +#define I40E_REG_INSET_L3_SRC_IP4 
0x0001800000000000ULL +/* Destination IPv4 address */ +#define I40E_REG_INSET_L3_DST_IP4 0x0000001800000000ULL +/* IPv4 Type of Service (TOS) */ +#define I40E_REG_INSET_L3_IP4_TOS 0x0040000000000000ULL +/* IPv4 Protocol */ +#define I40E_REG_INSET_L3_IP4_PROTO 0x0004000000000000ULL +/* IPv4 Time to Live */ +#define I40E_REG_INSET_L3_IP4_TTL 0x0004000000000000ULL +/* Source IPv6 address */ +#define I40E_REG_INSET_L3_SRC_IP6 0x0007F80000000000ULL +/* Destination IPv6 address */ +#define I40E_REG_INSET_L3_DST_IP6 0x000007F800000000ULL +/* IPv6 Traffic Class (TC) */ +#define I40E_REG_INSET_L3_IP6_TC 0x0040000000000000ULL +/* IPv6 Next Header */ +#define I40E_REG_INSET_L3_IP6_NEXT_HDR 0x0008000000000000ULL +/* IPv6 Hop Limit */ +#define I40E_REG_INSET_L3_IP6_HOP_LIMIT 0x0008000000000000ULL +/* Source L4 port */ +#define I40E_REG_INSET_L4_SRC_PORT 0x0000000400000000ULL +/* Destination L4 port */ +#define I40E_REG_INSET_L4_DST_PORT 0x0000000200000000ULL +/* SCTP verification tag */ +#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG 0x0000000180000000ULL +/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE)*/ +#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC 0x0000000001C00000ULL +/* Source port of tunneling UDP */ +#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT 0x0000000000200000ULL +/* Destination port of tunneling UDP */ +#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT 0x0000000000100000ULL +/* UDP Tunneling ID, NVGRE/GRE key */ +#define I40E_REG_INSET_TUNNEL_ID 0x00000000000C0000ULL +/* Last ether type */ +#define I40E_REG_INSET_LAST_ETHER_TYPE 0x0000000000004000ULL +/* Tunneling outer destination IPv4 address */ +#define I40E_REG_INSET_TUNNEL_L3_DST_IP4 0x00000000000000C0ULL +/* Tunneling outer destination IPv6 address */ +#define I40E_REG_INSET_TUNNEL_L3_DST_IP6 0x0000000000003FC0ULL +/* 1st word of flex payload */ +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1 0x0000000000002000ULL +/* 2nd word of flex payload */ +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2 0x0000000000001000ULL +/* 3rd word of flex payload */ +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3 0x0000000000000800ULL +/* 4th word of flex payload */ +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4 0x0000000000000400ULL +/* 5th word of flex payload */ +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5 0x0000000000000200ULL +/* 6th word of flex payload */ +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6 0x0000000000000100ULL +/* 7th word of flex payload */ +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7 0x0000000000000080ULL +/* 8th word of flex payload */ +#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8 0x0000000000000040ULL +/* all 8 words flex payload */ +#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS 0x0000000000003FC0ULL +#define I40E_REG_INSET_MASK_DEFAULT 0x0000000000000000ULL + +#define I40E_TRANSLATE_INSET 0 +#define I40E_TRANSLATE_REG 1 + +#define I40E_INSET_IPV4_TOS_MASK 0x0009FF00UL +#define I40E_INSET_IPv4_TTL_MASK 0x000D00FFUL +#define I40E_INSET_IPV4_PROTO_MASK 0x000DFF00UL +#define I40E_INSET_IPV6_TC_MASK 0x0009F00FUL +#define I40E_INSET_IPV6_HOP_LIMIT_MASK 0x000CFF00UL +#define I40E_INSET_IPV6_NEXT_HDR_MASK 0x000C00FFUL + +#define I40E_GL_SWT_L2TAGCTRL(_i) (0x001C0A70 + ((_i) * 4)) +#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16 +#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK \ + I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) + +/* PCI offset for querying capability */ +#define PCI_DEV_CAP_REG 0xA4 +/* PCI offset for enabling/disabling Extended Tag */ +#define PCI_DEV_CTRL_REG 0xA8 +/* Bit mask of Extended Tag capability */ +#define 
PCI_DEV_CAP_EXT_TAG_MASK 0x20 +/* Bit shift of Extended Tag enable/disable */ +#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8 +/* Bit mask of Extended Tag enable/disable */ +#define PCI_DEV_CTRL_EXT_TAG_MASK (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT) + +static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev); +static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev); +static int i40e_dev_configure(struct rte_eth_dev *dev); +static int i40e_dev_start(struct rte_eth_dev *dev); +static void i40e_dev_stop(struct rte_eth_dev *dev); +static void i40e_dev_close(struct rte_eth_dev *dev); +static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev); +static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev); +static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev); +static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev); +static int i40e_dev_set_link_up(struct rte_eth_dev *dev); +static int i40e_dev_set_link_down(struct rte_eth_dev *dev); +static void i40e_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats); +static int i40e_dev_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstats *xstats, unsigned n); +static void i40e_dev_stats_reset(struct rte_eth_dev *dev); +static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev, + uint16_t queue_id, + uint8_t stat_idx, + uint8_t is_rx); +static void i40e_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static int i40e_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, + int on); +static int i40e_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, + uint16_t tpid); +static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask); +static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev, + uint16_t queue, + int on); +static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on); +static int i40e_dev_led_on(struct rte_eth_dev *dev); +static int i40e_dev_led_off(struct rte_eth_dev *dev); +static int i40e_flow_ctrl_get(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +static int i40e_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_pfc_conf *pfc_conf); +static void i40e_macaddr_add(struct rte_eth_dev *dev, + struct ether_addr *mac_addr, + uint32_t index, + uint32_t pool); +static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index); +static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); + +static int i40e_get_cap(struct i40e_hw *hw); +static int i40e_pf_parameter_init(struct rte_eth_dev *dev); +static int i40e_pf_setup(struct i40e_pf *pf); +static int i40e_dev_rxtx_init(struct i40e_pf *pf); +static int i40e_vmdq_setup(struct rte_eth_dev *dev); +static int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb); +static int i40e_dcb_setup(struct rte_eth_dev *dev); +static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg, + bool offset_loaded, uint64_t *offset, uint64_t *stat); +static void i40e_stat_update_48(struct i40e_hw *hw, + uint32_t hireg, + uint32_t loreg, + bool offset_loaded, + uint64_t *offset, + uint64_t *stat); +static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue); +static void i40e_dev_interrupt_handler( + __rte_unused struct rte_intr_handle *handle, 
void *param); +static int i40e_res_pool_init(struct i40e_res_pool_info *pool, + uint32_t base, uint32_t num); +static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool); +static int i40e_res_pool_free(struct i40e_res_pool_info *pool, + uint32_t base); +static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool, + uint16_t num); +static int i40e_dev_init_vlan(struct rte_eth_dev *dev); +static int i40e_veb_release(struct i40e_veb *veb); +static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, + struct i40e_vsi *vsi); +static int i40e_pf_config_mq_rx(struct i40e_pf *pf); +static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on); +static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi, + struct i40e_macvlan_filter *mv_f, + int num, + struct ether_addr *addr); +static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi, + struct i40e_macvlan_filter *mv_f, + int num, + uint16_t vlan); +static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi); +static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel); +static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel); +static void i40e_filter_input_set_init(struct i40e_pf *pf); +static int i40e_ethertype_filter_set(struct i40e_pf *pf, + struct rte_eth_ethertype_filter *filter, + bool add); +static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg); +static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev, + struct rte_eth_dcb_info *dcb_info); +static void i40e_configure_registers(struct i40e_hw *hw); +static void i40e_hw_init(struct rte_eth_dev *dev); +static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi); +static int i40e_mirror_rule_set(struct rte_eth_dev *dev, + struct rte_eth_mirror_conf *mirror_conf, + uint8_t sw_id, uint8_t on); +static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id); + +static int i40e_timesync_enable(struct rte_eth_dev *dev); +static int i40e_timesync_disable(struct rte_eth_dev *dev); +static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp, + uint32_t flags); +static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp); +static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw); + +static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta); + +static int i40e_timesync_read_time(struct rte_eth_dev *dev, + struct timespec *timestamp); +static int i40e_timesync_write_time(struct rte_eth_dev *dev, + const struct timespec *timestamp); + +static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, + uint16_t queue_id); +static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, + uint16_t queue_id); + +static int i40e_get_reg_length(struct rte_eth_dev *dev); + +static int i40e_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs); + +static int i40e_get_eeprom_length(struct rte_eth_dev *dev); + +static int i40e_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom); + +static void 
i40e_set_default_mac_addr(struct rte_eth_dev *dev, + struct ether_addr *mac_addr); + +static const struct rte_pci_id pci_id_i40e_map[] = { +#define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, +#include "rte_pci_dev_ids.h" +{ .vendor_id = 0, /* sentinel */ }, +}; + +static const struct eth_dev_ops i40e_eth_dev_ops = { + .dev_configure = i40e_dev_configure, + .dev_start = i40e_dev_start, + .dev_stop = i40e_dev_stop, + .dev_close = i40e_dev_close, + .promiscuous_enable = i40e_dev_promiscuous_enable, + .promiscuous_disable = i40e_dev_promiscuous_disable, + .allmulticast_enable = i40e_dev_allmulticast_enable, + .allmulticast_disable = i40e_dev_allmulticast_disable, + .dev_set_link_up = i40e_dev_set_link_up, + .dev_set_link_down = i40e_dev_set_link_down, + .link_update = i40e_dev_link_update, + .stats_get = i40e_dev_stats_get, + .xstats_get = i40e_dev_xstats_get, + .stats_reset = i40e_dev_stats_reset, + .xstats_reset = i40e_dev_stats_reset, + .queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set, + .dev_infos_get = i40e_dev_info_get, + .dev_supported_ptypes_get = i40e_dev_supported_ptypes_get, + .vlan_filter_set = i40e_vlan_filter_set, + .vlan_tpid_set = i40e_vlan_tpid_set, + .vlan_offload_set = i40e_vlan_offload_set, + .vlan_strip_queue_set = i40e_vlan_strip_queue_set, + .vlan_pvid_set = i40e_vlan_pvid_set, + .rx_queue_start = i40e_dev_rx_queue_start, + .rx_queue_stop = i40e_dev_rx_queue_stop, + .tx_queue_start = i40e_dev_tx_queue_start, + .tx_queue_stop = i40e_dev_tx_queue_stop, + .rx_queue_setup = i40e_dev_rx_queue_setup, + .rx_queue_intr_enable = i40e_dev_rx_queue_intr_enable, + .rx_queue_intr_disable = i40e_dev_rx_queue_intr_disable, + .rx_queue_release = i40e_dev_rx_queue_release, + .rx_queue_count = i40e_dev_rx_queue_count, + .rx_descriptor_done = i40e_dev_rx_descriptor_done, + .tx_queue_setup = i40e_dev_tx_queue_setup, + .tx_queue_release = i40e_dev_tx_queue_release, + .dev_led_on = i40e_dev_led_on, + .dev_led_off = i40e_dev_led_off, + .flow_ctrl_get = i40e_flow_ctrl_get, + .flow_ctrl_set = i40e_flow_ctrl_set, + .priority_flow_ctrl_set = i40e_priority_flow_ctrl_set, + .mac_addr_add = i40e_macaddr_add, + .mac_addr_remove = i40e_macaddr_remove, + .reta_update = i40e_dev_rss_reta_update, + .reta_query = i40e_dev_rss_reta_query, + .rss_hash_update = i40e_dev_rss_hash_update, + .rss_hash_conf_get = i40e_dev_rss_hash_conf_get, + .udp_tunnel_port_add = i40e_dev_udp_tunnel_port_add, + .udp_tunnel_port_del = i40e_dev_udp_tunnel_port_del, + .filter_ctrl = i40e_dev_filter_ctrl, + .rxq_info_get = i40e_rxq_info_get, + .txq_info_get = i40e_txq_info_get, + .mirror_rule_set = i40e_mirror_rule_set, + .mirror_rule_reset = i40e_mirror_rule_reset, + .timesync_enable = i40e_timesync_enable, + .timesync_disable = i40e_timesync_disable, + .timesync_read_rx_timestamp = i40e_timesync_read_rx_timestamp, + .timesync_read_tx_timestamp = i40e_timesync_read_tx_timestamp, + .get_dcb_info = i40e_dev_get_dcb_info, + .timesync_adjust_time = i40e_timesync_adjust_time, + .timesync_read_time = i40e_timesync_read_time, + .timesync_write_time = i40e_timesync_write_time, + .get_reg_length = i40e_get_reg_length, + .get_reg = i40e_get_regs, + .get_eeprom_length = i40e_get_eeprom_length, + .get_eeprom = i40e_get_eeprom, + .mac_addr_set = i40e_set_default_mac_addr, +}; + +/* store statistics names and its offset in stats structure */ +struct rte_i40e_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + unsigned offset; +}; + +static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = { 
+ {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)}, + {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)}, + {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)}, + {"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)}, + {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats, + rx_unknown_protocol)}, + {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)}, + {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)}, + {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)}, + {"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)}, +}; + +#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \ + sizeof(rte_i40e_stats_strings[0])) + +static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = { + {"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats, + tx_dropped_link_down)}, + {"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)}, + {"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats, + illegal_bytes)}, + {"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)}, + {"mac_local_errors", offsetof(struct i40e_hw_port_stats, + mac_local_faults)}, + {"mac_remote_errors", offsetof(struct i40e_hw_port_stats, + mac_remote_faults)}, + {"rx_length_errors", offsetof(struct i40e_hw_port_stats, + rx_length_errors)}, + {"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)}, + {"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)}, + {"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)}, + {"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)}, + {"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)}, + {"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats, + rx_size_127)}, + {"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats, + rx_size_255)}, + {"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats, + rx_size_511)}, + {"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats, + rx_size_1023)}, + {"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats, + rx_size_1522)}, + {"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats, + rx_size_big)}, + {"rx_undersized_errors", offsetof(struct i40e_hw_port_stats, + rx_undersize)}, + {"rx_oversize_errors", offsetof(struct i40e_hw_port_stats, + rx_oversize)}, + {"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats, + mac_short_packet_dropped)}, + {"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats, + rx_fragments)}, + {"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)}, + {"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)}, + {"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats, + tx_size_127)}, + {"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats, + tx_size_255)}, + {"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats, + tx_size_511)}, + {"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats, + tx_size_1023)}, + {"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats, + tx_size_1522)}, + {"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats, + tx_size_big)}, + {"rx_flow_director_atr_match_packets", + offsetof(struct i40e_hw_port_stats, fd_atr_match)}, + {"rx_flow_director_sb_match_packets", + offsetof(struct i40e_hw_port_stats, fd_sb_match)}, + {"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats, + 
tx_lpi_status)}, + {"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats, + rx_lpi_status)}, + {"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats, + tx_lpi_count)}, + {"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats, + rx_lpi_count)}, +}; + +#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \ + sizeof(rte_i40e_hw_port_strings[0])) + +static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = { + {"xon_packets", offsetof(struct i40e_hw_port_stats, + priority_xon_rx)}, + {"xoff_packets", offsetof(struct i40e_hw_port_stats, + priority_xoff_rx)}, +}; + +#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \ + sizeof(rte_i40e_rxq_prio_strings[0])) + +static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = { + {"xon_packets", offsetof(struct i40e_hw_port_stats, + priority_xon_tx)}, + {"xoff_packets", offsetof(struct i40e_hw_port_stats, + priority_xoff_tx)}, + {"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats, + priority_xon_2_xoff)}, +}; + +#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \ + sizeof(rte_i40e_txq_prio_strings[0])) + +static struct eth_driver rte_i40e_pmd = { + .pci_drv = { + .name = "rte_i40e_pmd", + .id_table = pci_id_i40e_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | + RTE_PCI_DRV_DETACHABLE, + }, + .eth_dev_init = eth_i40e_dev_init, + .eth_dev_uninit = eth_i40e_dev_uninit, + .dev_private_size = sizeof(struct i40e_adapter), +}; + +static inline int +rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = link; + struct rte_eth_link *src = &(dev->data->dev_link); + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +static inline int +rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = &(dev->data->dev_link); + struct rte_eth_link *src = link; + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +/* + * Driver initialization routine. + * Invoked once at EAL init time. + * Registers itself as the [Poll Mode] Driver of PCI i40e devices. + */ +static int +rte_i40e_pmd_init(const char *name __rte_unused, + const char *params __rte_unused) +{ + PMD_INIT_FUNC_TRACE(); + rte_eth_driver_register(&rte_i40e_pmd); + + return 0; +} + +static struct rte_driver rte_i40e_driver = { + .type = PMD_PDEV, + .init = rte_i40e_pmd_init, +}; + +PMD_REGISTER_DRIVER(rte_i40e_driver); + +/* + * Initialize registers for flexible payload, which should be set by NVM. + * This should be removed from code once it is fixed in NVM.
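 + * (The GLQF_ORT/GLQF_PIT writes below program the flexible payload tables that a corrected NVM image would otherwise set.)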
+ */ +#ifndef I40E_GLQF_ORT +#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) +#endif +#ifndef I40E_GLQF_PIT +#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4)) +#endif + +static inline void i40e_flex_payload_reg_init(struct i40e_hw *hw) +{ + I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030); + I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030); + I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B); + I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B); + I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0); + I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3); + I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6); + I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031); + I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031); + I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D); + + /* GLQF_PIT Registers */ + I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480); + I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440); +} + +#define I40E_FLOW_CONTROL_ETHERTYPE 0x8808 + +/* + * Add an ethertype filter to drop all flow control frames transmitted + * from VSIs. +*/ +static void +i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | + I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | + I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; + int ret; + + ret = i40e_aq_add_rem_control_packet_filter(hw, NULL, + I40E_FLOW_CONTROL_ETHERTYPE, flags, + pf->main_vsi_seid, 0, + TRUE, NULL, NULL); + if (ret) + PMD_INIT_LOG(ERR, "Failed to add filter to drop flow control " + "frames from VSIs."); +} + +static int +eth_i40e_dev_init(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev; + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vsi *vsi; + int ret; + uint32_t len; + uint8_t aq_fail = 0; + + PMD_INIT_FUNC_TRACE(); + + dev->dev_ops = &i40e_eth_dev_ops; + dev->rx_pkt_burst = i40e_recv_pkts; + dev->tx_pkt_burst = i40e_xmit_pkts; + + /* for secondary processes, we don't initialise any further as primary + * has already done this work.
Only check we don't need a different + * RX function */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY){ + i40e_set_rx_function(dev); + i40e_set_tx_function(dev); + return 0; + } + pci_dev = dev->pci_dev; + + rte_eth_copy_pci_info(dev, pci_dev); + + pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + pf->adapter->eth_dev = dev; + pf->dev_data = dev->data; + + hw->back = I40E_PF_TO_ADAPTER(pf); + hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr); + if (!hw->hw_addr) { + PMD_INIT_LOG(ERR, "Hardware is not available, " + "as address is NULL"); + return -ENODEV; + } + + hw->vendor_id = pci_dev->id.vendor_id; + hw->device_id = pci_dev->id.device_id; + hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; + hw->subsystem_device_id = pci_dev->id.subsystem_device_id; + hw->bus.device = pci_dev->addr.devid; + hw->bus.func = pci_dev->addr.function; + hw->adapter_stopped = 0; + + /* Make sure all is clean before doing PF reset */ + i40e_clear_hw(hw); + + /* Initialize the hardware */ + i40e_hw_init(dev); + + /* Reset here to make sure all is clean for each PF */ + ret = i40e_pf_reset(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret); + return ret; + } + + /* Initialize the shared code (base driver) */ + ret = i40e_init_shared_code(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret); + return ret; + } + + /* + * To work around the NVM issue,initialize registers + * for flexible payload by software. + * It should be removed once issues are fixed in NVM. + */ + i40e_flex_payload_reg_init(hw); + + /* Initialize the input set for filters (hash and fd) to default value */ + i40e_filter_input_set_init(pf); + + /* Initialize the parameters for adminq */ + i40e_init_adminq_parameter(hw); + ret = i40e_init_adminq(hw); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret); + return -EIO; + } + PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x", + hw->aq.fw_maj_ver, hw->aq.fw_min_ver, + hw->aq.api_maj_ver, hw->aq.api_min_ver, + ((hw->nvm.version >> 12) & 0xf), + ((hw->nvm.version >> 4) & 0xff), + (hw->nvm.version & 0xf), hw->nvm.eetrack); + + /* Clear PXE mode */ + i40e_clear_pxe_mode(hw); + + /* + * On X710, performance number is far from the expectation on recent + * firmware versions. The fix for this issue may not be integrated in + * the following firmware version. So the workaround in software driver + * is needed. It needs to modify the initial values of 3 internal only + * registers. Note that the workaround can be removed when it is fixed + * in firmware in the future. 
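 + * i40e_configure_registers() below applies those register overrides.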
+ */ + i40e_configure_registers(hw); + + /* Get hw capabilities */ + ret = i40e_get_cap(hw); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret); + goto err_get_capabilities; + } + + /* Initialize parameters for PF */ + ret = i40e_pf_parameter_init(dev); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret); + goto err_parameter_init; + } + + /* Initialize the queue management */ + ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp); + if (ret < 0) { + PMD_INIT_LOG(ERR, "Failed to init queue pool"); + goto err_qp_pool_init; + } + ret = i40e_res_pool_init(&pf->msix_pool, 1, + hw->func_caps.num_msix_vectors - 1); + if (ret < 0) { + PMD_INIT_LOG(ERR, "Failed to init MSIX pool"); + goto err_msix_pool_init; + } + + /* Initialize lan hmc */ + ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, + hw->func_caps.num_rx_qp, 0, 0); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret); + goto err_init_lan_hmc; + } + + /* Configure lan hmc */ + ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret); + goto err_configure_lan_hmc; + } + + /* Get and check the mac address */ + i40e_get_mac_addr(hw, hw->mac.addr); + if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, "mac address is not valid"); + ret = -EIO; + goto err_get_mac_addr; + } + /* Copy the permanent MAC address */ + ether_addr_copy((struct ether_addr *) hw->mac.addr, + (struct ether_addr *) hw->mac.perm_addr); + + /* Disable flow control */ + hw->fc.requested_mode = I40E_FC_NONE; + i40e_set_fc(hw, &aq_fail, TRUE); + + /* Set the global registers with default ether type value */ + ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to set the default outer " + "VLAN ether type"); + goto err_setup_pf_switch; + } + ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER, ETHER_TYPE_VLAN); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to set the default inner " + "VLAN ether type"); + goto err_setup_pf_switch; + } + + /* PF setup, which includes VSI setup */ + ret = i40e_pf_setup(pf); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret); + goto err_setup_pf_switch; + } + + vsi = pf->main_vsi; + + /* Disable double vlan by default */ + i40e_vsi_config_double_vlan(vsi, FALSE); + + if (!vsi->max_macaddrs) + len = ETHER_ADDR_LEN; + else + len = ETHER_ADDR_LEN * vsi->max_macaddrs; + + /* Should be after VSI initialized */ + dev->data->mac_addrs = rte_zmalloc("i40e", len, 0); + if (!dev->data->mac_addrs) { + PMD_INIT_LOG(ERR, "Failed to allocate memory " + "for storing mac address"); + goto err_mac_alloc; + } + ether_addr_copy((struct ether_addr *)hw->mac.perm_addr, + &dev->data->mac_addrs[0]); + + /* initialize pf host driver to setup SRIOV resource if applicable */ + i40e_pf_host_init(dev); + + /* register callback func to eal lib */ + rte_intr_callback_register(&(pci_dev->intr_handle), + i40e_dev_interrupt_handler, (void *)dev); + + /* configure and enable device interrupt */ + i40e_pf_config_irq0(hw, TRUE); + i40e_pf_enable_irq0(hw); + + /* enable uio intr after callback register */ + rte_intr_enable(&(pci_dev->intr_handle)); + /* + * Add an ethertype filter to drop all flow control frames transmitted + * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC + * frames to wire.
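 + * The filter matches ethertype 0x8808 (flow control) in the TX direction and drops matching frames; see i40e_add_tx_flow_control_drop_filter().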
+ */ + i40e_add_tx_flow_control_drop_filter(pf); + + /* Set the max frame size to 0x2600 by default, + * in case other drivers changed the default value. + */ + i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL); + + /* initialize mirror rule list */ + TAILQ_INIT(&pf->mirror_list); + + /* Init dcb to sw mode by default */ + ret = i40e_dcb_init_configure(dev, TRUE); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(INFO, "Failed to init dcb."); + pf->flags &= ~I40E_FLAG_DCB; + } + + return 0; + +err_mac_alloc: + i40e_vsi_release(pf->main_vsi); +err_setup_pf_switch: +err_get_mac_addr: +err_configure_lan_hmc: + (void)i40e_shutdown_lan_hmc(hw); +err_init_lan_hmc: + i40e_res_pool_destroy(&pf->msix_pool); +err_msix_pool_init: + i40e_res_pool_destroy(&pf->qp_pool); +err_qp_pool_init: +err_parameter_init: +err_get_capabilities: + (void)i40e_shutdown_adminq(hw); + + return ret; +} + +static int +eth_i40e_dev_uninit(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev; + struct i40e_hw *hw; + struct i40e_filter_control_settings settings; + int ret; + uint8_t aq_fail = 0; + + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + pci_dev = dev->pci_dev; + + if (hw->adapter_stopped == 0) + i40e_dev_close(dev); + + dev->dev_ops = NULL; + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + + /* Disable LLDP */ + ret = i40e_aq_stop_lldp(hw, true, NULL); + if (ret != I40E_SUCCESS) /* Its failure can be ignored */ + PMD_INIT_LOG(INFO, "Failed to stop lldp"); + + /* Clear PXE mode */ + i40e_clear_pxe_mode(hw); + + /* Unconfigure filter control */ + memset(&settings, 0, sizeof(settings)); + ret = i40e_set_filter_control(hw, &settings); + if (ret) + PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d", + ret); + + /* Disable flow control */ + hw->fc.requested_mode = I40E_FC_NONE; + i40e_set_fc(hw, &aq_fail, TRUE); + + /* uninitialize pf host driver */ + i40e_pf_host_uninit(dev); + + rte_free(dev->data->mac_addrs); + dev->data->mac_addrs = NULL; + + /* disable uio intr before callback unregister */ + rte_intr_disable(&(pci_dev->intr_handle)); + + /* register callback func to eal lib */ + rte_intr_callback_unregister(&(pci_dev->intr_handle), + i40e_dev_interrupt_handler, (void *)dev); + + return 0; +} + +static int +i40e_dev_configure(struct rte_eth_dev *dev) +{ + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode; + int i, ret; + + /* Initialize to TRUE. If any of Rx queues doesn't meet the + * bulk allocation or vector Rx preconditions we will reset it. + */ + ad->rx_bulk_alloc_allowed = true; + ad->rx_vec_allowed = true; + ad->tx_simple_allowed = true; + ad->tx_vec_allowed = true; + + if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) { + ret = i40e_fdir_setup(pf); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to setup flow director."); + return -ENOTSUP; + } + ret = i40e_fdir_configure(dev); + if (ret < 0) { + PMD_DRV_LOG(ERR, "failed to configure fdir."); + goto err; + } + } else + i40e_fdir_teardown(pf); + + ret = i40e_dev_init_vlan(dev); + if (ret < 0) + goto err; + + /* VMDQ setup. + * Needs to move VMDQ setting out of i40e_pf_config_mq_rx() as VMDQ and + * RSS setting have different requirements. + * General PMD driver call sequence are NIC init, configure, + * rx/tx_queue_setup and dev_start. 
In rx/tx_queue_setup() function, it + * will try to lookup the VSI that specific queue belongs to if VMDQ + * applicable. So, VMDQ setting has to be done before + * rx/tx_queue_setup(). This function is good to place vmdq_setup. + * For RSS setting, it will try to calculate actual configured RX queue + * number, which will be available after rx_queue_setup(). dev_start() + * function is good to place RSS setup. + */ + if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) { + ret = i40e_vmdq_setup(dev); + if (ret) + goto err; + } + + if (mq_mode & ETH_MQ_RX_DCB_FLAG) { + ret = i40e_dcb_setup(dev); + if (ret) { + PMD_DRV_LOG(ERR, "failed to configure DCB."); + goto err_dcb; + } + } + + return 0; + +err_dcb: + /* need to release vmdq resource if exists */ + for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) { + i40e_vsi_release(pf->vmdq[i].vsi); + pf->vmdq[i].vsi = NULL; + } + rte_free(pf->vmdq); + pf->vmdq = NULL; +err: + /* need to release fdir resource if exists */ + i40e_fdir_teardown(pf); + return ret; +} + +void +i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi) +{ + struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + uint16_t msix_vect = vsi->msix_intr; + uint16_t i; + + for (i = 0; i < vsi->nb_qps; i++) { + I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0); + I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0); + rte_wmb(); + } + + if (vsi->type != I40E_VSI_SRIOV) { + if (!rte_intr_allow_others(intr_handle)) { + I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0, + I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK); + I40E_WRITE_REG(hw, + I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT), + 0); + } else { + I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), + I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK); + I40E_WRITE_REG(hw, + I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT, + msix_vect - 1), 0); + } + } else { + uint32_t reg; + reg = (hw->func_caps.num_msix_vectors_vf - 1) * + vsi->user_param + (msix_vect - 1); + + I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), + I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK); + } + I40E_WRITE_FLUSH(hw); +} + +static void +__vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect, + int base_queue, int nb_queue) +{ + int i; + uint32_t val; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + + /* Bind all RX queues to allocated MSIX interrupt */ + for (i = 0; i < nb_queue; i++) { + val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | + I40E_QINT_RQCTL_ITR_INDX_MASK | + ((base_queue + i + 1) << + I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | + (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | + I40E_QINT_RQCTL_CAUSE_ENA_MASK; + + if (i == nb_queue - 1) + val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK; + I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val); + } + + /* Write first RX queue to Link list register as the head element */ + if (vsi->type != I40E_VSI_SRIOV) { + uint16_t interval = + i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL); + + if (msix_vect == I40E_MISC_VEC_ID) { + I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0, + (base_queue << + I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) | + (0x0 << + I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)); + I40E_WRITE_REG(hw, + I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT), + interval); + } else { + I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), + (base_queue << + I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) | + (0x0 << + I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)); + I40E_WRITE_REG(hw, + I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT, + msix_vect - 1), + interval); + } + } else { + uint32_t reg; + + if (msix_vect == 
I40E_MISC_VEC_ID) { + I40E_WRITE_REG(hw, + I40E_VPINT_LNKLST0(vsi->user_param), + (base_queue << + I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) | + (0x0 << + I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)); + } else { + /* num_msix_vectors_vf needs to minus irq0 */ + reg = (hw->func_caps.num_msix_vectors_vf - 1) * + vsi->user_param + (msix_vect - 1); + + I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), + (base_queue << + I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) | + (0x0 << + I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)); + } + } + + I40E_WRITE_FLUSH(hw); +} + +void +i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi) +{ + struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + uint16_t msix_vect = vsi->msix_intr; + uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd); + uint16_t queue_idx = 0; + int record = 0; + uint32_t val; + int i; + + for (i = 0; i < vsi->nb_qps; i++) { + I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0); + I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0); + } + + /* INTENA flag is not auto-cleared for interrupt */ + val = I40E_READ_REG(hw, I40E_GLINT_CTL); + val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK | + I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK | + I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK; + I40E_WRITE_REG(hw, I40E_GLINT_CTL, val); + + /* VF bind interrupt */ + if (vsi->type == I40E_VSI_SRIOV) { + __vsi_queues_bind_intr(vsi, msix_vect, + vsi->base_queue, vsi->nb_qps); + return; + } + + /* PF & VMDq bind interrupt */ + if (rte_intr_dp_is_en(intr_handle)) { + if (vsi->type == I40E_VSI_MAIN) { + queue_idx = 0; + record = 1; + } else if (vsi->type == I40E_VSI_VMDQ2) { + struct i40e_vsi *main_vsi = + I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter); + queue_idx = vsi->base_queue - main_vsi->nb_qps; + record = 1; + } + } + + for (i = 0; i < vsi->nb_used_qps; i++) { + if (nb_msix <= 1) { + if (!rte_intr_allow_others(intr_handle)) + /* allow to share MISC_VEC_ID */ + msix_vect = I40E_MISC_VEC_ID; + + /* no enough msix_vect, map all to one */ + __vsi_queues_bind_intr(vsi, msix_vect, + vsi->base_queue + i, + vsi->nb_used_qps - i); + for (; !!record && i < vsi->nb_used_qps; i++) + intr_handle->intr_vec[queue_idx + i] = + msix_vect; + break; + } + /* 1:1 queue/msix_vect mapping */ + __vsi_queues_bind_intr(vsi, msix_vect, + vsi->base_queue + i, 1); + if (!!record) + intr_handle->intr_vec[queue_idx + i] = msix_vect; + + msix_vect++; + nb_msix--; + } +} + +static void +i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi) +{ + struct rte_eth_dev *dev = vsi->adapter->eth_dev; + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + uint16_t interval = i40e_calc_itr_interval(\ + RTE_LIBRTE_I40E_ITR_INTERVAL); + uint16_t msix_intr, i; + + if (rte_intr_allow_others(intr_handle)) + for (i = 0; i < vsi->nb_msix; i++) { + msix_intr = vsi->msix_intr + i; + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1), + I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | + (interval << + I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)); + } + else + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, + I40E_PFINT_DYN_CTL0_INTENA_MASK | + I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | + (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) | + (interval << + I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)); + + I40E_WRITE_FLUSH(hw); +} + +static void +i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi) +{ + struct rte_eth_dev *dev = vsi->adapter->eth_dev; 
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + uint16_t msix_intr, i; + + if (rte_intr_allow_others(intr_handle)) + for (i = 0; i < vsi->nb_msix; i++) { + msix_intr = vsi->msix_intr + i; + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1), + 0); + } + else + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0); + + I40E_WRITE_FLUSH(hw); +} + +static inline uint8_t +i40e_parse_link_speeds(uint16_t link_speeds) +{ + uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN; + + if (link_speeds & ETH_LINK_SPEED_40G) + link_speed |= I40E_LINK_SPEED_40GB; + if (link_speeds & ETH_LINK_SPEED_20G) + link_speed |= I40E_LINK_SPEED_20GB; + if (link_speeds & ETH_LINK_SPEED_10G) + link_speed |= I40E_LINK_SPEED_10GB; + if (link_speeds & ETH_LINK_SPEED_1G) + link_speed |= I40E_LINK_SPEED_1GB; + if (link_speeds & ETH_LINK_SPEED_100M) + link_speed |= I40E_LINK_SPEED_100MB; + + return link_speed; +} + +static int +i40e_phy_conf_link(__rte_unused struct i40e_hw *hw, + __rte_unused uint8_t abilities, + __rte_unused uint8_t force_speed) +{ + /* Skip any phy config on both 10G and 40G interfaces, as a workaround + * for the link control limitation of that all link control should be + * handled by firmware. It should follow up if link control will be + * opened to software driver in future firmware versions. + */ + return I40E_SUCCESS; +} + +static int +i40e_apply_link_speed(struct rte_eth_dev *dev) +{ + uint8_t speed; + uint8_t abilities = 0; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_conf *conf = &dev->data->dev_conf; + + speed = i40e_parse_link_speeds(conf->link_speeds); + abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED)) + abilities |= I40E_AQ_PHY_AN_ENABLED; + else + abilities |= I40E_AQ_PHY_LINK_ENABLED; + + return i40e_phy_conf_link(hw, abilities, speed); +} + +static int +i40e_dev_start(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vsi *main_vsi = pf->main_vsi; + int ret, i; + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + uint32_t intr_vector = 0; + + hw->adapter_stopped = 0; + + if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) { + PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; autonegotiation disabled", + dev->data->port_id); + return -EINVAL; + } + + rte_intr_disable(intr_handle); + + if ((rte_intr_cap_multiple(intr_handle) || + !RTE_ETH_DEV_SRIOV(dev).active) && + dev->data->dev_conf.intr_conf.rxq != 0) { + intr_vector = dev->data->nb_rx_queues; + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -1; + } + + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), + 0); + if (!intr_handle->intr_vec) { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec\n", dev->data->nb_rx_queues); + return -ENOMEM; + } + } + + /* Initialize VSI */ + ret = i40e_dev_rxtx_init(pf); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to init rx/tx queues"); + goto err_up; + } + + /* Map queues with MSIX interrupt */ + main_vsi->nb_used_qps = dev->data->nb_rx_queues - + pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; + i40e_vsi_queues_bind_intr(main_vsi); + i40e_vsi_enable_queues_intr(main_vsi); + + /* Map VMDQ VSI queues with MSIX interrupt */ + for (i = 0; i < 
pf->nb_cfg_vmdq_vsi; i++) { + pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; + i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi); + i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi); + } + + /* enable FDIR MSIX interrupt */ + if (pf->fdir.fdir_vsi) { + i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi); + i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi); + } + + /* Enable all queues which have been configured */ + ret = i40e_dev_switch_queues(pf, TRUE); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to enable VSI"); + goto err_up; + } + + /* Enable receiving broadcast packets */ + ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(INFO, "fail to set vsi broadcast"); + + for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) { + ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid, + true, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(INFO, "fail to set vsi broadcast"); + } + + /* Apply link configure */ + if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M | + ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G | + ETH_LINK_SPEED_20G | ETH_LINK_SPEED_40G)) { + PMD_DRV_LOG(ERR, "Invalid link setting"); + goto err_up; + } + ret = i40e_apply_link_speed(dev); + if (I40E_SUCCESS != ret) { + PMD_DRV_LOG(ERR, "Fail to apply link setting"); + goto err_up; + } + + if (!rte_intr_allow_others(intr_handle)) { + rte_intr_callback_unregister(intr_handle, + i40e_dev_interrupt_handler, + (void *)dev); + /* configure and enable device interrupt */ + i40e_pf_config_irq0(hw, FALSE); + i40e_pf_enable_irq0(hw); + + if (dev->data->dev_conf.intr_conf.lsc != 0) + PMD_INIT_LOG(INFO, "lsc won't enable because of" + " no intr multiplex\n"); + } + + /* enable uio intr after callback register */ + rte_intr_enable(intr_handle); + + return I40E_SUCCESS; + +err_up: + i40e_dev_switch_queues(pf, FALSE); + i40e_dev_clear_queues(dev); + + return ret; +} + +static void +i40e_dev_stop(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_vsi *main_vsi = pf->main_vsi; + struct i40e_mirror_rule *p_mirror; + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + int i; + + /* Disable all queues */ + i40e_dev_switch_queues(pf, FALSE); + + /* un-map queues with interrupt registers */ + i40e_vsi_disable_queues_intr(main_vsi); + i40e_vsi_queues_unbind_intr(main_vsi); + + for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) { + i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi); + i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi); + } + + if (pf->fdir.fdir_vsi) { + i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi); + i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi); + } + /* Clear all queues and release memory */ + i40e_dev_clear_queues(dev); + + /* Set link down */ + i40e_dev_set_link_down(dev); + + /* Remove all mirror rules */ + while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) { + TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules); + rte_free(p_mirror); + } + pf->nb_mirror_rule = 0; + + if (!rte_intr_allow_others(intr_handle)) + /* resume to the default handler */ + rte_intr_callback_register(intr_handle, + i40e_dev_interrupt_handler, + (void *)dev); + + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } +} + +static void +i40e_dev_close(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = 
I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + int i; + + PMD_INIT_FUNC_TRACE(); + + i40e_dev_stop(dev); + hw->adapter_stopped = 1; + i40e_dev_free_queues(dev); + + /* Disable interrupt */ + i40e_pf_disable_irq0(hw); + rte_intr_disable(&(dev->pci_dev->intr_handle)); + + /* shutdown and destroy the HMC */ + i40e_shutdown_lan_hmc(hw); + + /* release all the existing VSIs and VEBs */ + i40e_fdir_teardown(pf); + i40e_vsi_release(pf->main_vsi); + + for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) { + i40e_vsi_release(pf->vmdq[i].vsi); + pf->vmdq[i].vsi = NULL; + } + + rte_free(pf->vmdq); + pf->vmdq = NULL; + + /* shutdown the adminq */ + i40e_aq_queue_shutdown(hw, true); + i40e_shutdown_adminq(hw); + + i40e_res_pool_destroy(&pf->qp_pool); + i40e_res_pool_destroy(&pf->msix_pool); + + /* force a PF reset to clean anything leftover */ + reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL); + I40E_WRITE_REG(hw, I40E_PFGEN_CTRL, + (reg | I40E_PFGEN_CTRL_PFSWR_MASK)); + I40E_WRITE_FLUSH(hw); +} + +static void +i40e_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vsi *vsi = pf->main_vsi; + int status; + + status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, + true, NULL); + if (status != I40E_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous"); + + status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, + TRUE, NULL); + if (status != I40E_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous"); + +} + +static void +i40e_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vsi *vsi = pf->main_vsi; + int status; + + status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, + false, NULL); + if (status != I40E_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous"); + + status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, + false, NULL); + if (status != I40E_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous"); +} + +static void +i40e_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vsi *vsi = pf->main_vsi; + int ret; + + ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous"); +} + +static void +i40e_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vsi *vsi = pf->main_vsi; + int ret; + + if (dev->data->promiscuous == 1) + return; /* must remain in all_multicast mode */ + + ret = i40e_aq_set_vsi_multicast_promiscuous(hw, + vsi->seid, FALSE, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous"); +} + +/* + * Set device link up. + */ +static int +i40e_dev_set_link_up(struct rte_eth_dev *dev) +{ + /* re-apply link speed setting */ + return i40e_apply_link_speed(dev); +} + +/* + * Set device link down. 
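 + * Requests a PHY reconfiguration with the link-enable ability cleared; i40e_phy_conf_link() currently skips the actual PHY write (see the workaround note above).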
+ */ +static int +i40e_dev_set_link_down(__rte_unused struct rte_eth_dev *dev) +{ + uint8_t speed = I40E_LINK_SPEED_UNKNOWN; + uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + return i40e_phy_conf_link(hw, abilities, speed); +} + +int +i40e_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete) +{ +#define CHECK_INTERVAL 100 /* 100ms */ +#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_link_status link_status; + struct rte_eth_link link, old; + int status; + unsigned rep_cnt = MAX_REPEAT_TIME; + + memset(&link, 0, sizeof(link)); + memset(&old, 0, sizeof(old)); + memset(&link_status, 0, sizeof(link_status)); + rte_i40e_dev_atomic_read_link_status(dev, &old); + + do { + /* Get link status information from hardware */ + status = i40e_aq_get_link_info(hw, false, &link_status, NULL); + if (status != I40E_SUCCESS) { + link.link_speed = ETH_SPEED_NUM_100M; + link.link_duplex = ETH_LINK_FULL_DUPLEX; + PMD_DRV_LOG(ERR, "Failed to get link info"); + goto out; + } + + link.link_status = link_status.link_info & I40E_AQ_LINK_UP; + if (!wait_to_complete) + break; + + rte_delay_ms(CHECK_INTERVAL); + } while (!link.link_status && rep_cnt--); + + if (!link.link_status) + goto out; + + /* i40e uses full duplex only */ + link.link_duplex = ETH_LINK_FULL_DUPLEX; + + /* Parse the link status */ + switch (link_status.link_speed) { + case I40E_LINK_SPEED_100MB: + link.link_speed = ETH_SPEED_NUM_100M; + break; + case I40E_LINK_SPEED_1GB: + link.link_speed = ETH_SPEED_NUM_1G; + break; + case I40E_LINK_SPEED_10GB: + link.link_speed = ETH_SPEED_NUM_10G; + break; + case I40E_LINK_SPEED_20GB: + link.link_speed = ETH_SPEED_NUM_20G; + break; + case I40E_LINK_SPEED_40GB: + link.link_speed = ETH_SPEED_NUM_40G; + break; + default: + link.link_speed = ETH_SPEED_NUM_100M; + break; + } + + link.link_autoneg = !(dev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_FIXED); + +out: + rte_i40e_dev_atomic_write_link_status(dev, &link); + if (link.link_status == old.link_status) + return -1; + + return 0; +} + +/* Get all the statistics of a VSI */ +void +i40e_update_vsi_stats(struct i40e_vsi *vsi) +{ + struct i40e_eth_stats *oes = &vsi->eth_stats_offset; + struct i40e_eth_stats *nes = &vsi->eth_stats; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx); + + i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx), + vsi->offset_loaded, &oes->rx_bytes, + &nes->rx_bytes); + i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx), + vsi->offset_loaded, &oes->rx_unicast, + &nes->rx_unicast); + i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx), + vsi->offset_loaded, &oes->rx_multicast, + &nes->rx_multicast); + i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx), + vsi->offset_loaded, &oes->rx_broadcast, + &nes->rx_broadcast); + i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded, + &oes->rx_discards, &nes->rx_discards); + /* GLV_REPC not supported */ + /* GLV_RMPC not supported */ + i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded, + &oes->rx_unknown_protocol, + &nes->rx_unknown_protocol); + i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx), + vsi->offset_loaded, &oes->tx_bytes, + &nes->tx_bytes); + i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx), + vsi->offset_loaded, &oes->tx_unicast, + &nes->tx_unicast); + 
i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx), + vsi->offset_loaded, &oes->tx_multicast, + &nes->tx_multicast); + i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx), + vsi->offset_loaded, &oes->tx_broadcast, + &nes->tx_broadcast); + /* GLV_TDPC not supported */ + i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded, + &oes->tx_errors, &nes->tx_errors); + vsi->offset_loaded = true; + + PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************", + vsi->vsi_id); + PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes); + PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast); + PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast); + PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast); + PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards); + PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"", + nes->rx_unknown_protocol); + PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes); + PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast); + PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast); + PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast); + PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards); + PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors); + PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************", + vsi->vsi_id); +} + +static void +i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) +{ + unsigned int i; + struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */ + struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */ + + /* Get statistics of struct i40e_eth_stats */ + i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port), + I40E_GLPRT_GORCL(hw->port), + pf->offset_loaded, &os->eth.rx_bytes, + &ns->eth.rx_bytes); + i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port), + I40E_GLPRT_UPRCL(hw->port), + pf->offset_loaded, &os->eth.rx_unicast, + &ns->eth.rx_unicast); + i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port), + I40E_GLPRT_MPRCL(hw->port), + pf->offset_loaded, &os->eth.rx_multicast, + &ns->eth.rx_multicast); + i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port), + I40E_GLPRT_BPRCL(hw->port), + pf->offset_loaded, &os->eth.rx_broadcast, + &ns->eth.rx_broadcast); + /* Workaround: CRC size should not be included in byte statistics, + * so subtract ETHER_CRC_LEN from the byte counter for each rx packet. 
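 + * The unicast, multicast and broadcast packet counts give the number of received frames, i.e. the number of CRCs to subtract.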
+ */ + ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast + + ns->eth.rx_broadcast) * ETHER_CRC_LEN; + + i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port), + pf->offset_loaded, &os->eth.rx_discards, + &ns->eth.rx_discards); + /* GLPRT_REPC not supported */ + /* GLPRT_RMPC not supported */ + i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port), + pf->offset_loaded, + &os->eth.rx_unknown_protocol, + &ns->eth.rx_unknown_protocol); + i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port), + I40E_GLPRT_GOTCL(hw->port), + pf->offset_loaded, &os->eth.tx_bytes, + &ns->eth.tx_bytes); + i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port), + I40E_GLPRT_UPTCL(hw->port), + pf->offset_loaded, &os->eth.tx_unicast, + &ns->eth.tx_unicast); + i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port), + I40E_GLPRT_MPTCL(hw->port), + pf->offset_loaded, &os->eth.tx_multicast, + &ns->eth.tx_multicast); + i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port), + I40E_GLPRT_BPTCL(hw->port), + pf->offset_loaded, &os->eth.tx_broadcast, + &ns->eth.tx_broadcast); + ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast + + ns->eth.tx_broadcast) * ETHER_CRC_LEN; + /* GLPRT_TEPC not supported */ + + /* additional port specific stats */ + i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port), + pf->offset_loaded, &os->tx_dropped_link_down, + &ns->tx_dropped_link_down); + i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port), + pf->offset_loaded, &os->crc_errors, + &ns->crc_errors); + i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port), + pf->offset_loaded, &os->illegal_bytes, + &ns->illegal_bytes); + /* GLPRT_ERRBC not supported */ + i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port), + pf->offset_loaded, &os->mac_local_faults, + &ns->mac_local_faults); + i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port), + pf->offset_loaded, &os->mac_remote_faults, + &ns->mac_remote_faults); + i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port), + pf->offset_loaded, &os->rx_length_errors, + &ns->rx_length_errors); + i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port), + pf->offset_loaded, &os->link_xon_rx, + &ns->link_xon_rx); + i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port), + pf->offset_loaded, &os->link_xoff_rx, + &ns->link_xoff_rx); + for (i = 0; i < 8; i++) { + i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i), + pf->offset_loaded, + &os->priority_xon_rx[i], + &ns->priority_xon_rx[i]); + i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i), + pf->offset_loaded, + &os->priority_xoff_rx[i], + &ns->priority_xoff_rx[i]); + } + i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port), + pf->offset_loaded, &os->link_xon_tx, + &ns->link_xon_tx); + i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port), + pf->offset_loaded, &os->link_xoff_tx, + &ns->link_xoff_tx); + for (i = 0; i < 8; i++) { + i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i), + pf->offset_loaded, + &os->priority_xon_tx[i], + &ns->priority_xon_tx[i]); + i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i), + pf->offset_loaded, + &os->priority_xoff_tx[i], + &ns->priority_xoff_tx[i]); + i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i), + pf->offset_loaded, + &os->priority_xon_2_xoff[i], + &ns->priority_xon_2_xoff[i]); + } + i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port), + I40E_GLPRT_PRC64L(hw->port), + pf->offset_loaded, &os->rx_size_64, + &ns->rx_size_64); + i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port), + I40E_GLPRT_PRC127L(hw->port), + pf->offset_loaded, &os->rx_size_127, + &ns->rx_size_127); + i40e_stat_update_48(hw, 
I40E_GLPRT_PRC255H(hw->port), + I40E_GLPRT_PRC255L(hw->port), + pf->offset_loaded, &os->rx_size_255, + &ns->rx_size_255); + i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port), + I40E_GLPRT_PRC511L(hw->port), + pf->offset_loaded, &os->rx_size_511, + &ns->rx_size_511); + i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port), + I40E_GLPRT_PRC1023L(hw->port), + pf->offset_loaded, &os->rx_size_1023, + &ns->rx_size_1023); + i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port), + I40E_GLPRT_PRC1522L(hw->port), + pf->offset_loaded, &os->rx_size_1522, + &ns->rx_size_1522); + i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port), + I40E_GLPRT_PRC9522L(hw->port), + pf->offset_loaded, &os->rx_size_big, + &ns->rx_size_big); + i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port), + pf->offset_loaded, &os->rx_undersize, + &ns->rx_undersize); + i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port), + pf->offset_loaded, &os->rx_fragments, + &ns->rx_fragments); + i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port), + pf->offset_loaded, &os->rx_oversize, + &ns->rx_oversize); + i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port), + pf->offset_loaded, &os->rx_jabber, + &ns->rx_jabber); + i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port), + I40E_GLPRT_PTC64L(hw->port), + pf->offset_loaded, &os->tx_size_64, + &ns->tx_size_64); + i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port), + I40E_GLPRT_PTC127L(hw->port), + pf->offset_loaded, &os->tx_size_127, + &ns->tx_size_127); + i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port), + I40E_GLPRT_PTC255L(hw->port), + pf->offset_loaded, &os->tx_size_255, + &ns->tx_size_255); + i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port), + I40E_GLPRT_PTC511L(hw->port), + pf->offset_loaded, &os->tx_size_511, + &ns->tx_size_511); + i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port), + I40E_GLPRT_PTC1023L(hw->port), + pf->offset_loaded, &os->tx_size_1023, + &ns->tx_size_1023); + i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port), + I40E_GLPRT_PTC1522L(hw->port), + pf->offset_loaded, &os->tx_size_1522, + &ns->tx_size_1522); + i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port), + I40E_GLPRT_PTC9522L(hw->port), + pf->offset_loaded, &os->tx_size_big, + &ns->tx_size_big); + i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index), + pf->offset_loaded, + &os->fd_sb_match, &ns->fd_sb_match); + /* GLPRT_MSPDC not supported */ + /* GLPRT_XEC not supported */ + + pf->offset_loaded = true; + + if (pf->main_vsi) + i40e_update_vsi_stats(pf->main_vsi); +} + +/* Get all statistics of a port */ +static void +i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */ + unsigned i; + + /* call read registers - updates values, now write them to struct */ + i40e_read_stats_registers(pf, hw); + + stats->ipackets = pf->main_vsi->eth_stats.rx_unicast + + pf->main_vsi->eth_stats.rx_multicast + + pf->main_vsi->eth_stats.rx_broadcast - + pf->main_vsi->eth_stats.rx_discards; + stats->opackets = pf->main_vsi->eth_stats.tx_unicast + + pf->main_vsi->eth_stats.tx_multicast + + pf->main_vsi->eth_stats.tx_broadcast; + stats->ibytes = ns->eth.rx_bytes; + stats->obytes = ns->eth.tx_bytes; + stats->oerrors = ns->eth.tx_errors + + pf->main_vsi->eth_stats.tx_errors; + stats->imcasts = pf->main_vsi->eth_stats.rx_multicast; + + /* Rx Errors */ + stats->imissed = ns->eth.rx_discards + + 
pf->main_vsi->eth_stats.rx_discards; + stats->ierrors = ns->crc_errors + + ns->rx_length_errors + ns->rx_undersize + + ns->rx_oversize + ns->rx_fragments + ns->rx_jabber; + + PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************"); + PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes); + PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast); + PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", ns->eth.rx_multicast); + PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", ns->eth.rx_broadcast); + PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", ns->eth.rx_discards); + PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"", + ns->eth.rx_unknown_protocol); + PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes); + PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast); + PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", ns->eth.tx_multicast); + PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", ns->eth.tx_broadcast); + PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", ns->eth.tx_discards); + PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors); + + PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"", + ns->tx_dropped_link_down); + PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors); + PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"", + ns->illegal_bytes); + PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes); + PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"", + ns->mac_local_faults); + PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"", + ns->mac_remote_faults); + PMD_DRV_LOG(DEBUG, "rx_length_errors: %"PRIu64"", + ns->rx_length_errors); + PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx); + PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx); + for (i = 0; i < 8; i++) { + PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]: %"PRIu64"", + i, ns->priority_xon_rx[i]); + PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]: %"PRIu64"", + i, ns->priority_xoff_rx[i]); + } + PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx); + PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx); + for (i = 0; i < 8; i++) { + PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]: %"PRIu64"", + i, ns->priority_xon_tx[i]); + PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]: %"PRIu64"", + i, ns->priority_xoff_tx[i]); + PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]: %"PRIu64"", + i, ns->priority_xon_2_xoff[i]); + } + PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64); + PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127); + PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255); + PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511); + PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023); + PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522); + PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big); + PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize); + PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments); + PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize); + PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber); + PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64); + PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127); + PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255); + PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511); + PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023); + PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522); + PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big); + 
PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"", + ns->mac_short_packet_dropped); + PMD_DRV_LOG(DEBUG, "checksum_error: %"PRIu64"", + ns->checksum_error); + PMD_DRV_LOG(DEBUG, "fdir_match: %"PRIu64"", ns->fd_sb_match); + PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************"); +} + +/* Reset the statistics */ +static void +i40e_dev_stats_reset(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Mark PF and VSI stats to update the offset, aka "reset" */ + pf->offset_loaded = false; + if (pf->main_vsi) + pf->main_vsi->offset_loaded = false; + + /* read the stats, reading current register values into offset */ + i40e_read_stats_registers(pf, hw); +} + +static uint32_t +i40e_xstats_calc_num(void) +{ + return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS + + (I40E_NB_RXQ_PRIO_XSTATS * 8) + + (I40E_NB_TXQ_PRIO_XSTATS * 8); +} + +static int +i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, + unsigned n) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + unsigned i, count, prio; + struct i40e_hw_port_stats *hw_stats = &pf->stats; + + count = i40e_xstats_calc_num(); + if (n < count) + return count; + + i40e_read_stats_registers(pf, hw); + + if (xstats == NULL) + return 0; + + count = 0; + + /* Get stats from i40e_eth_stats struct */ + for (i = 0; i < I40E_NB_ETH_XSTATS; i++) { + snprintf(xstats[count].name, sizeof(xstats[count].name), + "%s", rte_i40e_stats_strings[i].name); + xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) + + rte_i40e_stats_strings[i].offset); + count++; + } + + /* Get individiual stats from i40e_hw_port struct */ + for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) { + snprintf(xstats[count].name, sizeof(xstats[count].name), + "%s", rte_i40e_hw_port_strings[i].name); + xstats[count].value = *(uint64_t *)(((char *)hw_stats) + + rte_i40e_hw_port_strings[i].offset); + count++; + } + + for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) { + for (prio = 0; prio < 8; prio++) { + snprintf(xstats[count].name, + sizeof(xstats[count].name), + "rx_priority%u_%s", prio, + rte_i40e_rxq_prio_strings[i].name); + xstats[count].value = + *(uint64_t *)(((char *)hw_stats) + + rte_i40e_rxq_prio_strings[i].offset + + (sizeof(uint64_t) * prio)); + count++; + } + } + + for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) { + for (prio = 0; prio < 8; prio++) { + snprintf(xstats[count].name, + sizeof(xstats[count].name), + "tx_priority%u_%s", prio, + rte_i40e_txq_prio_strings[i].name); + xstats[count].value = + *(uint64_t *)(((char *)hw_stats) + + rte_i40e_txq_prio_strings[i].offset + + (sizeof(uint64_t) * prio)); + count++; + } + } + + return count; +} + +static int +i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t queue_id, + __rte_unused uint8_t stat_idx, + __rte_unused uint8_t is_rx) +{ + PMD_INIT_FUNC_TRACE(); + + return -ENOSYS; +} + +static void +i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vsi *vsi = pf->main_vsi; + + dev_info->max_rx_queues = vsi->nb_qps; + dev_info->max_tx_queues = vsi->nb_qps; + dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN; + dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX; + 
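/*
 * Usage sketch (not part of the patch): how an application is expected to
 * drive the xstats callback implemented above through the ethdev API of this
 * release.  A first call with a NULL array returns the number of entries
 * (generic ethdev stats plus the PMD-specific ones shown above), so the
 * caller can size the buffer and call again.  Assumes <stdio.h>, <stdlib.h>,
 * <inttypes.h> and rte_ethdev.h.
 */
static void
dump_port_xstats(uint8_t port_id)
{
        struct rte_eth_xstats *xstats;
        int i, count, n;

        count = rte_eth_xstats_get(port_id, NULL, 0);
        if (count <= 0)
                return;
        xstats = malloc(sizeof(*xstats) * count);
        if (xstats == NULL)
                return;
        n = rte_eth_xstats_get(port_id, xstats, count);
        for (i = 0; i < n; i++)
                printf("%s: %" PRIu64 "\n", xstats[i].name, xstats[i].value);
        free(xstats);
}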
dev_info->max_mac_addrs = vsi->max_macaddrs; + dev_info->max_vfs = dev->pci_dev->max_vfs; + dev_info->rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_QINQ_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + dev_info->tx_offload_capa = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_QINQ_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO; + dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t); + dev_info->reta_size = pf->hash_lut_size; + dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = I40E_DEFAULT_RX_PTHRESH, + .hthresh = I40E_DEFAULT_RX_HTHRESH, + .wthresh = I40E_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = I40E_DEFAULT_TX_PTHRESH, + .hthresh = I40E_DEFAULT_TX_HTHRESH, + .wthresh = I40E_DEFAULT_TX_WTHRESH, + }, + .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH, + .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH, + .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | + ETH_TXQ_FLAGS_NOOFFLOADS, + }; + + dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = I40E_MAX_RING_DESC, + .nb_min = I40E_MIN_RING_DESC, + .nb_align = I40E_ALIGN_RING_DESC, + }; + + dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = I40E_MAX_RING_DESC, + .nb_min = I40E_MIN_RING_DESC, + .nb_align = I40E_ALIGN_RING_DESC, + }; + + if (pf->flags & I40E_FLAG_VMDQ) { + dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi; + dev_info->vmdq_queue_base = dev_info->max_rx_queues; + dev_info->vmdq_queue_num = pf->vmdq_nb_qps * + pf->max_nb_vmdq_vsi; + dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE; + dev_info->max_rx_queues += dev_info->vmdq_queue_num; + dev_info->max_tx_queues += dev_info->vmdq_queue_num; + } + + if (i40e_is_40G_device(hw->device_id)) + /* For XL710 */ + dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G; + else + /* For X710 */ + dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G; +} + +static int +i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_vsi *vsi = pf->main_vsi; + PMD_INIT_FUNC_TRACE(); + + if (on) + return i40e_vsi_add_vlan(vsi, vlan_id); + else + return i40e_vsi_delete_vlan(vsi, vlan_id); +} + +static int +i40e_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, + uint16_t tpid) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t reg_r = 0, reg_w = 0; + uint16_t reg_id = 0; + int ret = 0; + + switch (vlan_type) { + case ETH_VLAN_TYPE_OUTER: + reg_id = 2; + break; + case ETH_VLAN_TYPE_INNER: + reg_id = 3; + break; + default: + ret = -EINVAL; + PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type); + return ret; + } + ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id), + ®_r, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Fail to debug read from " + "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id); + ret = -EIO; + return ret; + } + PMD_DRV_LOG(DEBUG, "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: " + "0x%08"PRIx64"", reg_id, reg_r); + + reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK)); + reg_w |= ((uint64_t)tpid << 
I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT); + if (reg_r == reg_w) { + ret = 0; + PMD_DRV_LOG(DEBUG, "No need to write"); + return ret; + } + + ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id), + reg_w, NULL); + if (ret != I40E_SUCCESS) { + ret = -EIO; + PMD_DRV_LOG(ERR, "Fail to debug write to " + "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id); + return ret; + } + PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to " + "I40E_GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id); + + return ret; +} + +static void +i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_vsi *vsi = pf->main_vsi; + + if (mask & ETH_VLAN_FILTER_MASK) { + if (dev->data->dev_conf.rxmode.hw_vlan_filter) + i40e_vsi_config_vlan_filter(vsi, TRUE); + else + i40e_vsi_config_vlan_filter(vsi, FALSE); + } + + if (mask & ETH_VLAN_STRIP_MASK) { + /* Enable or disable VLAN stripping */ + if (dev->data->dev_conf.rxmode.hw_vlan_strip) + i40e_vsi_config_vlan_stripping(vsi, TRUE); + else + i40e_vsi_config_vlan_stripping(vsi, FALSE); + } + + if (mask & ETH_VLAN_EXTEND_MASK) { + if (dev->data->dev_conf.rxmode.hw_vlan_extend) + i40e_vsi_config_double_vlan(vsi, TRUE); + else + i40e_vsi_config_double_vlan(vsi, FALSE); + } +} + +static void +i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t queue, + __rte_unused int on) +{ + PMD_INIT_FUNC_TRACE(); +} + +static int +i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_vsi *vsi = pf->main_vsi; + struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi); + struct i40e_vsi_vlan_pvid_info info; + + memset(&info, 0, sizeof(info)); + info.on = on; + if (info.on) + info.config.pvid = pvid; + else { + info.config.reject.tagged = + data->dev_conf.txmode.hw_vlan_reject_tagged; + info.config.reject.untagged = + data->dev_conf.txmode.hw_vlan_reject_untagged; + } + + return i40e_vsi_vlan_pvid_set(vsi, &info); +} + +static int +i40e_dev_led_on(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t mode = i40e_led_get(hw); + + if (mode == 0) + i40e_led_set(hw, 0xf, true); /* 0xf means led always true */ + + return 0; +} + +static int +i40e_dev_led_off(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t mode = i40e_led_get(hw); + + if (mode != 0) + i40e_led_set(hw, 0, false); + + return 0; +} + +static int +i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + fc_conf->pause_time = pf->fc_conf.pause_time; + fc_conf->high_water = pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]; + fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]; + + /* Return current mode according to actual setting*/ + switch (hw->fc.current_mode) { + case I40E_FC_FULL: + fc_conf->mode = RTE_FC_FULL; + break; + case I40E_FC_TX_PAUSE: + fc_conf->mode = RTE_FC_TX_PAUSE; + break; + case I40E_FC_RX_PAUSE: + fc_conf->mode = RTE_FC_RX_PAUSE; + break; + case I40E_FC_NONE: + default: + fc_conf->mode = RTE_FC_NONE; + }; + + return 0; +} + +static int +i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + uint32_t mflcn_reg, fctrl_reg, reg; + uint32_t max_high_water; + uint8_t i, aq_failure; + int err; + 
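/*
 * Usage sketch (not part of the patch): configuring link flow control on a
 * port through the ethdev API serviced by this callback.  high_water and
 * low_water are expressed in kilobytes; the code below checks that
 * high_water does not exceed I40E_RXPBSIZE >> I40E_KILOSHIFT and is not
 * smaller than low_water.  The threshold values here are placeholders only.
 * Assumes <string.h> and rte_ethdev.h.
 */
static int
enable_full_flow_ctrl(uint8_t port_id)
{
        struct rte_eth_fc_conf fc_conf;

        memset(&fc_conf, 0, sizeof(fc_conf));
        fc_conf.mode = RTE_FC_FULL;     /* generate and honour pause frames */
        fc_conf.pause_time = 0xFFFF;    /* pause quanta advertised to the peer */
        fc_conf.high_water = 0x120;     /* KB, placeholder threshold */
        fc_conf.low_water = 0x60;       /* KB, must not exceed high_water */

        return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}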
struct i40e_hw *hw; + struct i40e_pf *pf; + enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = { + [RTE_FC_NONE] = I40E_FC_NONE, + [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE, + [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE, + [RTE_FC_FULL] = I40E_FC_FULL + }; + + /* high_water field in the rte_eth_fc_conf using the kilobytes unit */ + + max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT; + if ((fc_conf->high_water > max_high_water) || + (fc_conf->high_water < fc_conf->low_water)) { + PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB, " + "High_water must <= %d.", max_high_water); + return -EINVAL; + } + + hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode]; + + pf->fc_conf.pause_time = fc_conf->pause_time; + pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water; + pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water; + + PMD_INIT_FUNC_TRACE(); + + /* All the link flow control related enable/disable register + * configuration is handle by the F/W + */ + err = i40e_set_fc(hw, &aq_failure, true); + if (err < 0) + return -ENOSYS; + + if (i40e_is_40G_device(hw->device_id)) { + /* Configure flow control refresh threshold, + * the value for stat_tx_pause_refresh_timer[8] + * is used for global pause operation. + */ + + I40E_WRITE_REG(hw, + I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8), + pf->fc_conf.pause_time); + + /* configure the timer value included in transmitted pause + * frame, + * the value for stat_tx_pause_quanta[8] is used for global + * pause operation + */ + I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8), + pf->fc_conf.pause_time); + + fctrl_reg = I40E_READ_REG(hw, + I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL); + + if (fc_conf->mac_ctrl_frame_fwd != 0) + fctrl_reg |= I40E_PRTMAC_FWD_CTRL; + else + fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL; + + I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL, + fctrl_reg); + } else { + /* Configure pause time (2 TCs per register) */ + reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++) + I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg); + + /* Configure flow control refresh threshold value */ + I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV, + pf->fc_conf.pause_time / 2); + + mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN); + + /* set or clear MFLCN.PMCF & MFLCN.DPF bits + *depending on configuration + */ + if (fc_conf->mac_ctrl_frame_fwd != 0) { + mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK; + mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK; + } else { + mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK; + mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK; + } + + I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg); + } + + /* config the water marker both based on the packets and bytes */ + I40E_WRITE_REG(hw, I40E_GLRPB_PHW, + (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] + << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE); + I40E_WRITE_REG(hw, I40E_GLRPB_PLW, + (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] + << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE); + I40E_WRITE_REG(hw, I40E_GLRPB_GHW, + pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] + << I40E_KILOSHIFT); + I40E_WRITE_REG(hw, I40E_GLRPB_GLW, + pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] + << I40E_KILOSHIFT); + + I40E_WRITE_FLUSH(hw); + + return 0; +} + +static int +i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev, + __rte_unused struct rte_eth_pfc_conf *pfc_conf) +{ + PMD_INIT_FUNC_TRACE(); + + return 
-ENOSYS; +} + +/* Add a MAC address, and update filters */ +static void +i40e_macaddr_add(struct rte_eth_dev *dev, + struct ether_addr *mac_addr, + __rte_unused uint32_t index, + uint32_t pool) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_mac_filter_info mac_filter; + struct i40e_vsi *vsi; + int ret; + + /* If VMDQ not enabled or configured, return */ + if (pool != 0 && (!(pf->flags | I40E_FLAG_VMDQ) || !pf->nb_cfg_vmdq_vsi)) { + PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u", + pf->flags | I40E_FLAG_VMDQ ? "configured" : "enabled", + pool); + return; + } + + if (pool > pf->nb_cfg_vmdq_vsi) { + PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u", + pool, pf->nb_cfg_vmdq_vsi); + return; + } + + (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN); + if (dev->data->dev_conf.rxmode.hw_vlan_filter) + mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + else + mac_filter.filter_type = RTE_MAC_PERFECT_MATCH; + + if (pool == 0) + vsi = pf->main_vsi; + else + vsi = pf->vmdq[pool - 1].vsi; + + ret = i40e_vsi_add_mac(vsi, &mac_filter); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter"); + return; + } +} + +/* Remove a MAC address, and update filters */ +static void +i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_vsi *vsi; + struct rte_eth_dev_data *data = dev->data; + struct ether_addr *macaddr; + int ret; + uint32_t i; + uint64_t pool_sel; + + macaddr = &(data->mac_addrs[index]); + + pool_sel = dev->data->mac_pool_sel[index]; + + for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) { + if (pool_sel & (1ULL << i)) { + if (i == 0) + vsi = pf->main_vsi; + else { + /* No VMDQ pool enabled or configured */ + if (!(pf->flags | I40E_FLAG_VMDQ) || + (i > pf->nb_cfg_vmdq_vsi)) { + PMD_DRV_LOG(ERR, "No VMDQ pool enabled" + "/configured"); + return; + } + vsi = pf->vmdq[i - 1].vsi; + } + ret = i40e_vsi_delete_mac(vsi, macaddr); + + if (ret) { + PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter"); + return; + } + } + } +} + +/* Set perfect match or hash match of MAC and VLAN for a VF */ +static int +i40e_vf_mac_filter_set(struct i40e_pf *pf, + struct rte_eth_mac_filter *filter, + bool add) +{ + struct i40e_hw *hw; + struct i40e_mac_filter_info mac_filter; + struct ether_addr old_mac; + struct ether_addr *new_mac; + struct i40e_pf_vf *vf = NULL; + uint16_t vf_id; + int ret; + + if (pf == NULL) { + PMD_DRV_LOG(ERR, "Invalid PF argument."); + return -EINVAL; + } + hw = I40E_PF_TO_HW(pf); + + if (filter == NULL) { + PMD_DRV_LOG(ERR, "Invalid mac filter argument."); + return -EINVAL; + } + + new_mac = &filter->mac_addr; + + if (is_zero_ether_addr(new_mac)) { + PMD_DRV_LOG(ERR, "Invalid ethernet address."); + return -EINVAL; + } + + vf_id = filter->dst_id; + + if (vf_id > pf->vf_num - 1 || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid argument."); + return -EINVAL; + } + vf = &pf->vfs[vf_id]; + + if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) { + PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address."); + return -EINVAL; + } + + if (add) { + (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN); + (void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes, + ETHER_ADDR_LEN); + (void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr, + ETHER_ADDR_LEN); + + mac_filter.filter_type = filter->filter_type; + ret = i40e_vsi_add_mac(vf->vsi, &mac_filter); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to add MAC 
filter."); + return -1; + } + ether_addr_copy(new_mac, &pf->dev_addr); + } else { + (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr, + ETHER_ADDR_LEN); + ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to delete MAC filter."); + return -1; + } + + /* Clear device address as it has been removed */ + if (is_same_ether_addr(&(pf->dev_addr), new_mac)) + memset(&pf->dev_addr, 0, sizeof(struct ether_addr)); + } + + return 0; +} + +/* MAC filter handle */ +static int +i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op, + void *arg) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct rte_eth_mac_filter *filter; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + int ret = I40E_NOT_SUPPORTED; + + filter = (struct rte_eth_mac_filter *)(arg); + + switch (filter_op) { + case RTE_ETH_FILTER_NOP: + ret = I40E_SUCCESS; + break; + case RTE_ETH_FILTER_ADD: + i40e_pf_disable_irq0(hw); + if (filter->is_vf) + ret = i40e_vf_mac_filter_set(pf, filter, 1); + i40e_pf_enable_irq0(hw); + break; + case RTE_ETH_FILTER_DELETE: + i40e_pf_disable_irq0(hw); + if (filter->is_vf) + ret = i40e_vf_mac_filter_set(pf, filter, 0); + i40e_pf_enable_irq0(hw); + break; + default: + PMD_DRV_LOG(ERR, "unknown operation %u", filter_op); + ret = I40E_ERR_PARAM; + break; + } + + return ret; +} + +static int +i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) +{ + struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + int ret; + + if (!lut) + return -EINVAL; + + if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { + ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE, + lut, lut_size); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get RSS lookup table"); + return ret; + } + } else { + uint32_t *lut_dw = (uint32_t *)lut; + uint16_t i, lut_size_dw = lut_size / 4; + + for (i = 0; i < lut_size_dw; i++) + lut_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HLUT(i)); + } + + return 0; +} + +static int +i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) +{ + struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + int ret; + + if (!vsi || !lut) + return -EINVAL; + + if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { + ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE, + lut, lut_size); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to set RSS lookup table"); + return ret; + } + } else { + uint32_t *lut_dw = (uint32_t *)lut; + uint16_t i, lut_size_dw = lut_size / 4; + + for (i = 0; i < lut_size_dw; i++) + I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), lut_dw[i]); + I40E_WRITE_FLUSH(hw); + } + + return 0; +} + +static int +i40e_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + uint16_t i, lut_size = pf->hash_lut_size; + uint16_t idx, shift; + uint8_t *lut; + int ret; + + if (reta_size != lut_size || + reta_size > ETH_RSS_RETA_SIZE_512) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)\n", reta_size, lut_size); + return -EINVAL; + } + + lut = rte_zmalloc("i40e_rss_lut", reta_size, 0); + if (!lut) { + PMD_DRV_LOG(ERR, "No memory can be allocated"); + return -ENOMEM; + } + ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size); + if (ret) + goto out; + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if 
(reta_conf[idx].mask & (1ULL << shift)) + lut[i] = reta_conf[idx].reta[shift]; + } + ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size); + +out: + rte_free(lut); + + return ret; +} + +static int +i40e_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + uint16_t i, lut_size = pf->hash_lut_size; + uint16_t idx, shift; + uint8_t *lut; + int ret; + + if (reta_size != lut_size || + reta_size > ETH_RSS_RETA_SIZE_512) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)\n", reta_size, lut_size); + return -EINVAL; + } + + lut = rte_zmalloc("i40e_rss_lut", reta_size, 0); + if (!lut) { + PMD_DRV_LOG(ERR, "No memory can be allocated"); + return -ENOMEM; + } + + ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size); + if (ret) + goto out; + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) + reta_conf[idx].reta[shift] = lut[i]; + } + +out: + rte_free(lut); + + return ret; +} + +/** + * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver) + * @hw: pointer to the HW structure + * @mem: pointer to mem struct to fill out + * @size: size of memory requested + * @alignment: what to align the allocation to + **/ +enum i40e_status_code +i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw, + struct i40e_dma_mem *mem, + u64 size, + u32 alignment) +{ + const struct rte_memzone *mz = NULL; + char z_name[RTE_MEMZONE_NAMESIZE]; + + if (!mem) + return I40E_ERR_PARAM; + + snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand()); + mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0, + alignment, RTE_PGSIZE_2M); + if (!mz) + return I40E_ERR_NO_MEMORY; + + mem->size = size; + mem->va = mz->addr; + mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr); + mem->zone = (const void *)mz; + PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: " + "%"PRIu64, mz->name, mem->pa); + + return I40E_SUCCESS; +} + +/** + * i40e_free_dma_mem_d - specific memory free for shared code (base driver) + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to free + **/ +enum i40e_status_code +i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw, + struct i40e_dma_mem *mem) +{ + if (!mem) + return I40E_ERR_PARAM; + + PMD_DRV_LOG(DEBUG, "memzone %s to be freed with physical address: " + "%"PRIu64, ((const struct rte_memzone *)mem->zone)->name, + mem->pa); + rte_memzone_free((const struct rte_memzone *)mem->zone); + mem->zone = NULL; + mem->va = NULL; + mem->pa = (u64)0; + + return I40E_SUCCESS; +} + +/** + * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver) + * @hw: pointer to the HW structure + * @mem: pointer to mem struct to fill out + * @size: size of memory requested + **/ +enum i40e_status_code +i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw, + struct i40e_virt_mem *mem, + u32 size) +{ + if (!mem) + return I40E_ERR_PARAM; + + mem->size = size; + mem->va = rte_zmalloc("i40e", size, 0); + + if (mem->va) + return I40E_SUCCESS; + else + return I40E_ERR_NO_MEMORY; +} + +/** + * i40e_free_virt_mem_d - specific memory free for shared code (base driver) + * @hw: pointer to the HW structure + * @mem: pointer to mem struct to free + **/ +enum i40e_status_code 
+i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw, + struct i40e_virt_mem *mem) +{ + if (!mem) + return I40E_ERR_PARAM; + + rte_free(mem->va); + mem->va = NULL; + + return I40E_SUCCESS; +} + +void +i40e_init_spinlock_d(struct i40e_spinlock *sp) +{ + rte_spinlock_init(&sp->spinlock); +} + +void +i40e_acquire_spinlock_d(struct i40e_spinlock *sp) +{ + rte_spinlock_lock(&sp->spinlock); +} + +void +i40e_release_spinlock_d(struct i40e_spinlock *sp) +{ + rte_spinlock_unlock(&sp->spinlock); +} + +void +i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp) +{ + return; +} + +/** + * Get the hardware capabilities, which will be parsed + * and saved into struct i40e_hw. + */ +static int +i40e_get_cap(struct i40e_hw *hw) +{ + struct i40e_aqc_list_capabilities_element_resp *buf; + uint16_t len, size = 0; + int ret; + + /* Calculate a huge enough buff for saving response data temporarily */ + len = sizeof(struct i40e_aqc_list_capabilities_element_resp) * + I40E_MAX_CAP_ELE_NUM; + buf = rte_zmalloc("i40e", len, 0); + if (!buf) { + PMD_DRV_LOG(ERR, "Failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + /* Get, parse the capabilities and save it to hw */ + ret = i40e_aq_discover_capabilities(hw, buf, len, &size, + i40e_aqc_opc_list_func_capabilities, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to discover capabilities"); + + /* Free the temporary buffer after being used */ + rte_free(buf); + + return ret; +} + +static int +i40e_pf_parameter_init(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint16_t qp_count = 0, vsi_count = 0; + + if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) { + PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV"); + return -EINVAL; + } + /* Add the parameter init for LFC */ + pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME; + pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER; + pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER; + + pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED; + pf->max_num_vsi = hw->func_caps.num_vsis; + pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF; + pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; + pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF; + + /* FDir queue/VSI allocation */ + pf->fdir_qp_offset = 0; + if (hw->func_caps.fd) { + pf->flags |= I40E_FLAG_FDIR; + pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR; + } else { + pf->fdir_nb_qps = 0; + } + qp_count += pf->fdir_nb_qps; + vsi_count += 1; + + /* LAN queue/VSI allocation */ + pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps; + if (!hw->func_caps.rss) { + pf->lan_nb_qps = 1; + } else { + pf->flags |= I40E_FLAG_RSS; + if (hw->mac.type == I40E_MAC_X722) + pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE; + pf->lan_nb_qps = pf->lan_nb_qp_max; + } + qp_count += pf->lan_nb_qps; + vsi_count += 1; + + /* VF queue/VSI allocation */ + pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps; + if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) { + pf->flags |= I40E_FLAG_SRIOV; + pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF; + pf->vf_num = dev->pci_dev->max_vfs; + PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, " + "in total %u queues", pf->vf_num, pf->vf_nb_qps, + pf->vf_nb_qps * pf->vf_num); + } else { + pf->vf_nb_qps = 0; + pf->vf_num = 0; + } + qp_count += pf->vf_nb_qps * pf->vf_num; + vsi_count += pf->vf_num; + + /* VMDq queue/VSI allocation */ + pf->vmdq_qp_offset = 
pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num; + pf->vmdq_nb_qps = 0; + pf->max_nb_vmdq_vsi = 0; + if (hw->func_caps.vmdq) { + if (qp_count < hw->func_caps.num_tx_qp && + vsi_count < hw->func_caps.num_vsis) { + pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp - + qp_count) / pf->vmdq_nb_qp_max; + + /* Limit the maximum number of VMDq vsi to the maximum + * ethdev can support + */ + pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi, + hw->func_caps.num_vsis - vsi_count); + pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi, + ETH_64_POOLS); + if (pf->max_nb_vmdq_vsi) { + pf->flags |= I40E_FLAG_VMDQ; + pf->vmdq_nb_qps = pf->vmdq_nb_qp_max; + PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues " + "per VMDQ VSI, in total %u queues", + pf->max_nb_vmdq_vsi, + pf->vmdq_nb_qps, pf->vmdq_nb_qps * + pf->max_nb_vmdq_vsi); + } else { + PMD_DRV_LOG(INFO, "No enough queues left for " + "VMDq"); + } + } else { + PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq"); + } + } + qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi; + vsi_count += pf->max_nb_vmdq_vsi; + + if (hw->func_caps.dcb) + pf->flags |= I40E_FLAG_DCB; + + if (qp_count > hw->func_caps.num_tx_qp) { + PMD_DRV_LOG(ERR, "Failed to allocate %u queues, which exceeds " + "the hardware maximum %u", qp_count, + hw->func_caps.num_tx_qp); + return -EINVAL; + } + if (vsi_count > hw->func_caps.num_vsis) { + PMD_DRV_LOG(ERR, "Failed to allocate %u VSIs, which exceeds " + "the hardware maximum %u", vsi_count, + hw->func_caps.num_vsis); + return -EINVAL; + } + + return 0; +} + +static int +i40e_pf_get_switch_config(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_aqc_get_switch_config_resp *switch_config; + struct i40e_aqc_switch_config_element_resp *element; + uint16_t start_seid = 0, num_reported; + int ret; + + switch_config = (struct i40e_aqc_get_switch_config_resp *)\ + rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0); + if (!switch_config) { + PMD_DRV_LOG(ERR, "Failed to allocated memory"); + return -ENOMEM; + } + + /* Get the switch configurations */ + ret = i40e_aq_get_switch_config(hw, switch_config, + I40E_AQ_LARGE_BUF, &start_seid, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to get switch configurations"); + goto fail; + } + num_reported = rte_le_to_cpu_16(switch_config->header.num_reported); + if (num_reported != 1) { /* The number should be 1 */ + PMD_DRV_LOG(ERR, "Wrong number of switch config reported"); + goto fail; + } + + /* Parse the switch configuration elements */ + element = &(switch_config->element[0]); + if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) { + pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid); + pf->main_vsi_seid = rte_le_to_cpu_16(element->seid); + } else + PMD_DRV_LOG(INFO, "Unknown element type"); + +fail: + rte_free(switch_config); + + return ret; +} + +static int +i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base, + uint32_t num) +{ + struct pool_entry *entry; + + if (pool == NULL || num == 0) + return -EINVAL; + + entry = rte_zmalloc("i40e", sizeof(*entry), 0); + if (entry == NULL) { + PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool"); + return -ENOMEM; + } + + /* queue heap initialize */ + pool->num_free = num; + pool->num_alloc = 0; + pool->base = base; + LIST_INIT(&pool->alloc_list); + LIST_INIT(&pool->free_list); + + /* Initialize element */ + entry->base = 0; + entry->len = num; + + LIST_INSERT_HEAD(&pool->free_list, entry, next); + return 0; +} + +static void +i40e_res_pool_destroy(struct i40e_res_pool_info *pool) +{ + struct 
pool_entry *entry, *next_entry; + + if (pool == NULL) + return; + + for (entry = LIST_FIRST(&pool->alloc_list); + entry && (next_entry = LIST_NEXT(entry, next), 1); + entry = next_entry) { + LIST_REMOVE(entry, next); + rte_free(entry); + } + + for (entry = LIST_FIRST(&pool->free_list); + entry && (next_entry = LIST_NEXT(entry, next), 1); + entry = next_entry) { + LIST_REMOVE(entry, next); + rte_free(entry); + } + + pool->num_free = 0; + pool->num_alloc = 0; + pool->base = 0; + LIST_INIT(&pool->alloc_list); + LIST_INIT(&pool->free_list); +} + +static int +i40e_res_pool_free(struct i40e_res_pool_info *pool, + uint32_t base) +{ + struct pool_entry *entry, *next, *prev, *valid_entry = NULL; + uint32_t pool_offset; + int insert; + + if (pool == NULL) { + PMD_DRV_LOG(ERR, "Invalid parameter"); + return -EINVAL; + } + + pool_offset = base - pool->base; + /* Lookup in alloc list */ + LIST_FOREACH(entry, &pool->alloc_list, next) { + if (entry->base == pool_offset) { + valid_entry = entry; + LIST_REMOVE(entry, next); + break; + } + } + + /* Not find, return */ + if (valid_entry == NULL) { + PMD_DRV_LOG(ERR, "Failed to find entry"); + return -EINVAL; + } + + /** + * Found it, move it to free list and try to merge. + * In order to make merge easier, always sort it by qbase. + * Find adjacent prev and last entries. + */ + prev = next = NULL; + LIST_FOREACH(entry, &pool->free_list, next) { + if (entry->base > valid_entry->base) { + next = entry; + break; + } + prev = entry; + } + + insert = 0; + /* Try to merge with next one*/ + if (next != NULL) { + /* Merge with next one */ + if (valid_entry->base + valid_entry->len == next->base) { + next->base = valid_entry->base; + next->len += valid_entry->len; + rte_free(valid_entry); + valid_entry = next; + insert = 1; + } + } + + if (prev != NULL) { + /* Merge with previous one */ + if (prev->base + prev->len == valid_entry->base) { + prev->len += valid_entry->len; + /* If it merge with next one, remove next node */ + if (insert == 1) { + LIST_REMOVE(valid_entry, next); + rte_free(valid_entry); + } else { + rte_free(valid_entry); + insert = 1; + } + } + } + + /* Not find any entry to merge, insert */ + if (insert == 0) { + if (prev != NULL) + LIST_INSERT_AFTER(prev, valid_entry, next); + else if (next != NULL) + LIST_INSERT_BEFORE(next, valid_entry, next); + else /* It's empty list, insert to head */ + LIST_INSERT_HEAD(&pool->free_list, valid_entry, next); + } + + pool->num_free += valid_entry->len; + pool->num_alloc -= valid_entry->len; + + return 0; +} + +static int +i40e_res_pool_alloc(struct i40e_res_pool_info *pool, + uint16_t num) +{ + struct pool_entry *entry, *valid_entry; + + if (pool == NULL || num == 0) { + PMD_DRV_LOG(ERR, "Invalid parameter"); + return -EINVAL; + } + + if (pool->num_free < num) { + PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u", + num, pool->num_free); + return -ENOMEM; + } + + valid_entry = NULL; + /* Lookup in free list and find most fit one */ + LIST_FOREACH(entry, &pool->free_list, next) { + if (entry->len >= num) { + /* Find best one */ + if (entry->len == num) { + valid_entry = entry; + break; + } + if (valid_entry == NULL || valid_entry->len > entry->len) + valid_entry = entry; + } + } + + /* Not find one to satisfy the request, return */ + if (valid_entry == NULL) { + PMD_DRV_LOG(ERR, "No valid entry found"); + return -ENOMEM; + } + /** + * The entry have equal queue number as requested, + * remove it from alloc_list. 
+ */ + if (valid_entry->len == num) { + LIST_REMOVE(valid_entry, next); + } else { + /** + * The entry have more numbers than requested, + * create a new entry for alloc_list and minus its + * queue base and number in free_list. + */ + entry = rte_zmalloc("res_pool", sizeof(*entry), 0); + if (entry == NULL) { + PMD_DRV_LOG(ERR, "Failed to allocate memory for " + "resource pool"); + return -ENOMEM; + } + entry->base = valid_entry->base; + entry->len = num; + valid_entry->base += num; + valid_entry->len -= num; + valid_entry = entry; + } + + /* Insert it into alloc list, not sorted */ + LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next); + + pool->num_free -= valid_entry->len; + pool->num_alloc += valid_entry->len; + + return valid_entry->base + pool->base; +} + +/** + * bitmap_is_subset - Check whether src2 is subset of src1 + **/ +static inline int +bitmap_is_subset(uint8_t src1, uint8_t src2) +{ + return !((src1 ^ src2) & src2); +} + +static enum i40e_status_code +validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap) +{ + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + + /* If DCB is not supported, only default TC is supported */ + if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) { + PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported"); + return I40E_NOT_SUPPORTED; + } + + if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) { + PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to " + "HW support 0x%x", hw->func_caps.enabled_tcmap, + enabled_tcmap); + return I40E_NOT_SUPPORTED; + } + return I40E_SUCCESS; +} + +int +i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi, + struct i40e_vsi_vlan_pvid_info *info) +{ + struct i40e_hw *hw; + struct i40e_vsi_context ctxt; + uint8_t vlan_flags = 0; + int ret; + + if (vsi == NULL || info == NULL) { + PMD_DRV_LOG(ERR, "invalid parameters"); + return I40E_ERR_PARAM; + } + + if (info->on) { + vsi->info.pvid = info->config.pvid; + /** + * If insert pvid is enabled, only tagged pkts are + * allowed to be sent out. + */ + vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID | + I40E_AQ_VSI_PVLAN_MODE_TAGGED; + } else { + vsi->info.pvid = 0; + if (info->config.reject.tagged == 0) + vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED; + + if (info->config.reject.untagged == 0) + vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED; + } + vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID | + I40E_AQ_VSI_PVLAN_MODE_MASK); + vsi->info.port_vlan_flags |= vlan_flags; + vsi->info.valid_sections = + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID); + memset(&ctxt, 0, sizeof(ctxt)); + (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ctxt.seid = vsi->seid; + + hw = I40E_VSI_TO_HW(vsi); + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to update VSI params"); + + return ret; +} + +static int +i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap) +{ + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + int i, ret; + struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data; + + ret = validate_tcmap_parameter(vsi, enabled_tcmap); + if (ret != I40E_SUCCESS) + return ret; + + if (!vsi->seid) { + PMD_DRV_LOG(ERR, "seid not valid"); + return -EINVAL; + } + + memset(&tc_bw_data, 0, sizeof(tc_bw_data)); + tc_bw_data.tc_valid_bits = enabled_tcmap; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + tc_bw_data.tc_bw_credits[i] = + (enabled_tcmap & (1 << i)) ? 
1 : 0; + + ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to configure TC BW"); + return ret; + } + + (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles, + sizeof(vsi->info.qs_handle)); + return I40E_SUCCESS; +} + +static enum i40e_status_code +i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi, + struct i40e_aqc_vsi_properties_data *info, + uint8_t enabled_tcmap) +{ + enum i40e_status_code ret; + int i, total_tc = 0; + uint16_t qpnum_per_tc, bsf, qp_idx; + + ret = validate_tcmap_parameter(vsi, enabled_tcmap); + if (ret != I40E_SUCCESS) + return ret; + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + if (enabled_tcmap & (1 << i)) + total_tc++; + vsi->enabled_tc = enabled_tcmap; + + /* Number of queues per enabled TC */ + qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc); + qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC); + bsf = rte_bsf32(qpnum_per_tc); + + /* Adjust the queue number to actual queues that can be applied */ + if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1)) + vsi->nb_qps = qpnum_per_tc * total_tc; + + /** + * Configure TC and queue mapping parameters, for enabled TC, + * allocate qpnum_per_tc queues to this traffic. For disabled TC, + * default queue will serve it. + */ + qp_idx = 0; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (vsi->enabled_tc & (1 << i)) { + info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx << + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | + (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)); + qp_idx += qpnum_per_tc; + } else + info->tc_mapping[i] = 0; + } + + /* Associate queue number with VSI */ + if (vsi->type == I40E_VSI_SRIOV) { + info->mapping_flags |= + rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG); + for (i = 0; i < vsi->nb_qps; i++) + info->queue_mapping[i] = + rte_cpu_to_le_16(vsi->base_queue + i); + } else { + info->mapping_flags |= + rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG); + info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue); + } + info->valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID); + + return I40E_SUCCESS; +} + +static int +i40e_veb_release(struct i40e_veb *veb) +{ + struct i40e_vsi *vsi; + struct i40e_hw *hw; + + if (veb == NULL || veb->associate_vsi == NULL) + return -EINVAL; + + if (!TAILQ_EMPTY(&veb->head)) { + PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove"); + return -EACCES; + } + + vsi = veb->associate_vsi; + hw = I40E_VSI_TO_HW(vsi); + + vsi->uplink_seid = veb->uplink_seid; + i40e_aq_delete_element(hw, veb->seid, NULL); + rte_free(veb); + vsi->veb = NULL; + return I40E_SUCCESS; +} + +/* Setup a veb */ +static struct i40e_veb * +i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi) +{ + struct i40e_veb *veb; + int ret; + struct i40e_hw *hw; + + if (NULL == pf || vsi == NULL) { + PMD_DRV_LOG(ERR, "veb setup failed, " + "associated VSI shouldn't null"); + return NULL; + } + hw = I40E_PF_TO_HW(pf); + + veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0); + if (!veb) { + PMD_DRV_LOG(ERR, "Failed to allocate memory for veb"); + goto fail; + } + + veb->associate_vsi = vsi; + TAILQ_INIT(&veb->head); + veb->uplink_seid = vsi->uplink_seid; + + ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid, + I40E_DEFAULT_TCMAP, false, &veb->seid, false, NULL); + + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d", + hw->aq.asq_last_status); + goto fail; + } + + /* get statistics index */ + ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL, + &veb->stats_idx, NULL, 
NULL, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d", + hw->aq.asq_last_status); + goto fail; + } + + /* Get VEB bandwidth, to be implemented */ + /* Now associated vsi binding to the VEB, set uplink to this VEB */ + vsi->uplink_seid = veb->seid; + + return veb; +fail: + rte_free(veb); + return NULL; +} + +int +i40e_vsi_release(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf; + struct i40e_hw *hw; + struct i40e_vsi_list *vsi_list; + int ret; + struct i40e_mac_filter *f; + + if (!vsi) + return I40E_SUCCESS; + + pf = I40E_VSI_TO_PF(vsi); + hw = I40E_VSI_TO_HW(vsi); + + /* VSI has child to attach, release child first */ + if (vsi->veb) { + TAILQ_FOREACH(vsi_list, &vsi->veb->head, list) { + if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS) + return -1; + TAILQ_REMOVE(&vsi->veb->head, vsi_list, list); + } + i40e_veb_release(vsi->veb); + } + + /* Remove all macvlan filters of the VSI */ + i40e_vsi_remove_all_macvlan_filter(vsi); + TAILQ_FOREACH(f, &vsi->mac_list, next) + rte_free(f); + + if (vsi->type != I40E_VSI_MAIN) { + /* Remove vsi from parent's sibling list */ + if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) { + PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL"); + return I40E_ERR_PARAM; + } + TAILQ_REMOVE(&vsi->parent_vsi->veb->head, + &vsi->sib_vsi_list, list); + + /* Remove all switch element of the VSI */ + ret = i40e_aq_delete_element(hw, vsi->seid, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to delete element"); + } + i40e_res_pool_free(&pf->qp_pool, vsi->base_queue); + + if (vsi->type != I40E_VSI_SRIOV) + i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr); + rte_free(vsi); + + return I40E_SUCCESS; +} + +static int +i40e_update_default_filter_setting(struct i40e_vsi *vsi) +{ + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_aqc_remove_macvlan_element_data def_filter; + struct i40e_mac_filter_info filter; + int ret; + + if (vsi->type != I40E_VSI_MAIN) + return I40E_ERR_CONFIG; + memset(&def_filter, 0, sizeof(def_filter)); + (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr, + ETH_ADDR_LEN); + def_filter.vlan_tag = 0; + def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | + I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; + ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL); + if (ret != I40E_SUCCESS) { + struct i40e_mac_filter *f; + struct ether_addr *mac; + + PMD_DRV_LOG(WARNING, "Cannot remove the default " + "macvlan filter"); + /* It needs to add the permanent mac into mac list */ + f = rte_zmalloc("macv_filter", sizeof(*f), 0); + if (f == NULL) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + mac = &f->mac_info.mac_addr; + (void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr, + ETH_ADDR_LEN); + f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH; + TAILQ_INSERT_TAIL(&vsi->mac_list, f, next); + vsi->mac_num++; + + return ret; + } + (void)rte_memcpy(&filter.mac_addr, + (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN); + filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + return i40e_vsi_add_mac(vsi, &filter); +} + +/* + * i40e_vsi_get_bw_config - Query VSI BW Information + * @vsi: the VSI to be queried + * + * Returns 0 on success, negative value on failure + */ +static enum i40e_status_code +i40e_vsi_get_bw_config(struct i40e_vsi *vsi) +{ + struct i40e_aqc_query_vsi_bw_config_resp bw_config; + struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config; + struct i40e_hw *hw = &vsi->adapter->hw; + i40e_status ret; + int i; + uint32_t bw_max; + + 
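/*
 * Illustration (a sketch, not part of the patch): the loop below unpacks the
 * per-TC "max credits" from ets_sla_config.tc_bw_max[], which packs the
 * eight traffic classes at 4 bits each across two little-endian 16-bit words
 * (the 4th bit of every nibble is reserved, hence the 3-bit mask).  A
 * standalone version of the same unpacking, with hypothetical names:
 */
static inline uint8_t
tc_max_credits_sketch(uint16_t bw_max_lo, uint16_t bw_max_hi, unsigned int tc)
{
        uint32_t packed = (uint32_t)bw_max_lo | ((uint32_t)bw_max_hi << 16);

        /* e.g. packed = 0x87654321: TC0 -> 0x1, TC1 -> 0x2, ..., TC6 -> 0x7 */
        return (uint8_t)((packed >> (tc * 4)) & 0x7);
}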
memset(&bw_config, 0, sizeof(bw_config)); + ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u", + hw->aq.asq_last_status); + return ret; + } + + memset(&ets_sla_config, 0, sizeof(ets_sla_config)); + ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, + &ets_sla_config, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith " + "configuration %u", hw->aq.asq_last_status); + return ret; + } + + /* store and print out BW info */ + vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit); + vsi->bw_info.bw_max = bw_config.max_bw; + PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit); + PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max); + bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) | + (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) << + I40E_16_BIT_WIDTH); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + vsi->bw_info.bw_ets_share_credits[i] = + ets_sla_config.share_credits[i]; + vsi->bw_info.bw_ets_credits[i] = + rte_le_to_cpu_16(ets_sla_config.credits[i]); + /* 4 bits per TC, 4th bit is reserved */ + vsi->bw_info.bw_ets_max[i] = + (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) & + RTE_LEN2MASK(3, uint8_t)); + PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i, + vsi->bw_info.bw_ets_share_credits[i]); + PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i, + vsi->bw_info.bw_ets_credits[i]); + PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i, + vsi->bw_info.bw_ets_max[i]); + } + + return I40E_SUCCESS; +} + +/* i40e_enable_pf_lb + * @pf: pointer to the pf structure + * + * allow loopback on pf + */ +static inline void +i40e_enable_pf_lb(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_vsi_context ctxt; + int ret; + + /* Use the FW API if FW >= v5.0 */ + if (hw->aq.fw_maj_ver < 5) { + PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback"); + return; + } + + memset(&ctxt, 0, sizeof(ctxt)); + ctxt.seid = pf->main_vsi_seid; + ctxt.pf_num = hw->pf_id; + ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL); + if (ret) { + PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d", + ret, hw->aq.asq_last_status); + return; + } + ctxt.flags = I40E_AQ_VSI_TYPE_PF; + ctxt.info.valid_sections = + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id |= + rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret) + PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d\n", + hw->aq.asq_last_status); +} + +/* Setup a VSI */ +struct i40e_vsi * +i40e_vsi_setup(struct i40e_pf *pf, + enum i40e_vsi_type type, + struct i40e_vsi *uplink_vsi, + uint16_t user_param) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_vsi *vsi; + struct i40e_mac_filter_info filter; + int ret; + struct i40e_vsi_context ctxt; + struct ether_addr broadcast = + {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}; + + if (type != I40E_VSI_MAIN && uplink_vsi == NULL) { + PMD_DRV_LOG(ERR, "VSI setup failed, " + "VSI link shouldn't be NULL"); + return NULL; + } + + if (type == I40E_VSI_MAIN && uplink_vsi != NULL) { + PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI " + "uplink VSI should be NULL"); + return NULL; + } + + /* If uplink vsi didn't setup VEB, create one first */ + if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) { + uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi); + + if (NULL == uplink_vsi->veb) { + PMD_DRV_LOG(ERR, "VEB setup failed"); + return 
NULL; + } + /* set ALLOWLOOPBACk on pf, when veb is created */ + i40e_enable_pf_lb(pf); + } + + vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0); + if (!vsi) { + PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi"); + return NULL; + } + TAILQ_INIT(&vsi->mac_list); + vsi->type = type; + vsi->adapter = I40E_PF_TO_ADAPTER(pf); + vsi->max_macaddrs = I40E_NUM_MACADDR_MAX; + vsi->parent_vsi = uplink_vsi; + vsi->user_param = user_param; + /* Allocate queues */ + switch (vsi->type) { + case I40E_VSI_MAIN : + vsi->nb_qps = pf->lan_nb_qps; + break; + case I40E_VSI_SRIOV : + vsi->nb_qps = pf->vf_nb_qps; + break; + case I40E_VSI_VMDQ2: + vsi->nb_qps = pf->vmdq_nb_qps; + break; + case I40E_VSI_FDIR: + vsi->nb_qps = pf->fdir_nb_qps; + break; + default: + goto fail_mem; + } + /* + * The filter status descriptor is reported in rx queue 0, + * while the tx queue for fdir filter programming has no + * such constraints, can be non-zero queues. + * To simplify it, choose FDIR vsi use queue 0 pair. + * To make sure it will use queue 0 pair, queue allocation + * need be done before this function is called + */ + if (type != I40E_VSI_FDIR) { + ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps); + if (ret < 0) { + PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d", + vsi->seid, ret); + goto fail_mem; + } + vsi->base_queue = ret; + } else + vsi->base_queue = I40E_FDIR_QUEUE_ID; + + /* VF has MSIX interrupt in VF range, don't allocate here */ + if (type == I40E_VSI_MAIN) { + ret = i40e_res_pool_alloc(&pf->msix_pool, + RTE_MIN(vsi->nb_qps, + RTE_MAX_RXTX_INTR_VEC_ID)); + if (ret < 0) { + PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d", + vsi->seid, ret); + goto fail_queue_alloc; + } + vsi->msix_intr = ret; + vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID); + } else if (type != I40E_VSI_SRIOV) { + ret = i40e_res_pool_alloc(&pf->msix_pool, 1); + if (ret < 0) { + PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret); + goto fail_queue_alloc; + } + vsi->msix_intr = ret; + vsi->nb_msix = 1; + } else { + vsi->msix_intr = 0; + vsi->nb_msix = 0; + } + + /* Add VSI */ + if (type == I40E_VSI_MAIN) { + /* For main VSI, no need to add since it's default one */ + vsi->uplink_seid = pf->mac_seid; + vsi->seid = pf->main_vsi_seid; + /* Bind queues with specific MSIX interrupt */ + /** + * Needs 2 interrupt at least, one for misc cause which will + * enabled from OS side, Another for queues binding the + * interrupt from device side only. 
+ */ + + /* Get default VSI parameters from hardware */ + memset(&ctxt, 0, sizeof(ctxt)); + ctxt.seid = vsi->seid; + ctxt.pf_num = hw->pf_id; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.vf_num = 0; + ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to get VSI params"); + goto fail_msix_alloc; + } + (void)rte_memcpy(&vsi->info, &ctxt.info, + sizeof(struct i40e_aqc_vsi_properties_data)); + vsi->vsi_id = ctxt.vsi_number; + vsi->info.valid_sections = 0; + + /* Configure tc, enabled TC0 only */ + if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) != + I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to update TC bandwidth"); + goto fail_msix_alloc; + } + + /* TC, queue mapping */ + memset(&ctxt, 0, sizeof(ctxt)); + vsi->info.valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID); + vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | + I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; + (void)rte_memcpy(&ctxt.info, &vsi->info, + sizeof(struct i40e_aqc_vsi_properties_data)); + ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, + I40E_DEFAULT_TCMAP); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to configure " + "TC queue mapping"); + goto fail_msix_alloc; + } + ctxt.seid = vsi->seid; + ctxt.pf_num = hw->pf_id; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.vf_num = 0; + + /* Update VSI parameters */ + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to update VSI params"); + goto fail_msix_alloc; + } + + (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping, + sizeof(vsi->info.tc_mapping)); + (void)rte_memcpy(&vsi->info.queue_mapping, + &ctxt.info.queue_mapping, + sizeof(vsi->info.queue_mapping)); + vsi->info.mapping_flags = ctxt.info.mapping_flags; + vsi->info.valid_sections = 0; + + (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr, + ETH_ADDR_LEN); + + /** + * Updating default filter settings are necessary to prevent + * reception of tagged packets. + * Some old firmware configurations load a default macvlan + * filter which accepts both tagged and untagged packets. + * The updating is to use a normal filter instead if needed. + * For NVM 4.2.2 or after, the updating is not needed anymore. + * The firmware with correct configurations load the default + * macvlan filter which is expected and cannot be removed. 
+ */ + i40e_update_default_filter_setting(vsi); + i40e_config_qinq(hw, vsi); + } else if (type == I40E_VSI_SRIOV) { + memset(&ctxt, 0, sizeof(ctxt)); + /** + * For other VSI, the uplink_seid equals to uplink VSI's + * uplink_seid since they share same VEB + */ + vsi->uplink_seid = uplink_vsi->uplink_seid; + ctxt.pf_num = hw->pf_id; + ctxt.vf_num = hw->func_caps.vf_base_id + user_param; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.connection_type = 0x1; + ctxt.flags = I40E_AQ_VSI_TYPE_VF; + + /* Use the VEB configuration if FW >= v5.0 */ + if (hw->aq.fw_maj_ver >= 5) { + /* Configure switch ID */ + ctxt.info.valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id = + rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } + + /* Configure port/vlan */ + ctxt.info.valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID); + ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; + ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, + I40E_DEFAULT_TCMAP); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to configure " + "TC queue mapping"); + goto fail_msix_alloc; + } + ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP; + ctxt.info.valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID); + /** + * Since VSI is not created yet, only configure parameter, + * will add vsi below. + */ + + i40e_config_qinq(hw, vsi); + } else if (type == I40E_VSI_VMDQ2) { + memset(&ctxt, 0, sizeof(ctxt)); + /* + * For other VSI, the uplink_seid equals to uplink VSI's + * uplink_seid since they share same VEB + */ + vsi->uplink_seid = uplink_vsi->uplink_seid; + ctxt.pf_num = hw->pf_id; + ctxt.vf_num = 0; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.connection_type = 0x1; + ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; + + ctxt.info.valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID); + /* user_param carries flag to enable loop back */ + if (user_param) { + ctxt.info.switch_id = + rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB); + ctxt.info.switch_id |= + rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } + + /* Configure port/vlan */ + ctxt.info.valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID); + ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; + ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, + I40E_DEFAULT_TCMAP); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to configure " + "TC queue mapping"); + goto fail_msix_alloc; + } + ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP; + ctxt.info.valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID); + } else if (type == I40E_VSI_FDIR) { + memset(&ctxt, 0, sizeof(ctxt)); + vsi->uplink_seid = uplink_vsi->uplink_seid; + ctxt.pf_num = hw->pf_id; + ctxt.vf_num = 0; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.connection_type = 0x1; /* regular data port */ + ctxt.flags = I40E_AQ_VSI_TYPE_PF; + ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, + I40E_DEFAULT_TCMAP); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to configure " + "TC queue mapping."); + goto fail_msix_alloc; + } + ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP; + ctxt.info.valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID); + } else { + PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet"); + goto fail_msix_alloc; + } + + if (vsi->type != I40E_VSI_MAIN) { + ret = i40e_aq_add_vsi(hw, &ctxt, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d", + hw->aq.asq_last_status); + goto fail_msix_alloc; + } + memcpy(&vsi->info, 
&ctxt.info, sizeof(ctxt.info)); + vsi->info.valid_sections = 0; + vsi->seid = ctxt.seid; + vsi->vsi_id = ctxt.vsi_number; + vsi->sib_vsi_list.vsi = vsi; + TAILQ_INSERT_TAIL(&uplink_vsi->veb->head, + &vsi->sib_vsi_list, list); + } + + /* MAC/VLAN configuration */ + (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN); + filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + + ret = i40e_vsi_add_mac(vsi, &filter); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter"); + goto fail_msix_alloc; + } + + /* Get VSI BW information */ + i40e_vsi_get_bw_config(vsi); + return vsi; +fail_msix_alloc: + i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr); +fail_queue_alloc: + i40e_res_pool_free(&pf->qp_pool,vsi->base_queue); +fail_mem: + rte_free(vsi); + return NULL; +} + +/* Configure vlan filter on or off */ +int +i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on) +{ + int i, num; + struct i40e_mac_filter *f; + struct i40e_mac_filter_info *mac_filter; + enum rte_mac_filter_type desired_filter; + int ret = I40E_SUCCESS; + + if (on) { + /* Filter to match MAC and VLAN */ + desired_filter = RTE_MACVLAN_PERFECT_MATCH; + } else { + /* Filter to match only MAC */ + desired_filter = RTE_MAC_PERFECT_MATCH; + } + + num = vsi->mac_num; + + mac_filter = rte_zmalloc("mac_filter_info_data", + num * sizeof(*mac_filter), 0); + if (mac_filter == NULL) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + i = 0; + + /* Remove all existing mac */ + TAILQ_FOREACH(f, &vsi->mac_list, next) { + mac_filter[i] = f->mac_info; + ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr); + if (ret) { + PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter", + on ? "enable" : "disable"); + goto DONE; + } + i++; + } + + /* Override with new filter */ + for (i = 0; i < num; i++) { + mac_filter[i].filter_type = desired_filter; + ret = i40e_vsi_add_mac(vsi, &mac_filter[i]); + if (ret) { + PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter", + on ? "enable" : "disable"); + goto DONE; + } + } + +DONE: + rte_free(mac_filter); + return ret; +} + +/* Configure vlan stripping on or off */ +int +i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on) +{ + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_vsi_context ctxt; + uint8_t vlan_flags; + int ret = I40E_SUCCESS; + + /* Check if it has been already on or off */ + if (vsi->info.valid_sections & + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) { + if (on) { + if ((vsi->info.port_vlan_flags & + I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0) + return 0; /* already on */ + } else { + if ((vsi->info.port_vlan_flags & + I40E_AQ_VSI_PVLAN_EMOD_MASK) == + I40E_AQ_VSI_PVLAN_EMOD_MASK) + return 0; /* already off */ + } + } + + if (on) + vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; + else + vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING; + vsi->info.valid_sections = + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID); + vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK); + vsi->info.port_vlan_flags |= vlan_flags; + ctxt.seid = vsi->seid; + (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret) + PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping", + on ? 
"enable" : "disable"); + + return ret; +} + +static int +i40e_dev_init_vlan(struct rte_eth_dev *dev) +{ + struct rte_eth_dev_data *data = dev->data; + int ret; + int mask = 0; + + /* Apply vlan offload setting */ + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK; + i40e_vlan_offload_set(dev, mask); + + /* Apply double-vlan setting, not implemented yet */ + + /* Apply pvid setting */ + ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid, + data->dev_conf.txmode.hw_vlan_insert_pvid); + if (ret) + PMD_DRV_LOG(INFO, "Failed to update VSI params"); + + return ret; +} + +static int +i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on) +{ + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + + return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL); +} + +static int +i40e_update_flow_control(struct i40e_hw *hw) +{ +#define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX) + struct i40e_link_status link_status; + uint32_t rxfc = 0, txfc = 0, reg; + uint8_t an_info; + int ret; + + memset(&link_status, 0, sizeof(link_status)); + ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to get link status information"); + goto write_reg; /* Disable flow control */ + } + + an_info = hw->phy.link_info.an_info; + if (!(an_info & I40E_AQ_AN_COMPLETED)) { + PMD_DRV_LOG(INFO, "Link auto negotiation not completed"); + ret = I40E_ERR_NOT_READY; + goto write_reg; /* Disable flow control */ + } + /** + * If link auto negotiation is enabled, flow control needs to + * be configured according to it + */ + switch (an_info & I40E_LINK_PAUSE_RXTX) { + case I40E_LINK_PAUSE_RXTX: + rxfc = 1; + txfc = 1; + hw->fc.current_mode = I40E_FC_FULL; + break; + case I40E_AQ_LINK_PAUSE_RX: + rxfc = 1; + hw->fc.current_mode = I40E_FC_RX_PAUSE; + break; + case I40E_AQ_LINK_PAUSE_TX: + txfc = 1; + hw->fc.current_mode = I40E_FC_TX_PAUSE; + break; + default: + hw->fc.current_mode = I40E_FC_NONE; + break; + } + +write_reg: + I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG, + txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT); + reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN); + reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK; + reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT; + I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg); + + return ret; +} + +/* PF setup */ +static int +i40e_pf_setup(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_filter_control_settings settings; + struct i40e_vsi *vsi; + int ret; + + /* Clear all stats counters */ + pf->offset_loaded = FALSE; + memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats)); + memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats)); + + ret = i40e_pf_get_switch_config(pf); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret); + return ret; + } + if (pf->flags & I40E_FLAG_FDIR) { + /* make queue allocated first, let FDIR use queue pair 0*/ + ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR); + if (ret != I40E_FDIR_QUEUE_ID) { + PMD_DRV_LOG(ERR, "queue allocation fails for FDIR :" + " ret =%d", ret); + pf->flags &= ~I40E_FLAG_FDIR; + } + } + /* main VSI setup */ + vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0); + if (!vsi) { + PMD_DRV_LOG(ERR, "Setup of main vsi failed"); + return I40E_ERR_NOT_READY; + } + pf->main_vsi = vsi; + + /* Configure filter control */ + memset(&settings, 0, sizeof(settings)); + if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128) + settings.hash_lut_size = I40E_HASH_LUT_SIZE_128; + else if (hw->func_caps.rss_table_size 
== ETH_RSS_RETA_SIZE_512) + settings.hash_lut_size = I40E_HASH_LUT_SIZE_512; + else { + PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported\n", + hw->func_caps.rss_table_size); + return I40E_ERR_PARAM; + } + PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table " + "size: %u\n", hw->func_caps.rss_table_size); + pf->hash_lut_size = hw->func_caps.rss_table_size; + + /* Enable ethtype and macvlan filters */ + settings.enable_ethtype = TRUE; + settings.enable_macvlan = TRUE; + ret = i40e_set_filter_control(hw, &settings); + if (ret) + PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d", + ret); + + /* Update flow control according to the auto negotiation */ + i40e_update_flow_control(hw); + + return I40E_SUCCESS; +} + +int +i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on) +{ + uint32_t reg; + uint16_t j; + + /** + * Set or clear TX Queue Disable flags, + * which is required by hardware. + */ + i40e_pre_tx_queue_cfg(hw, q_idx, on); + rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US); + + /* Wait until the request is finished */ + for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) { + rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US); + reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx)); + if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^ + ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) + & 0x1))) { + break; + } + } + if (on) { + if (reg & I40E_QTX_ENA_QENA_STAT_MASK) + return I40E_SUCCESS; /* already on, skip next steps */ + + I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0); + reg |= I40E_QTX_ENA_QENA_REQ_MASK; + } else { + if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK)) + return I40E_SUCCESS; /* already off, skip next steps */ + reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; + } + /* Write the register */ + I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg); + /* Check the result */ + for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) { + rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US); + reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx)); + if (on) { + if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) && + (reg & I40E_QTX_ENA_QENA_STAT_MASK)) + break; + } else { + if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) && + !(reg & I40E_QTX_ENA_QENA_STAT_MASK)) + break; + } + } + /* Check if it is timeout */ + if (j >= I40E_CHK_Q_ENA_COUNT) { + PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]", + (on ? 
"enable" : "disable"), q_idx); + return I40E_ERR_TIMEOUT; + } + + return I40E_SUCCESS; +} + +/* Swith on or off the tx queues */ +static int +i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on) +{ + struct rte_eth_dev_data *dev_data = pf->dev_data; + struct i40e_tx_queue *txq; + struct rte_eth_dev *dev = pf->adapter->eth_dev; + uint16_t i; + int ret; + + for (i = 0; i < dev_data->nb_tx_queues; i++) { + txq = dev_data->tx_queues[i]; + /* Don't operate the queue if not configured or + * if starting only per queue */ + if (!txq || !txq->q_set || (on && txq->tx_deferred_start)) + continue; + if (on) + ret = i40e_dev_tx_queue_start(dev, i); + else + ret = i40e_dev_tx_queue_stop(dev, i); + if ( ret != I40E_SUCCESS) + return ret; + } + + return I40E_SUCCESS; +} + +int +i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on) +{ + uint32_t reg; + uint16_t j; + + /* Wait until the request is finished */ + for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) { + rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US); + reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx)); + if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^ + ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1)) + break; + } + + if (on) { + if (reg & I40E_QRX_ENA_QENA_STAT_MASK) + return I40E_SUCCESS; /* Already on, skip next steps */ + reg |= I40E_QRX_ENA_QENA_REQ_MASK; + } else { + if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK)) + return I40E_SUCCESS; /* Already off, skip next steps */ + reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; + } + + /* Write the register */ + I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg); + /* Check the result */ + for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) { + rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US); + reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx)); + if (on) { + if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) && + (reg & I40E_QRX_ENA_QENA_STAT_MASK)) + break; + } else { + if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) && + !(reg & I40E_QRX_ENA_QENA_STAT_MASK)) + break; + } + } + + /* Check if it is timeout */ + if (j >= I40E_CHK_Q_ENA_COUNT) { + PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]", + (on ? 
"enable" : "disable"), q_idx); + return I40E_ERR_TIMEOUT; + } + + return I40E_SUCCESS; +} +/* Switch on or off the rx queues */ +static int +i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on) +{ + struct rte_eth_dev_data *dev_data = pf->dev_data; + struct i40e_rx_queue *rxq; + struct rte_eth_dev *dev = pf->adapter->eth_dev; + uint16_t i; + int ret; + + for (i = 0; i < dev_data->nb_rx_queues; i++) { + rxq = dev_data->rx_queues[i]; + /* Don't operate the queue if not configured or + * if starting only per queue */ + if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start)) + continue; + if (on) + ret = i40e_dev_rx_queue_start(dev, i); + else + ret = i40e_dev_rx_queue_stop(dev, i); + if (ret != I40E_SUCCESS) + return ret; + } + + return I40E_SUCCESS; +} + +/* Switch on or off all the rx/tx queues */ +int +i40e_dev_switch_queues(struct i40e_pf *pf, bool on) +{ + int ret; + + if (on) { + /* enable rx queues before enabling tx queues */ + ret = i40e_dev_switch_rx_queues(pf, on); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to switch rx queues"); + return ret; + } + ret = i40e_dev_switch_tx_queues(pf, on); + } else { + /* Stop tx queues before stopping rx queues */ + ret = i40e_dev_switch_tx_queues(pf, on); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to switch tx queues"); + return ret; + } + ret = i40e_dev_switch_rx_queues(pf, on); + } + + return ret; +} + +/* Initialize VSI for TX */ +static int +i40e_dev_tx_init(struct i40e_pf *pf) +{ + struct rte_eth_dev_data *data = pf->dev_data; + uint16_t i; + uint32_t ret = I40E_SUCCESS; + struct i40e_tx_queue *txq; + + for (i = 0; i < data->nb_tx_queues; i++) { + txq = data->tx_queues[i]; + if (!txq || !txq->q_set) + continue; + ret = i40e_tx_queue_init(txq); + if (ret != I40E_SUCCESS) + break; + } + if (ret == I40E_SUCCESS) + i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf) + ->eth_dev); + + return ret; +} + +/* Initialize VSI for RX */ +static int +i40e_dev_rx_init(struct i40e_pf *pf) +{ + struct rte_eth_dev_data *data = pf->dev_data; + int ret = I40E_SUCCESS; + uint16_t i; + struct i40e_rx_queue *rxq; + + i40e_pf_config_mq_rx(pf); + for (i = 0; i < data->nb_rx_queues; i++) { + rxq = data->rx_queues[i]; + if (!rxq || !rxq->q_set) + continue; + + ret = i40e_rx_queue_init(rxq); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to do RX queue " + "initialization"); + break; + } + } + if (ret == I40E_SUCCESS) + i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf) + ->eth_dev); + + return ret; +} + +static int +i40e_dev_rxtx_init(struct i40e_pf *pf) +{ + int err; + + err = i40e_dev_tx_init(pf); + if (err) { + PMD_DRV_LOG(ERR, "Failed to do TX initialization"); + return err; + } + err = i40e_dev_rx_init(pf); + if (err) { + PMD_DRV_LOG(ERR, "Failed to do RX initialization"); + return err; + } + + return err; +} + +static int +i40e_vmdq_setup(struct rte_eth_dev *dev) +{ + struct rte_eth_conf *conf = &dev->data->dev_conf; + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + int i, err, conf_vsis, j, loop; + struct i40e_vsi *vsi; + struct i40e_vmdq_info *vmdq_info; + struct rte_eth_vmdq_rx_conf *vmdq_conf; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + + /* + * Disable interrupt to avoid message from VF. Furthermore, it will + * avoid race condition in VSI creation/destroy. 
+ */ + i40e_pf_disable_irq0(hw); + + if ((pf->flags & I40E_FLAG_VMDQ) == 0) { + PMD_INIT_LOG(ERR, "FW doesn't support VMDQ"); + return -ENOTSUP; + } + + conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools; + if (conf_vsis > pf->max_nb_vmdq_vsi) { + PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u", + conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools, + pf->max_nb_vmdq_vsi); + return -ENOTSUP; + } + + if (pf->vmdq != NULL) { + PMD_INIT_LOG(INFO, "VMDQ already configured"); + return 0; + } + + pf->vmdq = rte_zmalloc("vmdq_info_struct", + sizeof(*vmdq_info) * conf_vsis, 0); + + if (pf->vmdq == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate memory"); + return -ENOMEM; + } + + vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf; + + /* Create VMDQ VSI */ + for (i = 0; i < conf_vsis; i++) { + vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi, + vmdq_conf->enable_loop_back); + if (vsi == NULL) { + PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI"); + err = -1; + goto err_vsi_setup; + } + vmdq_info = &pf->vmdq[i]; + vmdq_info->pf = pf; + vmdq_info->vsi = vsi; + } + pf->nb_cfg_vmdq_vsi = conf_vsis; + + /* Configure Vlan */ + loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT; + for (i = 0; i < vmdq_conf->nb_pool_maps; i++) { + for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) { + if (vmdq_conf->pool_map[i].pools & (1UL << j)) { + PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u", + vmdq_conf->pool_map[i].vlan_id, j); + + err = i40e_vsi_add_vlan(pf->vmdq[j].vsi, + vmdq_conf->pool_map[i].vlan_id); + if (err) { + PMD_INIT_LOG(ERR, "Failed to add vlan"); + err = -1; + goto err_vsi_setup; + } + } + } + } + + i40e_pf_enable_irq0(hw); + + return 0; + +err_vsi_setup: + for (i = 0; i < conf_vsis; i++) + if (pf->vmdq[i].vsi == NULL) + break; + else + i40e_vsi_release(pf->vmdq[i].vsi); + + rte_free(pf->vmdq); + pf->vmdq = NULL; + i40e_pf_enable_irq0(hw); + return err; +} + +static void +i40e_stat_update_32(struct i40e_hw *hw, + uint32_t reg, + bool offset_loaded, + uint64_t *offset, + uint64_t *stat) +{ + uint64_t new_data; + + new_data = (uint64_t)I40E_READ_REG(hw, reg); + if (!offset_loaded) + *offset = new_data; + + if (new_data >= *offset) + *stat = (uint64_t)(new_data - *offset); + else + *stat = (uint64_t)((new_data + + ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset); +} + +static void +i40e_stat_update_48(struct i40e_hw *hw, + uint32_t hireg, + uint32_t loreg, + bool offset_loaded, + uint64_t *offset, + uint64_t *stat) +{ + uint64_t new_data; + + new_data = (uint64_t)I40E_READ_REG(hw, loreg); + new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) & + I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH; + + if (!offset_loaded) + *offset = new_data; + + if (new_data >= *offset) + *stat = new_data - *offset; + else + *stat = (uint64_t)((new_data + + ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset); + + *stat &= I40E_48_BIT_MASK; +} + +/* Disable IRQ0 */ +void +i40e_pf_disable_irq0(struct i40e_hw *hw) +{ + /* Disable all interrupt types */ + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0); + I40E_WRITE_FLUSH(hw); +} + +/* Enable IRQ0 */ +void +i40e_pf_enable_irq0(struct i40e_hw *hw) +{ + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, + I40E_PFINT_DYN_CTL0_INTENA_MASK | + I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK); + I40E_WRITE_FLUSH(hw); +} + +static void +i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue) +{ + /* read pending request and disable first */ + i40e_pf_disable_irq0(hw); + I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK); + I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0, + 
I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK); + + if (no_queue) + /* Link no queues with irq0 */ + I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0, + I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK); +} + +static void +i40e_dev_handle_vfr_event(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + int i; + uint16_t abs_vf_id; + uint32_t index, offset, val; + + if (!pf->vfs) + return; + /** + * Try to find which VF trigger a reset, use absolute VF id to access + * since the reg is global register. + */ + for (i = 0; i < pf->vf_num; i++) { + abs_vf_id = hw->func_caps.vf_base_id + i; + index = abs_vf_id / I40E_UINT32_BIT_SIZE; + offset = abs_vf_id % I40E_UINT32_BIT_SIZE; + val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index)); + /* VFR event occured */ + if (val & (0x1 << offset)) { + int ret; + + /* Clear the event first */ + I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index), + (0x1 << offset)); + PMD_DRV_LOG(INFO, "VF %u reset occured", abs_vf_id); + /** + * Only notify a VF reset event occured, + * don't trigger another SW reset + */ + ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to do VF reset"); + } + } +} + +static void +i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_arq_event_info info; + uint16_t pending, opcode; + int ret; + + info.buf_len = I40E_AQ_BUF_SZ; + info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0); + if (!info.msg_buf) { + PMD_DRV_LOG(ERR, "Failed to allocate mem"); + return; + } + + pending = 1; + while (pending) { + ret = i40e_clean_arq_element(hw, &info, &pending); + + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, " + "aq_err: %u", hw->aq.asq_last_status); + break; + } + opcode = rte_le_to_cpu_16(info.desc.opcode); + + switch (opcode) { + case i40e_aqc_opc_send_msg_to_pf: + /* Refer to i40e_aq_send_msg_to_pf() for argument layout*/ + i40e_pf_host_handle_vf_msg(dev, + rte_le_to_cpu_16(info.desc.retval), + rte_le_to_cpu_32(info.desc.cookie_high), + rte_le_to_cpu_32(info.desc.cookie_low), + info.msg_buf, + info.msg_len); + break; + default: + PMD_DRV_LOG(ERR, "Request %u is not supported yet", + opcode); + break; + } + } + rte_free(info.msg_buf); +} + +/* + * Interrupt handler is registered as the alarm callback for handling LSC + * interrupt in a definite of time, in order to wait the NIC into a stable + * state. Currently it waits 1 sec in i40e for the link up interrupt, and + * no need for link down interrupt. 
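+ * The alarm is armed from i40e_dev_interrupt_handler() via
+ * rte_eal_alarm_set(I40E_US_PER_SECOND, ...) when a link-up status
+ * change is detected.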
+ */ +static void +i40e_dev_interrupt_delayed_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t icr0; + + /* read interrupt causes again */ + icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0); + +#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER + if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK) + PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error\n"); + if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) + PMD_DRV_LOG(ERR, "ICR0: malicious programming detected\n"); + if (icr0 & I40E_PFINT_ICR0_GRST_MASK) + PMD_DRV_LOG(INFO, "ICR0: global reset requested\n"); + if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) + PMD_DRV_LOG(INFO, "ICR0: PCI exception activated\n"); + if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK) + PMD_DRV_LOG(INFO, "ICR0: a change in the storm control " + "state\n"); + if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) + PMD_DRV_LOG(ERR, "ICR0: HMC error\n"); + if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK) + PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error\n"); +#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */ + + if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { + PMD_DRV_LOG(INFO, "INT:VF reset detected\n"); + i40e_dev_handle_vfr_event(dev); + } + if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { + PMD_DRV_LOG(INFO, "INT:ADMINQ event\n"); + i40e_dev_handle_aq_msg(dev); + } + + /* handle the link up interrupt in an alarm callback */ + i40e_dev_link_update(dev, 0); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); + + i40e_pf_enable_irq0(hw); + rte_intr_enable(&(dev->pci_dev->intr_handle)); +} + +/** + * Interrupt handler triggered by NIC for handling + * a specific interrupt. + * + * @param handle + * Pointer to interrupt handle. + * @param param + * The address of parameter (struct rte_eth_dev *) registered before. 
+ * + * @return + * void + */ +static void +i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, + void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t icr0; + + /* Disable interrupt */ + i40e_pf_disable_irq0(hw); + + /* read out interrupt causes */ + icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0); + + /* No interrupt event indicated */ + if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) { + PMD_DRV_LOG(INFO, "No interrupt event"); + goto done; + } +#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER + if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK) + PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error"); + if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) + PMD_DRV_LOG(ERR, "ICR0: malicious programming detected"); + if (icr0 & I40E_PFINT_ICR0_GRST_MASK) + PMD_DRV_LOG(INFO, "ICR0: global reset requested"); + if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) + PMD_DRV_LOG(INFO, "ICR0: PCI exception activated"); + if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK) + PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state"); + if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) + PMD_DRV_LOG(ERR, "ICR0: HMC error"); + if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK) + PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error"); +#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */ + + if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { + PMD_DRV_LOG(INFO, "ICR0: VF reset detected"); + i40e_dev_handle_vfr_event(dev); + } + if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { + PMD_DRV_LOG(INFO, "ICR0: adminq event"); + i40e_dev_handle_aq_msg(dev); + } + + /* Link Status Change interrupt */ + if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) { +#define I40E_US_PER_SECOND 1000000 + struct rte_eth_link link; + + PMD_DRV_LOG(INFO, "ICR0: link status changed\n"); + memset(&link, 0, sizeof(link)); + rte_i40e_dev_atomic_read_link_status(dev, &link); + i40e_dev_link_update(dev, 0); + + /* + * For link up interrupt, it needs to wait 1 second to let the + * hardware be a stable state. Otherwise several consecutive + * interrupts can be observed. + * For link down interrupt, no need to wait. + */ + if (!link.link_status && rte_eal_alarm_set(I40E_US_PER_SECOND, + i40e_dev_interrupt_delayed_handler, (void *)dev) >= 0) + return; + else + _rte_eth_dev_callback_process(dev, + RTE_ETH_EVENT_INTR_LSC); + } + +done: + /* Enable interrupt */ + i40e_pf_enable_irq0(hw); + rte_intr_enable(&(dev->pci_dev->intr_handle)); +} + +static int +i40e_add_macvlan_filters(struct i40e_vsi *vsi, + struct i40e_macvlan_filter *filter, + int total) +{ + int ele_num, ele_buff_size; + int num, actual_num, i; + uint16_t flags; + int ret = I40E_SUCCESS; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_aqc_add_macvlan_element_data *req_list; + + if (filter == NULL || total == 0) + return I40E_ERR_PARAM; + ele_num = hw->aq.asq_buf_size / sizeof(*req_list); + ele_buff_size = hw->aq.asq_buf_size; + + req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0); + if (req_list == NULL) { + PMD_DRV_LOG(ERR, "Fail to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + num = 0; + do { + actual_num = (num + ele_num > total) ? 
(total - num) : ele_num; + memset(req_list, 0, ele_buff_size); + + for (i = 0; i < actual_num; i++) { + (void)rte_memcpy(req_list[i].mac_addr, + &filter[num + i].macaddr, ETH_ADDR_LEN); + req_list[i].vlan_tag = + rte_cpu_to_le_16(filter[num + i].vlan_id); + + switch (filter[num + i].filter_type) { + case RTE_MAC_PERFECT_MATCH: + flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH | + I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; + break; + case RTE_MACVLAN_PERFECT_MATCH: + flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; + break; + case RTE_MAC_HASH_MATCH: + flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH | + I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; + break; + case RTE_MACVLAN_HASH_MATCH: + flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH; + break; + default: + PMD_DRV_LOG(ERR, "Invalid MAC match type\n"); + ret = I40E_ERR_PARAM; + goto DONE; + } + + req_list[i].queue_number = 0; + + req_list[i].flags = rte_cpu_to_le_16(flags); + } + + ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list, + actual_num, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to add macvlan filter"); + goto DONE; + } + num += actual_num; + } while (num < total); + +DONE: + rte_free(req_list); + return ret; +} + +static int +i40e_remove_macvlan_filters(struct i40e_vsi *vsi, + struct i40e_macvlan_filter *filter, + int total) +{ + int ele_num, ele_buff_size; + int num, actual_num, i; + uint16_t flags; + int ret = I40E_SUCCESS; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_aqc_remove_macvlan_element_data *req_list; + + if (filter == NULL || total == 0) + return I40E_ERR_PARAM; + + ele_num = hw->aq.asq_buf_size / sizeof(*req_list); + ele_buff_size = hw->aq.asq_buf_size; + + req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0); + if (req_list == NULL) { + PMD_DRV_LOG(ERR, "Fail to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + num = 0; + do { + actual_num = (num + ele_num > total) ? 
(total - num) : ele_num; + memset(req_list, 0, ele_buff_size); + + for (i = 0; i < actual_num; i++) { + (void)rte_memcpy(req_list[i].mac_addr, + &filter[num + i].macaddr, ETH_ADDR_LEN); + req_list[i].vlan_tag = + rte_cpu_to_le_16(filter[num + i].vlan_id); + + switch (filter[num + i].filter_type) { + case RTE_MAC_PERFECT_MATCH: + flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | + I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; + break; + case RTE_MACVLAN_PERFECT_MATCH: + flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; + break; + case RTE_MAC_HASH_MATCH: + flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH | + I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; + break; + case RTE_MACVLAN_HASH_MATCH: + flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH; + break; + default: + PMD_DRV_LOG(ERR, "Invalid MAC filter type\n"); + ret = I40E_ERR_PARAM; + goto DONE; + } + req_list[i].flags = rte_cpu_to_le_16(flags); + } + + ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list, + actual_num, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to remove macvlan filter"); + goto DONE; + } + num += actual_num; + } while (num < total); + +DONE: + rte_free(req_list); + return ret; +} + +/* Find out specific MAC filter */ +static struct i40e_mac_filter * +i40e_find_mac_filter(struct i40e_vsi *vsi, + struct ether_addr *macaddr) +{ + struct i40e_mac_filter *f; + + TAILQ_FOREACH(f, &vsi->mac_list, next) { + if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr)) + return f; + } + + return NULL; +} + +static bool +i40e_find_vlan_filter(struct i40e_vsi *vsi, + uint16_t vlan_id) +{ + uint32_t vid_idx, vid_bit; + + if (vlan_id > ETH_VLAN_ID_MAX) + return 0; + + vid_idx = I40E_VFTA_IDX(vlan_id); + vid_bit = I40E_VFTA_BIT(vlan_id); + + if (vsi->vfta[vid_idx] & vid_bit) + return 1; + else + return 0; +} + +static void +i40e_set_vlan_filter(struct i40e_vsi *vsi, + uint16_t vlan_id, bool on) +{ + uint32_t vid_idx, vid_bit; + + if (vlan_id > ETH_VLAN_ID_MAX) + return; + + vid_idx = I40E_VFTA_IDX(vlan_id); + vid_bit = I40E_VFTA_BIT(vlan_id); + + if (on) + vsi->vfta[vid_idx] |= vid_bit; + else + vsi->vfta[vid_idx] &= ~vid_bit; +} + +/** + * Find all vlan options for specific mac addr, + * return with actual vlan found. + */ +static inline int +i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi, + struct i40e_macvlan_filter *mv_f, + int num, struct ether_addr *addr) +{ + int i; + uint32_t j, k; + + /** + * Not to use i40e_find_vlan_filter to decrease the loop time, + * although the code looks complex. 
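+ * Instead, the VFTA bitmap is scanned directly and one macvlan
+ * entry is emitted for every VLAN bit that is set.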
+ */ + if (num < vsi->vlan_num) + return I40E_ERR_PARAM; + + i = 0; + for (j = 0; j < I40E_VFTA_SIZE; j++) { + if (vsi->vfta[j]) { + for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) { + if (vsi->vfta[j] & (1 << k)) { + if (i > num - 1) { + PMD_DRV_LOG(ERR, "vlan number " + "not match"); + return I40E_ERR_PARAM; + } + (void)rte_memcpy(&mv_f[i].macaddr, + addr, ETH_ADDR_LEN); + mv_f[i].vlan_id = + j * I40E_UINT32_BIT_SIZE + k; + i++; + } + } + } + } + return I40E_SUCCESS; +} + +static inline int +i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi, + struct i40e_macvlan_filter *mv_f, + int num, + uint16_t vlan) +{ + int i = 0; + struct i40e_mac_filter *f; + + if (num < vsi->mac_num) + return I40E_ERR_PARAM; + + TAILQ_FOREACH(f, &vsi->mac_list, next) { + if (i > num - 1) { + PMD_DRV_LOG(ERR, "buffer number not match"); + return I40E_ERR_PARAM; + } + (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, + ETH_ADDR_LEN); + mv_f[i].vlan_id = vlan; + mv_f[i].filter_type = f->mac_info.filter_type; + i++; + } + + return I40E_SUCCESS; +} + +static int +i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi) +{ + int i, num; + struct i40e_mac_filter *f; + struct i40e_macvlan_filter *mv_f; + int ret = I40E_SUCCESS; + + if (vsi == NULL || vsi->mac_num == 0) + return I40E_ERR_PARAM; + + /* Case that no vlan is set */ + if (vsi->vlan_num == 0) + num = vsi->mac_num; + else + num = vsi->mac_num * vsi->vlan_num; + + mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0); + if (mv_f == NULL) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + i = 0; + if (vsi->vlan_num == 0) { + TAILQ_FOREACH(f, &vsi->mac_list, next) { + (void)rte_memcpy(&mv_f[i].macaddr, + &f->mac_info.mac_addr, ETH_ADDR_LEN); + mv_f[i].vlan_id = 0; + i++; + } + } else { + TAILQ_FOREACH(f, &vsi->mac_list, next) { + ret = i40e_find_all_vlan_for_mac(vsi,&mv_f[i], + vsi->vlan_num, &f->mac_info.mac_addr); + if (ret != I40E_SUCCESS) + goto DONE; + i += vsi->vlan_num; + } + } + + ret = i40e_remove_macvlan_filters(vsi, mv_f, num); +DONE: + rte_free(mv_f); + + return ret; +} + +int +i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan) +{ + struct i40e_macvlan_filter *mv_f; + int mac_num; + int ret = I40E_SUCCESS; + + if (!vsi || vlan > ETHER_MAX_VLAN_ID) + return I40E_ERR_PARAM; + + /* If it's already set, just return */ + if (i40e_find_vlan_filter(vsi,vlan)) + return I40E_SUCCESS; + + mac_num = vsi->mac_num; + + if (mac_num == 0) { + PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr"); + return I40E_ERR_PARAM; + } + + mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0); + + if (mv_f == NULL) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan); + + if (ret != I40E_SUCCESS) + goto DONE; + + ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num); + + if (ret != I40E_SUCCESS) + goto DONE; + + i40e_set_vlan_filter(vsi, vlan, 1); + + vsi->vlan_num++; + ret = I40E_SUCCESS; +DONE: + rte_free(mv_f); + return ret; +} + +int +i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan) +{ + struct i40e_macvlan_filter *mv_f; + int mac_num; + int ret = I40E_SUCCESS; + + /** + * Vlan 0 is the generic filter for untagged packets + * and can't be removed. + */ + if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID) + return I40E_ERR_PARAM; + + /* If can't find it, just return */ + if (!i40e_find_vlan_filter(vsi, vlan)) + return I40E_ERR_PARAM; + + mac_num = vsi->mac_num; + + if (mac_num == 0) { + PMD_DRV_LOG(ERR, "Error! 
VSI doesn't have a mac addr"); + return I40E_ERR_PARAM; + } + + mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0); + + if (mv_f == NULL) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan); + + if (ret != I40E_SUCCESS) + goto DONE; + + ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num); + + if (ret != I40E_SUCCESS) + goto DONE; + + /* This is last vlan to remove, replace all mac filter with vlan 0 */ + if (vsi->vlan_num == 1) { + ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0); + if (ret != I40E_SUCCESS) + goto DONE; + + ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num); + if (ret != I40E_SUCCESS) + goto DONE; + } + + i40e_set_vlan_filter(vsi, vlan, 0); + + vsi->vlan_num--; + ret = I40E_SUCCESS; +DONE: + rte_free(mv_f); + return ret; +} + +int +i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter) +{ + struct i40e_mac_filter *f; + struct i40e_macvlan_filter *mv_f; + int i, vlan_num = 0; + int ret = I40E_SUCCESS; + + /* If it's add and we've config it, return */ + f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr); + if (f != NULL) + return I40E_SUCCESS; + if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) || + (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) { + + /** + * If vlan_num is 0, that's the first time to add mac, + * set mask for vlan_id 0. + */ + if (vsi->vlan_num == 0) { + i40e_set_vlan_filter(vsi, 0, 1); + vsi->vlan_num = 1; + } + vlan_num = vsi->vlan_num; + } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) || + (mac_filter->filter_type == RTE_MAC_HASH_MATCH)) + vlan_num = 1; + + mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0); + if (mv_f == NULL) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + for (i = 0; i < vlan_num; i++) { + mv_f[i].filter_type = mac_filter->filter_type; + (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr, + ETH_ADDR_LEN); + } + + if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH || + mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) { + ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, + &mac_filter->mac_addr); + if (ret != I40E_SUCCESS) + goto DONE; + } + + ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num); + if (ret != I40E_SUCCESS) + goto DONE; + + /* Add the mac addr into mac list */ + f = rte_zmalloc("macv_filter", sizeof(*f), 0); + if (f == NULL) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + ret = I40E_ERR_NO_MEMORY; + goto DONE; + } + (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr, + ETH_ADDR_LEN); + f->mac_info.filter_type = mac_filter->filter_type; + TAILQ_INSERT_TAIL(&vsi->mac_list, f, next); + vsi->mac_num++; + + ret = I40E_SUCCESS; +DONE: + rte_free(mv_f); + + return ret; +} + +int +i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr) +{ + struct i40e_mac_filter *f; + struct i40e_macvlan_filter *mv_f; + int i, vlan_num; + enum rte_mac_filter_type filter_type; + int ret = I40E_SUCCESS; + + /* Can't find it, return an error */ + f = i40e_find_mac_filter(vsi, addr); + if (f == NULL) + return I40E_ERR_PARAM; + + vlan_num = vsi->vlan_num; + filter_type = f->mac_info.filter_type; + if (filter_type == RTE_MACVLAN_PERFECT_MATCH || + filter_type == RTE_MACVLAN_HASH_MATCH) { + if (vlan_num == 0) { + PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n"); + return I40E_ERR_PARAM; + } + } else if (filter_type == RTE_MAC_PERFECT_MATCH || + filter_type == RTE_MAC_HASH_MATCH) + 
vlan_num = 1; + + mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0); + if (mv_f == NULL) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + for (i = 0; i < vlan_num; i++) { + mv_f[i].filter_type = filter_type; + (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, + ETH_ADDR_LEN); + } + if (filter_type == RTE_MACVLAN_PERFECT_MATCH || + filter_type == RTE_MACVLAN_HASH_MATCH) { + ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr); + if (ret != I40E_SUCCESS) + goto DONE; + } + + ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num); + if (ret != I40E_SUCCESS) + goto DONE; + + /* Remove the mac addr into mac list */ + TAILQ_REMOVE(&vsi->mac_list, f, next); + rte_free(f); + vsi->mac_num--; + + ret = I40E_SUCCESS; +DONE: + rte_free(mv_f); + return ret; +} + +/* Configure hash enable flags for RSS */ +uint64_t +i40e_config_hena(uint64_t flags) +{ + uint64_t hena = 0; + + if (!flags) + return hena; + + if (flags & ETH_RSS_FRAG_IPV4) + hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4; + if (flags & ETH_RSS_NONFRAG_IPV4_TCP) + hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP; + if (flags & ETH_RSS_NONFRAG_IPV4_UDP) + hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + if (flags & ETH_RSS_NONFRAG_IPV4_SCTP) + hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP; + if (flags & ETH_RSS_NONFRAG_IPV4_OTHER) + hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; + if (flags & ETH_RSS_FRAG_IPV6) + hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6; + if (flags & ETH_RSS_NONFRAG_IPV6_TCP) + hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP; + if (flags & ETH_RSS_NONFRAG_IPV6_UDP) + hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP; + if (flags & ETH_RSS_NONFRAG_IPV6_SCTP) + hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP; + if (flags & ETH_RSS_NONFRAG_IPV6_OTHER) + hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER; + if (flags & ETH_RSS_L2_PAYLOAD) + hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD; + + return hena; +} + +/* Parse the hash enable flags */ +uint64_t +i40e_parse_hena(uint64_t flags) +{ + uint64_t rss_hf = 0; + + if (!flags) + return rss_hf; + if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4)) + rss_hf |= ETH_RSS_FRAG_IPV4; + if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP)) + rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; + if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP)) + rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; + if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP)) + rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP; + if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER)) + rss_hf |= ETH_RSS_NONFRAG_IPV4_OTHER; + if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6)) + rss_hf |= ETH_RSS_FRAG_IPV6; + if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP)) + rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; + if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP)) + rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; + if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP)) + rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP; + if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER)) + rss_hf |= ETH_RSS_NONFRAG_IPV6_OTHER; + if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD)) + rss_hf |= ETH_RSS_L2_PAYLOAD; + + return rss_hf; +} + +/* Disable RSS */ +static void +i40e_pf_disable_rss(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint64_t hena; + + hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); + hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; + hena &= ~I40E_RSS_HENA_ALL; + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena); + 
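/* the hash-enable (HENA) bitmap is 64 bits wide; the upper half goes to HENA(1) */ +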
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); + I40E_WRITE_FLUSH(hw); +} + +static int +i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) +{ + struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + int ret = 0; + + if (!key || key_len == 0) { + PMD_DRV_LOG(DEBUG, "No key to be configured"); + return 0; + } else if (key_len != (I40E_PFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t)) { + PMD_DRV_LOG(ERR, "Invalid key length %u", key_len); + return -EINVAL; + } + + if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { + struct i40e_aqc_get_set_rss_key_data *key_dw = + (struct i40e_aqc_get_set_rss_key_data *)key; + + ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw); + if (ret) + PMD_INIT_LOG(ERR, "Failed to configure RSS key " + "via AQ"); + } else { + uint32_t *hash_key = (uint32_t *)key; + uint16_t i; + + for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) + i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), hash_key[i]); + I40E_WRITE_FLUSH(hw); + } + + return ret; +} + +static int +i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len) +{ + struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + int ret; + + if (!key || !key_len) + return -EINVAL; + + if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { + ret = i40e_aq_get_rss_key(hw, vsi->vsi_id, + (struct i40e_aqc_get_set_rss_key_data *)key); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ"); + return ret; + } + } else { + uint32_t *key_dw = (uint32_t *)key; + uint16_t i; + + for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) + key_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); + } + *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); + + return 0; +} + +static int +i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint64_t rss_hf; + uint64_t hena; + int ret; + + ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key, + rss_conf->rss_key_len); + if (ret) + return ret; + + rss_hf = rss_conf->rss_hf; + hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); + hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; + hena &= ~I40E_RSS_HENA_ALL; + hena |= i40e_config_hena(rss_hf); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); + I40E_WRITE_FLUSH(hw); + + return 0; +} + +static int +i40e_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL; + uint64_t hena; + + hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); + hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; + if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */ + if (rss_hf != 0) /* Enable RSS */ + return -EINVAL; + return 0; /* Nothing to do */ + } + /* RSS enabled */ + if (rss_hf == 0) /* Disable RSS */ + return -EINVAL; + + return i40e_hw_rss_hash_set(pf, rss_conf); +} + +static int +i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t hena; + + i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key, + &rss_conf->rss_key_len); + + hena = (uint64_t)i40e_read_rx_ctl(hw, 
I40E_PFQF_HENA(0)); + hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; + rss_conf->rss_hf = i40e_parse_hena(hena); + + return 0; +} + +static int +i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag) +{ + switch (filter_type) { + case RTE_TUNNEL_FILTER_IMAC_IVLAN: + *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN; + break; + case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID: + *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID; + break; + case RTE_TUNNEL_FILTER_IMAC_TENID: + *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID; + break; + case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC: + *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC; + break; + case ETH_TUNNEL_FILTER_IMAC: + *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC; + break; + case ETH_TUNNEL_FILTER_OIP: + *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP; + break; + case ETH_TUNNEL_FILTER_IIP: + *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP; + break; + default: + PMD_DRV_LOG(ERR, "invalid tunnel filter type"); + return -EINVAL; + } + + return 0; +} + +static int +i40e_dev_tunnel_filter_set(struct i40e_pf *pf, + struct rte_eth_tunnel_filter_conf *tunnel_filter, + uint8_t add) +{ + uint16_t ip_type; + uint32_t ipv4_addr; + uint8_t i, tun_type = 0; + /* internal varialbe to convert ipv6 byte order */ + uint32_t convert_ipv6[4]; + int val, ret = 0; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_vsi *vsi = pf->main_vsi; + struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter; + struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter; + + cld_filter = rte_zmalloc("tunnel_filter", + sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data), + 0); + + if (NULL == cld_filter) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + return -EINVAL; + } + pfilter = cld_filter; + + ether_addr_copy(&tunnel_filter->outer_mac, (struct ether_addr*)&pfilter->outer_mac); + ether_addr_copy(&tunnel_filter->inner_mac, (struct ether_addr*)&pfilter->inner_mac); + + pfilter->inner_vlan = rte_cpu_to_le_16(tunnel_filter->inner_vlan); + if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) { + ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4; + ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr); + rte_memcpy(&pfilter->ipaddr.v4.data, + &rte_cpu_to_le_32(ipv4_addr), + sizeof(pfilter->ipaddr.v4.data)); + } else { + ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6; + for (i = 0; i < 4; i++) { + convert_ipv6[i] = + rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i])); + } + rte_memcpy(&pfilter->ipaddr.v6.data, &convert_ipv6, + sizeof(pfilter->ipaddr.v6.data)); + } + + /* check tunneled type */ + switch (tunnel_filter->tunnel_type) { + case RTE_TUNNEL_TYPE_VXLAN: + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN; + break; + case RTE_TUNNEL_TYPE_NVGRE: + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC; + break; + case RTE_TUNNEL_TYPE_IP_IN_GRE: + tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP; + break; + default: + /* Other tunnel types is not supported. 
*/ + PMD_DRV_LOG(ERR, "tunnel type is not supported."); + rte_free(cld_filter); + return -EINVAL; + } + + val = i40e_dev_get_filter_type(tunnel_filter->filter_type, + &pfilter->flags); + if (val < 0) { + rte_free(cld_filter); + return -EINVAL; + } + + pfilter->flags |= rte_cpu_to_le_16( + I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | + ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT)); + pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id); + + if (add) + ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1); + else + ret = i40e_aq_remove_cloud_filters(hw, vsi->seid, + cld_filter, 1); + + rte_free(cld_filter); + return ret; +} + +static int +i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port) +{ + uint8_t i; + + for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { + if (pf->vxlan_ports[i] == port) + return i; + } + + return -1; +} + +static int +i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port) +{ + int idx, ret; + uint8_t filter_idx; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + + idx = i40e_get_vxlan_port_idx(pf, port); + + /* Check if port already exists */ + if (idx >= 0) { + PMD_DRV_LOG(ERR, "Port %d already offloaded", port); + return -EINVAL; + } + + /* Now check if there is space to add the new port */ + idx = i40e_get_vxlan_port_idx(pf, 0); + if (idx < 0) { + PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached," + "not adding port %d", port); + return -ENOSPC; + } + + ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN, + &filter_idx, NULL); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port); + return -1; + } + + PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d", + port, filter_idx); + + /* New port: add it and mark its index in the bitmap */ + pf->vxlan_ports[idx] = port; + pf->vxlan_bitmap |= (1 << idx); + + if (!(pf->flags & I40E_FLAG_VXLAN)) + pf->flags |= I40E_FLAG_VXLAN; + + return 0; +} + +static int +i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port) +{ + int idx; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + + if (!(pf->flags & I40E_FLAG_VXLAN)) { + PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured."); + return -EINVAL; + } + + idx = i40e_get_vxlan_port_idx(pf, port); + + if (idx < 0) { + PMD_DRV_LOG(ERR, "Port %d doesn't exist", port); + return -EINVAL; + } + + if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) { + PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port); + return -1; + } + + PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d", + port, idx); + + pf->vxlan_ports[idx] = 0; + pf->vxlan_bitmap &= ~(1 << idx); + + if (!pf->vxlan_bitmap) + pf->flags &= ~I40E_FLAG_VXLAN; + + return 0; +} + +/* Add UDP tunneling port */ +static int +i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel) +{ + int ret = 0; + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (udp_tunnel == NULL) + return -EINVAL; + + switch (udp_tunnel->prot_type) { + case RTE_TUNNEL_TYPE_VXLAN: + ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port); + break; + + case RTE_TUNNEL_TYPE_GENEVE: + case RTE_TUNNEL_TYPE_TEREDO: + PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); + ret = -1; + break; + + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -1; + break; + } + + return ret; +} + +/* Remove UDP tunneling port */ +static int +i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel) +{ + int ret = 0; + struct 
i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (udp_tunnel == NULL) + return -EINVAL; + + switch (udp_tunnel->prot_type) { + case RTE_TUNNEL_TYPE_VXLAN: + ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port); + break; + case RTE_TUNNEL_TYPE_GENEVE: + case RTE_TUNNEL_TYPE_TEREDO: + PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); + ret = -1; + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -1; + break; + } + + return ret; +} + +/* Calculate the maximum number of contiguous PF queues that are configured */ +static int +i40e_pf_calc_configured_queues_num(struct i40e_pf *pf) +{ + struct rte_eth_dev_data *data = pf->dev_data; + int i, num; + struct i40e_rx_queue *rxq; + + num = 0; + for (i = 0; i < pf->lan_nb_qps; i++) { + rxq = data->rx_queues[i]; + if (rxq && rxq->q_set) + num++; + else + break; + } + + return num; +} + +/* Configure RSS */ +static int +i40e_pf_config_rss(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct rte_eth_rss_conf rss_conf; + uint32_t i, lut = 0; + uint16_t j, num; + + /* + * If both VMDQ and RSS enabled, not all of PF queues are configured. + * It's necessary to calulate the actual PF queues that are configured. + */ + if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) + num = i40e_pf_calc_configured_queues_num(pf); + else + num = pf->dev_data->nb_rx_queues; + + num = RTE_MIN(num, I40E_MAX_Q_PER_TC); + PMD_INIT_LOG(INFO, "Max of contiguous %u PF queues are configured", + num); + + if (num == 0) { + PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS"); + return -ENOTSUP; + } + + for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) { + if (j == num) + j = 0; + lut = (lut << 8) | (j & ((0x1 << + hw->func_caps.rss_table_entry_width) - 1)); + if ((i & 3) == 3) + I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut); + } + + rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf; + if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) { + i40e_pf_disable_rss(pf); + return 0; + } + if (rss_conf.rss_key == NULL || rss_conf.rss_key_len < + (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) { + /* Random default keys */ + static uint32_t rss_key_default[] = {0x6b793944, + 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8, + 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605, + 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581}; + + rss_conf.rss_key = (uint8_t *)rss_key_default; + rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t); + } + + return i40e_hw_rss_hash_set(pf, &rss_conf); +} + +static int +i40e_tunnel_filter_param_check(struct i40e_pf *pf, + struct rte_eth_tunnel_filter_conf *filter) +{ + if (pf == NULL || filter == NULL) { + PMD_DRV_LOG(ERR, "Invalid parameter"); + return -EINVAL; + } + + if (filter->queue_id >= pf->dev_data->nb_rx_queues) { + PMD_DRV_LOG(ERR, "Invalid queue ID"); + return -EINVAL; + } + + if (filter->inner_vlan > ETHER_MAX_VLAN_ID) { + PMD_DRV_LOG(ERR, "Invalid inner VLAN ID"); + return -EINVAL; + } + + if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) && + (is_zero_ether_addr(&filter->outer_mac))) { + PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address"); + return -EINVAL; + } + + if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) && + (is_zero_ether_addr(&filter->inner_mac))) { + PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address"); + return -EINVAL; + } + + return 0; +} + +#define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000 +#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4)) +static int +i40e_dev_set_gre_key_len(struct i40e_hw *hw, 
uint8_t len) +{ + uint32_t val, reg; + int ret = -EINVAL; + + val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)); + PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x\n", val); + + if (len == 3) { + reg = val | I40E_GL_PRS_FVBM_MSK_ENA; + } else if (len == 4) { + reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA; + } else { + PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len); + return ret; + } + + if (reg != val) { + ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2), + reg, NULL); + if (ret != 0) + return ret; + } else { + ret = 0; + } + PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x\n", + I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2))); + + return ret; +} + +static int +i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg) +{ + int ret = -EINVAL; + + if (!hw || !cfg) + return -EINVAL; + + switch (cfg->cfg_type) { + case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN: + ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len); + break; + default: + PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type); + break; + } + + return ret; +} + +static int +i40e_filter_ctrl_global_config(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret = I40E_ERR_PARAM; + + switch (filter_op) { + case RTE_ETH_FILTER_SET: + ret = i40e_dev_global_config_set(hw, + (struct rte_eth_global_cfg *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unknown operation %u", filter_op); + break; + } + + return ret; +} + +static int +i40e_tunnel_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct rte_eth_tunnel_filter_conf *filter; + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + int ret = I40E_SUCCESS; + + filter = (struct rte_eth_tunnel_filter_conf *)(arg); + + if (i40e_tunnel_filter_param_check(pf, filter) < 0) + return I40E_ERR_PARAM; + + switch (filter_op) { + case RTE_ETH_FILTER_NOP: + if (!(pf->flags & I40E_FLAG_VXLAN)) + ret = I40E_NOT_SUPPORTED; + break; + case RTE_ETH_FILTER_ADD: + ret = i40e_dev_tunnel_filter_set(pf, filter, 1); + break; + case RTE_ETH_FILTER_DELETE: + ret = i40e_dev_tunnel_filter_set(pf, filter, 0); + break; + default: + PMD_DRV_LOG(ERR, "unknown operation %u", filter_op); + ret = I40E_ERR_PARAM; + break; + } + + return ret; +} + +static int +i40e_pf_config_mq_rx(struct i40e_pf *pf) +{ + int ret = 0; + enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode; + + /* RSS setup */ + if (mq_mode & ETH_MQ_RX_RSS_FLAG) + ret = i40e_pf_config_rss(pf); + else + i40e_pf_disable_rss(pf); + + return ret; +} + +/* Get the symmetric hash enable configurations per port */ +static void +i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable) +{ + uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0); + + *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 
1 : 0; +} + +/* Set the symmetric hash enable configurations per port */ +static void +i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable) +{ + uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0); + + if (enable > 0) { + if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) { + PMD_DRV_LOG(INFO, "Symmetric hash has already " + "been enabled"); + return; + } + reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK; + } else { + if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) { + PMD_DRV_LOG(INFO, "Symmetric hash has already " + "been disabled"); + return; + } + reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK; + } + i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg); + I40E_WRITE_FLUSH(hw); +} + +/* + * Get global configurations of hash function type and symmetric hash enable + * per flow type (pctype). Note that global configuration means it affects all + * the ports on the same NIC. + */ +static int +i40e_get_hash_filter_global_config(struct i40e_hw *hw, + struct rte_eth_hash_global_conf *g_cfg) +{ + uint32_t reg, mask = I40E_FLOW_TYPES; + uint16_t i; + enum i40e_filter_pctype pctype; + + memset(g_cfg, 0, sizeof(*g_cfg)); + reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); + if (reg & I40E_GLQF_CTL_HTOEP_MASK) + g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ; + else + g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR; + PMD_DRV_LOG(DEBUG, "Hash function is %s", + (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR"); + + for (i = 0; mask && i < RTE_ETH_FLOW_MAX; i++) { + if (!(mask & (1UL << i))) + continue; + mask &= ~(1UL << i); + /* Bit set indicats the coresponding flow type is supported */ + g_cfg->valid_bit_mask[0] |= (1UL << i); + pctype = i40e_flowtype_to_pctype(i); + reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(pctype)); + if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) + g_cfg->sym_hash_enable_mask[0] |= (1UL << i); + } + + return 0; +} + +static int +i40e_hash_global_config_check(struct rte_eth_hash_global_conf *g_cfg) +{ + uint32_t i; + uint32_t mask0, i40e_mask = I40E_FLOW_TYPES; + + if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ && + g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR && + g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) { + PMD_DRV_LOG(ERR, "Unsupported hash function type %d", + g_cfg->hash_func); + return -EINVAL; + } + + /* + * As i40e supports less than 32 flow types, only first 32 bits need to + * be checked. + */ + mask0 = g_cfg->valid_bit_mask[0]; + for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) { + if (i == 0) { + /* Check if any unsupported flow type configured */ + if ((mask0 | i40e_mask) ^ i40e_mask) + goto mask_err; + } else { + if (g_cfg->valid_bit_mask[i]) + goto mask_err; + } + } + + return 0; + +mask_err: + PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured"); + + return -EINVAL; +} + +/* + * Set global configurations of hash function type and symmetric hash enable + * per flow type (pctype). Note any modifying global configuration will affect + * all the ports on the same NIC. + */ +static int +i40e_set_hash_filter_global_config(struct i40e_hw *hw, + struct rte_eth_hash_global_conf *g_cfg) +{ + int ret; + uint16_t i; + uint32_t reg; + uint32_t mask0 = g_cfg->valid_bit_mask[0]; + enum i40e_filter_pctype pctype; + + /* Check the input parameters */ + ret = i40e_hash_global_config_check(g_cfg); + if (ret < 0) + return ret; + + for (i = 0; mask0 && i < UINT32_BIT; i++) { + if (!(mask0 & (1UL << i))) + continue; + mask0 &= ~(1UL << i); + pctype = i40e_flowtype_to_pctype(i); + reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ? 
+ I40E_GLQF_HSYM_SYMH_ENA_MASK : 0; + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg); + } + + reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); + if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) { + /* Toeplitz */ + if (reg & I40E_GLQF_CTL_HTOEP_MASK) { + PMD_DRV_LOG(DEBUG, "Hash function already set to " + "Toeplitz"); + goto out; + } + reg |= I40E_GLQF_CTL_HTOEP_MASK; + } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) { + /* Simple XOR */ + if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) { + PMD_DRV_LOG(DEBUG, "Hash function already set to " + "Simple XOR"); + goto out; + } + reg &= ~I40E_GLQF_CTL_HTOEP_MASK; + } else + /* Use the default, and keep it as it is */ + goto out; + + i40e_write_rx_ctl(hw, I40E_GLQF_CTL, reg); + +out: + I40E_WRITE_FLUSH(hw); + + return 0; +} + +/** + * Valid input sets for hash and flow director filters per PCTYPE + */ +static uint64_t +i40e_get_valid_input_set(enum i40e_filter_pctype pctype, + enum rte_filter_type filter) +{ + uint64_t valid; + + static const uint64_t valid_hash_inset_table[] = { + [I40E_FILTER_PCTYPE_FRAG_IPV4] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC | + I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS | + I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL | + I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID | + I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS | + I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL | + I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | + I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS | + I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL | + I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | + I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS | + I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL | + I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | + I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS | + I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL | + I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_FRAG_IPV6] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC | + I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR | + I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC | + I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC | + I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC | + I40E_INSET_IPV6_FLOW | 
I40E_INSET_IPV6_NEXT_HDR | + I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC | + I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | + I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC | + I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR | + I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC | + I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | + I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS | + I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC | + I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR | + I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC | + I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | + I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT | + I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC | + I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR | + I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC | + I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID | + I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_L2_PAYLOAD] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE | + I40E_INSET_FLEX_PAYLOAD, + }; + + /** + * Flow director supports only fields defined in + * union rte_eth_fdir_flow. + */ + static const uint64_t valid_fdir_inset_table[] = { + [I40E_FILTER_PCTYPE_FRAG_IPV4] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO | + I40E_INSET_IPV4_TTL, + [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | + I40E_INSET_SCTP_VT, + [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO | + I40E_INSET_IPV4_TTL, + [I40E_FILTER_PCTYPE_FRAG_IPV6] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR | + I40E_INSET_IPV6_HOP_LIMIT, + [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER 
| + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | + I40E_INSET_SCTP_VT, + [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR | + I40E_INSET_IPV6_HOP_LIMIT, + [I40E_FILTER_PCTYPE_L2_PAYLOAD] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_LAST_ETHER_TYPE, + }; + + if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) + return 0; + if (filter == RTE_ETH_FILTER_HASH) + valid = valid_hash_inset_table[pctype]; + else + valid = valid_fdir_inset_table[pctype]; + + return valid; +} + +/** + * Validate if the input set is allowed for a specific PCTYPE + */ +static int +i40e_validate_input_set(enum i40e_filter_pctype pctype, + enum rte_filter_type filter, uint64_t inset) +{ + uint64_t valid; + + valid = i40e_get_valid_input_set(pctype, filter); + if (inset & (~valid)) + return -EINVAL; + + return 0; +} + +/* default input set fields combination per pctype */ +static uint64_t +i40e_get_default_input_set(uint16_t pctype) +{ + static const uint64_t default_inset_table[] = { + [I40E_FILTER_PCTYPE_FRAG_IPV4] = + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST, + [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | + I40E_INSET_SCTP_VT, + [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST, + [I40E_FILTER_PCTYPE_FRAG_IPV6] = + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST, + [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | + I40E_INSET_SCTP_VT, + [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST, + [I40E_FILTER_PCTYPE_L2_PAYLOAD] = + I40E_INSET_LAST_ETHER_TYPE, + }; + + if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) + return 0; + + return default_inset_table[pctype]; +} + +/** + * Parse the input set from index to logical bit masks + */ +static int +i40e_parse_input_set(uint64_t *inset, + enum i40e_filter_pctype pctype, + enum rte_eth_input_set_field *field, + uint16_t size) +{ + uint16_t i, j; + int ret = -EINVAL; + + static const struct { + enum rte_eth_input_set_field field; + uint64_t inset; + } inset_convert_table[] = { + {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE}, + {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC}, + {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC}, + {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER}, + {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER}, + {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE}, + {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC}, + {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST}, + {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS}, + {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO}, + {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL}, + {RTE_ETH_INPUT_SET_L3_SRC_IP6, 
I40E_INSET_IPV6_SRC}, + {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST}, + {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC}, + {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER, + I40E_INSET_IPV6_NEXT_HDR}, + {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS, + I40E_INSET_IPV6_HOP_LIMIT}, + {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT}, + {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT}, + {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT}, + {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT}, + {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT}, + {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT}, + {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG, + I40E_INSET_SCTP_VT}, + {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC, + I40E_INSET_TUNNEL_DMAC}, + {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN, + I40E_INSET_VLAN_TUNNEL}, + {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY, + I40E_INSET_TUNNEL_ID}, + {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID}, + {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD, + I40E_INSET_FLEX_PAYLOAD_W1}, + {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD, + I40E_INSET_FLEX_PAYLOAD_W2}, + {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD, + I40E_INSET_FLEX_PAYLOAD_W3}, + {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD, + I40E_INSET_FLEX_PAYLOAD_W4}, + {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD, + I40E_INSET_FLEX_PAYLOAD_W5}, + {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD, + I40E_INSET_FLEX_PAYLOAD_W6}, + {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD, + I40E_INSET_FLEX_PAYLOAD_W7}, + {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD, + I40E_INSET_FLEX_PAYLOAD_W8}, + }; + + if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX) + return ret; + + /* Only one item allowed for default or all */ + if (size == 1) { + if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) { + *inset = i40e_get_default_input_set(pctype); + return 0; + } else if (field[0] == RTE_ETH_INPUT_SET_NONE) { + *inset = I40E_INSET_NONE; + return 0; + } + } + + for (i = 0, *inset = 0; i < size; i++) { + for (j = 0; j < RTE_DIM(inset_convert_table); j++) { + if (field[i] == inset_convert_table[j].field) { + *inset |= inset_convert_table[j].inset; + break; + } + } + + /* It contains unsupported input set, return immediately */ + if (j == RTE_DIM(inset_convert_table)) + return ret; + } + + return 0; +} + +/** + * Translate the input set from bit masks to register aware bit masks + * and vice versa + */ +static uint64_t +i40e_translate_input_set_reg(uint64_t input) +{ + uint64_t val = 0; + uint16_t i; + + static const struct { + uint64_t inset; + uint64_t inset_reg; + } inset_map[] = { + {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC}, + {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC}, + {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN}, + {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN}, + {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE}, + {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4}, + {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4}, + {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS}, + {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO}, + {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL}, + {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6}, + {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6}, + {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC}, + {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR}, + {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT}, + {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT}, + {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT}, + {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG}, + 
{I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID}, + {I40E_INSET_TUNNEL_DMAC, + I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC}, + {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4}, + {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6}, + {I40E_INSET_TUNNEL_SRC_PORT, + I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT}, + {I40E_INSET_TUNNEL_DST_PORT, + I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT}, + {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN}, + {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1}, + {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2}, + {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3}, + {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4}, + {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5}, + {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6}, + {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7}, + {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8}, + }; + + if (input == 0) + return val; + + /* Translate input set to register aware inset */ + for (i = 0; i < RTE_DIM(inset_map); i++) { + if (input & inset_map[i].inset) + val |= inset_map[i].inset_reg; + } + + return val; +} + +static int +i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem) +{ + uint8_t i, idx = 0; + uint64_t inset_need_mask = inset; + + static const struct { + uint64_t inset; + uint32_t mask; + } inset_mask_map[] = { + {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK}, + {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0}, + {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK}, + {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK}, + {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK}, + {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0}, + {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK}, + {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK}, + }; + + if (!inset || !mask || !nb_elem) + return 0; + + for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) { + /* Clear the inset bit, if no MASK is required, + * for example proto + ttl + */ + if ((inset & inset_mask_map[i].inset) == + inset_mask_map[i].inset && inset_mask_map[i].mask == 0) + inset_need_mask &= ~inset_mask_map[i].inset; + if (!inset_need_mask) + return 0; + } + for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) { + if ((inset_need_mask & inset_mask_map[i].inset) == + inset_mask_map[i].inset) { + if (idx >= nb_elem) { + PMD_DRV_LOG(ERR, "exceed maximal number of bitmasks"); + return -EINVAL; + } + mask[idx] = inset_mask_map[i].mask; + idx++; + } + } + + return idx; +} + +static void +i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val) +{ + uint32_t reg = i40e_read_rx_ctl(hw, addr); + + PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x\n", addr, reg); + if (reg != val) + i40e_write_rx_ctl(hw, addr, val); + PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x\n", addr, + (uint32_t)i40e_read_rx_ctl(hw, addr)); +} + +static void +i40e_filter_input_set_init(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + enum i40e_filter_pctype pctype; + uint64_t input_set, inset_reg; + uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0}; + int num, i; + + for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) { + if (!I40E_VALID_PCTYPE(pctype)) + continue; + input_set = i40e_get_default_input_set(pctype); + + num = i40e_generate_inset_mask_reg(input_set, mask_reg, + I40E_INSET_MASK_NUM_REG); + if (num < 0) + return; + inset_reg = 
i40e_translate_input_set_reg(input_set); + + i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); + i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); + + for (i = 0; i < num; i++) { + i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), + mask_reg[i]); + i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), + mask_reg[i]); + } + /*clear unused mask registers of the pctype */ + for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) { + i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), + 0); + i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), + 0); + } + I40E_WRITE_FLUSH(hw); + + /* store the default input set */ + pf->hash_input_set[pctype] = input_set; + pf->fdir.input_set[pctype] = input_set; + } +} + +int +i40e_hash_filter_inset_select(struct i40e_hw *hw, + struct rte_eth_input_set_conf *conf) +{ + struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf; + enum i40e_filter_pctype pctype; + uint64_t input_set, inset_reg = 0; + uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0}; + int ret, i, num; + + if (!conf) { + PMD_DRV_LOG(ERR, "Invalid pointer"); + return -EFAULT; + } + if (conf->op != RTE_ETH_INPUT_SET_SELECT && + conf->op != RTE_ETH_INPUT_SET_ADD) { + PMD_DRV_LOG(ERR, "Unsupported input set operation"); + return -EINVAL; + } + + pctype = i40e_flowtype_to_pctype(conf->flow_type); + if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) { + PMD_DRV_LOG(ERR, "Not supported flow type (%u)", + conf->flow_type); + return -EINVAL; + } + + ret = i40e_parse_input_set(&input_set, pctype, conf->field, + conf->inset_size); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to parse input set"); + return -EINVAL; + } + if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_HASH, + input_set) != 0) { + PMD_DRV_LOG(ERR, "Invalid input set"); + return -EINVAL; + } + if (conf->op == RTE_ETH_INPUT_SET_ADD) { + /* get inset value in register */ + inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype)); + inset_reg <<= I40E_32_BIT_WIDTH; + inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype)); + input_set |= pf->hash_input_set[pctype]; + } + num = i40e_generate_inset_mask_reg(input_set, mask_reg, + I40E_INSET_MASK_NUM_REG); + if (num < 0) + return -EINVAL; + + inset_reg |= i40e_translate_input_set_reg(input_set); + + i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); + + for (i = 0; i < num; i++) + i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), + mask_reg[i]); + /*clear unused mask registers of the pctype */ + for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) + i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), + 0); + I40E_WRITE_FLUSH(hw); + + pf->hash_input_set[pctype] = input_set; + return 0; +} + +int +i40e_fdir_filter_inset_select(struct i40e_pf *pf, + struct rte_eth_input_set_conf *conf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + enum i40e_filter_pctype pctype; + uint64_t input_set, inset_reg = 0; + uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0}; + int ret, i, num; + + if (!hw || !conf) { + PMD_DRV_LOG(ERR, "Invalid pointer"); + return -EFAULT; + } + if 
(conf->op != RTE_ETH_INPUT_SET_SELECT && + conf->op != RTE_ETH_INPUT_SET_ADD) { + PMD_DRV_LOG(ERR, "Unsupported input set operation"); + return -EINVAL; + } + + pctype = i40e_flowtype_to_pctype(conf->flow_type); + if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) { + PMD_DRV_LOG(ERR, "Not supported flow type (%u)", + conf->flow_type); + return -EINVAL; + } + ret = i40e_parse_input_set(&input_set, pctype, conf->field, + conf->inset_size); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to parse input set"); + return -EINVAL; + } + if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR, + input_set) != 0) { + PMD_DRV_LOG(ERR, "Invalid input set"); + return -EINVAL; + } + + /* get inset value in register */ + inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1)); + inset_reg <<= I40E_32_BIT_WIDTH; + inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0)); + + /* Can not change the inset reg for flex payload for fdir, + * it is done by writing I40E_PRTQF_FD_FLXINSET + * in i40e_set_flex_mask_on_pctype. + */ + if (conf->op == RTE_ETH_INPUT_SET_SELECT) + inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS; + else + input_set |= pf->fdir.input_set[pctype]; + num = i40e_generate_inset_mask_reg(input_set, mask_reg, + I40E_INSET_MASK_NUM_REG); + if (num < 0) + return -EINVAL; + + inset_reg |= i40e_translate_input_set_reg(input_set); + + i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); + + for (i = 0; i < num; i++) + i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), + mask_reg[i]); + /*clear unused mask registers of the pctype */ + for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) + i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), + 0); + I40E_WRITE_FLUSH(hw); + + pf->fdir.input_set[pctype] = input_set; + return 0; +} + +static int +i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info) +{ + int ret = 0; + + if (!hw || !info) { + PMD_DRV_LOG(ERR, "Invalid pointer"); + return -EFAULT; + } + + switch (info->info_type) { + case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT: + i40e_get_symmetric_hash_enable_per_port(hw, + &(info->info.enable)); + break; + case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG: + ret = i40e_get_hash_filter_global_config(hw, + &(info->info.global_conf)); + break; + default: + PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported", + info->info_type); + ret = -EINVAL; + break; + } + + return ret; +} + +static int +i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info) +{ + int ret = 0; + + if (!hw || !info) { + PMD_DRV_LOG(ERR, "Invalid pointer"); + return -EFAULT; + } + + switch (info->info_type) { + case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT: + i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable); + break; + case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG: + ret = i40e_set_hash_filter_global_config(hw, + &(info->info.global_conf)); + break; + case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT: + ret = i40e_hash_filter_inset_select(hw, + &(info->info.input_set_conf)); + break; + + default: + PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported", + info->info_type); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Operations for hash function */ +static int +i40e_hash_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret = 0; + + 
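+	/* RTE_ETH_FILTER_GET and RTE_ETH_FILTER_SET dispatch to
+	 * i40e_hash_filter_get()/i40e_hash_filter_set(); RTE_ETH_FILTER_NOP is
+	 * accepted as a no-op and any other operation returns -ENOTSUP. */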
switch (filter_op) { + case RTE_ETH_FILTER_NOP: + break; + case RTE_ETH_FILTER_GET: + ret = i40e_hash_filter_get(hw, + (struct rte_eth_hash_filter_info *)arg); + break; + case RTE_ETH_FILTER_SET: + ret = i40e_hash_filter_set(hw, + (struct rte_eth_hash_filter_info *)arg); + break; + default: + PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported", + filter_op); + ret = -ENOTSUP; + break; + } + + return ret; +} + +/* + * Configure ethertype filter, which can director packet by filtering + * with mac address and ether_type or only ether_type + */ +static int +i40e_ethertype_filter_set(struct i40e_pf *pf, + struct rte_eth_ethertype_filter *filter, + bool add) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_control_filter_stats stats; + uint16_t flags = 0; + int ret; + + if (filter->queue >= pf->dev_data->nb_rx_queues) { + PMD_DRV_LOG(ERR, "Invalid queue ID"); + return -EINVAL; + } + if (filter->ether_type == ETHER_TYPE_IPv4 || + filter->ether_type == ETHER_TYPE_IPv6) { + PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" + " control packet filter.", filter->ether_type); + return -EINVAL; + } + if (filter->ether_type == ETHER_TYPE_VLAN) + PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is" + " not supported."); + + if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC)) + flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC; + if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) + flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP; + flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE; + + memset(&stats, 0, sizeof(stats)); + ret = i40e_aq_add_rem_control_packet_filter(hw, + filter->mac_addr.addr_bytes, + filter->ether_type, flags, + pf->main_vsi->seid, + filter->queue, add, &stats, NULL); + + PMD_DRV_LOG(INFO, "add/rem control packet filter, return %d," + " mac_etype_used = %u, etype_used = %u," + " mac_etype_free = %u, etype_free = %u\n", + ret, stats.mac_etype_used, stats.etype_used, + stats.mac_etype_free, stats.etype_free); + if (ret < 0) + return -ENOSYS; + return 0; +} + +/* + * Handle operations for ethertype filter. 
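+ * Both RTE_ETH_FILTER_ADD and RTE_ETH_FILTER_DELETE below funnel into
+ * i40e_ethertype_filter_set(); the final boolean argument selects whether
+ * the control packet filter is installed or removed via the admin queue.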
+ */ +static int +i40e_ethertype_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + int ret = 0; + + if (filter_op == RTE_ETH_FILTER_NOP) + return ret; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = i40e_ethertype_filter_set(pf, + (struct rte_eth_ethertype_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = i40e_ethertype_filter_set(pf, + (struct rte_eth_ethertype_filter *)arg, + FALSE); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op); + ret = -ENOSYS; + break; + } + return ret; +} + +static int +i40e_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + int ret = 0; + + if (dev == NULL) + return -EINVAL; + + switch (filter_type) { + case RTE_ETH_FILTER_NONE: + /* For global configuration */ + ret = i40e_filter_ctrl_global_config(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_HASH: + ret = i40e_hash_filter_ctrl(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_MACVLAN: + ret = i40e_mac_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_ETHERTYPE: + ret = i40e_ethertype_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_TUNNEL: + ret = i40e_tunnel_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_FDIR: + ret = i40e_fdir_ctrl_func(dev, filter_op, arg); + break; + default: + PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", + filter_type); + ret = -EINVAL; + break; + } + + return ret; +} + +/* + * Check and enable Extended Tag. + * Enabling Extended Tag is important for 40G performance. + */ +static void +i40e_enable_extended_tag(struct rte_eth_dev *dev) +{ + uint32_t buf = 0; + int ret; + + ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf), + PCI_DEV_CAP_REG); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", + PCI_DEV_CAP_REG); + return; + } + if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) { + PMD_DRV_LOG(ERR, "Does not support Extended Tag"); + return; + } + + buf = 0; + ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf), + PCI_DEV_CTRL_REG); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", + PCI_DEV_CTRL_REG); + return; + } + if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) { + PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled"); + return; + } + buf |= PCI_DEV_CTRL_EXT_TAG_MASK; + ret = rte_eal_pci_write_config(dev->pci_dev, &buf, sizeof(buf), + PCI_DEV_CTRL_REG); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x", + PCI_DEV_CTRL_REG); + return; + } +} + +/* + * As some registers wouldn't be reset unless a global hardware reset, + * hardware initialization is needed to put those registers into an + * expected initial state. 
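+ * Concretely, i40e_hw_init() clears the PF queue filter control register
+ * (I40E_PFQF_CTL_0), disables per-port symmetric hashing and enables PCIe
+ * Extended Tag via i40e_enable_extended_tag().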
+ */ +static void +i40e_hw_init(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + i40e_enable_extended_tag(dev); + + /* clear the PF Queue Filter control register */ + i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0); + + /* Disable symmetric hash per port */ + i40e_set_symmetric_hash_enable_per_port(hw, 0); +} + +enum i40e_filter_pctype +i40e_flowtype_to_pctype(uint16_t flow_type) +{ + static const enum i40e_filter_pctype pctype_table[] = { + [RTE_ETH_FLOW_FRAG_IPV4] = I40E_FILTER_PCTYPE_FRAG_IPV4, + [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = + I40E_FILTER_PCTYPE_NONF_IPV4_UDP, + [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = + I40E_FILTER_PCTYPE_NONF_IPV4_TCP, + [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = + I40E_FILTER_PCTYPE_NONF_IPV4_SCTP, + [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = + I40E_FILTER_PCTYPE_NONF_IPV4_OTHER, + [RTE_ETH_FLOW_FRAG_IPV6] = I40E_FILTER_PCTYPE_FRAG_IPV6, + [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = + I40E_FILTER_PCTYPE_NONF_IPV6_UDP, + [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = + I40E_FILTER_PCTYPE_NONF_IPV6_TCP, + [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = + I40E_FILTER_PCTYPE_NONF_IPV6_SCTP, + [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = + I40E_FILTER_PCTYPE_NONF_IPV6_OTHER, + [RTE_ETH_FLOW_L2_PAYLOAD] = I40E_FILTER_PCTYPE_L2_PAYLOAD, + }; + + return pctype_table[flow_type]; +} + +uint16_t +i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype) +{ + static const uint16_t flowtype_table[] = { + [I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4, + [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = + RTE_ETH_FLOW_NONFRAG_IPV4_UDP, + [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = + RTE_ETH_FLOW_NONFRAG_IPV4_TCP, + [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = + RTE_ETH_FLOW_NONFRAG_IPV4_SCTP, + [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = + RTE_ETH_FLOW_NONFRAG_IPV4_OTHER, + [I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6, + [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = + RTE_ETH_FLOW_NONFRAG_IPV6_UDP, + [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = + RTE_ETH_FLOW_NONFRAG_IPV6_TCP, + [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = + RTE_ETH_FLOW_NONFRAG_IPV6_SCTP, + [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = + RTE_ETH_FLOW_NONFRAG_IPV6_OTHER, + [I40E_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_FLOW_L2_PAYLOAD, + }; + + return flowtype_table[pctype]; +} + +/* + * On X710, performance number is far from the expectation on recent firmware + * versions; on XL710, performance number is also far from the expectation on + * recent firmware versions, if promiscuous mode is disabled, or promiscuous + * mode is enabled and port MAC address is equal to the packet destination MAC + * address. The fix for this issue may not be integrated in the following + * firmware version. So the workaround in software driver is needed. It needs + * to modify the initial values of 3 internal only registers for both X710 and + * XL710. Note that the values for X710 or XL710 could be different, and the + * workaround can be removed when it is fixed in firmware in the future. 
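+ * The three registers are GL_SWR_PRI_JOIN_MAP_0, GL_SWR_PRI_JOIN_MAP_2 and
+ * GL_SWR_PM_UP_THR; the GL_SWR_PM_UP_THR value is picked at runtime in
+ * i40e_configure_registers() based on i40e_is_40G_device(), i.e. XL710
+ * versus X710.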
+ */ + +/* For both X710 and XL710 */ +#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200 +#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00 + +#define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200 +#define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08 + +/* For X710 */ +#define I40E_GL_SWR_PM_UP_THR_EF_VALUE 0x03030303 +/* For XL710 */ +#define I40E_GL_SWR_PM_UP_THR_SF_VALUE 0x06060606 +#define I40E_GL_SWR_PM_UP_THR 0x269FBC + +static void +i40e_configure_registers(struct i40e_hw *hw) +{ + static struct { + uint32_t addr; + uint64_t val; + } reg_table[] = { + {I40E_GL_SWR_PRI_JOIN_MAP_0, I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE}, + {I40E_GL_SWR_PRI_JOIN_MAP_2, I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE}, + {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */ + }; + uint64_t reg; + uint32_t i; + int ret; + + for (i = 0; i < RTE_DIM(reg_table); i++) { + if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) { + if (i40e_is_40G_device(hw->device_id)) /* For XL710 */ + reg_table[i].val = + I40E_GL_SWR_PM_UP_THR_SF_VALUE; + else /* For X710 */ + reg_table[i].val = + I40E_GL_SWR_PM_UP_THR_EF_VALUE; + } + + ret = i40e_aq_debug_read_register(hw, reg_table[i].addr, + ®, NULL); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32, + reg_table[i].addr); + break; + } + PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64, + reg_table[i].addr, reg); + if (reg == reg_table[i].val) + continue; + + ret = i40e_aq_debug_write_register(hw, reg_table[i].addr, + reg_table[i].val, NULL); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to write 0x%"PRIx64" to the " + "address of 0x%"PRIx32, reg_table[i].val, + reg_table[i].addr); + break; + } + PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of " + "0x%"PRIx32, reg_table[i].val, reg_table[i].addr); + } +} + +#define I40E_VSI_TSR(_i) (0x00050800 + ((_i) * 4)) +#define I40E_VSI_TSR_QINQ_CONFIG 0xc030 +#define I40E_VSI_L2TAGSTXVALID(_i) (0x00042800 + ((_i) * 4)) +#define I40E_VSI_L2TAGSTXVALID_QINQ 0xab +static int +i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi) +{ + uint32_t reg; + int ret; + + if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) { + PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum"); + return -EINVAL; + } + + /* Configure for double VLAN RX stripping */ + reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id)); + if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) { + reg |= I40E_VSI_TSR_QINQ_CONFIG; + ret = i40e_aq_debug_write_register(hw, + I40E_VSI_TSR(vsi->vsi_id), + reg, NULL); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]", + vsi->vsi_id); + return I40E_ERR_CONFIG; + } + } + + /* Configure for double VLAN TX insertion */ + reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id)); + if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) { + reg = I40E_VSI_L2TAGSTXVALID_QINQ; + ret = i40e_aq_debug_write_register(hw, + I40E_VSI_L2TAGSTXVALID( + vsi->vsi_id), reg, NULL); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to update " + "VSI_L2TAGSTXVALID[%d]", vsi->vsi_id); + return I40E_ERR_CONFIG; + } + } + + return 0; +} + +/** + * i40e_aq_add_mirror_rule + * @hw: pointer to the hardware structure + * @seid: VEB seid to add mirror rule to + * @dst_id: destination vsi seid + * @entries: Buffer which contains the entities to be mirrored + * @count: number of entities contained in the buffer + * @rule_id:the rule_id of the rule to be added + * + * Add a mirror rule for a given veb. 
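+ * The entries to be mirrored (VLAN IDs or VSI SEIDs) are passed as the
+ * indirect buffer of the admin queue command; the firmware returns the
+ * assigned rule_id in the completion, which is copied back to the caller.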
+ * + **/ +static enum i40e_status_code +i40e_aq_add_mirror_rule(struct i40e_hw *hw, + uint16_t seid, uint16_t dst_id, + uint16_t rule_type, uint16_t *entries, + uint16_t count, uint16_t *rule_id) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_delete_mirror_rule cmd; + struct i40e_aqc_add_delete_mirror_rule_completion *resp = + (struct i40e_aqc_add_delete_mirror_rule_completion *) + &desc.params.raw; + uint16_t buff_len; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_add_mirror_rule); + memset(&cmd, 0, sizeof(cmd)); + + buff_len = sizeof(uint16_t) * count; + desc.datalen = rte_cpu_to_le_16(buff_len); + if (buff_len > 0) + desc.flags |= rte_cpu_to_le_16( + (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + cmd.rule_type = rte_cpu_to_le_16(rule_type << + I40E_AQC_MIRROR_RULE_TYPE_SHIFT); + cmd.num_entries = rte_cpu_to_le_16(count); + cmd.seid = rte_cpu_to_le_16(seid); + cmd.destination = rte_cpu_to_le_16(dst_id); + + rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd)); + status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL); + PMD_DRV_LOG(INFO, "i40e_aq_add_mirror_rule, aq_status %d," + "rule_id = %u" + " mirror_rules_used = %u, mirror_rules_free = %u,", + hw->aq.asq_last_status, resp->rule_id, + resp->mirror_rules_used, resp->mirror_rules_free); + *rule_id = rte_le_to_cpu_16(resp->rule_id); + + return status; +} + +/** + * i40e_aq_del_mirror_rule + * @hw: pointer to the hardware structure + * @seid: VEB seid to add mirror rule to + * @entries: Buffer which contains the entities to be mirrored + * @count: number of entities contained in the buffer + * @rule_id:the rule_id of the rule to be delete + * + * Delete a mirror rule for a given veb. + * + **/ +static enum i40e_status_code +i40e_aq_del_mirror_rule(struct i40e_hw *hw, + uint16_t seid, uint16_t rule_type, uint16_t *entries, + uint16_t count, uint16_t rule_id) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_delete_mirror_rule cmd; + uint16_t buff_len = 0; + enum i40e_status_code status; + void *buff = NULL; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_delete_mirror_rule); + memset(&cmd, 0, sizeof(cmd)); + if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { + desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF | + I40E_AQ_FLAG_RD)); + cmd.num_entries = count; + buff_len = sizeof(uint16_t) * count; + desc.datalen = rte_cpu_to_le_16(buff_len); + buff = (void *)entries; + } else + /* rule id is filled in destination field for deleting mirror rule */ + cmd.destination = rte_cpu_to_le_16(rule_id); + + cmd.rule_type = rte_cpu_to_le_16(rule_type << + I40E_AQC_MIRROR_RULE_TYPE_SHIFT); + cmd.seid = rte_cpu_to_le_16(seid); + + rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd)); + status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL); + + return status; +} + +/** + * i40e_mirror_rule_set + * @dev: pointer to the hardware structure + * @mirror_conf: mirror rule info + * @sw_id: mirror rule's sw_id + * @on: enable/disable + * + * set a mirror rule. 
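+ * When @on is zero, an existing rule with a matching sw_id is deleted;
+ * adding a rule whose sw_id already exists fails with -EEXIST. The mirror
+ * list is kept ordered by sw_id, so new rules are inserted in place.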
+ * + **/ +static int +i40e_mirror_rule_set(struct rte_eth_dev *dev, + struct rte_eth_mirror_conf *mirror_conf, + uint8_t sw_id, uint8_t on) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_mirror_rule *it, *mirr_rule = NULL; + struct i40e_mirror_rule *parent = NULL; + uint16_t seid, dst_seid, rule_id; + uint16_t i, j = 0; + int ret; + + PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id); + + if (pf->main_vsi->veb == NULL || pf->vfs == NULL) { + PMD_DRV_LOG(ERR, "mirror rule can not be configured" + " without veb or vfs."); + return -ENOSYS; + } + if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) { + PMD_DRV_LOG(ERR, "mirror table is full."); + return -ENOSPC; + } + if (mirror_conf->dst_pool > pf->vf_num) { + PMD_DRV_LOG(ERR, "invalid destination pool %u.", + mirror_conf->dst_pool); + return -EINVAL; + } + + seid = pf->main_vsi->veb->seid; + + TAILQ_FOREACH(it, &pf->mirror_list, rules) { + if (sw_id <= it->index) { + mirr_rule = it; + break; + } + parent = it; + } + if (mirr_rule && sw_id == mirr_rule->index) { + if (on) { + PMD_DRV_LOG(ERR, "mirror rule exists."); + return -EEXIST; + } else { + ret = i40e_aq_del_mirror_rule(hw, seid, + mirr_rule->rule_type, + mirr_rule->entries, + mirr_rule->num_entries, mirr_rule->id); + if (ret < 0) { + PMD_DRV_LOG(ERR, "failed to remove mirror rule:" + " ret = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); + return -ENOSYS; + } + TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules); + rte_free(mirr_rule); + pf->nb_mirror_rule--; + return 0; + } + } else if (!on) { + PMD_DRV_LOG(ERR, "mirror rule doesn't exist."); + return -ENOENT; + } + + mirr_rule = rte_zmalloc("i40e_mirror_rule", + sizeof(struct i40e_mirror_rule) , 0); + if (!mirr_rule) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + switch (mirror_conf->rule_type) { + case ETH_MIRROR_VLAN: + for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) { + if (mirror_conf->vlan.vlan_mask & (1ULL << i)) { + mirr_rule->entries[j] = + mirror_conf->vlan.vlan_id[i]; + j++; + } + } + if (j == 0) { + PMD_DRV_LOG(ERR, "vlan is not specified."); + rte_free(mirr_rule); + return -EINVAL; + } + mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN; + break; + case ETH_MIRROR_VIRTUAL_POOL_UP: + case ETH_MIRROR_VIRTUAL_POOL_DOWN: + /* check if the specified pool bit is out of range */ + if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) { + PMD_DRV_LOG(ERR, "pool mask is out of range."); + rte_free(mirr_rule); + return -EINVAL; + } + for (i = 0, j = 0; i < pf->vf_num; i++) { + if (mirror_conf->pool_mask & (1ULL << i)) { + mirr_rule->entries[j] = pf->vfs[i].vsi->seid; + j++; + } + } + if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) { + /* add pf vsi to entries */ + mirr_rule->entries[j] = pf->main_vsi_seid; + j++; + } + if (j == 0) { + PMD_DRV_LOG(ERR, "pool is not specified."); + rte_free(mirr_rule); + return -EINVAL; + } + /* egress and ingress in aq commands means from switch but not port */ + mirr_rule->rule_type = + (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ? 
+ I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS : + I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS; + break; + case ETH_MIRROR_UPLINK_PORT: + /* egress and ingress in aq commands means from switch but not port*/ + mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS; + break; + case ETH_MIRROR_DOWNLINK_PORT: + mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS; + break; + default: + PMD_DRV_LOG(ERR, "unsupported mirror type %d.", + mirror_conf->rule_type); + rte_free(mirr_rule); + return -EINVAL; + } + + /* If the dst_pool is equal to vf_num, consider it as PF */ + if (mirror_conf->dst_pool == pf->vf_num) + dst_seid = pf->main_vsi_seid; + else + dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid; + + ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid, + mirr_rule->rule_type, mirr_rule->entries, + j, &rule_id); + if (ret < 0) { + PMD_DRV_LOG(ERR, "failed to add mirror rule:" + " ret = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); + rte_free(mirr_rule); + return -ENOSYS; + } + + mirr_rule->index = sw_id; + mirr_rule->num_entries = j; + mirr_rule->id = rule_id; + mirr_rule->dst_vsi_seid = dst_seid; + + if (parent) + TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules); + else + TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules); + + pf->nb_mirror_rule++; + return 0; +} + +/** + * i40e_mirror_rule_reset + * @dev: pointer to the device + * @sw_id: mirror rule's sw_id + * + * reset a mirror rule. + * + **/ +static int +i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_mirror_rule *it, *mirr_rule = NULL; + uint16_t seid; + int ret; + + PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id); + + seid = pf->main_vsi->veb->seid; + + TAILQ_FOREACH(it, &pf->mirror_list, rules) { + if (sw_id == it->index) { + mirr_rule = it; + break; + } + } + if (mirr_rule) { + ret = i40e_aq_del_mirror_rule(hw, seid, + mirr_rule->rule_type, + mirr_rule->entries, + mirr_rule->num_entries, mirr_rule->id); + if (ret < 0) { + PMD_DRV_LOG(ERR, "failed to remove mirror rule:" + " status = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); + return -ENOSYS; + } + TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules); + rte_free(mirr_rule); + pf->nb_mirror_rule--; + } else { + PMD_DRV_LOG(ERR, "mirror rule doesn't exist."); + return -ENOENT; + } + return 0; +} + +static uint64_t +i40e_read_systime_cyclecounter(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t systim_cycles; + + systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L); + systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H) + << 32; + + return systim_cycles; +} + +static uint64_t +i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t rx_tstamp; + + rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index)); + rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index)) + << 32; + + return rx_tstamp; +} + +static uint64_t +i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t tx_tstamp; + + tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L); + tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H) + << 32; + + return tx_tstamp; +} + +static void 
+i40e_start_timecounters(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_adapter *adapter = + (struct i40e_adapter *)dev->data->dev_private; + struct rte_eth_link link; + uint32_t tsync_inc_l; + uint32_t tsync_inc_h; + + /* Get current link speed. */ + memset(&link, 0, sizeof(link)); + i40e_dev_link_update(dev, 1); + rte_i40e_dev_atomic_read_link_status(dev, &link); + + switch (link.link_speed) { + case ETH_SPEED_NUM_40G: + tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF; + tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32; + break; + case ETH_SPEED_NUM_10G: + tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF; + tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32; + break; + case ETH_SPEED_NUM_1G: + tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF; + tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32; + break; + default: + tsync_inc_l = 0x0; + tsync_inc_h = 0x0; + } + + /* Set the timesync increment value. */ + I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l); + I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h); + + memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); + memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); + memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); + + adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK; + adapter->systime_tc.cc_shift = 0; + adapter->systime_tc.nsec_mask = 0; + + adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK; + adapter->rx_tstamp_tc.cc_shift = 0; + adapter->rx_tstamp_tc.nsec_mask = 0; + + adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK; + adapter->tx_tstamp_tc.cc_shift = 0; + adapter->tx_tstamp_tc.nsec_mask = 0; +} + +static int +i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) +{ + struct i40e_adapter *adapter = + (struct i40e_adapter *)dev->data->dev_private; + + adapter->systime_tc.nsec += delta; + adapter->rx_tstamp_tc.nsec += delta; + adapter->tx_tstamp_tc.nsec += delta; + + return 0; +} + +static int +i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) +{ + uint64_t ns; + struct i40e_adapter *adapter = + (struct i40e_adapter *)dev->data->dev_private; + + ns = rte_timespec_to_ns(ts); + + /* Set the timecounters to a new value. */ + adapter->systime_tc.nsec = ns; + adapter->rx_tstamp_tc.nsec = ns; + adapter->tx_tstamp_tc.nsec = ns; + + return 0; +} + +static int +i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) +{ + uint64_t ns, systime_cycles; + struct i40e_adapter *adapter = + (struct i40e_adapter *)dev->data->dev_private; + + systime_cycles = i40e_read_systime_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); + *ts = rte_ns_to_timespec(ns); + + return 0; +} + +static int +i40e_timesync_enable(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t tsync_ctl_l; + uint32_t tsync_ctl_h; + + /* Stop the timesync system time. */ + I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0); + I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0); + /* Reset the timesync system time value. */ + I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0); + I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0); + + i40e_start_timecounters(dev); + + /* Clear timesync registers. 
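+	 * The reads below discard their results on purpose: the read itself
+	 * is what clears any previously latched timestamp values.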
*/ + I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0); + I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H); + I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0)); + I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1)); + I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2)); + I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3)); + + /* Enable timestamping of PTP packets. */ + tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0); + tsync_ctl_l |= I40E_PRTTSYN_TSYNENA; + + tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1); + tsync_ctl_h |= I40E_PRTTSYN_TSYNENA; + tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE; + + I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l); + I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h); + + return 0; +} + +static int +i40e_timesync_disable(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t tsync_ctl_l; + uint32_t tsync_ctl_h; + + /* Disable timestamping of transmitted PTP packets. */ + tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0); + tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA; + + tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1); + tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA; + + I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l); + I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h); + + /* Reset the timesync increment value. */ + I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0); + I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0); + + return 0; +} + +static int +i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp, uint32_t flags) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_adapter *adapter = + (struct i40e_adapter *)dev->data->dev_private; + + uint32_t sync_status; + uint32_t index = flags & 0x03; + uint64_t rx_tstamp_cycles; + uint64_t ns; + + sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1); + if ((sync_status & (1 << index)) == 0) + return -EINVAL; + + rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index); + ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); + *timestamp = rte_ns_to_timespec(ns); + + return 0; +} + +static int +i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_adapter *adapter = + (struct i40e_adapter *)dev->data->dev_private; + + uint32_t sync_status; + uint64_t tx_tstamp_cycles; + uint64_t ns; + + sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0); + if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0) + return -EINVAL; + + tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); + *timestamp = rte_ns_to_timespec(ns); + + return 0; +} + +/* + * i40e_parse_dcb_configure - parse dcb configure from user + * @dev: the device being configured + * @dcb_cfg: pointer of the result of parse + * @*tc_map: bit map of enabled traffic classes + * + * Returns 0 on success, negative value on failure + */ +static int +i40e_parse_dcb_configure(struct rte_eth_dev *dev, + struct i40e_dcbx_config *dcb_cfg, + uint8_t *tc_map) +{ + struct rte_eth_dcb_rx_conf *dcb_rx_conf; + uint8_t i, tc_bw, bw_lf; + + memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config)); + + dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; + if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) { + PMD_INIT_LOG(ERR, "number of tc exceeds max."); + return -EINVAL; + } + + /* assume each tc has the same bw */ + tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs; + for (i = 0; i < dcb_rx_conf->nb_tcs; 
i++) + dcb_cfg->etscfg.tcbwtable[i] = tc_bw; + /* to ensure the sum of tcbw is equal to 100 */ + bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs; + for (i = 0; i < bw_lf; i++) + dcb_cfg->etscfg.tcbwtable[i]++; + + /* assume each tc has the same Transmission Selection Algorithm */ + for (i = 0; i < dcb_rx_conf->nb_tcs; i++) + dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS; + + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) + dcb_cfg->etscfg.prioritytable[i] = + dcb_rx_conf->dcb_tc[i]; + + /* FW needs one App to configure HW */ + dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM; + dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE; + dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO; + dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE; + + if (dcb_rx_conf->nb_tcs == 0) + *tc_map = 1; /* tc0 only */ + else + *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t); + + if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) { + dcb_cfg->pfc.willing = 0; + dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; + dcb_cfg->pfc.pfcenable = *tc_map; + } + return 0; +} + + +static enum i40e_status_code +i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi, + struct i40e_aqc_vsi_properties_data *info, + uint8_t enabled_tcmap) +{ + enum i40e_status_code ret; + int i, total_tc = 0; + uint16_t qpnum_per_tc, bsf, qp_idx; + struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi); + struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); + uint16_t used_queues; + + ret = validate_tcmap_parameter(vsi, enabled_tcmap); + if (ret != I40E_SUCCESS) + return ret; + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (enabled_tcmap & (1 << i)) + total_tc++; + } + if (total_tc == 0) + total_tc = 1; + vsi->enabled_tc = enabled_tcmap; + + /* different VSI has different queues assigned */ + if (vsi->type == I40E_VSI_MAIN) + used_queues = dev_data->nb_rx_queues - + pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; + else if (vsi->type == I40E_VSI_VMDQ2) + used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; + else { + PMD_INIT_LOG(ERR, "unsupported VSI type."); + return I40E_ERR_NO_AVAILABLE_VSI; + } + + qpnum_per_tc = used_queues / total_tc; + /* Number of queues per enabled TC */ + if (qpnum_per_tc == 0) { + PMD_INIT_LOG(ERR, " number of queues is less that tcs."); + return I40E_ERR_INVALID_QP_ID; + } + qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc), + I40E_MAX_Q_PER_TC); + bsf = rte_bsf32(qpnum_per_tc); + + /** + * Configure TC and queue mapping parameters, for enabled TC, + * allocate qpnum_per_tc queues to this traffic. For disabled TC, + * default queue will serve it. 
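+	 * Each enabled TC's tc_mapping[] entry packs the first queue index
+	 * (qp_idx) into the queue-offset field and bsf (log2 of the number of
+	 * queues per TC) into the queue-number field; disabled TCs are given
+	 * a zero mapping.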
+ */ + qp_idx = 0; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (vsi->enabled_tc & (1 << i)) { + info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx << + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | + (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)); + qp_idx += qpnum_per_tc; + } else + info->tc_mapping[i] = 0; + } + + /* Associate queue number with VSI, Keep vsi->nb_qps unchanged */ + if (vsi->type == I40E_VSI_SRIOV) { + info->mapping_flags |= + rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG); + for (i = 0; i < vsi->nb_qps; i++) + info->queue_mapping[i] = + rte_cpu_to_le_16(vsi->base_queue + i); + } else { + info->mapping_flags |= + rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG); + info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue); + } + info->valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID); + + return I40E_SUCCESS; +} + +/* + * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map + * @veb: VEB to be configured + * @tc_map: enabled TC bitmap + * + * Returns 0 on success, negative value on failure + */ +static enum i40e_status_code +i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map) +{ + struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw; + struct i40e_aqc_query_switching_comp_bw_config_resp bw_query; + struct i40e_aqc_query_switching_comp_ets_config_resp ets_query; + struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi); + enum i40e_status_code ret = I40E_SUCCESS; + int i; + uint32_t bw_max; + + /* Check if enabled_tc is same as existing or new TCs */ + if (veb->enabled_tc == tc_map) + return ret; + + /* configure tc bandwidth */ + memset(&veb_bw, 0, sizeof(veb_bw)); + veb_bw.tc_valid_bits = tc_map; + /* Enable ETS TCs with equal BW Share for now across all VSIs */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (tc_map & BIT_ULL(i)) + veb_bw.tc_bw_share_credits[i] = 1; + } + ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid, + &veb_bw, NULL); + if (ret) { + PMD_INIT_LOG(ERR, "AQ command Config switch_comp BW allocation" + " per TC failed = %d", + hw->aq.asq_last_status); + return ret; + } + + memset(&ets_query, 0, sizeof(ets_query)); + ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, + &ets_query, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to get switch_comp ETS" + " configuration %u", hw->aq.asq_last_status); + return ret; + } + memset(&bw_query, 0, sizeof(bw_query)); + ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, + &bw_query, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to get switch_comp bandwidth" + " configuration %u", hw->aq.asq_last_status); + return ret; + } + + /* store and print out BW info */ + veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit); + veb->bw_info.bw_max = ets_query.tc_bw_max; + PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit); + PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max); + bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) | + (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) << + I40E_16_BIT_WIDTH); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + veb->bw_info.bw_ets_share_credits[i] = + bw_query.tc_bw_share_credits[i]; + veb->bw_info.bw_ets_credits[i] = + rte_le_to_cpu_16(bw_query.tc_bw_limits[i]); + /* 4 bits per TC, 4th bit is reserved */ + veb->bw_info.bw_ets_max[i] = + (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) & + RTE_LEN2MASK(3, uint8_t)); + PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i, + veb->bw_info.bw_ets_share_credits[i]); + PMD_DRV_LOG(DEBUG, "\tVEB 
TC%u:credits %u", i, + veb->bw_info.bw_ets_credits[i]); + PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i, + veb->bw_info.bw_ets_max[i]); + } + + veb->enabled_tc = tc_map; + + return ret; +} + + +/* + * i40e_vsi_config_tc - Configure VSI tc setting for given TC map + * @vsi: VSI to be configured + * @tc_map: enabled TC bitmap + * + * Returns 0 on success, negative value on failure + */ +static enum i40e_status_code +i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map) +{ + struct i40e_aqc_configure_vsi_tc_bw_data bw_data; + struct i40e_vsi_context ctxt; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + enum i40e_status_code ret = I40E_SUCCESS; + int i; + + /* Check if enabled_tc is same as existing or new TCs */ + if (vsi->enabled_tc == tc_map) + return ret; + + /* configure tc bandwidth */ + memset(&bw_data, 0, sizeof(bw_data)); + bw_data.tc_valid_bits = tc_map; + /* Enable ETS TCs with equal BW Share for now across all VSIs */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (tc_map & BIT_ULL(i)) + bw_data.tc_bw_credits[i] = 1; + } + ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL); + if (ret) { + PMD_INIT_LOG(ERR, "AQ command Config VSI BW allocation" + " per TC failed = %d", + hw->aq.asq_last_status); + goto out; + } + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + vsi->info.qs_handle[i] = bw_data.qs_handles[i]; + + /* Update Queue Pairs Mapping for currently enabled UPs */ + ctxt.seid = vsi->seid; + ctxt.pf_num = hw->pf_id; + ctxt.vf_num = 0; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.info = vsi->info; + i40e_get_cap(hw); + ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map); + if (ret) + goto out; + + /* Update the VSI after updating the VSI queue-mapping information */ + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to configure " + "TC queue mapping = %d", + hw->aq.asq_last_status); + goto out; + } + /* update the local VSI info with updated queue map */ + (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping, + sizeof(vsi->info.tc_mapping)); + (void)rte_memcpy(&vsi->info.queue_mapping, + &ctxt.info.queue_mapping, + sizeof(vsi->info.queue_mapping)); + vsi->info.mapping_flags = ctxt.info.mapping_flags; + vsi->info.valid_sections = 0; + + /* query and update current VSI BW information */ + ret = i40e_vsi_get_bw_config(vsi); + if (ret) { + PMD_INIT_LOG(ERR, + "Failed updating vsi bw info, err %s aq_err %s", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); + goto out; + } + + vsi->enabled_tc = tc_map; + +out: + return ret; +} + +/* + * i40e_dcb_hw_configure - program the dcb setting to hw + * @pf: pf the configuration is taken on + * @new_cfg: new configuration + * @tc_map: enabled TC bitmap + * + * Returns 0 on success, negative value on failure + */ +static enum i40e_status_code +i40e_dcb_hw_configure(struct i40e_pf *pf, + struct i40e_dcbx_config *new_cfg, + uint8_t tc_map) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config; + struct i40e_vsi *main_vsi = pf->main_vsi; + struct i40e_vsi_list *vsi_list; + enum i40e_status_code ret; + int i; + uint32_t val; + + /* Use the FW API if FW > v4.4*/ + if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) || + (hw->aq.fw_maj_ver >= 5))) { + PMD_INIT_LOG(ERR, "FW < v4.4, can not use FW LLDP API" + " to configure DCB"); + return I40E_ERR_FIRMWARE_API_VERSION; + } + + /* Check if need reconfiguration */ + if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) { + 
PMD_INIT_LOG(ERR, "No Change in DCB Config required."); + return I40E_SUCCESS; + } + + /* Copy the new config to the current config */ + *old_cfg = *new_cfg; + old_cfg->etsrec = old_cfg->etscfg; + ret = i40e_set_dcb_config(hw); + if (ret) { + PMD_INIT_LOG(ERR, + "Set DCB Config failed, err %s aq_err %s\n", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } + /* set receive Arbiter to RR mode and ETS scheme by default */ + for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) { + val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i)); + val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK | + I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK | + I40E_PRTDCB_RETSTCC_ETSTC_SHIFT); + val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] << + I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) & + I40E_PRTDCB_RETSTCC_BWSHARE_MASK; + val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) & + I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK; + val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) & + I40E_PRTDCB_RETSTCC_ETSTC_MASK; + I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val); + } + /* get local mib to check whether it is configured correctly */ + /* IEEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE; + /* Get Local DCB Config */ + i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0, + &hw->local_dcbx_config); + + /* if Veb is created, need to update TC of it at first */ + if (main_vsi->veb) { + ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map); + if (ret) + PMD_INIT_LOG(WARNING, + "Failed configuring TC for VEB seid=%d\n", + main_vsi->veb->seid); + } + /* Update each VSI */ + i40e_vsi_config_tc(main_vsi, tc_map); + if (main_vsi->veb) { + TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) { + /* Beside main VSI and VMDQ VSIs, only enable default + * TC for other VSIs + */ + if (vsi_list->vsi->type == I40E_VSI_VMDQ2) + ret = i40e_vsi_config_tc(vsi_list->vsi, + tc_map); + else + ret = i40e_vsi_config_tc(vsi_list->vsi, + I40E_DEFAULT_TCMAP); + if (ret) + PMD_INIT_LOG(WARNING, + "Failed configuring TC for VSI seid=%d\n", + vsi_list->vsi->seid); + /* continue */ + } + } + return I40E_SUCCESS; +} + +/* + * i40e_dcb_init_configure - initial dcb config + * @dev: device being configured + * @sw_dcb: indicate whether dcb is sw configured or hw offload + * + * Returns 0 on success, negative value on failure + */ +static int +i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret = 0; + + if ((pf->flags & I40E_FLAG_DCB) == 0) { + PMD_INIT_LOG(ERR, "HW doesn't support DCB"); + return -ENOTSUP; + } + + /* DCB initialization: + * Update DCB configuration from the Firmware and configure + * LLDP MIB change event. + */ + if (sw_dcb == TRUE) { + ret = i40e_aq_stop_lldp(hw, TRUE, NULL); + if (ret != I40E_SUCCESS) + PMD_INIT_LOG(DEBUG, "Failed to stop lldp"); + + ret = i40e_init_dcb(hw); + /* if sw_dcb, lldp agent is stopped, the return from + * i40e_init_dcb we expect is failure with I40E_AQ_RC_EPERM + * adminq status. 
+ */ + if (ret != I40E_SUCCESS && + hw->aq.asq_last_status == I40E_AQ_RC_EPERM) { + memset(&hw->local_dcbx_config, 0, + sizeof(struct i40e_dcbx_config)); + /* set dcb default configuration */ + hw->local_dcbx_config.etscfg.willing = 0; + hw->local_dcbx_config.etscfg.maxtcs = 0; + hw->local_dcbx_config.etscfg.tcbwtable[0] = 100; + hw->local_dcbx_config.etscfg.tsatable[0] = + I40E_IEEE_TSA_ETS; + hw->local_dcbx_config.etsrec = + hw->local_dcbx_config.etscfg; + hw->local_dcbx_config.pfc.willing = 0; + hw->local_dcbx_config.pfc.pfccap = + I40E_MAX_TRAFFIC_CLASS; + /* FW needs one App to configure HW */ + hw->local_dcbx_config.numapps = 1; + hw->local_dcbx_config.app[0].selector = + I40E_APP_SEL_ETHTYPE; + hw->local_dcbx_config.app[0].priority = 3; + hw->local_dcbx_config.app[0].protocolid = + I40E_APP_PROTOID_FCOE; + ret = i40e_set_dcb_config(hw); + if (ret) { + PMD_INIT_LOG(ERR, "default dcb config fails." + " err = %d, aq_err = %d.", ret, + hw->aq.asq_last_status); + return -ENOSYS; + } + } else { + PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d," + " aq_err = %d.", ret, + hw->aq.asq_last_status); + return -ENOTSUP; + } + } else { + ret = i40e_aq_start_lldp(hw, NULL); + if (ret != I40E_SUCCESS) + PMD_INIT_LOG(DEBUG, "Failed to start lldp"); + + ret = i40e_init_dcb(hw); + if (!ret) { + if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) { + PMD_INIT_LOG(ERR, "HW doesn't support" + " DCBX offload."); + return -ENOTSUP; + } + } else { + PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d," + " aq_err = %d.", ret, + hw->aq.asq_last_status); + return -ENOTSUP; + } + } + return 0; +} + +/* + * i40e_dcb_setup - setup dcb related config + * @dev: device being configured + * + * Returns 0 on success, negative value on failure + */ +static int +i40e_dcb_setup(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_dcbx_config dcb_cfg; + uint8_t tc_map = 0; + int ret = 0; + + if ((pf->flags & I40E_FLAG_DCB) == 0) { + PMD_INIT_LOG(ERR, "HW doesn't support DCB"); + return -ENOTSUP; + } + + if (pf->vf_num != 0) + PMD_INIT_LOG(DEBUG, " DCB only works on pf and vmdq vsis."); + + ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map); + if (ret) { + PMD_INIT_LOG(ERR, "invalid dcb config"); + return -EINVAL; + } + ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map); + if (ret) { + PMD_INIT_LOG(ERR, "dcb sw configure fails"); + return -ENOSYS; + } + + return 0; +} + +static int +i40e_dev_get_dcb_info(struct rte_eth_dev *dev, + struct rte_eth_dcb_info *dcb_info) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vsi *vsi = pf->main_vsi; + struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config; + uint16_t bsf, tc_mapping; + int i, j = 0; + + if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG) + dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1); + else + dcb_info->nb_tcs = 1; + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) + dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i]; + for (i = 0; i < dcb_info->nb_tcs; i++) + dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i]; + + /* get queue mapping if vmdq is disabled */ + if (!pf->nb_cfg_vmdq_vsi) { + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (!(vsi->enabled_tc & (1 << i))) + continue; + tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]); + dcb_info->tc_queue.tc_rxq[j][i].base = + (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >> + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT; + 
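/* The TX queue mapping mirrors the RX mapping for each enabled TC. */
+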
dcb_info->tc_queue.tc_txq[j][i].base = + dcb_info->tc_queue.tc_rxq[j][i].base; + bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >> + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT; + dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf; + dcb_info->tc_queue.tc_txq[j][i].nb_queue = + dcb_info->tc_queue.tc_rxq[j][i].nb_queue; + } + return 0; + } + + /* get queue mapping if vmdq is enabled */ + do { + vsi = pf->vmdq[j].vsi; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (!(vsi->enabled_tc & (1 << i))) + continue; + tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]); + dcb_info->tc_queue.tc_rxq[j][i].base = + (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >> + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT; + dcb_info->tc_queue.tc_txq[j][i].base = + dcb_info->tc_queue.tc_rxq[j][i].base; + bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >> + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT; + dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf; + dcb_info->tc_queue.tc_txq[j][i].nb_queue = + dcb_info->tc_queue.tc_rxq[j][i].nb_queue; + } + j++; + } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL)); + return 0; +} + +static int +i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t interval = + i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL); + uint16_t msix_intr; + + msix_intr = intr_handle->intr_vec[queue_id]; + if (msix_intr == I40E_MISC_VEC_ID) + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, + I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | + (interval << + I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)); + else + I40E_WRITE_REG(hw, + I40E_PFINT_DYN_CTLN(msix_intr - + I40E_RX_VEC_START), + I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | + (interval << + I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)); + + I40E_WRITE_FLUSH(hw); + rte_intr_enable(&dev->pci_dev->intr_handle); + + return 0; +} + +static int +i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t msix_intr; + + msix_intr = intr_handle->intr_vec[queue_id]; + if (msix_intr == I40E_MISC_VEC_ID) + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0); + else + I40E_WRITE_REG(hw, + I40E_PFINT_DYN_CTLN(msix_intr - + I40E_RX_VEC_START), + 0); + I40E_WRITE_FLUSH(hw); + + return 0; +} + +static int i40e_get_reg_length(__rte_unused struct rte_eth_dev *dev) +{ + /* Highest base addr + 32-bit word */ + return I40E_GLGEN_STAT_CLEAR + 4; +} + +static int i40e_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t *ptr_data = regs->data; + uint32_t reg_idx, arr_idx, arr_idx2, reg_offset; + const struct i40e_reg_info *reg_info; + + /* The first few registers have to be read using AQ operations */ + reg_idx = 0; + while (i40e_regs_adminq[reg_idx].name) { + reg_info = &i40e_regs_adminq[reg_idx++]; + for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++) + for (arr_idx2 = 0; + arr_idx2 <= reg_info->count2; + arr_idx2++) { + reg_offset = arr_idx * reg_info->stride1 + + arr_idx2 * reg_info->stride2; + ptr_data[reg_offset >> 2] = + i40e_read_rx_ctl(hw, reg_offset); + } + } + + /* The remaining registers can be read using 
primitives */ + reg_idx = 0; + while (i40e_regs_others[reg_idx].name) { + reg_info = &i40e_regs_others[reg_idx++]; + for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++) + for (arr_idx2 = 0; + arr_idx2 <= reg_info->count2; + arr_idx2++) { + reg_offset = arr_idx * reg_info->stride1 + + arr_idx2 * reg_info->stride2; + ptr_data[reg_offset >> 2] = + I40E_READ_REG(hw, reg_offset); + } + } + + return 0; +} + +static int i40e_get_eeprom_length(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Convert word count to byte count */ + return hw->nvm.sr_size << 1; +} + +static int i40e_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t *data = eeprom->data; + uint16_t offset, length, cnt_words; + int ret_code; + + offset = eeprom->offset >> 1; + length = eeprom->length >> 1; + cnt_words = length; + + if (offset > hw->nvm.sr_size || + offset + length > hw->nvm.sr_size) { + PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range."); + return -EINVAL; + } + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data); + if (ret_code != I40E_SUCCESS || cnt_words != length) { + PMD_DRV_LOG(ERR, "EEPROM read failed."); + return -EIO; + } + + return 0; +} + +static void i40e_set_default_mac_addr(struct rte_eth_dev *dev, + struct ether_addr *mac_addr) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!is_valid_assigned_ether_addr(mac_addr)) { + PMD_DRV_LOG(ERR, "Tried to set invalid MAC address."); + return; + } + + /* Flags: 0x3 updates port address */ + i40e_aq_mac_address_write(hw, 0x3, mac_addr->addr_bytes, NULL); +} diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h new file mode 100644 index 00000000..cfd23999 --- /dev/null +++ b/drivers/net/i40e/i40e_ethdev.h @@ -0,0 +1,703 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _I40E_ETHDEV_H_ +#define _I40E_ETHDEV_H_ + +#include <rte_eth_ctrl.h> +#include <rte_time.h> + +#define I40E_VLAN_TAG_SIZE 4 + +#define I40E_AQ_LEN 32 +#define I40E_AQ_BUF_SZ 4096 +/* Number of queues per TC should be one of 1, 2, 4, 8, 16, 32, 64 */ +#define I40E_MAX_Q_PER_TC 64 +#define I40E_NUM_DESC_DEFAULT 512 +#define I40E_NUM_DESC_ALIGN 32 +#define I40E_BUF_SIZE_MIN 1024 +#define I40E_FRAME_SIZE_MAX 9728 +#define I40E_QUEUE_BASE_ADDR_UNIT 128 +/* number of VSIs and queue default setting */ +#define I40E_MAX_QP_NUM_PER_VF 16 +#define I40E_DEFAULT_QP_NUM_FDIR 1 +#define I40E_UINT32_BIT_SIZE (CHAR_BIT * sizeof(uint32_t)) +#define I40E_VFTA_SIZE (4096 / I40E_UINT32_BIT_SIZE) +/* Maximun number of MAC addresses */ +#define I40E_NUM_MACADDR_MAX 64 + +/* + * vlan_id is a 12 bit number. + * The VFTA array is actually a 4096 bit array, 128 of 32bit elements. + * 2^5 = 32. The val of lower 5 bits specifies the bit in the 32bit element. + * The higher 7 bit val specifies VFTA array index. + */ +#define I40E_VFTA_BIT(vlan_id) (1 << ((vlan_id) & 0x1F)) +#define I40E_VFTA_IDX(vlan_id) ((vlan_id) >> 5) + +/* Default TC traffic in case DCB is not enabled */ +#define I40E_DEFAULT_TCMAP 0x1 +#define I40E_FDIR_QUEUE_ID 0 + +/* Always assign pool 0 to main VSI, VMDQ will start from 1 */ +#define I40E_VMDQ_POOL_BASE 1 + +#define I40E_DEFAULT_RX_FREE_THRESH 32 +#define I40E_DEFAULT_RX_PTHRESH 8 +#define I40E_DEFAULT_RX_HTHRESH 8 +#define I40E_DEFAULT_RX_WTHRESH 0 + +#define I40E_DEFAULT_TX_FREE_THRESH 32 +#define I40E_DEFAULT_TX_PTHRESH 32 +#define I40E_DEFAULT_TX_HTHRESH 0 +#define I40E_DEFAULT_TX_WTHRESH 0 +#define I40E_DEFAULT_TX_RSBIT_THRESH 32 + +/* Bit shift and mask */ +#define I40E_4_BIT_WIDTH (CHAR_BIT / 2) +#define I40E_4_BIT_MASK RTE_LEN2MASK(I40E_4_BIT_WIDTH, uint8_t) +#define I40E_8_BIT_WIDTH CHAR_BIT +#define I40E_8_BIT_MASK UINT8_MAX +#define I40E_16_BIT_WIDTH (CHAR_BIT * 2) +#define I40E_16_BIT_MASK UINT16_MAX +#define I40E_32_BIT_WIDTH (CHAR_BIT * 4) +#define I40E_32_BIT_MASK UINT32_MAX +#define I40E_48_BIT_WIDTH (CHAR_BIT * 6) +#define I40E_48_BIT_MASK RTE_LEN2MASK(I40E_48_BIT_WIDTH, uint64_t) + +/* Linux PF host with virtchnl version 1.1 */ +#define PF_IS_V11(vf) \ + (((vf)->version_major == I40E_VIRTCHNL_VERSION_MAJOR) && \ + ((vf)->version_minor == 1)) + +/* index flex payload per layer */ +enum i40e_flxpld_layer_idx { + I40E_FLXPLD_L2_IDX = 0, + I40E_FLXPLD_L3_IDX = 1, + I40E_FLXPLD_L4_IDX = 2, + I40E_MAX_FLXPLD_LAYER = 3, +}; +#define I40E_MAX_FLXPLD_FIED 3 /* max number of flex payload fields */ +#define I40E_FDIR_BITMASK_NUM_WORD 2 /* max number of bitmask words */ +#define I40E_FDIR_MAX_FLEXWORD_NUM 8 /* max number of flexpayload words */ +#define I40E_FDIR_MAX_FLEX_LEN 16 /* len in bytes of flex payload */ +#define I40E_INSET_MASK_NUM_REG 2 /* number of input set mask registers */ + +/* i40e flags */ +#define I40E_FLAG_RSS (1ULL << 0) +#define I40E_FLAG_DCB (1ULL << 1) +#define I40E_FLAG_VMDQ (1ULL << 2) +#define I40E_FLAG_SRIOV 
(1ULL << 3) +#define I40E_FLAG_HEADER_SPLIT_DISABLED (1ULL << 4) +#define I40E_FLAG_HEADER_SPLIT_ENABLED (1ULL << 5) +#define I40E_FLAG_FDIR (1ULL << 6) +#define I40E_FLAG_VXLAN (1ULL << 7) +#define I40E_FLAG_RSS_AQ_CAPABLE (1ULL << 8) +#define I40E_FLAG_ALL (I40E_FLAG_RSS | \ + I40E_FLAG_DCB | \ + I40E_FLAG_VMDQ | \ + I40E_FLAG_SRIOV | \ + I40E_FLAG_HEADER_SPLIT_DISABLED | \ + I40E_FLAG_HEADER_SPLIT_ENABLED | \ + I40E_FLAG_FDIR | \ + I40E_FLAG_VXLAN | \ + I40E_FLAG_RSS_AQ_CAPABLE) + +#define I40E_RSS_OFFLOAD_ALL ( \ + ETH_RSS_FRAG_IPV4 | \ + ETH_RSS_NONFRAG_IPV4_TCP | \ + ETH_RSS_NONFRAG_IPV4_UDP | \ + ETH_RSS_NONFRAG_IPV4_SCTP | \ + ETH_RSS_NONFRAG_IPV4_OTHER | \ + ETH_RSS_FRAG_IPV6 | \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + ETH_RSS_NONFRAG_IPV6_UDP | \ + ETH_RSS_NONFRAG_IPV6_SCTP | \ + ETH_RSS_NONFRAG_IPV6_OTHER | \ + ETH_RSS_L2_PAYLOAD) + +/* All bits of RSS hash enable */ +#define I40E_RSS_HENA_ALL ( \ + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ + (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ + (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6) | \ + (1ULL << I40E_FILTER_PCTYPE_FCOE_OX) | \ + (1ULL << I40E_FILTER_PCTYPE_FCOE_RX) | \ + (1ULL << I40E_FILTER_PCTYPE_FCOE_OTHER) | \ + (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD)) + +#define I40E_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET +#define I40E_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET + +/* Default queue interrupt throttling time in microseconds */ +#define I40E_ITR_INDEX_DEFAULT 0 +#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */ +#define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */ + +struct i40e_adapter; + +/** + * MAC filter structure + */ +struct i40e_mac_filter_info { + enum rte_mac_filter_type filter_type; + struct ether_addr mac_addr; +}; + +TAILQ_HEAD(i40e_mac_filter_list, i40e_mac_filter); + +/* MAC filter list structure */ +struct i40e_mac_filter { + TAILQ_ENTRY(i40e_mac_filter) next; + struct i40e_mac_filter_info mac_info; +}; + +TAILQ_HEAD(i40e_vsi_list_head, i40e_vsi_list); + +struct i40e_vsi; + +/* VSI list structure */ +struct i40e_vsi_list { + TAILQ_ENTRY(i40e_vsi_list) list; + struct i40e_vsi *vsi; +}; + +struct i40e_rx_queue; +struct i40e_tx_queue; + +/* Bandwidth limit information */ +struct i40e_bw_info { + uint16_t bw_limit; /* BW Limit (0 = disabled) */ + uint8_t bw_max; /* Max BW limit if enabled */ + + /* Relative credits within same TC with respect to other VSIs or Comps */ + uint8_t bw_ets_share_credits[I40E_MAX_TRAFFIC_CLASS]; + /* Bandwidth limit per TC */ + uint8_t bw_ets_credits[I40E_MAX_TRAFFIC_CLASS]; + /* Max bandwidth limit per TC */ + uint8_t bw_ets_max[I40E_MAX_TRAFFIC_CLASS]; +}; + +/* Structure that defines a VEB */ +struct i40e_veb { + struct i40e_vsi_list_head head; + struct i40e_vsi *associate_vsi; /* Associate VSI who owns the VEB */ + uint16_t seid; /* The seid of VEB itself */ + uint16_t uplink_seid; /* The uplink seid of this VEB */ + uint16_t stats_idx; + struct i40e_eth_stats stats; + uint8_t enabled_tc; /* The traffic class enabled */ + struct i40e_bw_info bw_info; /* VEB bandwidth information */ +}; + +/* i40e MACVLAN filter structure */ +struct i40e_macvlan_filter { + struct ether_addr macaddr; + enum rte_mac_filter_type filter_type; + uint16_t vlan_id; +}; + 
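/*
+ * Example of the VFTA addressing used by the vfta[] bitmap in
+ * struct i40e_vsi below: VLAN ID 1234 (0x4D2) lands in
+ * vfta[I40E_VFTA_IDX(1234)] = vfta[38], with bit I40E_VFTA_BIT(1234) =
+ * (1 << 18), since 1234 >> 5 = 38 and 1234 & 0x1F = 18.
+ */
+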
+/* + * Structure that defines a VSI, associated with a adapter. + */ +struct i40e_vsi { + struct i40e_adapter *adapter; /* Backreference to associated adapter */ + struct i40e_aqc_vsi_properties_data info; /* VSI properties */ + + struct i40e_eth_stats eth_stats_offset; + struct i40e_eth_stats eth_stats; + /* + * When drivers loaded, only a default main VSI exists. In case new VSI + * needs to add, HW needs to know the layout that VSIs are organized. + * Besides that, VSI isan element and can't switch packets, which needs + * to add new component VEB to perform switching. So, a new VSI needs + * to specify the the uplink VSI (Parent VSI) before created. The + * uplink VSI will check whether it had a VEB to switch packets. If no, + * it will try to create one. Then, uplink VSI will move the new VSI + * into its' sib_vsi_list to manage all the downlink VSI. + * sib_vsi_list: the VSI list that shared the same uplink VSI. + * parent_vsi : the uplink VSI. It's NULL for main VSI. + * veb : the VEB associates with the VSI. + */ + struct i40e_vsi_list sib_vsi_list; /* sibling vsi list */ + struct i40e_vsi *parent_vsi; + struct i40e_veb *veb; /* Associated veb, could be null */ + bool offset_loaded; + enum i40e_vsi_type type; /* VSI types */ + uint16_t vlan_num; /* Total VLAN number */ + uint16_t mac_num; /* Total mac number */ + uint32_t vfta[I40E_VFTA_SIZE]; /* VLAN bitmap */ + struct i40e_mac_filter_list mac_list; /* macvlan filter list */ + /* specific VSI-defined parameters, SRIOV stored the vf_id */ + uint32_t user_param; + uint16_t seid; /* The seid of VSI itself */ + uint16_t uplink_seid; /* The uplink seid of this VSI */ + uint16_t nb_qps; /* Number of queue pairs VSI can occupy */ + uint16_t nb_used_qps; /* Number of queue pairs VSI uses */ + uint16_t max_macaddrs; /* Maximum number of MAC addresses */ + uint16_t base_queue; /* The first queue index of this VSI */ + /* + * The offset to visit VSI related register, assigned by HW when + * creating VSI + */ + uint16_t vsi_id; + uint16_t msix_intr; /* The MSIX interrupt binds to VSI */ + uint16_t nb_msix; /* The max number of msix vector */ + uint8_t enabled_tc; /* The traffic class enabled */ + struct i40e_bw_info bw_info; /* VSI bandwidth information */ +}; + +struct pool_entry { + LIST_ENTRY(pool_entry) next; + uint16_t base; + uint16_t len; +}; + +LIST_HEAD(res_list, pool_entry); + +struct i40e_res_pool_info { + uint32_t base; /* Resource start index */ + uint32_t num_alloc; /* Allocated resource number */ + uint32_t num_free; /* Total available resource number */ + struct res_list alloc_list; /* Allocated resource list */ + struct res_list free_list; /* Available resource list */ +}; + +enum I40E_VF_STATE { + I40E_VF_INACTIVE = 0, + I40E_VF_INRESET, + I40E_VF_ININIT, + I40E_VF_ACTIVE, +}; + +/* + * Structure to store private data for PF host. + */ +struct i40e_pf_vf { + struct i40e_pf *pf; + struct i40e_vsi *vsi; + enum I40E_VF_STATE state; /* The number of queue pairs availiable */ + uint16_t vf_idx; /* VF index in pf->vfs */ + uint16_t lan_nb_qps; /* Actual queues allocated */ + uint16_t reset_cnt; /* Total vf reset times */ + struct ether_addr mac_addr; /* Default MAC address */ +}; + +/* + * Structure to store private data for flow control. 
+ */ +struct i40e_fc_conf { + uint16_t pause_time; /* Flow control pause timer */ + /* FC high water 0-7 for pfc and 8 for lfc unit:kilobytes */ + uint32_t high_water[I40E_MAX_TRAFFIC_CLASS + 1]; + /* FC low water 0-7 for pfc and 8 for lfc unit:kilobytes */ + uint32_t low_water[I40E_MAX_TRAFFIC_CLASS + 1]; +}; + +/* + * Structure to store private data for VMDQ instance + */ +struct i40e_vmdq_info { + struct i40e_pf *pf; + struct i40e_vsi *vsi; +}; + +/* + * Structure to store flex pit for flow diretor. + */ +struct i40e_fdir_flex_pit { + uint8_t src_offset; /* offset in words from the beginning of payload */ + uint8_t size; /* size in words */ + uint8_t dst_offset; /* offset in words of flexible payload */ +}; + +struct i40e_fdir_flex_mask { + uint8_t word_mask; /**< Bit i enables word i of flexible payload */ + struct { + uint8_t offset; + uint16_t mask; + } bitmask[I40E_FDIR_BITMASK_NUM_WORD]; +}; + +#define I40E_FILTER_PCTYPE_MAX 64 +/* + * A structure used to define fields of a FDIR related info. + */ +struct i40e_fdir_info { + struct i40e_vsi *fdir_vsi; /* pointer to fdir VSI structure */ + uint16_t match_counter_index; /* Statistic counter index used for fdir*/ + struct i40e_tx_queue *txq; + struct i40e_rx_queue *rxq; + void *prg_pkt; /* memory for fdir program packet */ + uint64_t dma_addr; /* physic address of packet memory*/ + /* input set bits for each pctype */ + uint64_t input_set[I40E_FILTER_PCTYPE_MAX]; + /* + * the rule how bytes stream is extracted as flexible payload + * for each payload layer, the setting can up to three elements + */ + struct i40e_fdir_flex_pit flex_set[I40E_MAX_FLXPLD_LAYER * I40E_MAX_FLXPLD_FIED]; + struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX]; +}; + +#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64 +#define I40E_MAX_MIRROR_RULES 64 +/* + * Mirror rule structure + */ +struct i40e_mirror_rule { + TAILQ_ENTRY(i40e_mirror_rule) rules; + uint8_t rule_type; + uint16_t index; /* the sw index of mirror rule */ + uint16_t id; /* the rule id assigned by firmware */ + uint16_t dst_vsi_seid; /* destination vsi for this mirror rule. */ + uint16_t num_entries; + /* the info stores depend on the rule type. + If type is I40E_MIRROR_TYPE_VLAN, vlan ids are stored here. + If type is I40E_MIRROR_TYPE_VPORT_*, vsi's seid are stored. + */ + uint16_t entries[I40E_MIRROR_MAX_ENTRIES_PER_RULE]; +}; + +TAILQ_HEAD(i40e_mirror_rule_list, i40e_mirror_rule); + +/* + * Structure to store private data specific for PF instance. 
+ */ +struct i40e_pf { + struct i40e_adapter *adapter; /* The adapter this PF associate to */ + struct i40e_vsi *main_vsi; /* pointer to main VSI structure */ + uint16_t mac_seid; /* The seid of the MAC of this PF */ + uint16_t main_vsi_seid; /* The seid of the main VSI */ + uint16_t max_num_vsi; + struct i40e_res_pool_info qp_pool; /*Queue pair pool */ + struct i40e_res_pool_info msix_pool; /* MSIX interrupt pool */ + + struct i40e_hw_port_stats stats_offset; + struct i40e_hw_port_stats stats; + bool offset_loaded; + + struct rte_eth_dev_data *dev_data; /* Pointer to the device data */ + struct ether_addr dev_addr; /* PF device mac address */ + uint64_t flags; /* PF feature flags */ + /* All kinds of queue pair setting for different VSIs */ + struct i40e_pf_vf *vfs; + uint16_t vf_num; + /* Each of below queue pairs should be power of 2 since it's the + precondition after TC configuration applied */ + uint16_t lan_nb_qp_max; + uint16_t lan_nb_qps; /* The number of queue pairs of LAN */ + uint16_t lan_qp_offset; + uint16_t vmdq_nb_qp_max; + uint16_t vmdq_nb_qps; /* The number of queue pairs of VMDq */ + uint16_t vmdq_qp_offset; + uint16_t vf_nb_qp_max; + uint16_t vf_nb_qps; /* The number of queue pairs of VF */ + uint16_t vf_qp_offset; + uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */ + uint16_t fdir_qp_offset; + + uint16_t hash_lut_size; /* The size of hash lookup table */ + /* input set bits for each pctype */ + uint64_t hash_input_set[I40E_FILTER_PCTYPE_MAX]; + /* store VXLAN UDP ports */ + uint16_t vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS]; + uint16_t vxlan_bitmap; /* Vxlan bit mask */ + + /* VMDQ related info */ + uint16_t max_nb_vmdq_vsi; /* Max number of VMDQ VSIs supported */ + uint16_t nb_cfg_vmdq_vsi; /* number of VMDQ VSIs configured */ + struct i40e_vmdq_info *vmdq; + + struct i40e_fdir_info fdir; /* flow director info */ + struct i40e_fc_conf fc_conf; /* Flow control conf */ + struct i40e_mirror_rule_list mirror_list; + uint16_t nb_mirror_rule; /* The number of mirror rules */ +}; + +enum pending_msg { + PFMSG_LINK_CHANGE = 0x1, + PFMSG_RESET_IMPENDING = 0x2, + PFMSG_DRIVER_CLOSE = 0x4, +}; + +struct i40e_vsi_vlan_pvid_info { + uint16_t on; /* Enable or disable pvid */ + union { + uint16_t pvid; /* Valid in case 'on' is set to set pvid */ + struct { + /* Valid in case 'on' is cleared. 'tagged' will reject tagged packets, + * while 'untagged' will reject untagged packets. + */ + uint8_t tagged; + uint8_t untagged; + } reject; + } config; +}; + +struct i40e_vf_rx_queues { + uint64_t rx_dma_addr; + uint32_t rx_ring_len; + uint32_t buff_size; +}; + +struct i40e_vf_tx_queues { + uint64_t tx_dma_addr; + uint32_t tx_ring_len; +}; + +/* + * Structure to store private data specific for VF instance. 
+ */ +struct i40e_vf { + struct i40e_adapter *adapter; /* The adapter this VF associate to */ + struct rte_eth_dev_data *dev_data; /* Pointer to the device data */ + uint16_t num_queue_pairs; + uint16_t max_pkt_len; /* Maximum packet length */ + bool promisc_unicast_enabled; + bool promisc_multicast_enabled; + + uint32_t version_major; /* Major version number */ + uint32_t version_minor; /* Minor version number */ + uint16_t promisc_flags; /* Promiscuous setting */ + uint32_t vlan[I40E_VFTA_SIZE]; /* VLAN bit map */ + + /* Event from pf */ + bool dev_closed; + bool link_up; + enum i40e_aq_link_speed link_speed; + bool vf_reset; + volatile uint32_t pend_cmd; /* pending command not finished yet */ + uint32_t cmd_retval; /* return value of the cmd response from PF */ + u16 pend_msg; /* flags indicates events from pf not handled yet */ + uint8_t *aq_resp; /* buffer to store the adminq response from PF */ + + /* VSI info */ + struct i40e_virtchnl_vf_resource *vf_res; /* All VSIs */ + struct i40e_virtchnl_vsi_resource *vsi_res; /* LAN VSI */ + struct i40e_vsi vsi; + uint64_t flags; +}; + +/* + * Structure to store private data for each PF/VF instance. + */ +struct i40e_adapter { + /* Common for both PF and VF */ + struct i40e_hw hw; + struct rte_eth_dev *eth_dev; + + /* Specific for PF or VF */ + union { + struct i40e_pf pf; + struct i40e_vf vf; + }; + + /* For vector PMD */ + bool rx_bulk_alloc_allowed; + bool rx_vec_allowed; + bool tx_simple_allowed; + bool tx_vec_allowed; + + /* For PTP */ + struct rte_timecounter systime_tc; + struct rte_timecounter rx_tstamp_tc; + struct rte_timecounter tx_tstamp_tc; +}; + +int i40e_dev_switch_queues(struct i40e_pf *pf, bool on); +int i40e_vsi_release(struct i40e_vsi *vsi); +struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, + enum i40e_vsi_type type, + struct i40e_vsi *uplink_vsi, + uint16_t user_param); +int i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on); +int i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on); +int i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan); +int i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan); +int i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *filter); +int i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr); +void i40e_update_vsi_stats(struct i40e_vsi *vsi); +void i40e_pf_disable_irq0(struct i40e_hw *hw); +void i40e_pf_enable_irq0(struct i40e_hw *hw); +int i40e_dev_link_update(struct rte_eth_dev *dev, + __rte_unused int wait_to_complete); +void i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi); +void i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi); +int i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi, + struct i40e_vsi_vlan_pvid_info *info); +int i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on); +int i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on); +uint64_t i40e_config_hena(uint64_t flags); +uint64_t i40e_parse_hena(uint64_t flags); +enum i40e_status_code i40e_fdir_setup_tx_resources(struct i40e_pf *pf); +enum i40e_status_code i40e_fdir_setup_rx_resources(struct i40e_pf *pf); +int i40e_fdir_setup(struct i40e_pf *pf); +const struct rte_memzone *i40e_memzone_reserve(const char *name, + uint32_t len, + int socket_id); +int i40e_fdir_configure(struct rte_eth_dev *dev); +void i40e_fdir_teardown(struct i40e_pf *pf); +enum i40e_filter_pctype i40e_flowtype_to_pctype(uint16_t flow_type); +uint16_t i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype); +int i40e_fdir_ctrl_func(struct rte_eth_dev *dev, + enum rte_filter_op 
filter_op, + void *arg); +int i40e_select_filter_input_set(struct i40e_hw *hw, + struct rte_eth_input_set_conf *conf, + enum rte_filter_type filter); +int i40e_hash_filter_inset_select(struct i40e_hw *hw, + struct rte_eth_input_set_conf *conf); +int i40e_fdir_filter_inset_select(struct i40e_pf *pf, + struct rte_eth_input_set_conf *conf); + +void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo); +void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo); + +/* I40E_DEV_PRIVATE_TO */ +#define I40E_DEV_PRIVATE_TO_PF(adapter) \ + (&((struct i40e_adapter *)adapter)->pf) +#define I40E_DEV_PRIVATE_TO_HW(adapter) \ + (&((struct i40e_adapter *)adapter)->hw) +#define I40E_DEV_PRIVATE_TO_ADAPTER(adapter) \ + ((struct i40e_adapter *)adapter) + +/* I40EVF_DEV_PRIVATE_TO */ +#define I40EVF_DEV_PRIVATE_TO_VF(adapter) \ + (&((struct i40e_adapter *)adapter)->vf) + +static inline struct i40e_vsi * +i40e_get_vsi_from_adapter(struct i40e_adapter *adapter) +{ + struct i40e_hw *hw; + + if (!adapter) + return NULL; + + hw = I40E_DEV_PRIVATE_TO_HW(adapter); + if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) { + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(adapter); + return &vf->vsi; + } else { + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(adapter); + return pf->main_vsi; + } +} +#define I40E_DEV_PRIVATE_TO_MAIN_VSI(adapter) \ + i40e_get_vsi_from_adapter((struct i40e_adapter *)adapter) + +/* I40E_VSI_TO */ +#define I40E_VSI_TO_HW(vsi) \ + (&(((struct i40e_vsi *)vsi)->adapter->hw)) +#define I40E_VSI_TO_PF(vsi) \ + (&(((struct i40e_vsi *)vsi)->adapter->pf)) +#define I40E_VSI_TO_VF(vsi) \ + (&(((struct i40e_vsi *)vsi)->adapter->vf)) +#define I40E_VSI_TO_DEV_DATA(vsi) \ + (((struct i40e_vsi *)vsi)->adapter->pf.dev_data) +#define I40E_VSI_TO_ETH_DEV(vsi) \ + (((struct i40e_vsi *)vsi)->adapter->eth_dev) + +/* I40E_PF_TO */ +#define I40E_PF_TO_HW(pf) \ + (&(((struct i40e_pf *)pf)->adapter->hw)) +#define I40E_PF_TO_ADAPTER(pf) \ + ((struct i40e_adapter *)pf->adapter) + +/* I40E_VF_TO */ +#define I40E_VF_TO_HW(vf) \ + (&(((struct i40e_vf *)vf)->adapter->hw)) + +static inline void +i40e_init_adminq_parameter(struct i40e_hw *hw) +{ + hw->aq.num_arq_entries = I40E_AQ_LEN; + hw->aq.num_asq_entries = I40E_AQ_LEN; + hw->aq.arq_buf_size = I40E_AQ_BUF_SZ; + hw->aq.asq_buf_size = I40E_AQ_BUF_SZ; +} + +static inline int +i40e_align_floor(int n) +{ + if (n == 0) + return 0; + return 1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n)); +} + +static inline uint16_t +i40e_calc_itr_interval(int16_t interval) +{ + if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX) + interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT; + + /* Convert to hardware count, as writing each 1 represents 2 us */ + return interval / 2; +} + +#define I40E_VALID_FLOW(flow_type) \ + ((flow_type) == RTE_ETH_FLOW_FRAG_IPV4 || \ + (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \ + (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_UDP || \ + (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP || \ + (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER || \ + (flow_type) == RTE_ETH_FLOW_FRAG_IPV6 || \ + (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_TCP || \ + (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_UDP || \ + (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP || \ + (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER || \ + (flow_type) == RTE_ETH_FLOW_L2_PAYLOAD) + +#define I40E_VALID_PCTYPE(pctype) \ + ((pctype) == I40E_FILTER_PCTYPE_FRAG_IPV4 || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_TCP 
|| \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_UDP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER || \ + (pctype) == I40E_FILTER_PCTYPE_FRAG_IPV6 || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_UDP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_TCP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER || \ + (pctype) == I40E_FILTER_PCTYPE_L2_PAYLOAD) + +#endif /* _I40E_ETHDEV_H_ */ diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c new file mode 100644 index 00000000..2bce69b3 --- /dev/null +++ b/drivers/net/i40e/i40e_ethdev_vf.c @@ -0,0 +1,2635 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <sys/queue.h> +#include <stdio.h> +#include <errno.h> +#include <stdint.h> +#include <string.h> +#include <unistd.h> +#include <stdarg.h> +#include <inttypes.h> +#include <rte_byteorder.h> +#include <rte_common.h> +#include <rte_cycles.h> + +#include <rte_interrupts.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_pci.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_memory.h> +#include <rte_memzone.h> +#include <rte_eal.h> +#include <rte_alarm.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_atomic.h> +#include <rte_malloc.h> +#include <rte_dev.h> + +#include "i40e_logs.h" +#include "base/i40e_prototype.h" +#include "base/i40e_adminq_cmd.h" +#include "base/i40e_type.h" + +#include "i40e_rxtx.h" +#include "i40e_ethdev.h" +#include "i40e_pf.h" +#define I40EVF_VSI_DEFAULT_MSIX_INTR 1 +#define I40EVF_VSI_DEFAULT_MSIX_INTR_LNX 0 + +/* busy wait delay in msec */ +#define I40EVF_BUSY_WAIT_DELAY 10 +#define I40EVF_BUSY_WAIT_COUNT 50 +#define MAX_RESET_WAIT_CNT 20 + +struct i40evf_arq_msg_info { + enum i40e_virtchnl_ops ops; + enum i40e_status_code result; + uint16_t buf_len; + uint16_t msg_len; + uint8_t *msg; +}; + +struct vf_cmd_info { + enum i40e_virtchnl_ops ops; + uint8_t *in_args; + uint32_t in_args_size; + uint8_t *out_buffer; + /* Input & output type. pass in buffer size and pass out + * actual return result + */ + uint32_t out_size; +}; + +enum i40evf_aq_result { + I40EVF_MSG_ERR = -1, /* Meet error when accessing admin queue */ + I40EVF_MSG_NON, /* Read nothing from admin queue */ + I40EVF_MSG_SYS, /* Read system msg from admin queue */ + I40EVF_MSG_CMD, /* Read async command result */ +}; + +static int i40evf_dev_configure(struct rte_eth_dev *dev); +static int i40evf_dev_start(struct rte_eth_dev *dev); +static void i40evf_dev_stop(struct rte_eth_dev *dev); +static void i40evf_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static int i40evf_dev_link_update(struct rte_eth_dev *dev, + __rte_unused int wait_to_complete); +static void i40evf_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats); +static int i40evf_dev_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstats *xstats, unsigned n); +static void i40evf_dev_xstats_reset(struct rte_eth_dev *dev); +static int i40evf_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, int on); +static void i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask); +static int i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, + int on); +static void i40evf_dev_close(struct rte_eth_dev *dev); +static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev); +static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev); +static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev); +static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev); +static int i40evf_get_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link); +static int i40evf_init_vlan(struct rte_eth_dev *dev); +static int i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, + uint16_t rx_queue_id); +static int i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, + uint16_t rx_queue_id); +static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, + uint16_t tx_queue_id); +static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, + uint16_t tx_queue_id); +static void i40evf_add_mac_addr(struct rte_eth_dev *dev, + struct ether_addr *addr, + uint32_t index, + uint32_t pool); +static void 
i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index); +static int i40evf_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static int i40evf_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static int i40evf_config_rss(struct i40e_vf *vf); +static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +static int +i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id); +static int +i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id); +static void i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev, + uint8_t *msg, + uint16_t msglen); + +/* Default hash key buffer for RSS */ +static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1]; + +struct rte_i40evf_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + unsigned offset; +}; + +static const struct rte_i40evf_xstats_name_off rte_i40evf_stats_strings[] = { + {"rx_bytes", offsetof(struct i40e_eth_stats, rx_bytes)}, + {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)}, + {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)}, + {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)}, + {"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)}, + {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats, + rx_unknown_protocol)}, + {"tx_bytes", offsetof(struct i40e_eth_stats, tx_bytes)}, + {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_bytes)}, + {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_bytes)}, + {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_bytes)}, + {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_bytes)}, + {"tx_error_packets", offsetof(struct i40e_eth_stats, tx_bytes)}, +}; + +#define I40EVF_NB_XSTATS (sizeof(rte_i40evf_stats_strings) / \ + sizeof(rte_i40evf_stats_strings[0])) + +static const struct eth_dev_ops i40evf_eth_dev_ops = { + .dev_configure = i40evf_dev_configure, + .dev_start = i40evf_dev_start, + .dev_stop = i40evf_dev_stop, + .promiscuous_enable = i40evf_dev_promiscuous_enable, + .promiscuous_disable = i40evf_dev_promiscuous_disable, + .allmulticast_enable = i40evf_dev_allmulticast_enable, + .allmulticast_disable = i40evf_dev_allmulticast_disable, + .link_update = i40evf_dev_link_update, + .stats_get = i40evf_dev_stats_get, + .xstats_get = i40evf_dev_xstats_get, + .xstats_reset = i40evf_dev_xstats_reset, + .dev_close = i40evf_dev_close, + .dev_infos_get = i40evf_dev_info_get, + .dev_supported_ptypes_get = i40e_dev_supported_ptypes_get, + .vlan_filter_set = i40evf_vlan_filter_set, + .vlan_offload_set = i40evf_vlan_offload_set, + .vlan_pvid_set = i40evf_vlan_pvid_set, + .rx_queue_start = i40evf_dev_rx_queue_start, + .rx_queue_stop = i40evf_dev_rx_queue_stop, + .tx_queue_start = i40evf_dev_tx_queue_start, + .tx_queue_stop = i40evf_dev_tx_queue_stop, + .rx_queue_setup = i40e_dev_rx_queue_setup, + .rx_queue_release = i40e_dev_rx_queue_release, + .rx_queue_intr_enable = i40evf_dev_rx_queue_intr_enable, + .rx_queue_intr_disable = i40evf_dev_rx_queue_intr_disable, + .rx_descriptor_done = i40e_dev_rx_descriptor_done, + .tx_queue_setup = i40e_dev_tx_queue_setup, + .tx_queue_release = i40e_dev_tx_queue_release, + .mac_addr_add = i40evf_add_mac_addr, + .mac_addr_remove = 
i40evf_del_mac_addr, + .reta_update = i40evf_dev_rss_reta_update, + .reta_query = i40evf_dev_rss_reta_query, + .rss_hash_update = i40evf_dev_rss_hash_update, + .rss_hash_conf_get = i40evf_dev_rss_hash_conf_get, +}; + +/* + * Read data in admin queue to get msg from pf driver + */ +static enum i40evf_aq_result +i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_arq_event_info event; + enum i40e_virtchnl_ops opcode; + enum i40e_status_code retval; + int ret; + enum i40evf_aq_result result = I40EVF_MSG_NON; + + event.buf_len = data->buf_len; + event.msg_buf = data->msg; + ret = i40e_clean_arq_element(hw, &event, NULL); + /* Can't read any msg from adminQ */ + if (ret) { + if (ret != I40E_ERR_ADMIN_QUEUE_NO_WORK) + result = I40EVF_MSG_ERR; + return result; + } + + opcode = (enum i40e_virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high); + retval = (enum i40e_status_code)rte_le_to_cpu_32(event.desc.cookie_low); + /* pf sys event */ + if (opcode == I40E_VIRTCHNL_OP_EVENT) { + struct i40e_virtchnl_pf_event *vpe = + (struct i40e_virtchnl_pf_event *)event.msg_buf; + + result = I40EVF_MSG_SYS; + switch (vpe->event) { + case I40E_VIRTCHNL_EVENT_LINK_CHANGE: + vf->link_up = + vpe->event_data.link_event.link_status; + vf->link_speed = + vpe->event_data.link_event.link_speed; + vf->pend_msg |= PFMSG_LINK_CHANGE; + PMD_DRV_LOG(INFO, "Link status update:%s", + vf->link_up ? "up" : "down"); + break; + case I40E_VIRTCHNL_EVENT_RESET_IMPENDING: + vf->vf_reset = true; + vf->pend_msg |= PFMSG_RESET_IMPENDING; + PMD_DRV_LOG(INFO, "vf is reseting"); + break; + case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE: + vf->dev_closed = true; + vf->pend_msg |= PFMSG_DRIVER_CLOSE; + PMD_DRV_LOG(INFO, "PF driver closed"); + break; + default: + PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf", + __func__, vpe->event); + } + } else { + /* async reply msg on command issued by vf previously */ + result = I40EVF_MSG_CMD; + /* Actual data length read from PF */ + data->msg_len = event.msg_len; + } + + data->result = retval; + data->ops = opcode; + + return result; +} + +/** + * clear current command. Only call in case execute + * _atomic_set_cmd successfully. + */ +static inline void +_clear_cmd(struct i40e_vf *vf) +{ + rte_wmb(); + vf->pend_cmd = I40E_VIRTCHNL_OP_UNKNOWN; +} + +/* + * Check there is pending cmd in execution. If none, set new command. 
+ */ +static inline int +_atomic_set_cmd(struct i40e_vf *vf, enum i40e_virtchnl_ops ops) +{ + int ret = rte_atomic32_cmpset(&vf->pend_cmd, + I40E_VIRTCHNL_OP_UNKNOWN, ops); + + if (!ret) + PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd); + + return !ret; +} + +#define MAX_TRY_TIMES 200 +#define ASQ_DELAY_MS 10 + +static int +i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40evf_arq_msg_info info; + enum i40evf_aq_result ret; + int err = -1; + int i = 0; + + if (_atomic_set_cmd(vf, args->ops)) + return -1; + + info.msg = args->out_buffer; + info.buf_len = args->out_size; + info.ops = I40E_VIRTCHNL_OP_UNKNOWN; + info.result = I40E_SUCCESS; + + err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS, + args->in_args, args->in_args_size, NULL); + if (err) { + PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops); + _clear_cmd(vf); + return err; + } + + switch (args->ops) { + case I40E_VIRTCHNL_OP_RESET_VF: + /*no need to process in this function */ + break; + case I40E_VIRTCHNL_OP_VERSION: + case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: + /* for init adminq commands, need to poll the response */ + do { + ret = i40evf_read_pfmsg(dev, &info); + if (ret == I40EVF_MSG_CMD) { + err = 0; + break; + } else if (ret == I40EVF_MSG_ERR) { + err = -1; + break; + } + rte_delay_ms(ASQ_DELAY_MS); + /* If don't read msg or read sys event, continue */ + } while (i++ < MAX_TRY_TIMES); + _clear_cmd(vf); + break; + + default: + /* for other adminq in running time, waiting the cmd done flag */ + do { + if (vf->pend_cmd == I40E_VIRTCHNL_OP_UNKNOWN) { + err = 0; + break; + } + rte_delay_ms(ASQ_DELAY_MS); + /* If don't read msg or read sys event, continue */ + } while (i++ < MAX_TRY_TIMES); + break; + } + + return err | vf->cmd_retval; +} + +/* + * Check API version with sync wait until version read or fail from admin queue + */ +static int +i40evf_check_api_version(struct rte_eth_dev *dev) +{ + struct i40e_virtchnl_version_info version, *pver; + int err; + struct vf_cmd_info args; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + + version.major = I40E_VIRTCHNL_VERSION_MAJOR; + version.minor = I40E_VIRTCHNL_VERSION_MINOR; + + args.ops = I40E_VIRTCHNL_OP_VERSION; + args.in_args = (uint8_t *)&version; + args.in_args_size = sizeof(version); + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + + err = i40evf_execute_vf_cmd(dev, &args); + if (err) { + PMD_INIT_LOG(ERR, "fail to execute command OP_VERSION"); + return err; + } + + pver = (struct i40e_virtchnl_version_info *)args.out_buffer; + vf->version_major = pver->major; + vf->version_minor = pver->minor; + if (vf->version_major == I40E_DPDK_VERSION_MAJOR) + PMD_DRV_LOG(INFO, "Peer is DPDK PF host"); + else if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) && + (vf->version_minor <= I40E_VIRTCHNL_VERSION_MINOR)) + PMD_DRV_LOG(INFO, "Peer is Linux PF host"); + else { + PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)", + vf->version_major, vf->version_minor, + I40E_VIRTCHNL_VERSION_MAJOR, + I40E_VIRTCHNL_VERSION_MINOR); + return -1; + } + + return 0; +} + +static int +i40evf_get_vf_resource(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + int err; + struct vf_cmd_info args; + uint32_t caps, len; + + 
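/*
+ * For a virtchnl v1.1 PF host the VF advertises the offloads it can
+ * handle (L2, RSS via the AQ or via registers, VLAN and RX polling);
+ * older hosts take no capability payload and simply report their
+ * defaults.
+ */
+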
args.ops = I40E_VIRTCHNL_OP_GET_VF_RESOURCES; + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + if (PF_IS_V11(vf)) { + caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 | + I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | + I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG | + I40E_VIRTCHNL_VF_OFFLOAD_VLAN | + I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING; + args.in_args = (uint8_t *)&caps; + args.in_args_size = sizeof(caps); + } else { + args.in_args = NULL; + args.in_args_size = 0; + } + err = i40evf_execute_vf_cmd(dev, &args); + + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command OP_GET_VF_RESOURCE"); + return err; + } + + len = sizeof(struct i40e_virtchnl_vf_resource) + + I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource); + + (void)rte_memcpy(vf->vf_res, args.out_buffer, + RTE_MIN(args.out_size, len)); + i40e_vf_parse_hw_config(hw, vf->vf_res); + + return 0; +} + +static int +i40evf_config_promisc(struct rte_eth_dev *dev, + bool enable_unicast, + bool enable_multicast) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + int err; + struct vf_cmd_info args; + struct i40e_virtchnl_promisc_info promisc; + + promisc.flags = 0; + promisc.vsi_id = vf->vsi_res->vsi_id; + + if (enable_unicast) + promisc.flags |= I40E_FLAG_VF_UNICAST_PROMISC; + + if (enable_multicast) + promisc.flags |= I40E_FLAG_VF_MULTICAST_PROMISC; + + args.ops = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; + args.in_args = (uint8_t *)&promisc; + args.in_args_size = sizeof(promisc); + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + + err = i40evf_execute_vf_cmd(dev, &args); + + if (err) + PMD_DRV_LOG(ERR, "fail to execute command " + "CONFIG_PROMISCUOUS_MODE"); + return err; +} + +/* Configure vlan and double vlan offload. Use flag to specify which part to configure */ +static int +i40evf_config_vlan_offload(struct rte_eth_dev *dev, + bool enable_vlan_strip) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + int err; + struct vf_cmd_info args; + struct i40e_virtchnl_vlan_offload_info offload; + + offload.vsi_id = vf->vsi_res->vsi_id; + offload.enable_vlan_strip = enable_vlan_strip; + + args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD; + args.in_args = (uint8_t *)&offload; + args.in_args_size = sizeof(offload); + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + + err = i40evf_execute_vf_cmd(dev, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_OFFLOAD"); + + return err; +} + +static int +i40evf_config_vlan_pvid(struct rte_eth_dev *dev, + struct i40e_vsi_vlan_pvid_info *info) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + int err; + struct vf_cmd_info args; + struct i40e_virtchnl_pvid_info tpid_info; + + if (dev == NULL || info == NULL) { + PMD_DRV_LOG(ERR, "invalid parameters"); + return I40E_ERR_PARAM; + } + + memset(&tpid_info, 0, sizeof(tpid_info)); + tpid_info.vsi_id = vf->vsi_res->vsi_id; + (void)rte_memcpy(&tpid_info.info, info, sizeof(*info)); + + args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_PVID; + args.in_args = (uint8_t *)&tpid_info; + args.in_args_size = sizeof(tpid_info); + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + + err = i40evf_execute_vf_cmd(dev, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_PVID"); + + return err; +} + +static void +i40evf_fill_virtchnl_vsi_txq_info(struct i40e_virtchnl_txq_info *txq_info, + uint16_t vsi_id, + uint16_t queue_id, + uint16_t nb_txq, + struct i40e_tx_queue *txq) +{ + txq_info->vsi_id
= vsi_id; + txq_info->queue_id = queue_id; + if (queue_id < nb_txq) { + txq_info->ring_len = txq->nb_tx_desc; + txq_info->dma_ring_addr = txq->tx_ring_phys_addr; + } +} + +static void +i40evf_fill_virtchnl_vsi_rxq_info(struct i40e_virtchnl_rxq_info *rxq_info, + uint16_t vsi_id, + uint16_t queue_id, + uint16_t nb_rxq, + uint32_t max_pkt_size, + struct i40e_rx_queue *rxq) +{ + rxq_info->vsi_id = vsi_id; + rxq_info->queue_id = queue_id; + rxq_info->max_pkt_size = max_pkt_size; + if (queue_id < nb_rxq) { + rxq_info->ring_len = rxq->nb_rx_desc; + rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr; + rxq_info->databuffer_size = + (rte_pktmbuf_data_room_size(rxq->mp) - + RTE_PKTMBUF_HEADROOM); + } +} + +/* It configures VSI queues to co-work with Linux PF host */ +static int +i40evf_configure_vsi_queues(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_rx_queue **rxq = + (struct i40e_rx_queue **)dev->data->rx_queues; + struct i40e_tx_queue **txq = + (struct i40e_tx_queue **)dev->data->tx_queues; + struct i40e_virtchnl_vsi_queue_config_info *vc_vqci; + struct i40e_virtchnl_queue_pair_info *vc_qpi; + struct vf_cmd_info args; + uint16_t i, nb_qp = vf->num_queue_pairs; + const uint32_t size = + I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci, nb_qp); + uint8_t buff[size]; + int ret; + + memset(buff, 0, sizeof(buff)); + vc_vqci = (struct i40e_virtchnl_vsi_queue_config_info *)buff; + vc_vqci->vsi_id = vf->vsi_res->vsi_id; + vc_vqci->num_queue_pairs = nb_qp; + + for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) { + i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq, + vc_vqci->vsi_id, i, dev->data->nb_tx_queues, txq[i]); + i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq, + vc_vqci->vsi_id, i, dev->data->nb_rx_queues, + vf->max_pkt_len, rxq[i]); + } + memset(&args, 0, sizeof(args)); + args.ops = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES; + args.in_args = (uint8_t *)vc_vqci; + args.in_args_size = size; + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + ret = i40evf_execute_vf_cmd(dev, &args); + if (ret) + PMD_DRV_LOG(ERR, "Failed to execute command of " + "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES\n"); + + return ret; +} + +/* It configures VSI queues to co-work with DPDK PF host */ +static int +i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_rx_queue **rxq = + (struct i40e_rx_queue **)dev->data->rx_queues; + struct i40e_tx_queue **txq = + (struct i40e_tx_queue **)dev->data->tx_queues; + struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei; + struct i40e_virtchnl_queue_pair_ext_info *vc_qpei; + struct vf_cmd_info args; + uint16_t i, nb_qp = vf->num_queue_pairs; + const uint32_t size = + I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei, nb_qp); + uint8_t buff[size]; + int ret; + + memset(buff, 0, sizeof(buff)); + vc_vqcei = (struct i40e_virtchnl_vsi_queue_config_ext_info *)buff; + vc_vqcei->vsi_id = vf->vsi_res->vsi_id; + vc_vqcei->num_queue_pairs = nb_qp; + vc_qpei = vc_vqcei->qpair; + for (i = 0; i < nb_qp; i++, vc_qpei++) { + i40evf_fill_virtchnl_vsi_txq_info(&vc_qpei->txq, + vc_vqcei->vsi_id, i, dev->data->nb_tx_queues, txq[i]); + i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpei->rxq, + vc_vqcei->vsi_id, i, dev->data->nb_rx_queues, + vf->max_pkt_len, rxq[i]); + if (i < dev->data->nb_rx_queues) + /* + * It adds extra info for configuring VSI queues, which + * is needed to enable the configurable crc stripping + * in VF. 
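+ * Only the first nb_rx_queues queue pairs are backed by a real Rx ring,
+ * so the crcstrip flag is copied from the port-level rxmode setting for
+ * those entries and left cleared for the remaining pairs.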
+ */ + vc_qpei->rxq_ext.crcstrip = + dev->data->dev_conf.rxmode.hw_strip_crc; + } + memset(&args, 0, sizeof(args)); + args.ops = + (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT; + args.in_args = (uint8_t *)vc_vqcei; + args.in_args_size = size; + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + ret = i40evf_execute_vf_cmd(dev, &args); + if (ret) + PMD_DRV_LOG(ERR, "Failed to execute command of " + "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT\n"); + + return ret; +} + +static int +i40evf_configure_queues(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + + if (vf->version_major == I40E_DPDK_VERSION_MAJOR) + /* To support DPDK PF host */ + return i40evf_configure_vsi_queues_ext(dev); + else + /* To support Linux PF host */ + return i40evf_configure_vsi_queues(dev); +} + +static int +i40evf_config_irq_map(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct vf_cmd_info args; + uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \ + sizeof(struct i40e_virtchnl_vector_map)]; + struct i40e_virtchnl_irq_map_info *map_info; + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + uint32_t vector_id; + int i, err; + + if (rte_intr_allow_others(intr_handle)) { + if (vf->version_major == I40E_DPDK_VERSION_MAJOR) + vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR; + else + vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR_LNX; + } else { + vector_id = I40E_MISC_VEC_ID; + } + + map_info = (struct i40e_virtchnl_irq_map_info *)cmd_buffer; + map_info->num_vectors = 1; + map_info->vecmap[0].rxitr_idx = I40E_ITR_INDEX_DEFAULT; + map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id; + /* Alway use default dynamic MSIX interrupt */ + map_info->vecmap[0].vector_id = vector_id; + /* Don't map any tx queue */ + map_info->vecmap[0].txq_map = 0; + map_info->vecmap[0].rxq_map = 0; + for (i = 0; i < dev->data->nb_rx_queues; i++) { + map_info->vecmap[0].rxq_map |= 1 << i; + if (rte_intr_dp_is_en(intr_handle)) + intr_handle->intr_vec[i] = vector_id; + } + + args.ops = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP; + args.in_args = (u8 *)cmd_buffer; + args.in_args_size = sizeof(cmd_buffer); + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + err = i40evf_execute_vf_cmd(dev, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command OP_ENABLE_QUEUES"); + + return err; +} + +static int +i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid, + bool on) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_virtchnl_queue_select queue_select; + int err; + struct vf_cmd_info args; + memset(&queue_select, 0, sizeof(queue_select)); + queue_select.vsi_id = vf->vsi_res->vsi_id; + + if (isrx) + queue_select.rx_queues |= 1 << qid; + else + queue_select.tx_queues |= 1 << qid; + + if (on) + args.ops = I40E_VIRTCHNL_OP_ENABLE_QUEUES; + else + args.ops = I40E_VIRTCHNL_OP_DISABLE_QUEUES; + args.in_args = (u8 *)&queue_select; + args.in_args_size = sizeof(queue_select); + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + err = i40evf_execute_vf_cmd(dev, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to switch %s %u %s", + isrx ? "RX" : "TX", qid, on ? 
"on" : "off"); + + return err; +} + +static int +i40evf_start_queues(struct rte_eth_dev *dev) +{ + struct rte_eth_dev_data *dev_data = dev->data; + int i; + struct i40e_rx_queue *rxq; + struct i40e_tx_queue *txq; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev_data->rx_queues[i]; + if (rxq->rx_deferred_start) + continue; + if (i40evf_dev_rx_queue_start(dev, i) != 0) { + PMD_DRV_LOG(ERR, "Fail to start queue %u", i); + return -1; + } + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev_data->tx_queues[i]; + if (txq->tx_deferred_start) + continue; + if (i40evf_dev_tx_queue_start(dev, i) != 0) { + PMD_DRV_LOG(ERR, "Fail to start queue %u", i); + return -1; + } + } + + return 0; +} + +static int +i40evf_stop_queues(struct rte_eth_dev *dev) +{ + int i; + + /* Stop TX queues first */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + if (i40evf_dev_tx_queue_stop(dev, i) != 0) { + PMD_DRV_LOG(ERR, "Fail to stop queue %u", i); + return -1; + } + } + + /* Then stop RX queues */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + if (i40evf_dev_rx_queue_stop(dev, i) != 0) { + PMD_DRV_LOG(ERR, "Fail to stop queue %u", i); + return -1; + } + } + + return 0; +} + +static void +i40evf_add_mac_addr(struct rte_eth_dev *dev, + struct ether_addr *addr, + __rte_unused uint32_t index, + __rte_unused uint32_t pool) +{ + struct i40e_virtchnl_ether_addr_list *list; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \ + sizeof(struct i40e_virtchnl_ether_addr)]; + int err; + struct vf_cmd_info args; + + if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x", + addr->addr_bytes[0], addr->addr_bytes[1], + addr->addr_bytes[2], addr->addr_bytes[3], + addr->addr_bytes[4], addr->addr_bytes[5]); + return; + } + + list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer; + list->vsi_id = vf->vsi_res->vsi_id; + list->num_elements = 1; + (void)rte_memcpy(list->list[0].addr, addr->addr_bytes, + sizeof(addr->addr_bytes)); + + args.ops = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS; + args.in_args = cmd_buffer; + args.in_args_size = sizeof(cmd_buffer); + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + err = i40evf_execute_vf_cmd(dev, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command " + "OP_ADD_ETHER_ADDRESS"); + + return; +} + +static void +i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index) +{ + struct i40e_virtchnl_ether_addr_list *list; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct rte_eth_dev_data *data = dev->data; + struct ether_addr *addr; + uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \ + sizeof(struct i40e_virtchnl_ether_addr)]; + int err; + struct vf_cmd_info args; + + addr = &(data->mac_addrs[index]); + + if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x", + addr->addr_bytes[0], addr->addr_bytes[1], + addr->addr_bytes[2], addr->addr_bytes[3], + addr->addr_bytes[4], addr->addr_bytes[5]); + return; + } + + list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer; + list->vsi_id = vf->vsi_res->vsi_id; + list->num_elements = 1; + (void)rte_memcpy(list->list[0].addr, addr->addr_bytes, + sizeof(addr->addr_bytes)); + + args.ops = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS; + args.in_args = cmd_buffer; + args.in_args_size = sizeof(cmd_buffer); + args.out_buffer = vf->aq_resp; + args.out_size = 
I40E_AQ_BUF_SZ; + err = i40evf_execute_vf_cmd(dev, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command " + "OP_DEL_ETHER_ADDRESS"); + return; +} + +static int +i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_virtchnl_queue_select q_stats; + int err; + struct vf_cmd_info args; + + memset(&q_stats, 0, sizeof(q_stats)); + q_stats.vsi_id = vf->vsi_res->vsi_id; + args.ops = I40E_VIRTCHNL_OP_GET_STATS; + args.in_args = (u8 *)&q_stats; + args.in_args_size = sizeof(q_stats); + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + + err = i40evf_execute_vf_cmd(dev, &args); + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS"); + *pstats = NULL; + return err; + } + *pstats = (struct i40e_eth_stats *)args.out_buffer; + return 0; +} + +static int +i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + int ret; + struct i40e_eth_stats *pstats = NULL; + + ret = i40evf_update_stats(dev, &pstats); + if (ret != 0) + return 0; + + stats->ipackets = pstats->rx_unicast + pstats->rx_multicast + + pstats->rx_broadcast; + stats->opackets = pstats->tx_broadcast + pstats->tx_multicast + + pstats->tx_unicast; + stats->ierrors = pstats->rx_discards; + stats->oerrors = pstats->tx_errors + pstats->tx_discards; + stats->ibytes = pstats->rx_bytes; + stats->obytes = pstats->tx_bytes; + + return 0; +} + +static void +i40evf_dev_xstats_reset(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_eth_stats *pstats = NULL; + + /* read stat values to clear hardware registers */ + i40evf_update_stats(dev, &pstats); + + /* set stats offset base on current values */ + vf->vsi.eth_stats_offset = vf->vsi.eth_stats; +} + +static int i40evf_dev_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstats *xstats, unsigned n) +{ + int ret; + unsigned i; + struct i40e_eth_stats *pstats = NULL; + + if (n < I40EVF_NB_XSTATS) + return I40EVF_NB_XSTATS; + + ret = i40evf_update_stats(dev, &pstats); + if (ret != 0) + return 0; + + if (!xstats) + return 0; + + /* loop over xstats array and values from pstats */ + for (i = 0; i < I40EVF_NB_XSTATS; i++) { + snprintf(xstats[i].name, sizeof(xstats[i].name), + "%s", rte_i40evf_stats_strings[i].name); + xstats[i].value = *(uint64_t *)(((char *)pstats) + + rte_i40evf_stats_strings[i].offset); + } + + return I40EVF_NB_XSTATS; +} + +static int +i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_virtchnl_vlan_filter_list *vlan_list; + uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) + + sizeof(uint16_t)]; + int err; + struct vf_cmd_info args; + + vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer; + vlan_list->vsi_id = vf->vsi_res->vsi_id; + vlan_list->num_elements = 1; + vlan_list->vlan_id[0] = vlanid; + + args.ops = I40E_VIRTCHNL_OP_ADD_VLAN; + args.in_args = (u8 *)&cmd_buffer; + args.in_args_size = sizeof(cmd_buffer); + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + err = i40evf_execute_vf_cmd(dev, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN"); + + return err; +} + +static int +i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_virtchnl_vlan_filter_list *vlan_list; + uint8_t 
cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) + + sizeof(uint16_t)]; + int err; + struct vf_cmd_info args; + + vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer; + vlan_list->vsi_id = vf->vsi_res->vsi_id; + vlan_list->num_elements = 1; + vlan_list->vlan_id[0] = vlanid; + + args.ops = I40E_VIRTCHNL_OP_DEL_VLAN; + args.in_args = (u8 *)&cmd_buffer; + args.in_args_size = sizeof(cmd_buffer); + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + err = i40evf_execute_vf_cmd(dev, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_VLAN"); + + return err; +} + +static int +i40evf_get_link_status(struct rte_eth_dev *dev, struct rte_eth_link *link) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + int err; + struct vf_cmd_info args; + struct rte_eth_link *new_link; + + args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_GET_LINK_STAT; + args.in_args = NULL; + args.in_args_size = 0; + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + err = i40evf_execute_vf_cmd(dev, &args); + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command OP_GET_LINK_STAT"); + return err; + } + + new_link = (struct rte_eth_link *)args.out_buffer; + (void)rte_memcpy(link, new_link, sizeof(*link)); + + return 0; +} + +static const struct rte_pci_id pci_id_i40evf_map[] = { +#define RTE_PCI_DEV_ID_DECL_I40EVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, +#include "rte_pci_dev_ids.h" +{ .vendor_id = 0, /* sentinel */ }, +}; + +static inline int +i40evf_dev_atomic_write_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = &(dev->data->dev_link); + struct rte_eth_link *src = link; + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +/* Disable IRQ0 */ +static inline void +i40evf_disable_irq0(struct i40e_hw *hw) +{ + /* Disable all interrupt types */ + I40E_WRITE_REG(hw, I40E_VFINT_ICR0_ENA1, 0); + I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, + I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); + I40EVF_WRITE_FLUSH(hw); +} + +/* Enable IRQ0 */ +static inline void +i40evf_enable_irq0(struct i40e_hw *hw) +{ + /* Enable admin queue interrupt trigger */ + uint32_t val; + + i40evf_disable_irq0(hw); + val = I40E_READ_REG(hw, I40E_VFINT_ICR0_ENA1); + val |= I40E_VFINT_ICR0_ENA1_ADMINQ_MASK | + I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK; + I40E_WRITE_REG(hw, I40E_VFINT_ICR0_ENA1, val); + + I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, + I40E_VFINT_DYN_CTL01_INTENA_MASK | + I40E_VFINT_DYN_CTL01_CLEARPBA_MASK | + I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); + + I40EVF_WRITE_FLUSH(hw); +} + +static int +i40evf_reset_vf(struct i40e_hw *hw) +{ + int i, reset; + + if (i40e_vf_reset(hw) != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, "Reset VF NIC failed"); + return -1; + } + /** + * After issuing vf reset command to pf, pf won't necessarily + * reset vf, it depends on what state it exactly is. If it's not + * initialized yet, it won't have vf reset since it's in a certain + * state. If not, it will try to reset. Even vf is reset, pf will + * set I40E_VFGEN_RSTAT to COMPLETE first, then wait 10ms and set + * it to ACTIVE. In this duration, vf may not catch the moment that + * COMPLETE is set. So, for vf, we'll try to wait a long time. 
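+ * The code below therefore sleeps 200 ms up front and then polls
+ * I40E_VFGEN_RSTAT up to MAX_RESET_WAIT_CNT times, 50 ms apart, treating
+ * either I40E_VFR_COMPLETED or I40E_VFR_VFACTIVE as the end of the reset.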
+ */ + rte_delay_ms(200); + + for (i = 0; i < MAX_RESET_WAIT_CNT; i++) { + reset = rd32(hw, I40E_VFGEN_RSTAT) & + I40E_VFGEN_RSTAT_VFR_STATE_MASK; + reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT; + if (I40E_VFR_COMPLETED == reset || I40E_VFR_VFACTIVE == reset) + break; + else + rte_delay_ms(50); + } + + if (i >= MAX_RESET_WAIT_CNT) { + PMD_INIT_LOG(ERR, "Reset VF NIC failed"); + return -1; + } + + return 0; +} + +static int +i40evf_init_vf(struct rte_eth_dev *dev) +{ + int i, err, bufsz; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct ether_addr *p_mac_addr; + uint16_t interval = + i40e_calc_itr_interval(I40E_QUEUE_ITR_INTERVAL_MAX); + + vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + vf->dev_data = dev->data; + err = i40e_set_mac_type(hw); + if (err) { + PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err); + goto err; + } + + i40e_init_adminq_parameter(hw); + err = i40e_init_adminq(hw); + if (err) { + PMD_INIT_LOG(ERR, "init_adminq failed: %d", err); + goto err; + } + + /* Reset VF and wait until it's complete */ + if (i40evf_reset_vf(hw)) { + PMD_INIT_LOG(ERR, "reset NIC failed"); + goto err_aq; + } + + /* VF reset, shutdown admin queue and initialize again */ + if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed"); + return -1; + } + + i40e_init_adminq_parameter(hw); + if (i40e_init_adminq(hw) != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, "init_adminq failed"); + return -1; + } + vf->aq_resp = rte_zmalloc("vf_aq_resp", I40E_AQ_BUF_SZ, 0); + if (!vf->aq_resp) { + PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory"); + goto err_aq; + } + if (i40evf_check_api_version(dev) != 0) { + PMD_INIT_LOG(ERR, "check_api version failed"); + goto err_aq; + } + bufsz = sizeof(struct i40e_virtchnl_vf_resource) + + (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource)); + vf->vf_res = rte_zmalloc("vf_res", bufsz, 0); + if (!vf->vf_res) { + PMD_INIT_LOG(ERR, "unable to allocate vf_res memory"); + goto err_aq; + } + + if (i40evf_get_vf_resource(dev) != 0) { + PMD_INIT_LOG(ERR, "i40evf_get_vf_config failed"); + goto err_alloc; + } + + /* got VF config message back from PF, now we can parse it */ + for (i = 0; i < vf->vf_res->num_vsis; i++) { + if (vf->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV) + vf->vsi_res = &vf->vf_res->vsi_res[i]; + } + + if (!vf->vsi_res) { + PMD_INIT_LOG(ERR, "no LAN VSI found"); + goto err_alloc; + } + + if (hw->mac.type == I40E_MAC_X722_VF) + vf->flags = I40E_FLAG_RSS_AQ_CAPABLE; + vf->vsi.vsi_id = vf->vsi_res->vsi_id; + vf->vsi.type = vf->vsi_res->vsi_type; + vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs; + vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + + /* Store the MAC address configured by host, or generate random one */ + p_mac_addr = (struct ether_addr *)(vf->vsi_res->default_mac_addr); + if (is_valid_assigned_ether_addr(p_mac_addr)) /* Configured by host */ + ether_addr_copy(p_mac_addr, (struct ether_addr *)hw->mac.addr); + else + eth_random_addr(hw->mac.addr); /* Generate a random one */ + + /* If the PF host is not DPDK, set the interval of ITR0 to max*/ + if (vf->version_major != I40E_DPDK_VERSION_MAJOR) { + I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, + (I40E_ITR_INDEX_DEFAULT << + I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) | + (interval << + I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)); + I40EVF_WRITE_FLUSH(hw); + } + + return 0; + +err_alloc: + rte_free(vf->vf_res); +err_aq: + 
i40e_shutdown_adminq(hw); /* ignore error */ +err: + return -1; +} + +static int +i40evf_uninit_vf(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + if (hw->adapter_stopped == 0) + i40evf_dev_close(dev); + rte_free(vf->vf_res); + vf->vf_res = NULL; + rte_free(vf->aq_resp); + vf->aq_resp = NULL; + + return 0; +} + +static void +i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev, + uint8_t *msg, + __rte_unused uint16_t msglen) +{ + struct i40e_virtchnl_pf_event *pf_msg = + (struct i40e_virtchnl_pf_event *)msg; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + + switch (pf_msg->event) { + case I40E_VIRTCHNL_EVENT_RESET_IMPENDING: + PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event\n"); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET); + break; + case I40E_VIRTCHNL_EVENT_LINK_CHANGE: + PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event\n"); + vf->link_up = pf_msg->event_data.link_event.link_status; + vf->link_speed = pf_msg->event_data.link_event.link_speed; + break; + case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE: + PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event\n"); + break; + default: + PMD_DRV_LOG(ERR, " unknown event received %u", pf_msg->event); + break; + } +} + +static void +i40evf_handle_aq_msg(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_arq_event_info info; + struct i40e_virtchnl_msg *v_msg; + uint16_t pending, opcode; + int ret; + + info.buf_len = I40E_AQ_BUF_SZ; + if (!vf->aq_resp) { + PMD_DRV_LOG(ERR, "Buffer for adminq resp should not be NULL"); + return; + } + info.msg_buf = vf->aq_resp; + v_msg = (struct i40e_virtchnl_msg *)&info.desc; + + pending = 1; + while (pending) { + ret = i40e_clean_arq_element(hw, &info, &pending); + + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ," + "ret: %d", ret); + break; + } + opcode = rte_le_to_cpu_16(info.desc.opcode); + + switch (opcode) { + case i40e_aqc_opc_send_msg_to_vf: + if (v_msg->v_opcode == I40E_VIRTCHNL_OP_EVENT) + /* process event*/ + i40evf_handle_pf_event(dev, info.msg_buf, + info.msg_len); + else { + /* read message and it's expected one */ + if (v_msg->v_opcode == vf->pend_cmd) { + vf->cmd_retval = v_msg->v_retval; + /* prevent compiler reordering */ + rte_compiler_barrier(); + _clear_cmd(vf); + } else + PMD_DRV_LOG(ERR, "command mismatch," + "expect %u, get %u", + vf->pend_cmd, v_msg->v_opcode); + PMD_DRV_LOG(DEBUG, "adminq response is received," + " opcode = %d\n", v_msg->v_opcode); + } + break; + default: + PMD_DRV_LOG(ERR, "Request %u is not supported yet", + opcode); + break; + } + } +} + +/** + * Interrupt handler triggered by NIC for handling + * specific interrupt. Only adminq interrupt is processed in VF. + * + * @param handle + * Pointer to interrupt handle. + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. 
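+ * The handler masks IRQ0 first, reads I40E_VFINT_ICR01 to find the
+ * cause, drains pending admin-queue messages through
+ * i40evf_handle_aq_msg() and re-enables the interrupt before returning.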
+ * + * @return + * void + */ +static void +i40evf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, + void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t icr0; + + i40evf_disable_irq0(hw); + + /* read out interrupt causes */ + icr0 = I40E_READ_REG(hw, I40E_VFINT_ICR01); + + /* No interrupt event indicated */ + if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK)) { + PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do\n"); + goto done; + } + + if (icr0 & I40E_VFINT_ICR01_ADMINQ_MASK) { + PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported\n"); + i40evf_handle_aq_msg(dev); + } + + /* Link Status Change interrupt */ + if (icr0 & I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK) + PMD_DRV_LOG(DEBUG, "LINK_STAT_CHANGE is reported," + " do nothing\n"); + +done: + i40evf_enable_irq0(hw); + rte_intr_enable(&dev->pci_dev->intr_handle); +} + +static int +i40evf_dev_init(struct rte_eth_dev *eth_dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(\ + eth_dev->data->dev_private); + struct rte_pci_device *pci_dev = eth_dev->pci_dev; + + PMD_INIT_FUNC_TRACE(); + + /* assign ops func pointer */ + eth_dev->dev_ops = &i40evf_eth_dev_ops; + eth_dev->rx_pkt_burst = &i40e_recv_pkts; + eth_dev->tx_pkt_burst = &i40e_xmit_pkts; + + /* + * For secondary processes, we don't initialise any further as primary + * has already done this work. + */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY){ + i40e_set_rx_function(eth_dev); + i40e_set_tx_function(eth_dev); + return 0; + } + + rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev); + + hw->vendor_id = eth_dev->pci_dev->id.vendor_id; + hw->device_id = eth_dev->pci_dev->id.device_id; + hw->subsystem_vendor_id = eth_dev->pci_dev->id.subsystem_vendor_id; + hw->subsystem_device_id = eth_dev->pci_dev->id.subsystem_device_id; + hw->bus.device = eth_dev->pci_dev->addr.devid; + hw->bus.func = eth_dev->pci_dev->addr.function; + hw->hw_addr = (void *)eth_dev->pci_dev->mem_resource[0].addr; + hw->adapter_stopped = 0; + + if(i40evf_init_vf(eth_dev) != 0) { + PMD_INIT_LOG(ERR, "Init vf failed"); + return -1; + } + + /* register callback func to eal lib */ + rte_intr_callback_register(&pci_dev->intr_handle, + i40evf_dev_interrupt_handler, (void *)eth_dev); + + /* enable uio intr after callback register */ + rte_intr_enable(&pci_dev->intr_handle); + + /* configure and enable device interrupt */ + i40evf_enable_irq0(hw); + + /* copy mac addr */ + eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac", + ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX, + 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to" + " store MAC addresses", + ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX); + return -ENOMEM; + } + ether_addr_copy((struct ether_addr *)hw->mac.addr, + &eth_dev->data->mac_addrs[0]); + + return 0; +} + +static int +i40evf_dev_uninit(struct rte_eth_dev *eth_dev) +{ + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -EPERM; + + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + + if (i40evf_uninit_vf(eth_dev) != 0) { + PMD_INIT_LOG(ERR, "i40evf_uninit_vf failed"); + return -1; + } + + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + + return 0; +} +/* + * virtual function driver struct + */ +static struct eth_driver rte_i40evf_pmd = { + .pci_drv = { + .name = "rte_i40evf_pmd", + .id_table = pci_id_i40evf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
RTE_PCI_DRV_DETACHABLE, + }, + .eth_dev_init = i40evf_dev_init, + .eth_dev_uninit = i40evf_dev_uninit, + .dev_private_size = sizeof(struct i40e_adapter), +}; + +/* + * VF Driver initialization routine. + * Invoked one at EAL init time. + * Register itself as the [Virtual Poll Mode] Driver of PCI Fortville devices. + */ +static int +rte_i40evf_pmd_init(const char *name __rte_unused, + const char *params __rte_unused) +{ + PMD_INIT_FUNC_TRACE(); + + rte_eth_driver_register(&rte_i40evf_pmd); + + return 0; +} + +static struct rte_driver rte_i40evf_driver = { + .type = PMD_PDEV, + .init = rte_i40evf_pmd_init, +}; + +PMD_REGISTER_DRIVER(rte_i40evf_driver); + +static int +i40evf_dev_configure(struct rte_eth_dev *dev) +{ + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + + /* Initialize to TRUE. If any of Rx queues doesn't meet the bulk + * allocation or vector Rx preconditions we will reset it. + */ + ad->rx_bulk_alloc_allowed = true; + ad->rx_vec_allowed = true; + ad->tx_simple_allowed = true; + ad->tx_vec_allowed = true; + + return i40evf_init_vlan(dev); +} + +static int +i40evf_init_vlan(struct rte_eth_dev *dev) +{ + struct rte_eth_dev_data *data = dev->data; + int ret; + + /* Apply vlan offload setting */ + i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK); + + /* Apply pvid setting */ + ret = i40evf_vlan_pvid_set(dev, data->dev_conf.txmode.pvid, + data->dev_conf.txmode.hw_vlan_insert_pvid); + return ret; +} + +static void +i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + bool enable_vlan_strip = 0; + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + + /* Linux pf host doesn't support vlan offload yet */ + if (vf->version_major == I40E_DPDK_VERSION_MAJOR) { + /* Vlan stripping setting */ + if (mask & ETH_VLAN_STRIP_MASK) { + /* Enable or disable VLAN stripping */ + if (dev_conf->rxmode.hw_vlan_strip) + enable_vlan_strip = 1; + else + enable_vlan_strip = 0; + + i40evf_config_vlan_offload(dev, enable_vlan_strip); + } + } +} + +static int +i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on) +{ + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + struct i40e_vsi_vlan_pvid_info info; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + + memset(&info, 0, sizeof(info)); + info.on = on; + + /* Linux pf host don't support vlan offload yet */ + if (vf->version_major == I40E_DPDK_VERSION_MAJOR) { + if (info.on) + info.config.pvid = pvid; + else { + info.config.reject.tagged = + dev_conf->txmode.hw_vlan_reject_tagged; + info.config.reject.untagged = + dev_conf->txmode.hw_vlan_reject_untagged; + } + return i40evf_config_vlan_pvid(dev, &info); + } + + return 0; +} + +static int +i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct i40e_rx_queue *rxq; + int err = 0; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + if (rx_queue_id < dev->data->nb_rx_queues) { + rxq = dev->data->rx_queues[rx_queue_id]; + + err = i40e_alloc_rx_queue_mbufs(rxq); + if (err) { + PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf"); + return err; + } + + rte_wmb(); + + /* Init the RX tail register. 
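+ * Writing nb_rx_desc - 1 hands every freshly filled descriptor to the
+ * hardware; the rte_wmb() above guarantees the descriptor writes are
+ * visible before this doorbell update.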
*/ + I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); + I40EVF_WRITE_FLUSH(hw); + + /* Ready to switch the queue on */ + err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE); + + if (err) + PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on", + rx_queue_id); + else + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + } + + return err; +} + +static int +i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct i40e_rx_queue *rxq; + int err; + + if (rx_queue_id < dev->data->nb_rx_queues) { + rxq = dev->data->rx_queues[rx_queue_id]; + + err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE); + + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off", + rx_queue_id); + return err; + } + + i40e_rx_queue_release_mbufs(rxq); + i40e_reset_rx_queue(rxq); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + } + + return 0; +} + +static int +i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + int err = 0; + + PMD_INIT_FUNC_TRACE(); + + if (tx_queue_id < dev->data->nb_tx_queues) { + + /* Ready to switch the queue on */ + err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE); + + if (err) + PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on", + tx_queue_id); + else + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + } + + return err; +} + +static int +i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct i40e_tx_queue *txq; + int err; + + if (tx_queue_id < dev->data->nb_tx_queues) { + txq = dev->data->tx_queues[tx_queue_id]; + + err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE); + + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off", + tx_queue_id); + return err; + } + + i40e_tx_queue_release_mbufs(txq); + i40e_reset_tx_queue(txq); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + } + + return 0; +} + +static int +i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + int ret; + + if (on) + ret = i40evf_add_vlan(dev, vlan_id); + else + ret = i40evf_del_vlan(dev,vlan_id); + + return ret; +} + +static int +i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_dev_data *dev_data = dev->data; + struct rte_pktmbuf_pool_private *mbp_priv; + uint16_t buf_size, len; + + rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(rxq->queue_id); + I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); + I40EVF_WRITE_FLUSH(hw); + + /* Calculate the maximum packet length allowed */ + mbp_priv = rte_mempool_get_priv(rxq->mp); + buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size - + RTE_PKTMBUF_HEADROOM); + rxq->hs_mode = i40e_header_split_none; + rxq->rx_hdr_len = 0; + rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << I40E_RXQ_CTX_DBUFF_SHIFT)); + len = rxq->rx_buf_len * I40E_MAX_CHAINED_RX_BUFFERS; + rxq->max_pkt_len = RTE_MIN(len, + dev_data->dev_conf.rxmode.max_rx_pkt_len); + + /** + * Check if the jumbo frame and maximum packet length are set correctly + */ + if (dev_data->dev_conf.rxmode.jumbo_frame == 1) { + if (rxq->max_pkt_len <= ETHER_MAX_LEN || + rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) { + PMD_DRV_LOG(ERR, "maximum packet length must be " + "larger than %u and smaller than %u, as jumbo " + "frame is enabled", (uint32_t)ETHER_MAX_LEN, + (uint32_t)I40E_FRAME_SIZE_MAX); + return I40E_ERR_CONFIG; + } + } else { + if (rxq->max_pkt_len < ETHER_MIN_LEN || + rxq->max_pkt_len > ETHER_MAX_LEN) { 
+ PMD_DRV_LOG(ERR, "maximum packet length must be " + "larger than %u and smaller than %u, as jumbo " + "frame is disabled", (uint32_t)ETHER_MIN_LEN, + (uint32_t)ETHER_MAX_LEN); + return I40E_ERR_CONFIG; + } + } + + if (dev_data->dev_conf.rxmode.enable_scatter || + (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) { + dev_data->scattered_rx = 1; + } + + return 0; +} + +static int +i40evf_rx_init(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + uint16_t i; + int ret = I40E_SUCCESS; + struct i40e_rx_queue **rxq = + (struct i40e_rx_queue **)dev->data->rx_queues; + + i40evf_config_rss(vf); + for (i = 0; i < dev->data->nb_rx_queues; i++) { + if (!rxq[i] || !rxq[i]->q_set) + continue; + ret = i40evf_rxq_init(dev, rxq[i]); + if (ret != I40E_SUCCESS) + break; + } + if (ret == I40E_SUCCESS) + i40e_set_rx_function(dev); + + return ret; +} + +static void +i40evf_tx_init(struct rte_eth_dev *dev) +{ + uint16_t i; + struct i40e_tx_queue **txq = + (struct i40e_tx_queue **)dev->data->tx_queues; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + for (i = 0; i < dev->data->nb_tx_queues; i++) + txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i); + + i40e_set_tx_function(dev); +} + +static inline void +i40evf_enable_queues_intr(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + + if (!rte_intr_allow_others(intr_handle)) { + I40E_WRITE_REG(hw, + I40E_VFINT_DYN_CTL01, + I40E_VFINT_DYN_CTL01_INTENA_MASK | + I40E_VFINT_DYN_CTL01_CLEARPBA_MASK | + I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); + I40EVF_WRITE_FLUSH(hw); + return; + } + + if (vf->version_major == I40E_DPDK_VERSION_MAJOR) + /* To support DPDK PF host */ + I40E_WRITE_REG(hw, + I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1), + I40E_VFINT_DYN_CTLN1_INTENA_MASK | + I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); + /* If host driver is kernel driver, do nothing. + * Interrupt 0 is used for rx packets, but don't set + * I40E_VFINT_DYN_CTL01, + * because it is already done in i40evf_enable_irq0. + */ + + I40EVF_WRITE_FLUSH(hw); +} + +static inline void +i40evf_disable_queues_intr(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + + if (!rte_intr_allow_others(intr_handle)) { + I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, + I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); + I40EVF_WRITE_FLUSH(hw); + return; + } + + if (vf->version_major == I40E_DPDK_VERSION_MAJOR) + I40E_WRITE_REG(hw, + I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR + - 1), + 0); + /* If host driver is kernel driver, do nothing. + * Interrupt 0 is used for rx packets, but don't zero + * I40E_VFINT_DYN_CTL01, + * because interrupt 0 is also used for adminq processing. 
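+ * Only the dedicated Rx vector (I40EVF_VSI_DEFAULT_MSIX_INTR) is cleared
+ * when the PF host runs the DPDK driver; touching I40E_VFINT_DYN_CTL01
+ * here would also mask the admin-queue interrupt handled by IRQ0.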
+ */ + + I40EVF_WRITE_FLUSH(hw); +} + +static int +i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t interval = + i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL); + uint16_t msix_intr; + + msix_intr = intr_handle->intr_vec[queue_id]; + if (msix_intr == I40E_MISC_VEC_ID) + I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, + I40E_VFINT_DYN_CTL01_INTENA_MASK | + I40E_VFINT_DYN_CTL01_CLEARPBA_MASK | + (0 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) | + (interval << + I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)); + else + I40E_WRITE_REG(hw, + I40E_VFINT_DYN_CTLN1(msix_intr - + I40E_RX_VEC_START), + I40E_VFINT_DYN_CTLN1_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | + (0 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | + (interval << + I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)); + + I40EVF_WRITE_FLUSH(hw); + + rte_intr_enable(&dev->pci_dev->intr_handle); + + return 0; +} + +static int +i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t msix_intr; + + msix_intr = intr_handle->intr_vec[queue_id]; + if (msix_intr == I40E_MISC_VEC_ID) + I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0); + else + I40E_WRITE_REG(hw, + I40E_VFINT_DYN_CTLN1(msix_intr - + I40E_RX_VEC_START), + 0); + + I40EVF_WRITE_FLUSH(hw); + + return 0; +} + +static void +i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add) +{ + struct i40e_virtchnl_ether_addr_list *list; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + int err, i, j; + int next_begin = 0; + int begin = 0; + uint32_t len; + struct ether_addr *addr; + struct vf_cmd_info args; + + do { + j = 0; + len = sizeof(struct i40e_virtchnl_ether_addr_list); + for (i = begin; i < I40E_NUM_MACADDR_MAX; i++, next_begin++) { + if (is_zero_ether_addr(&dev->data->mac_addrs[i])) + continue; + len += sizeof(struct i40e_virtchnl_ether_addr); + if (len >= I40E_AQ_BUF_SZ) { + next_begin = i + 1; + break; + } + } + + list = rte_zmalloc("i40evf_del_mac_buffer", len, 0); + + for (i = begin; i < next_begin; i++) { + addr = &dev->data->mac_addrs[i]; + if (is_zero_ether_addr(addr)) + continue; + (void)rte_memcpy(list->list[j].addr, addr->addr_bytes, + sizeof(addr->addr_bytes)); + PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x", + addr->addr_bytes[0], addr->addr_bytes[1], + addr->addr_bytes[2], addr->addr_bytes[3], + addr->addr_bytes[4], addr->addr_bytes[5]); + j++; + } + list->vsi_id = vf->vsi_res->vsi_id; + list->num_elements = j; + args.ops = add ? I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS : + I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS; + args.in_args = (uint8_t *)list; + args.in_args_size = len; + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + err = i40evf_execute_vf_cmd(dev, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command %s", + add ? 
"OP_ADD_ETHER_ADDRESS" : + "OP_DEL_ETHER_ADDRESS"); + rte_free(list); + begin = next_begin; + } while (begin < I40E_NUM_MACADDR_MAX); +} + +static int +i40evf_dev_start(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + uint32_t intr_vector = 0; + + PMD_INIT_FUNC_TRACE(); + + hw->adapter_stopped = 0; + + vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len; + vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues, + dev->data->nb_tx_queues); + + /* check and configure queue intr-vector mapping */ + if (dev->data->dev_conf.intr_conf.rxq != 0) { + intr_vector = dev->data->nb_rx_queues; + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -1; + } + + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), 0); + if (!intr_handle->intr_vec) { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec\n", dev->data->nb_rx_queues); + return -ENOMEM; + } + } + + if (i40evf_rx_init(dev) != 0){ + PMD_DRV_LOG(ERR, "failed to do RX init"); + return -1; + } + + i40evf_tx_init(dev); + + if (i40evf_configure_queues(dev) != 0) { + PMD_DRV_LOG(ERR, "configure queues failed"); + goto err_queue; + } + if (i40evf_config_irq_map(dev)) { + PMD_DRV_LOG(ERR, "config_irq_map failed"); + goto err_queue; + } + + /* Set all mac addrs */ + i40evf_add_del_all_mac_addr(dev, TRUE); + + if (i40evf_start_queues(dev) != 0) { + PMD_DRV_LOG(ERR, "enable queues failed"); + goto err_mac; + } + + i40evf_enable_queues_intr(dev); + return 0; + +err_mac: + i40evf_add_del_all_mac_addr(dev, FALSE); +err_queue: + return -1; +} + +static void +i40evf_dev_stop(struct rte_eth_dev *dev) +{ + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + + PMD_INIT_FUNC_TRACE(); + + i40evf_stop_queues(dev); + i40evf_disable_queues_intr(dev); + i40e_dev_clear_queues(dev); + + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } + /* remove all mac addrs */ + i40evf_add_del_all_mac_addr(dev, FALSE); + +} + +static int +i40evf_dev_link_update(struct rte_eth_dev *dev, + __rte_unused int wait_to_complete) +{ + struct rte_eth_link new_link; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + /* + * DPDK pf host provide interfacet to acquire link status + * while Linux driver does not + */ + if (vf->version_major == I40E_DPDK_VERSION_MAJOR) + i40evf_get_link_status(dev, &new_link); + else { + /* Linux driver PF host */ + switch (vf->link_speed) { + case I40E_LINK_SPEED_100MB: + new_link.link_speed = ETH_SPEED_NUM_100M; + break; + case I40E_LINK_SPEED_1GB: + new_link.link_speed = ETH_SPEED_NUM_1G; + break; + case I40E_LINK_SPEED_10GB: + new_link.link_speed = ETH_SPEED_NUM_10G; + break; + case I40E_LINK_SPEED_20GB: + new_link.link_speed = ETH_SPEED_NUM_20G; + break; + case I40E_LINK_SPEED_40GB: + new_link.link_speed = ETH_SPEED_NUM_40G; + break; + default: + new_link.link_speed = ETH_SPEED_NUM_100M; + break; + } + /* full duplex only */ + new_link.link_duplex = ETH_LINK_FULL_DUPLEX; + new_link.link_status = vf->link_up ? 
ETH_LINK_UP : + ETH_LINK_DOWN; + } + i40evf_dev_atomic_write_link_status(dev, &new_link); + + return 0; +} + +static void +i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + int ret; + + /* If enabled, just return */ + if (vf->promisc_unicast_enabled) + return; + + ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled); + if (ret == 0) + vf->promisc_unicast_enabled = TRUE; +} + +static void +i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + int ret; + + /* If disabled, just return */ + if (!vf->promisc_unicast_enabled) + return; + + ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled); + if (ret == 0) + vf->promisc_unicast_enabled = FALSE; +} + +static void +i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + int ret; + + /* If enabled, just return */ + if (vf->promisc_multicast_enabled) + return; + + ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1); + if (ret == 0) + vf->promisc_multicast_enabled = TRUE; +} + +static void +i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + int ret; + + /* If enabled, just return */ + if (!vf->promisc_multicast_enabled) + return; + + ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0); + if (ret == 0) + vf->promisc_multicast_enabled = FALSE; +} + +static void +i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + + memset(dev_info, 0, sizeof(*dev_info)); + dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs; + dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs; + dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN; + dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX; + dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); + dev_info->reta_size = ETH_RSS_RETA_SIZE_64; + dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL; + dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX; + dev_info->rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_QINQ_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + dev_info->tx_offload_capa = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_QINQ_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = I40E_DEFAULT_RX_PTHRESH, + .hthresh = I40E_DEFAULT_RX_HTHRESH, + .wthresh = I40E_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = I40E_DEFAULT_TX_PTHRESH, + .hthresh = I40E_DEFAULT_TX_HTHRESH, + .wthresh = I40E_DEFAULT_TX_WTHRESH, + }, + .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH, + .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH, + .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | + ETH_TXQ_FLAGS_NOOFFLOADS, + }; + + dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = I40E_MAX_RING_DESC, + .nb_min = I40E_MIN_RING_DESC, + .nb_align = I40E_ALIGN_RING_DESC, + }; + + dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = I40E_MAX_RING_DESC, + .nb_min = I40E_MIN_RING_DESC, + 
.nb_align = I40E_ALIGN_RING_DESC, + }; +} + +static void +i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + if (i40evf_get_statics(dev, stats)) + PMD_DRV_LOG(ERR, "Get statics failed"); +} + +static void +i40evf_dev_close(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = dev->pci_dev; + + i40evf_dev_stop(dev); + hw->adapter_stopped = 1; + i40e_dev_free_queues(dev); + i40evf_reset_vf(hw); + i40e_shutdown_adminq(hw); + /* disable uio intr before callback unregister */ + rte_intr_disable(&pci_dev->intr_handle); + + /* unregister callback func from eal lib */ + rte_intr_callback_unregister(&pci_dev->intr_handle, + i40evf_dev_interrupt_handler, (void *)dev); + i40evf_disable_irq0(hw); +} + +static int +i40evf_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) +{ + struct i40e_vf *vf = I40E_VSI_TO_VF(vsi); + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + int ret; + + if (!lut) + return -EINVAL; + + if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { + ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, FALSE, + lut, lut_size); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get RSS lookup table"); + return ret; + } + } else { + uint32_t *lut_dw = (uint32_t *)lut; + uint16_t i, lut_size_dw = lut_size / 4; + + for (i = 0; i < lut_size_dw; i++) + lut_dw[i] = I40E_READ_REG(hw, I40E_VFQF_HLUT(i)); + } + + return 0; +} + +static int +i40evf_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) +{ + struct i40e_vf *vf = I40E_VSI_TO_VF(vsi); + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + int ret; + + if (!vsi || !lut) + return -EINVAL; + + if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { + ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, FALSE, + lut, lut_size); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to set RSS lookup table"); + return ret; + } + } else { + uint32_t *lut_dw = (uint32_t *)lut; + uint16_t i, lut_size_dw = lut_size / 4; + + for (i = 0; i < lut_size_dw; i++) + I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i), lut_dw[i]); + I40EVF_WRITE_FLUSH(hw); + } + + return 0; +} + +static int +i40evf_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + uint8_t *lut; + uint16_t i, idx, shift; + int ret; + + if (reta_size != ETH_RSS_RETA_SIZE_64) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number of hardware can " + "support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64); + return -EINVAL; + } + + lut = rte_zmalloc("i40e_rss_lut", reta_size, 0); + if (!lut) { + PMD_DRV_LOG(ERR, "No memory can be allocated"); + return -ENOMEM; + } + ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size); + if (ret) + goto out; + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) + lut[i] = reta_conf[idx].reta[shift]; + } + ret = i40evf_set_rss_lut(&vf->vsi, lut, reta_size); + +out: + rte_free(lut); + + return ret; +} + +static int +i40evf_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + uint16_t i, idx, shift; + uint8_t *lut; + int ret; + + if (reta_size != ETH_RSS_RETA_SIZE_64) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number of hardware can " + "support (%d)\n", reta_size, 
ETH_RSS_RETA_SIZE_64); + return -EINVAL; + } + + lut = rte_zmalloc("i40e_rss_lut", reta_size, 0); + if (!lut) { + PMD_DRV_LOG(ERR, "No memory can be allocated"); + return -ENOMEM; + } + + ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size); + if (ret) + goto out; + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) + reta_conf[idx].reta[shift] = lut[i]; + } + +out: + rte_free(lut); + + return ret; +} + +static int +i40evf_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) +{ + struct i40e_vf *vf = I40E_VSI_TO_VF(vsi); + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + int ret = 0; + + if (!key || key_len == 0) { + PMD_DRV_LOG(DEBUG, "No key to be configured"); + return 0; + } else if (key_len != (I40E_VFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t)) { + PMD_DRV_LOG(ERR, "Invalid key length %u", key_len); + return -EINVAL; + } + + if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { + struct i40e_aqc_get_set_rss_key_data *key_dw = + (struct i40e_aqc_get_set_rss_key_data *)key; + + ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw); + if (ret) + PMD_INIT_LOG(ERR, "Failed to configure RSS key " + "via AQ"); + } else { + uint32_t *hash_key = (uint32_t *)key; + uint16_t i; + + for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) + i40e_write_rx_ctl(hw, I40E_VFQF_HKEY(i), hash_key[i]); + I40EVF_WRITE_FLUSH(hw); + } + + return ret; +} + +static int +i40evf_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len) +{ + struct i40e_vf *vf = I40E_VSI_TO_VF(vsi); + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + int ret; + + if (!key || !key_len) + return -EINVAL; + + if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { + ret = i40e_aq_get_rss_key(hw, vsi->vsi_id, + (struct i40e_aqc_get_set_rss_key_data *)key); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ"); + return ret; + } + } else { + uint32_t *key_dw = (uint32_t *)key; + uint16_t i; + + for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) + key_dw[i] = i40e_read_rx_ctl(hw, I40E_VFQF_HKEY(i)); + } + *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); + + return 0; +} + +static int +i40evf_hw_rss_hash_set(struct i40e_vf *vf, struct rte_eth_rss_conf *rss_conf) +{ + struct i40e_hw *hw = I40E_VF_TO_HW(vf); + uint64_t rss_hf, hena; + int ret; + + ret = i40evf_set_rss_key(&vf->vsi, rss_conf->rss_key, + rss_conf->rss_key_len); + if (ret) + return ret; + + rss_hf = rss_conf->rss_hf; + hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0)); + hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32; + hena &= ~I40E_RSS_HENA_ALL; + hena |= i40e_config_hena(rss_hf); + i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena); + i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32)); + I40EVF_WRITE_FLUSH(hw); + + return 0; +} + +static void +i40evf_disable_rss(struct i40e_vf *vf) +{ + struct i40e_hw *hw = I40E_VF_TO_HW(vf); + uint64_t hena; + + hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0)); + hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32; + hena &= ~I40E_RSS_HENA_ALL; + i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena); + i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32)); + I40EVF_WRITE_FLUSH(hw); +} + +static int +i40evf_config_rss(struct i40e_vf *vf) +{ + struct i40e_hw *hw = I40E_VF_TO_HW(vf); + struct rte_eth_rss_conf rss_conf; + uint32_t i, j, lut = 0, nb_q = (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4; + uint16_t num; + + if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) { + 
i40evf_disable_rss(vf); + PMD_DRV_LOG(DEBUG, "RSS not configured\n"); + return 0; + } + + num = RTE_MIN(vf->dev_data->nb_rx_queues, I40E_MAX_QP_NUM_PER_VF); + /* Fill out the look up table */ + for (i = 0, j = 0; i < nb_q; i++, j++) { + if (j >= num) + j = 0; + lut = (lut << 8) | j; + if ((i & 3) == 3) + I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut); + } + + rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf; + if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) { + i40evf_disable_rss(vf); + PMD_DRV_LOG(DEBUG, "No hash flag is set\n"); + return 0; + } + + if (rss_conf.rss_key == NULL || rss_conf.rss_key_len < + (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) { + /* Calculate the default hash key */ + for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) + rss_key_default[i] = (uint32_t)rte_rand(); + rss_conf.rss_key = (uint8_t *)rss_key_default; + rss_conf.rss_key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t); + } + + return i40evf_hw_rss_hash_set(vf, &rss_conf); +} + +static int +i40evf_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL; + uint64_t hena; + + hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0)); + hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32; + if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */ + if (rss_hf != 0) /* Enable RSS */ + return -EINVAL; + return 0; + } + + /* RSS enabled */ + if (rss_hf == 0) /* Disable RSS */ + return -EINVAL; + + return i40evf_hw_rss_hash_set(vf, rss_conf); +} + +static int +i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t hena; + + i40evf_get_rss_key(&vf->vsi, rss_conf->rss_key, + &rss_conf->rss_key_len); + + hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0)); + hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32; + rss_conf->rss_hf = i40e_parse_hena(hena); + + return 0; +} diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c new file mode 100644 index 00000000..ff57e8a2 --- /dev/null +++ b/drivers/net/i40e/i40e_fdir.c @@ -0,0 +1,1466 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/queue.h> +#include <stdio.h> +#include <errno.h> +#include <stdint.h> +#include <string.h> +#include <unistd.h> +#include <stdarg.h> + +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_log.h> +#include <rte_memzone.h> +#include <rte_malloc.h> +#include <rte_arp.h> +#include <rte_ip.h> +#include <rte_udp.h> +#include <rte_tcp.h> +#include <rte_sctp.h> + +#include "i40e_logs.h" +#include "base/i40e_type.h" +#include "base/i40e_prototype.h" +#include "i40e_ethdev.h" +#include "i40e_rxtx.h" + +#define I40E_FDIR_MZ_NAME "FDIR_MEMZONE" +#ifndef IPV6_ADDR_LEN +#define IPV6_ADDR_LEN 16 +#endif + +#define I40E_FDIR_PKT_LEN 512 +#define I40E_FDIR_IP_DEFAULT_LEN 420 +#define I40E_FDIR_IP_DEFAULT_TTL 0x40 +#define I40E_FDIR_IP_DEFAULT_VERSION_IHL 0x45 +#define I40E_FDIR_TCP_DEFAULT_DATAOFF 0x50 +#define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW 0x60000000 +#define I40E_FDIR_IPv6_TC_OFFSET 20 + +#define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS 0xFF +#define I40E_FDIR_IPv6_PAYLOAD_LEN 380 +#define I40E_FDIR_UDP_DEFAULT_LEN 400 + +/* Wait count and interval for fdir filter programming */ +#define I40E_FDIR_WAIT_COUNT 10 +#define I40E_FDIR_WAIT_INTERVAL_US 1000 + +/* Wait count and interval for fdir filter flush */ +#define I40E_FDIR_FLUSH_RETRY 50 +#define I40E_FDIR_FLUSH_INTERVAL_MS 5 + +#define I40E_COUNTER_PF 2 +/* Statistic counter index for one pf */ +#define I40E_COUNTER_INDEX_FDIR(pf_id) (0 + (pf_id) * I40E_COUNTER_PF) +#define I40E_MAX_FLX_SOURCE_OFF 480 +#define I40E_FLX_OFFSET_IN_FIELD_VECTOR 50 + +#define NONUSE_FLX_PIT_DEST_OFF 63 +#define NONUSE_FLX_PIT_FSIZE 1 +#define MK_FLX_PIT(src_offset, fsize, dst_offset) ( \ + (((src_offset) << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) & \ + I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) | \ + (((fsize) << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \ + I40E_PRTQF_FLX_PIT_FSIZE_MASK) | \ + ((((dst_offset) + I40E_FLX_OFFSET_IN_FIELD_VECTOR) << \ + I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) & \ + I40E_PRTQF_FLX_PIT_DEST_OFF_MASK)) + +#define I40E_FDIR_FLOWS ( \ + (1 << RTE_ETH_FLOW_FRAG_IPV4) | \ + (1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \ + (1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \ + (1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \ + (1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \ + (1 << RTE_ETH_FLOW_FRAG_IPV6) | \ + (1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \ + (1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \ + (1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \ + (1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \ + (1 << RTE_ETH_FLOW_L2_PAYLOAD)) + +#define I40E_FLEX_WORD_MASK(off) (0x80 >> (off)) + +static int i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq); +static int i40e_check_fdir_flex_conf( + const struct rte_eth_fdir_flex_conf *conf); +static void i40e_set_flx_pld_cfg(struct i40e_pf *pf, + const struct rte_eth_flex_payload_cfg *cfg); +static void i40e_set_flex_mask_on_pctype(struct i40e_pf *pf, + enum i40e_filter_pctype pctype, + const struct rte_eth_fdir_flex_mask *mask_cfg); +static int i40e_fdir_construct_pkt(struct i40e_pf *pf, + const struct 
rte_eth_fdir_input *fdir_input, + unsigned char *raw_pkt); +static int i40e_add_del_fdir_filter(struct rte_eth_dev *dev, + const struct rte_eth_fdir_filter *filter, + bool add); +static int i40e_fdir_filter_programming(struct i40e_pf *pf, + enum i40e_filter_pctype pctype, + const struct rte_eth_fdir_filter *filter, + bool add); +static int i40e_fdir_flush(struct rte_eth_dev *dev); +static void i40e_fdir_info_get(struct rte_eth_dev *dev, + struct rte_eth_fdir_info *fdir); +static void i40e_fdir_stats_get(struct rte_eth_dev *dev, + struct rte_eth_fdir_stats *stat); + +static int +i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq) +{ + struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi); + struct i40e_hmc_obj_rxq rx_ctx; + int err = I40E_SUCCESS; + + memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq)); + /* Init the RX queue in hardware */ + rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT; + rx_ctx.hbuff = 0; + rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT; + rx_ctx.qlen = rxq->nb_rx_desc; +#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC + rx_ctx.dsize = 1; +#endif + rx_ctx.dtype = i40e_header_split_none; + rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE; + rx_ctx.rxmax = ETHER_MAX_LEN; + rx_ctx.tphrdesc_ena = 1; + rx_ctx.tphwdesc_ena = 1; + rx_ctx.tphdata_ena = 1; + rx_ctx.tphhead_ena = 1; + rx_ctx.lrxqthresh = 2; + rx_ctx.crcstrip = 0; + rx_ctx.l2tsel = 1; + rx_ctx.showiv = 1; + rx_ctx.prefena = 1; + + err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx); + if (err != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context."); + return err; + } + err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx); + if (err != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context."); + return err; + } + rxq->qrx_tail = hw->hw_addr + + I40E_QRX_TAIL(rxq->vsi->base_queue); + + rte_wmb(); + /* Init the RX tail register. 
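 * Editor's note (assumed rationale, not stated in the original patch):
 * this ring only ever receives flow director programming-status
 * write-backs, so no mbufs are posted here; writing 0 and then
 * nb_rx_desc - 1 hands the whole descriptor ring to the hardware
 * before the queue is switched on.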
*/ + I40E_PCI_REG_WRITE(rxq->qrx_tail, 0); + I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); + + return err; +} + +/* + * i40e_fdir_setup - reserve and initialize the Flow Director resources + * @pf: board private structure + */ +int +i40e_fdir_setup(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_vsi *vsi; + int err = I40E_SUCCESS; + char z_name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *mz = NULL; + struct rte_eth_dev *eth_dev = pf->adapter->eth_dev; + + if ((pf->flags & I40E_FLAG_FDIR) == 0) { + PMD_INIT_LOG(ERR, "HW doesn't support FDIR"); + return I40E_NOT_SUPPORTED; + } + + PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u," + " num_filters_best_effort = %u.", + hw->func_caps.fd_filters_guaranteed, + hw->func_caps.fd_filters_best_effort); + + vsi = pf->fdir.fdir_vsi; + if (vsi) { + PMD_DRV_LOG(INFO, "FDIR initialization has been done."); + return I40E_SUCCESS; + } + /* make new FDIR VSI */ + vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0); + if (!vsi) { + PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI."); + return I40E_ERR_NO_AVAILABLE_VSI; + } + pf->fdir.fdir_vsi = vsi; + + /*Fdir tx queue setup*/ + err = i40e_fdir_setup_tx_resources(pf); + if (err) { + PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources."); + goto fail_setup_tx; + } + + /*Fdir rx queue setup*/ + err = i40e_fdir_setup_rx_resources(pf); + if (err) { + PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources."); + goto fail_setup_rx; + } + + err = i40e_tx_queue_init(pf->fdir.txq); + if (err) { + PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization."); + goto fail_mem; + } + + /* need switch on before dev start*/ + err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE); + if (err) { + PMD_DRV_LOG(ERR, "Failed to do fdir TX switch on."); + goto fail_mem; + } + + /* Init the rx queue in hardware */ + err = i40e_fdir_rx_queue_init(pf->fdir.rxq); + if (err) { + PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization."); + goto fail_mem; + } + + /* switch on rx queue */ + err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE); + if (err) { + PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on."); + goto fail_mem; + } + + /* reserve memory for the fdir programming packet */ + snprintf(z_name, sizeof(z_name), "%s_%s_%d", + eth_dev->driver->pci_drv.name, + I40E_FDIR_MZ_NAME, + eth_dev->data->port_id); + mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY); + if (!mz) { + PMD_DRV_LOG(ERR, "Cannot init memzone for " + "flow director program packet."); + err = I40E_ERR_NO_MEMORY; + goto fail_mem; + } + pf->fdir.prg_pkt = mz->addr; + pf->fdir.dma_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr); + + pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id); + PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.", + vsi->base_queue); + return I40E_SUCCESS; + +fail_mem: + i40e_dev_rx_queue_release(pf->fdir.rxq); + pf->fdir.rxq = NULL; +fail_setup_rx: + i40e_dev_tx_queue_release(pf->fdir.txq); + pf->fdir.txq = NULL; +fail_setup_tx: + i40e_vsi_release(vsi); + pf->fdir.fdir_vsi = NULL; + return err; +} + +/* + * i40e_fdir_teardown - release the Flow Director resources + * @pf: board private structure + */ +void +i40e_fdir_teardown(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_vsi *vsi; + + vsi = pf->fdir.fdir_vsi; + if (!vsi) + return; + i40e_switch_tx_queue(hw, vsi->base_queue, FALSE); + i40e_switch_rx_queue(hw, vsi->base_queue, FALSE); + i40e_dev_rx_queue_release(pf->fdir.rxq); + pf->fdir.rxq = 
NULL; + i40e_dev_tx_queue_release(pf->fdir.txq); + pf->fdir.txq = NULL; + i40e_vsi_release(vsi); + pf->fdir.fdir_vsi = NULL; +} + +/* check whether the flow director table in empty */ +static inline int +i40e_fdir_empty(struct i40e_hw *hw) +{ + uint32_t guarant_cnt, best_cnt; + + guarant_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) & + I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >> + I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT); + best_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) & + I40E_PFQF_FDSTAT_BEST_CNT_MASK) >> + I40E_PFQF_FDSTAT_BEST_CNT_SHIFT); + if (best_cnt + guarant_cnt > 0) + return -1; + + return 0; +} + +/* + * Initialize the configuration about bytes stream extracted as flexible payload + * and mask setting + */ +static inline void +i40e_init_flx_pld(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint8_t pctype; + int i, index; + + /* + * Define the bytes stream extracted as flexible payload in + * field vector. By default, select 8 words from the beginning + * of payload as flexible payload. + */ + for (i = I40E_FLXPLD_L2_IDX; i < I40E_MAX_FLXPLD_LAYER; i++) { + index = i * I40E_MAX_FLXPLD_FIED; + pf->fdir.flex_set[index].src_offset = 0; + pf->fdir.flex_set[index].size = I40E_FDIR_MAX_FLEXWORD_NUM; + pf->fdir.flex_set[index].dst_offset = 0; + I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(index), 0x0000C900); + I40E_WRITE_REG(hw, + I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29);/*non-used*/ + I40E_WRITE_REG(hw, + I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A);/*non-used*/ + } + + /* initialize the masks */ + for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) { + if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)pctype)) + continue; + pf->fdir.flex_mask[pctype].word_mask = 0; + i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0); + for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) { + pf->fdir.flex_mask[pctype].bitmask[i].offset = 0; + pf->fdir.flex_mask[pctype].bitmask[i].mask = 0; + i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), 0); + } + } +} + +#define I40E_WORD(hi, lo) (uint16_t)((((hi) << 8) & 0xFF00) | ((lo) & 0xFF)) + +#define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \ + if ((flex_pit2).src_offset < \ + (flex_pit1).src_offset + (flex_pit1).size) { \ + PMD_DRV_LOG(ERR, "src_offset should be not" \ + " less than than previous offset" \ + " + previous FSIZE."); \ + return -EINVAL; \ + } \ +} while (0) + +/* + * i40e_srcoff_to_flx_pit - transform the src_offset into flex_pit structure, + * and the flex_pit will be sorted by it's src_offset value + */ +static inline uint16_t +i40e_srcoff_to_flx_pit(const uint16_t *src_offset, + struct i40e_fdir_flex_pit *flex_pit) +{ + uint16_t src_tmp, size, num = 0; + uint16_t i, k, j = 0; + + while (j < I40E_FDIR_MAX_FLEX_LEN) { + size = 1; + for (; j < I40E_FDIR_MAX_FLEX_LEN - 1; j++) { + if (src_offset[j + 1] == src_offset[j] + 1) + size++; + else + break; + } + src_tmp = src_offset[j] + 1 - size; + /* the flex_pit need to be sort by src_offset */ + for (i = 0; i < num; i++) { + if (src_tmp < flex_pit[i].src_offset) + break; + } + /* if insert required, move backward */ + for (k = num; k > i; k--) + flex_pit[k] = flex_pit[k - 1]; + /* insert */ + flex_pit[i].dst_offset = j + 1 - size; + flex_pit[i].src_offset = src_tmp; + flex_pit[i].size = size; + j++; + num++; + } + return num; +} + +/* i40e_check_fdir_flex_payload -check flex payload configuration arguments */ +static inline int +i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg) +{ + 
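/*
 * Editor's note (worked example, not part of the original patch): the
 * i40e_srcoff_to_flx_pit() helper used below groups the 16 per-byte
 * src_offset[] entries into contiguous runs sorted by source offset.
 * If, for instance, the application asks for payload bytes 4..7
 * followed by bytes 20..31, the helper yields two runs:
 *   { src_offset = 4,  size = 4,  dst_offset = 0 } and
 *   { src_offset = 20, size = 12, dst_offset = 4 }.
 * The checks below then require every run to be word aligned (even
 * source offset, size and destination offset) and non-overlapping.
 */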
struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN]; + uint16_t num, i; + + for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) { + if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) { + PMD_DRV_LOG(ERR, "exceeds maxmial payload limit."); + return -EINVAL; + } + } + + memset(flex_pit, 0, sizeof(flex_pit)); + num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit); + if (num > I40E_MAX_FLXPLD_FIED) { + PMD_DRV_LOG(ERR, "exceeds maxmial number of flex fields."); + return -EINVAL; + } + for (i = 0; i < num; i++) { + if (flex_pit[i].size & 0x01 || flex_pit[i].dst_offset & 0x01 || + flex_pit[i].src_offset & 0x01) { + PMD_DRV_LOG(ERR, "flexpayload should be measured" + " in word"); + return -EINVAL; + } + if (i != num - 1) + I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]); + } + return 0; +} + +/* + * i40e_check_fdir_flex_conf -check if the flex payload and mask configuration + * arguments are valid + */ +static int +i40e_check_fdir_flex_conf(const struct rte_eth_fdir_flex_conf *conf) +{ + const struct rte_eth_flex_payload_cfg *flex_cfg; + const struct rte_eth_fdir_flex_mask *flex_mask; + uint16_t mask_tmp; + uint8_t nb_bitmask; + uint16_t i, j; + int ret = 0; + + if (conf == NULL) { + PMD_DRV_LOG(INFO, "NULL pointer."); + return -EINVAL; + } + /* check flexible payload setting configuration */ + if (conf->nb_payloads > RTE_ETH_L4_PAYLOAD) { + PMD_DRV_LOG(ERR, "invalid number of payload setting."); + return -EINVAL; + } + for (i = 0; i < conf->nb_payloads; i++) { + flex_cfg = &conf->flex_set[i]; + if (flex_cfg->type > RTE_ETH_L4_PAYLOAD) { + PMD_DRV_LOG(ERR, "invalid payload type."); + return -EINVAL; + } + ret = i40e_check_fdir_flex_payload(flex_cfg); + if (ret < 0) { + PMD_DRV_LOG(ERR, "invalid flex payload arguments."); + return -EINVAL; + } + } + + /* check flex mask setting configuration */ + if (conf->nb_flexmasks >= RTE_ETH_FLOW_MAX) { + PMD_DRV_LOG(ERR, "invalid number of flex masks."); + return -EINVAL; + } + for (i = 0; i < conf->nb_flexmasks; i++) { + flex_mask = &conf->flex_mask[i]; + if (!I40E_VALID_FLOW(flex_mask->flow_type)) { + PMD_DRV_LOG(WARNING, "invalid flow type."); + return -EINVAL; + } + nb_bitmask = 0; + for (j = 0; j < I40E_FDIR_MAX_FLEX_LEN; j += sizeof(uint16_t)) { + mask_tmp = I40E_WORD(flex_mask->mask[j], + flex_mask->mask[j + 1]); + if (mask_tmp != 0x0 && mask_tmp != UINT16_MAX) { + nb_bitmask++; + if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD) { + PMD_DRV_LOG(ERR, " exceed maximal" + " number of bitmasks."); + return -EINVAL; + } + } + } + } + return 0; +} + +/* + * i40e_set_flx_pld_cfg -configure the rule how bytes stream is extracted as flexible payload + * @pf: board private structure + * @cfg: the rule how bytes stream is extracted as flexible payload + */ +static void +i40e_set_flx_pld_cfg(struct i40e_pf *pf, + const struct rte_eth_flex_payload_cfg *cfg) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED]; + uint32_t flx_pit; + uint16_t num, min_next_off; /* in words */ + uint8_t field_idx = 0; + uint8_t layer_idx = 0; + uint16_t i; + + if (cfg->type == RTE_ETH_L2_PAYLOAD) + layer_idx = I40E_FLXPLD_L2_IDX; + else if (cfg->type == RTE_ETH_L3_PAYLOAD) + layer_idx = I40E_FLXPLD_L3_IDX; + else if (cfg->type == RTE_ETH_L4_PAYLOAD) + layer_idx = I40E_FLXPLD_L4_IDX; + + memset(flex_pit, 0, sizeof(flex_pit)); + num = i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit); + + for (i = 0; i < RTE_MIN(num, RTE_DIM(flex_pit)); i++) { + field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i; + /* record the info in 
fdir structure */ + pf->fdir.flex_set[field_idx].src_offset = + flex_pit[i].src_offset / sizeof(uint16_t); + pf->fdir.flex_set[field_idx].size = + flex_pit[i].size / sizeof(uint16_t); + pf->fdir.flex_set[field_idx].dst_offset = + flex_pit[i].dst_offset / sizeof(uint16_t); + flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset, + pf->fdir.flex_set[field_idx].size, + pf->fdir.flex_set[field_idx].dst_offset); + + I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit); + } + min_next_off = pf->fdir.flex_set[field_idx].src_offset + + pf->fdir.flex_set[field_idx].size; + + for (; i < I40E_MAX_FLXPLD_FIED; i++) { + /* set the non-used register obeying register's constrain */ + flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE, + NONUSE_FLX_PIT_DEST_OFF); + I40E_WRITE_REG(hw, + I40E_PRTQF_FLX_PIT(layer_idx * I40E_MAX_FLXPLD_FIED + i), + flx_pit); + min_next_off++; + } +} + +/* + * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload + * @pf: board private structure + * @pctype: packet classify type + * @flex_masks: mask for flexible payload + */ +static void +i40e_set_flex_mask_on_pctype(struct i40e_pf *pf, + enum i40e_filter_pctype pctype, + const struct rte_eth_fdir_flex_mask *mask_cfg) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_fdir_flex_mask *flex_mask; + uint32_t flxinset, fd_mask; + uint16_t mask_tmp; + uint8_t i, nb_bitmask = 0; + + flex_mask = &pf->fdir.flex_mask[pctype]; + memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask)); + for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) { + mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]); + if (mask_tmp != 0x0) { + flex_mask->word_mask |= + I40E_FLEX_WORD_MASK(i / sizeof(uint16_t)); + if (mask_tmp != UINT16_MAX) { + /* set bit mask */ + flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp; + flex_mask->bitmask[nb_bitmask].offset = + i / sizeof(uint16_t); + nb_bitmask++; + } + } + } + /* write mask to hw */ + flxinset = (flex_mask->word_mask << + I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) & + I40E_PRTQF_FD_FLXINSET_INSET_MASK; + i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset); + + for (i = 0; i < nb_bitmask; i++) { + fd_mask = (flex_mask->bitmask[i].mask << + I40E_PRTQF_FD_MSK_MASK_SHIFT) & + I40E_PRTQF_FD_MSK_MASK_MASK; + fd_mask |= ((flex_mask->bitmask[i].offset + + I40E_FLX_OFFSET_IN_FIELD_VECTOR) << + I40E_PRTQF_FD_MSK_OFFSET_SHIFT) & + I40E_PRTQF_FD_MSK_OFFSET_MASK; + i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask); + } +} + +/* + * Configure flow director related setting + */ +int +i40e_fdir_configure(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_fdir_flex_conf *conf; + enum i40e_filter_pctype pctype; + uint32_t val; + uint8_t i; + int ret = 0; + + /* + * configuration need to be done before + * flow director filters are added + * If filters exist, flush them. 
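 *
 * Editor's note (illustrative usage sketch, not part of the original
 * patch; it assumes the ethdev filter API of this DPDK release): this
 * path is driven from dev_conf and the filter-ctrl API, roughly as
 * follows on the application side:
 *
 *   struct rte_eth_conf conf = { 0 };
 *   conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *   rte_eth_dev_start(port_id);
 *
 *   struct rte_eth_fdir_filter f = { 0 };
 *   f.soft_id = 1;
 *   f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
 *   f.input.flow.udp4_flow.ip.dst_ip = rte_cpu_to_be_32(IPv4(192, 0, 2, 1));
 *   f.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(4789);
 *   f.action.rx_queue = 3;
 *   f.action.behavior = RTE_ETH_FDIR_ACCEPT;
 *   f.action.report_status = RTE_ETH_FDIR_REPORT_ID;
 *   rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *                           RTE_ETH_FILTER_ADD, &f);
 *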
+ */ + if (i40e_fdir_empty(hw) < 0) { + ret = i40e_fdir_flush(dev); + if (ret) { + PMD_DRV_LOG(ERR, "failed to flush fdir table."); + return ret; + } + } + + /* enable FDIR filter */ + val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); + val |= I40E_PFQF_CTL_0_FD_ENA_MASK; + i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); + + i40e_init_flx_pld(pf); /* set flex config to default value */ + + conf = &dev->data->dev_conf.fdir_conf.flex_conf; + ret = i40e_check_fdir_flex_conf(conf); + if (ret < 0) { + PMD_DRV_LOG(ERR, " invalid configuration arguments."); + return -EINVAL; + } + /* configure flex payload */ + for (i = 0; i < conf->nb_payloads; i++) + i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]); + /* configure flex mask*/ + for (i = 0; i < conf->nb_flexmasks; i++) { + pctype = i40e_flowtype_to_pctype(conf->flex_mask[i].flow_type); + i40e_set_flex_mask_on_pctype(pf, pctype, &conf->flex_mask[i]); + } + + return ret; +} + +static inline int +i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input, + unsigned char *raw_pkt, + bool vlan) +{ + static uint8_t vlan_frame[] = {0x81, 0, 0, 0}; + uint16_t *ether_type; + uint8_t len = 2 * sizeof(struct ether_addr); + struct ipv4_hdr *ip; + struct ipv6_hdr *ip6; + static const uint8_t next_proto[] = { + [RTE_ETH_FLOW_FRAG_IPV4] = IPPROTO_IP, + [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP, + [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP, + [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = IPPROTO_SCTP, + [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = IPPROTO_IP, + [RTE_ETH_FLOW_FRAG_IPV6] = IPPROTO_NONE, + [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP, + [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP, + [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = IPPROTO_SCTP, + [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = IPPROTO_NONE, + }; + + raw_pkt += 2 * sizeof(struct ether_addr); + if (vlan && fdir_input->flow_ext.vlan_tci) { + rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame)); + rte_memcpy(raw_pkt + sizeof(uint16_t), + &fdir_input->flow_ext.vlan_tci, + sizeof(uint16_t)); + raw_pkt += sizeof(vlan_frame); + len += sizeof(vlan_frame); + } + ether_type = (uint16_t *)raw_pkt; + raw_pkt += sizeof(uint16_t); + len += sizeof(uint16_t); + + switch (fdir_input->flow_type) { + case RTE_ETH_FLOW_L2_PAYLOAD: + *ether_type = fdir_input->flow.l2_flow.ether_type; + break; + case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: + case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: + case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP: + case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: + case RTE_ETH_FLOW_FRAG_IPV4: + ip = (struct ipv4_hdr *)raw_pkt; + + *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4); + ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL; + /* set len to by default */ + ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN); + ip->next_proto_id = fdir_input->flow.ip4_flow.proto ? + fdir_input->flow.ip4_flow.proto : + next_proto[fdir_input->flow_type]; + ip->time_to_live = fdir_input->flow.ip4_flow.ttl ? + fdir_input->flow.ip4_flow.ttl : + I40E_FDIR_IP_DEFAULT_TTL; + ip->type_of_service = fdir_input->flow.ip4_flow.tos; + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. 
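 * For example (editor's illustration): a filter input with
 * src_ip = 10.0.0.1 and dst_ip = 10.0.0.2 is programmed with a packet
 * whose src_addr is 10.0.0.2 and whose dst_addr is 10.0.0.1.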
+ */ + ip->src_addr = fdir_input->flow.ip4_flow.dst_ip; + ip->dst_addr = fdir_input->flow.ip4_flow.src_ip; + len += sizeof(struct ipv4_hdr); + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: + case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: + case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP: + case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: + case RTE_ETH_FLOW_FRAG_IPV6: + ip6 = (struct ipv6_hdr *)raw_pkt; + + *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6); + ip6->vtc_flow = + rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW | + (fdir_input->flow.ipv6_flow.tc << + I40E_FDIR_IPv6_TC_OFFSET)); + ip6->payload_len = + rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN); + ip6->proto = fdir_input->flow.ipv6_flow.proto ? + fdir_input->flow.ipv6_flow.proto : + next_proto[fdir_input->flow_type]; + ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ? + fdir_input->flow.ipv6_flow.hop_limits : + I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS; + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + rte_memcpy(&(ip6->src_addr), + &(fdir_input->flow.ipv6_flow.dst_ip), + IPV6_ADDR_LEN); + rte_memcpy(&(ip6->dst_addr), + &(fdir_input->flow.ipv6_flow.src_ip), + IPV6_ADDR_LEN); + len += sizeof(struct ipv6_hdr); + break; + default: + PMD_DRV_LOG(ERR, "unknown flow type %u.", + fdir_input->flow_type); + return -1; + } + return len; +} + + +/* + * i40e_fdir_construct_pkt - construct packet based on fields in input + * @pf: board private structure + * @fdir_input: input set of the flow director entry + * @raw_pkt: a packet to be constructed + */ +static int +i40e_fdir_construct_pkt(struct i40e_pf *pf, + const struct rte_eth_fdir_input *fdir_input, + unsigned char *raw_pkt) +{ + unsigned char *payload, *ptr; + struct udp_hdr *udp; + struct tcp_hdr *tcp; + struct sctp_hdr *sctp; + uint8_t size, dst = 0; + uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/ + int len; + + /* fill the ethernet and IP head */ + len = i40e_fdir_fill_eth_ip_head(fdir_input, raw_pkt, + !!fdir_input->flow_ext.vlan_tci); + if (len < 0) + return -EINVAL; + + /* fill the L4 head */ + switch (fdir_input->flow_type) { + case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: + udp = (struct udp_hdr *)(raw_pkt + len); + payload = (unsigned char *)udp + sizeof(struct udp_hdr); + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + udp->src_port = fdir_input->flow.udp4_flow.dst_port; + udp->dst_port = fdir_input->flow.udp4_flow.src_port; + udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN); + break; + + case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: + tcp = (struct tcp_hdr *)(raw_pkt + len); + payload = (unsigned char *)tcp + sizeof(struct tcp_hdr); + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + tcp->src_port = fdir_input->flow.tcp4_flow.dst_port; + tcp->dst_port = fdir_input->flow.tcp4_flow.src_port; + tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF; + break; + + case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP: + sctp = (struct sctp_hdr *)(raw_pkt + len); + payload = (unsigned char *)sctp + sizeof(struct sctp_hdr); + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. 
+ */ + sctp->src_port = fdir_input->flow.sctp4_flow.dst_port; + sctp->dst_port = fdir_input->flow.sctp4_flow.src_port; + sctp->tag = fdir_input->flow.sctp4_flow.verify_tag; + break; + + case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: + case RTE_ETH_FLOW_FRAG_IPV4: + payload = raw_pkt + len; + set_idx = I40E_FLXPLD_L3_IDX; + break; + + case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: + udp = (struct udp_hdr *)(raw_pkt + len); + payload = (unsigned char *)udp + sizeof(struct udp_hdr); + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + udp->src_port = fdir_input->flow.udp6_flow.dst_port; + udp->dst_port = fdir_input->flow.udp6_flow.src_port; + udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN); + break; + + case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: + tcp = (struct tcp_hdr *)(raw_pkt + len); + payload = (unsigned char *)tcp + sizeof(struct tcp_hdr); + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF; + tcp->src_port = fdir_input->flow.udp6_flow.dst_port; + tcp->dst_port = fdir_input->flow.udp6_flow.src_port; + break; + + case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP: + sctp = (struct sctp_hdr *)(raw_pkt + len); + payload = (unsigned char *)sctp + sizeof(struct sctp_hdr); + /* + * The source and destination fields in the transmitted packet + * need to be presented in a reversed order with respect + * to the expected received packets. + */ + sctp->src_port = fdir_input->flow.sctp6_flow.dst_port; + sctp->dst_port = fdir_input->flow.sctp6_flow.src_port; + sctp->tag = fdir_input->flow.sctp6_flow.verify_tag; + break; + + case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: + case RTE_ETH_FLOW_FRAG_IPV6: + payload = raw_pkt + len; + set_idx = I40E_FLXPLD_L3_IDX; + break; + case RTE_ETH_FLOW_L2_PAYLOAD: + payload = raw_pkt + len; + /* + * ARP packet is a special case on which the payload + * starts after the whole ARP header + */ + if (fdir_input->flow.l2_flow.ether_type == + rte_cpu_to_be_16(ETHER_TYPE_ARP)) + payload += sizeof(struct arp_hdr); + set_idx = I40E_FLXPLD_L2_IDX; + break; + default: + PMD_DRV_LOG(ERR, "unknown flow type %u.", fdir_input->flow_type); + return -EINVAL; + } + + /* fill the flexbytes to payload */ + for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) { + pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i; + size = pf->fdir.flex_set[pit_idx].size; + if (size == 0) + continue; + dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t); + ptr = payload + + pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t); + (void)rte_memcpy(ptr, + &fdir_input->flow_ext.flexbytes[dst], + size * sizeof(uint16_t)); + } + + return 0; +} + +/* Construct the tx flags */ +static inline uint64_t +i40e_build_ctob(uint32_t td_cmd, + uint32_t td_offset, + unsigned int size, + uint32_t td_tag) +{ + return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA | + ((uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT) | + ((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) | + ((uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | + ((uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); +} + +/* + * check the programming status descriptor in rx queue. 
+ * done after Programming Flow Director is programmed on + * tx queue + */ +static inline int +i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq) +{ + volatile union i40e_rx_desc *rxdp; + uint64_t qword1; + uint32_t rx_status; + uint32_t len, id; + uint32_t error; + int ret = 0; + + rxdp = &rxq->rx_ring[rxq->rx_tail]; + qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len); + rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) + >> I40E_RXD_QW1_STATUS_SHIFT; + + if (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) { + len = qword1 >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT; + id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> + I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; + + if (len == I40E_RX_PROG_STATUS_DESC_LENGTH && + id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) { + error = (qword1 & + I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >> + I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; + if (error == (0x1 << + I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) { + PMD_DRV_LOG(ERR, "Failed to add FDIR filter" + " (FD_ID %u): programming status" + " reported.", + rxdp->wb.qword0.hi_dword.fd_id); + ret = -1; + } else if (error == (0x1 << + I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { + PMD_DRV_LOG(ERR, "Failed to delete FDIR filter" + " (FD_ID %u): programming status" + " reported.", + rxdp->wb.qword0.hi_dword.fd_id); + ret = -1; + } else + PMD_DRV_LOG(ERR, "invalid programming status" + " reported, error = %u.", error); + } else + PMD_DRV_LOG(ERR, "unknown programming status" + " reported, len = %d, id = %u.", len, id); + rxdp->wb.qword1.status_error_len = 0; + rxq->rx_tail++; + if (unlikely(rxq->rx_tail == rxq->nb_rx_desc)) + rxq->rx_tail = 0; + } + return ret; +} + +/* + * i40e_add_del_fdir_filter - add or remove a flow director filter. + * @pf: board private structure + * @filter: fdir filter entry + * @add: 0 - delete, 1 - add + */ +static int +i40e_add_del_fdir_filter(struct rte_eth_dev *dev, + const struct rte_eth_fdir_filter *filter, + bool add) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt; + enum i40e_filter_pctype pctype; + int ret = 0; + + if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) { + PMD_DRV_LOG(ERR, "FDIR is not enabled, please" + " check the mode in fdir_conf."); + return -ENOTSUP; + } + + if (!I40E_VALID_FLOW(filter->input.flow_type)) { + PMD_DRV_LOG(ERR, "invalid flow_type input."); + return -EINVAL; + } + if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) { + PMD_DRV_LOG(ERR, "Invalid queue ID"); + return -EINVAL; + } + if (filter->input.flow_ext.is_vf && + filter->input.flow_ext.dst_id >= pf->vf_num) { + PMD_DRV_LOG(ERR, "Invalid VF ID"); + return -EINVAL; + } + + memset(pkt, 0, I40E_FDIR_PKT_LEN); + + ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt); + if (ret < 0) { + PMD_DRV_LOG(ERR, "construct packet for fdir fails."); + return ret; + } + pctype = i40e_flowtype_to_pctype(filter->input.flow_type); + ret = i40e_fdir_filter_programming(pf, pctype, filter, add); + if (ret < 0) { + PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).", + pctype); + return ret; + } + return ret; +} + +/* + * i40e_fdir_filter_programming - Program a flow director filter rule. + * Is done by Flow Director Programming Descriptor followed by packet + * structure that contains the filter fields need to match. 
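 *
 * Editor's note (summary of the flow below, added for clarity): two TX
 * descriptors are consumed per request: a filter-program descriptor
 * (fdirdp) at tx_tail describing the match and action, followed by a
 * data descriptor (txdp) pointing at the pre-built programming packet
 * in pf->fdir.prg_pkt. The tail is advanced by two, the DD bit is
 * polled for up to I40E_FDIR_WAIT_COUNT intervals, and the programming
 * status write-back is then checked on the dedicated FDIR RX queue.
 *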
+ * @pf: board private structure + * @pctype: pctype + * @filter: fdir filter entry + * @add: 0 - delete, 1 - add + */ +static int +i40e_fdir_filter_programming(struct i40e_pf *pf, + enum i40e_filter_pctype pctype, + const struct rte_eth_fdir_filter *filter, + bool add) +{ + struct i40e_tx_queue *txq = pf->fdir.txq; + struct i40e_rx_queue *rxq = pf->fdir.rxq; + const struct rte_eth_fdir_action *fdir_action = &filter->action; + volatile struct i40e_tx_desc *txdp; + volatile struct i40e_filter_program_desc *fdirdp; + uint32_t td_cmd; + uint16_t vsi_id, i; + uint8_t dest; + + PMD_DRV_LOG(INFO, "filling filter programming descriptor."); + fdirdp = (volatile struct i40e_filter_program_desc *) + (&(txq->tx_ring[txq->tx_tail])); + + fdirdp->qindex_flex_ptype_vsi = + rte_cpu_to_le_32((fdir_action->rx_queue << + I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & + I40E_TXD_FLTR_QW0_QINDEX_MASK); + + fdirdp->qindex_flex_ptype_vsi |= + rte_cpu_to_le_32((fdir_action->flex_off << + I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) & + I40E_TXD_FLTR_QW0_FLEXOFF_MASK); + + fdirdp->qindex_flex_ptype_vsi |= + rte_cpu_to_le_32((pctype << + I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) & + I40E_TXD_FLTR_QW0_PCTYPE_MASK); + + if (filter->input.flow_ext.is_vf) + vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id; + else + /* Use LAN VSI Id by default */ + vsi_id = pf->main_vsi->vsi_id; + fdirdp->qindex_flex_ptype_vsi |= + rte_cpu_to_le_32(((uint32_t)vsi_id << + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) & + I40E_TXD_FLTR_QW0_DEST_VSI_MASK); + + fdirdp->dtype_cmd_cntindex = + rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG); + + if (add) + fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32( + I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << + I40E_TXD_FLTR_QW1_PCMD_SHIFT); + else + fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32( + I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << + I40E_TXD_FLTR_QW1_PCMD_SHIFT); + + if (fdir_action->behavior == RTE_ETH_FDIR_REJECT) + dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET; + else if (fdir_action->behavior == RTE_ETH_FDIR_ACCEPT) + dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX; + else if (fdir_action->behavior == RTE_ETH_FDIR_PASSTHRU) + dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER; + else { + PMD_DRV_LOG(ERR, "Failed to program FDIR filter:" + " unsupported fdir behavior."); + return -EINVAL; + } + + fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest << + I40E_TXD_FLTR_QW1_DEST_SHIFT) & + I40E_TXD_FLTR_QW1_DEST_MASK); + + fdirdp->dtype_cmd_cntindex |= + rte_cpu_to_le_32((fdir_action->report_status<< + I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) & + I40E_TXD_FLTR_QW1_FD_STATUS_MASK); + + fdirdp->dtype_cmd_cntindex |= + rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK); + fdirdp->dtype_cmd_cntindex |= + rte_cpu_to_le_32((pf->fdir.match_counter_index << + I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & + I40E_TXD_FLTR_QW1_CNTINDEX_MASK); + + fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id); + + PMD_DRV_LOG(INFO, "filling transmit descriptor."); + txdp = &(txq->tx_ring[txq->tx_tail + 1]); + txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr); + td_cmd = I40E_TX_DESC_CMD_EOP | + I40E_TX_DESC_CMD_RS | + I40E_TX_DESC_CMD_DUMMY; + + txdp->cmd_type_offset_bsz = + i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0); + + txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */ + if (txq->tx_tail >= txq->nb_tx_desc) + txq->tx_tail = 0; + /* Update the tx tail register */ + rte_wmb(); + I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); + + for (i = 0; i < I40E_FDIR_WAIT_COUNT; i++) { + rte_delay_us(I40E_FDIR_WAIT_INTERVAL_US); + if 
((txdp->cmd_type_offset_bsz & + rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) == + rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) + break; + } + if (i >= I40E_FDIR_WAIT_COUNT) { + PMD_DRV_LOG(ERR, "Failed to program FDIR filter:" + " time out to get DD on tx queue."); + return -ETIMEDOUT; + } + /* totally delay 10 ms to check programming status*/ + rte_delay_us((I40E_FDIR_WAIT_COUNT - i) * I40E_FDIR_WAIT_INTERVAL_US); + if (i40e_check_fdir_programming_status(rxq) < 0) { + PMD_DRV_LOG(ERR, "Failed to program FDIR filter:" + " programming status reported."); + return -ENOSYS; + } + + return 0; +} + +/* + * i40e_fdir_flush - clear all filters of Flow Director table + * @pf: board private structure + */ +static int +i40e_fdir_flush(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint32_t reg; + uint16_t guarant_cnt, best_cnt; + uint16_t i; + + I40E_WRITE_REG(hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK); + I40E_WRITE_FLUSH(hw); + + for (i = 0; i < I40E_FDIR_FLUSH_RETRY; i++) { + rte_delay_ms(I40E_FDIR_FLUSH_INTERVAL_MS); + reg = I40E_READ_REG(hw, I40E_PFQF_CTL_1); + if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK)) + break; + } + if (i >= I40E_FDIR_FLUSH_RETRY) { + PMD_DRV_LOG(ERR, "FD table did not flush, may need more time."); + return -ETIMEDOUT; + } + guarant_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) & + I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >> + I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT); + best_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) & + I40E_PFQF_FDSTAT_BEST_CNT_MASK) >> + I40E_PFQF_FDSTAT_BEST_CNT_SHIFT); + if (guarant_cnt != 0 || best_cnt != 0) { + PMD_DRV_LOG(ERR, "Failed to flush FD table."); + return -ENOSYS; + } else + PMD_DRV_LOG(INFO, "FD table Flush success."); + return 0; +} + +static inline void +i40e_fdir_info_get_flex_set(struct i40e_pf *pf, + struct rte_eth_flex_payload_cfg *flex_set, + uint16_t *num) +{ + struct i40e_fdir_flex_pit *flex_pit; + struct rte_eth_flex_payload_cfg *ptr = flex_set; + uint16_t src, dst, size, j, k; + uint8_t i, layer_idx; + + for (layer_idx = I40E_FLXPLD_L2_IDX; + layer_idx <= I40E_FLXPLD_L4_IDX; + layer_idx++) { + if (layer_idx == I40E_FLXPLD_L2_IDX) + ptr->type = RTE_ETH_L2_PAYLOAD; + else if (layer_idx == I40E_FLXPLD_L3_IDX) + ptr->type = RTE_ETH_L3_PAYLOAD; + else if (layer_idx == I40E_FLXPLD_L4_IDX) + ptr->type = RTE_ETH_L4_PAYLOAD; + + for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) { + flex_pit = &pf->fdir.flex_set[layer_idx * + I40E_MAX_FLXPLD_FIED + i]; + if (flex_pit->size == 0) + continue; + src = flex_pit->src_offset * sizeof(uint16_t); + dst = flex_pit->dst_offset * sizeof(uint16_t); + size = flex_pit->size * sizeof(uint16_t); + for (j = src, k = dst; j < src + size; j++, k++) + ptr->src_offset[k] = j; + } + (*num)++; + ptr++; + } +} + +static inline void +i40e_fdir_info_get_flex_mask(struct i40e_pf *pf, + struct rte_eth_fdir_flex_mask *flex_mask, + uint16_t *num) +{ + struct i40e_fdir_flex_mask *mask; + struct rte_eth_fdir_flex_mask *ptr = flex_mask; + uint16_t flow_type; + uint8_t i, j; + uint16_t off_bytes, mask_tmp; + + for (i = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + i <= I40E_FILTER_PCTYPE_L2_PAYLOAD; + i++) { + mask = &pf->fdir.flex_mask[i]; + if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)i)) + continue; + flow_type = i40e_pctype_to_flowtype((enum i40e_filter_pctype)i); + for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) { + if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) { + ptr->mask[j * sizeof(uint16_t)] = UINT8_MAX; + 
ptr->mask[j * sizeof(uint16_t) + 1] = UINT8_MAX; + } else { + ptr->mask[j * sizeof(uint16_t)] = 0x0; + ptr->mask[j * sizeof(uint16_t) + 1] = 0x0; + } + } + for (j = 0; j < I40E_FDIR_BITMASK_NUM_WORD; j++) { + off_bytes = mask->bitmask[j].offset * sizeof(uint16_t); + mask_tmp = ~mask->bitmask[j].mask; + ptr->mask[off_bytes] &= I40E_HI_BYTE(mask_tmp); + ptr->mask[off_bytes + 1] &= I40E_LO_BYTE(mask_tmp); + } + ptr->flow_type = flow_type; + ptr++; + (*num)++; + } +} + +/* + * i40e_fdir_info_get - get information of Flow Director + * @pf: ethernet device to get info from + * @fdir: a pointer to a structure of type *rte_eth_fdir_info* to be filled with + * the flow director information. + */ +static void +i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint16_t num_flex_set = 0; + uint16_t num_flex_mask = 0; + + if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) + fdir->mode = RTE_FDIR_MODE_PERFECT; + else + fdir->mode = RTE_FDIR_MODE_NONE; + + fdir->guarant_spc = + (uint32_t)hw->func_caps.fd_filters_guaranteed; + fdir->best_spc = + (uint32_t)hw->func_caps.fd_filters_best_effort; + fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN; + fdir->flow_types_mask[0] = I40E_FDIR_FLOWS; + fdir->flex_payload_unit = sizeof(uint16_t); + fdir->flex_bitmask_unit = sizeof(uint16_t); + fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED; + fdir->flex_payload_limit = I40E_MAX_FLX_SOURCE_OFF; + fdir->max_flex_bitmask_num = I40E_FDIR_BITMASK_NUM_WORD; + + i40e_fdir_info_get_flex_set(pf, + fdir->flex_conf.flex_set, + &num_flex_set); + i40e_fdir_info_get_flex_mask(pf, + fdir->flex_conf.flex_mask, + &num_flex_mask); + + fdir->flex_conf.nb_payloads = num_flex_set; + fdir->flex_conf.nb_flexmasks = num_flex_mask; +} + +/* + * i40e_fdir_stat_get - get statistics of Flow Director + * @pf: ethernet device to get info from + * @stat: a pointer to a structure of type *rte_eth_fdir_stats* to be filled with + * the flow director statistics. + */ +static void +i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint32_t fdstat; + + fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT); + stat->guarant_cnt = + (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >> + I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT); + stat->best_cnt = + (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >> + I40E_PFQF_FDSTAT_BEST_CNT_SHIFT); +} + +static int +i40e_fdir_filter_set(struct rte_eth_dev *dev, + struct rte_eth_fdir_filter_info *info) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + int ret = 0; + + if (!info) { + PMD_DRV_LOG(ERR, "Invalid pointer"); + return -EFAULT; + } + + switch (info->info_type) { + case RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT: + ret = i40e_fdir_filter_inset_select(pf, + &(info->info.input_set_conf)); + break; + default: + PMD_DRV_LOG(ERR, "FD filter info type (%d) not supported", + info->info_type); + return -EINVAL; + } + + return ret; +} + +/* + * i40e_fdir_ctrl_func - deal with all operations on flow director. + * @pf: board private structure + * @filter_op:operation will be taken. 
+ * @arg: a pointer to specific structure corresponding to the filter_op + */ +int +i40e_fdir_ctrl_func(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + int ret = 0; + + if ((pf->flags & I40E_FLAG_FDIR) == 0) + return -ENOTSUP; + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH) + return -EINVAL; + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = i40e_add_del_fdir_filter(dev, + (struct rte_eth_fdir_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = i40e_add_del_fdir_filter(dev, + (struct rte_eth_fdir_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_FLUSH: + ret = i40e_fdir_flush(dev); + break; + case RTE_ETH_FILTER_INFO: + i40e_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg); + break; + case RTE_ETH_FILTER_SET: + ret = i40e_fdir_filter_set(dev, + (struct rte_eth_fdir_filter_info *)arg); + break; + case RTE_ETH_FILTER_STATS: + i40e_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unknown operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; +} diff --git a/drivers/net/i40e/i40e_logs.h b/drivers/net/i40e/i40e_logs.h new file mode 100644 index 00000000..e042e242 --- /dev/null +++ b/drivers/net/i40e/i40e_logs.h @@ -0,0 +1,77 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _I40E_LOGS_H_ +#define _I40E_LOGS_H_ + +#define PMD_INIT_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args) + +#ifdef RTE_LIBRTE_I40E_DEBUG_INIT +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") +#else +#define PMD_INIT_FUNC_TRACE() do { } while(0) +#endif + +#ifdef RTE_LIBRTE_I40E_DEBUG_RX +#define PMD_RX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) 
do { } while(0) +#endif + +#ifdef RTE_LIBRTE_I40E_DEBUG_TX +#define PMD_TX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_I40E_DEBUG_TX_FREE +#define PMD_TX_FREE_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER +#define PMD_DRV_LOG_RAW(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args) +#else +#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0) +#endif + +#define PMD_DRV_LOG(level, fmt, args...) \ + PMD_DRV_LOG_RAW(level, fmt "\n", ## args) + +#endif /* _I40E_LOGS_H_ */ diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c new file mode 100644 index 00000000..5afd61a0 --- /dev/null +++ b/drivers/net/i40e/i40e_pf.c @@ -0,0 +1,1097 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/queue.h> +#include <stdio.h> +#include <errno.h> +#include <stdint.h> +#include <string.h> +#include <unistd.h> +#include <stdarg.h> +#include <inttypes.h> + +#include <rte_string_fns.h> +#include <rte_pci.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_memzone.h> +#include <rte_malloc.h> +#include <rte_memcpy.h> + +#include "i40e_logs.h" +#include "base/i40e_prototype.h" +#include "base/i40e_adminq_cmd.h" +#include "base/i40e_type.h" +#include "i40e_ethdev.h" +#include "i40e_rxtx.h" +#include "i40e_pf.h" + +#define I40E_CFG_CRCSTRIP_DEFAULT 1 + +static int +i40e_pf_host_switch_queues(struct i40e_pf_vf *vf, + struct i40e_virtchnl_queue_select *qsel, + bool on); + +/** + * Bind PF queues with VSI and VF. 
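 *
 * Editor's note (worked example, not part of the original patch): with
 * nb_qps = 4 and base_queue = 64, the loops below write PF queues
 * 64..67 into VPLAN_QTABLE[0..3] for this VF and pack them pairwise
 * into VSILAN_QTABLE[i] as (q1 = 64 + 2*i, q2 = 65 + 2*i) for i = 0, 1;
 * the remaining pair slots are filled with the QINDEX_0 mask value
 * (presumably marking them unused).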
+ **/ +static int +i40e_pf_vf_queues_mapping(struct i40e_pf_vf *vf) +{ + int i; + struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); + uint16_t vsi_id = vf->vsi->vsi_id; + uint16_t vf_id = vf->vf_idx; + uint16_t nb_qps = vf->vsi->nb_qps; + uint16_t qbase = vf->vsi->base_queue; + uint16_t q1, q2; + uint32_t val; + + /* + * VF should use scatter range queues. So, it needn't + * to set QBASE in this register. + */ + i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vsi_id), + I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); + + /* Set to enable VFLAN_QTABLE[] registers valid */ + I40E_WRITE_REG(hw, I40E_VPLAN_MAPENA(vf_id), + I40E_VPLAN_MAPENA_TXRX_ENA_MASK); + + /* map PF queues to VF */ + for (i = 0; i < nb_qps; i++) { + val = ((qbase + i) & I40E_VPLAN_QTABLE_QINDEX_MASK); + I40E_WRITE_REG(hw, I40E_VPLAN_QTABLE(i, vf_id), val); + } + + /* map PF queues to VSI */ + for (i = 0; i < I40E_MAX_QP_NUM_PER_VF / 2; i++) { + if (2 * i > nb_qps - 1) + q1 = I40E_VSILAN_QTABLE_QINDEX_0_MASK; + else + q1 = qbase + 2 * i; + + if (2 * i + 1 > nb_qps - 1) + q2 = I40E_VSILAN_QTABLE_QINDEX_0_MASK; + else + q2 = qbase + 2 * i + 1; + + val = (q2 << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) + q1; + i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(i, vsi_id), val); + } + I40E_WRITE_FLUSH(hw); + + return I40E_SUCCESS; +} + + +/** + * Proceed VF reset operation. + */ +int +i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset) +{ + uint32_t val, i; + struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); + uint16_t vf_id, abs_vf_id, vf_msix_num; + int ret; + struct i40e_virtchnl_queue_select qsel; + + if (vf == NULL) + return -EINVAL; + + vf_id = vf->vf_idx; + abs_vf_id = vf_id + hw->func_caps.vf_base_id; + + /* Notify VF that we are in VFR progress */ + I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_INPROGRESS); + + /* + * If require a SW VF reset, a VFLR interrupt will be generated, + * this function will be called again. To avoid it, + * disable interrupt first. 
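 *
 * Editor's note (summary of the sequence below, added for clarity):
 * the reset path marks VFR in progress in VFGEN_RSTAT1, optionally
 * triggers a software reset via VPGEN_VFRTRIG and polls VPGEN_VFRSTAT,
 * disables the VF queues and interrupts, releases the old VSI, waits
 * for pending PCI transactions to drain, marks the reset completed,
 * and finally recreates the SR-IOV VSI and redoes the queue mapping.
 *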
+ */ + if (do_hw_reset) { + vf->state = I40E_VF_INRESET; + val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id)); + val |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; + I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val); + I40E_WRITE_FLUSH(hw); + } + +#define VFRESET_MAX_WAIT_CNT 100 + /* Wait until VF reset is done */ + for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) { + rte_delay_us(10); + val = I40E_READ_REG(hw, I40E_VPGEN_VFRSTAT(vf_id)); + if (val & I40E_VPGEN_VFRSTAT_VFRD_MASK) + break; + } + + if (i >= VFRESET_MAX_WAIT_CNT) { + PMD_DRV_LOG(ERR, "VF reset timeout"); + return -ETIMEDOUT; + } + + /* This is not first time to do reset, do cleanup job first */ + if (vf->vsi) { + /* Disable queues */ + memset(&qsel, 0, sizeof(qsel)); + for (i = 0; i < vf->vsi->nb_qps; i++) + qsel.rx_queues |= 1 << i; + qsel.tx_queues = qsel.rx_queues; + ret = i40e_pf_host_switch_queues(vf, &qsel, false); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Disable VF queues failed"); + return -EFAULT; + } + + /* Disable VF interrupt setting */ + vf_msix_num = hw->func_caps.num_msix_vectors_vf; + for (i = 0; i < vf_msix_num; i++) { + if (!i) + val = I40E_VFINT_DYN_CTL0(vf_id); + else + val = I40E_VFINT_DYN_CTLN(((vf_msix_num - 1) * + (vf_id)) + (i - 1)); + I40E_WRITE_REG(hw, val, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); + } + I40E_WRITE_FLUSH(hw); + + /* remove VSI */ + ret = i40e_vsi_release(vf->vsi); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Release VSI failed"); + return -EFAULT; + } + } + +#define I40E_VF_PCI_ADDR 0xAA +#define I40E_VF_PEND_MASK 0x20 + /* Check the pending transactions of this VF */ + /* Use absolute VF id, refer to datasheet for details */ + I40E_WRITE_REG(hw, I40E_PF_PCI_CIAA, I40E_VF_PCI_ADDR | + (abs_vf_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)); + for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) { + rte_delay_us(1); + val = I40E_READ_REG(hw, I40E_PF_PCI_CIAD); + if ((val & I40E_VF_PEND_MASK) == 0) + break; + } + + if (i >= VFRESET_MAX_WAIT_CNT) { + PMD_DRV_LOG(ERR, "Wait VF PCI transaction end timeout"); + return -ETIMEDOUT; + } + + /* Reset done, Set COMPLETE flag and clear reset bit */ + I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_COMPLETED); + val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id)); + val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; + I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val); + vf->reset_cnt++; + I40E_WRITE_FLUSH(hw); + + /* Allocate resource again */ + vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV, + vf->pf->main_vsi, vf->vf_idx); + if (vf->vsi == NULL) { + PMD_DRV_LOG(ERR, "Add vsi failed"); + return -EFAULT; + } + + ret = i40e_pf_vf_queues_mapping(vf); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "queue mapping error"); + i40e_vsi_release(vf->vsi); + return -EFAULT; + } + + return ret; +} + +static int +i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf, + uint32_t opcode, + uint32_t retval, + uint8_t *msg, + uint16_t msglen) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); + uint16_t abs_vf_id = hw->func_caps.vf_base_id + vf->vf_idx; + int ret; + + ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, opcode, retval, + msg, msglen, NULL); + if (ret) { + PMD_INIT_LOG(ERR, "Fail to send message to VF, err %u", + hw->aq.asq_last_status); + } + + return ret; +} + +static void +i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf) +{ + struct i40e_virtchnl_version_info info; + + info.major = I40E_DPDK_VERSION_MAJOR; + info.minor = I40E_DPDK_VERSION_MINOR; + i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION, + I40E_SUCCESS, (uint8_t *)&info, sizeof(info)); +} + +static int 
+static int
+i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf)
+{
+	struct i40e_virtchnl_vf_resource *vf_res = NULL;
+	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+	uint32_t len = 0;
+	int ret = I40E_SUCCESS;
+
+	/* only 1 VSI by default */
+	len = sizeof(struct i40e_virtchnl_vf_resource) +
+		I40E_DEFAULT_VF_VSI_NUM *
+		sizeof(struct i40e_virtchnl_vsi_resource);
+
+	vf_res = rte_zmalloc("i40e_vf_res", len, 0);
+	if (vf_res == NULL) {
+		PMD_DRV_LOG(ERR, "failed to allocate mem");
+		ret = I40E_ERR_NO_MEMORY;
+		vf_res = NULL;
+		len = 0;
+		goto send_msg;
+	}
+
+	vf_res->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+				I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+	vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf;
+	vf_res->num_queue_pairs = vf->vsi->nb_qps;
+	vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM;
+
+	/* Change the setting below if the PF host can support more VSIs per VF */
+	vf_res->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
+	/* As the VF is assumed to have a single VSI now, always return 0 */
+	vf_res->vsi_res[0].vsi_id = 0;
+	vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
+	ether_addr_copy(&vf->mac_addr,
+		(struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);
+
+send_msg:
+	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+					ret, (uint8_t *)vf_res, len);
+	rte_free(vf_res);
+
+	return ret;
+}
+
+static int
+i40e_pf_host_hmc_config_rxq(struct i40e_hw *hw,
+			    struct i40e_pf_vf *vf,
+			    struct i40e_virtchnl_rxq_info *rxq,
+			    uint8_t crcstrip)
+{
+	int err = I40E_SUCCESS;
+	struct i40e_hmc_obj_rxq rx_ctx;
+	uint16_t abs_queue_id = vf->vsi->base_queue + rxq->queue_id;
+
+	/* Clear the context structure first */
+	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+	rx_ctx.dbuff = rxq->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
+	rx_ctx.hbuff = rxq->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
+	rx_ctx.base = rxq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
+	rx_ctx.qlen = rxq->ring_len;
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+	rx_ctx.dsize = 1;
+#endif
+
+	if (rxq->splithdr_enabled) {
+		rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_ALL;
+		rx_ctx.dtype = i40e_header_split_enabled;
+	} else {
+		rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
+		rx_ctx.dtype = i40e_header_split_none;
+	}
+	rx_ctx.rxmax = rxq->max_pkt_size;
+	rx_ctx.tphrdesc_ena = 1;
+	rx_ctx.tphwdesc_ena = 1;
+	rx_ctx.tphdata_ena = 1;
+	rx_ctx.tphhead_ena = 1;
+	rx_ctx.lrxqthresh = 2;
+	rx_ctx.crcstrip = crcstrip;
+	rx_ctx.l2tsel = 1;
+	rx_ctx.prefena = 1;
+
+	err = i40e_clear_lan_rx_queue_context(hw, abs_queue_id);
+	if (err != I40E_SUCCESS)
+		return err;
+	err = i40e_set_lan_rx_queue_context(hw, abs_queue_id, &rx_ctx);
+
+	return err;
+}
+
+static int
+i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
+			    struct i40e_pf_vf *vf,
+			    struct i40e_virtchnl_txq_info *txq)
+{
+	int err = I40E_SUCCESS;
+	struct i40e_hmc_obj_txq tx_ctx;
+	uint32_t qtx_ctl;
+	uint16_t abs_queue_id = vf->vsi->base_queue + txq->queue_id;
+
+	/* clear the context structure first */
+	memset(&tx_ctx, 0, sizeof(tx_ctx));
+	tx_ctx.new_context = 1;
+	tx_ctx.base = txq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
+	tx_ctx.qlen = txq->ring_len;
+	tx_ctx.rdylist = rte_le_to_cpu_16(vf->vsi->info.qs_handle[0]);
+	err = i40e_clear_lan_tx_queue_context(hw, abs_queue_id);
+	if (err != I40E_SUCCESS)
+		return err;
+
+	err = i40e_set_lan_tx_queue_context(hw, abs_queue_id, &tx_ctx);
+	if (err != I40E_SUCCESS)
+		return err;
+
+	/* Bind the queue to the VF function. Since TX/RX queues come in
+	 * pairs, only QTX_CTL needs to be set.
+	 */
+	qtx_ctl = (I40E_QTX_CTL_VF_QUEUE << I40E_QTX_CTL_PFVF_Q_SHIFT) |
+				((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+				I40E_QTX_CTL_PF_INDX_MASK) |
+				(((vf->vf_idx + hw->func_caps.vf_base_id) <<
+				I40E_QTX_CTL_VFVM_INDX_SHIFT) &
+				I40E_QTX_CTL_VFVM_INDX_MASK);
+	I40E_WRITE_REG(hw, I40E_QTX_CTL(abs_queue_id), qtx_ctl);
+	I40E_WRITE_FLUSH(hw);
+
+	return I40E_SUCCESS;
+}
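The HMC helpers above translate the VF-supplied ring parameters into context fields expressed in hardware units: the buffer sizes are right-shifted and the DMA ring base is divided by I40E_QUEUE_BASE_ADDR_UNIT. A minimal sketch of that scaling is shown below; the 128-byte granularities are assumptions for illustration, the real values come from the shared i40e register definitions.

#include <stdint.h>
#include <stdio.h>

/* Assumed granularities (illustrative only). */
#define QUEUE_BASE_ADDR_UNIT 128u /* ring base programmed in 128-byte units */
#define RXQ_CTX_DBUFF_SHIFT  7    /* data buffer size in 128-byte units */

struct rx_ctx_sketch {
	uint64_t base;  /* ring base, in QUEUE_BASE_ADDR_UNIT units */
	uint16_t dbuff; /* data buffer size, in (1 << RXQ_CTX_DBUFF_SHIFT) units */
	uint16_t qlen;  /* ring length in descriptors */
};

/* Scale the raw RX queue parameters the way the HMC context expects them. */
static struct rx_ctx_sketch
scale_rxq(uint64_t dma_ring_addr, uint16_t databuffer_size, uint16_t ring_len)
{
	struct rx_ctx_sketch ctx = {
		.base  = dma_ring_addr / QUEUE_BASE_ADDR_UNIT,
		.dbuff = (uint16_t)(databuffer_size >> RXQ_CTX_DBUFF_SHIFT),
		.qlen  = ring_len,
	};
	return ctx;
}

int
main(void)
{
	/* 2048-byte buffers, 512-entry ring at a 4 KiB-aligned address. */
	struct rx_ctx_sketch ctx = scale_rxq(0x100000, 2048, 512);

	printf("base=%llu dbuff=%u qlen=%u\n",
	       (unsigned long long)ctx.base, ctx.dbuff, ctx.qlen);
	return 0;
}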
+static int
+i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
+					    uint8_t *msg,
+					    uint16_t msglen)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+	struct i40e_vsi *vsi = vf->vsi;
+	struct i40e_virtchnl_vsi_queue_config_info *vc_vqci =
+		(struct i40e_virtchnl_vsi_queue_config_info *)msg;
+	struct i40e_virtchnl_queue_pair_info *vc_qpi;
+	int i, ret = I40E_SUCCESS;
+
+	if (!msg || vc_vqci->num_queue_pairs > vsi->nb_qps ||
+		vc_vqci->num_queue_pairs > I40E_MAX_VSI_QP ||
+		msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci,
+					vc_vqci->num_queue_pairs)) {
+		PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong");
+		ret = I40E_ERR_PARAM;
+		goto send_msg;
+	}
+
+	vc_qpi = vc_vqci->qpair;
+	for (i = 0; i < vc_vqci->num_queue_pairs; i++) {
+		if (vc_qpi[i].rxq.queue_id > vsi->nb_qps - 1 ||
+			vc_qpi[i].txq.queue_id > vsi->nb_qps - 1) {
+			ret = I40E_ERR_PARAM;
+			goto send_msg;
+		}
+
+		/*
+		 * Apply the VF RX queue setting to the HMC.
+		 * Only the I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT opcode
+		 * carries the extra information in
+		 * 'struct i40e_virtchnl_queue_pair_ext_info'; for this
+		 * opcode the default CRC-strip setting is used.
+		 */
+		if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpi[i].rxq,
+			I40E_CFG_CRCSTRIP_DEFAULT) != I40E_SUCCESS) {
+			PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
+			ret = I40E_ERR_PARAM;
+			goto send_msg;
+		}
+
+		/* Apply the VF TX queue setting to the HMC */
+		if (i40e_pf_host_hmc_config_txq(hw, vf,
+			&vc_qpi[i].txq) != I40E_SUCCESS) {
+			PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
+			ret = I40E_ERR_PARAM;
+			goto send_msg;
+		}
+	}
+
+send_msg:
+	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+							ret, NULL, 0);
+
+	return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
+						uint8_t *msg,
+						uint16_t msglen)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+	struct i40e_vsi *vsi = vf->vsi;
+	struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei =
+		(struct i40e_virtchnl_vsi_queue_config_ext_info *)msg;
+	struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
+	int i, ret = I40E_SUCCESS;
+
+	if (!msg || vc_vqcei->num_queue_pairs > vsi->nb_qps ||
+		vc_vqcei->num_queue_pairs > I40E_MAX_VSI_QP ||
+		msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei,
+					vc_vqcei->num_queue_pairs)) {
+		PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument wrong");
+		ret = I40E_ERR_PARAM;
+		goto send_msg;
+	}
+
+	vc_qpei = vc_vqcei->qpair;
+	for (i = 0; i < vc_vqcei->num_queue_pairs; i++) {
+		if (vc_qpei[i].rxq.queue_id > vsi->nb_qps - 1 ||
+			vc_qpei[i].txq.queue_id > vsi->nb_qps - 1) {
+			ret = I40E_ERR_PARAM;
+			goto send_msg;
+		}
+		/*
+		 * Apply the VF RX queue setting to the HMC.
+		 * This is the I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT opcode,
+		 * so the extra information in
+		 * 'struct i40e_virtchnl_queue_pair_ext_info' is used
+		 * (here, the per-queue CRC-strip flag).
+		 */
+		if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpei[i].rxq,
+			vc_qpei[i].rxq_ext.crcstrip) != I40E_SUCCESS) {
+			PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
+			ret = I40E_ERR_PARAM;
+			goto send_msg;
+		}
+
+		/* Apply the VF TX queue setting to the HMC */
+		if (i40e_pf_host_hmc_config_txq(hw, vf, &vc_qpei[i].txq) !=
+			I40E_SUCCESS) {
+			PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
+			ret = I40E_ERR_PARAM;
+			goto send_msg;
+		}
+	}
+
+send_msg:
+	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+								ret, NULL, 0);
+
+	return ret;
+}
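Both handlers above validate msglen against I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(), which i40e_pf.h defines as the header size plus one qpair element per queue pair, because the config structures end in a flexible array. A self-contained sketch of that length check follows; the struct layouts are simplified stand-ins, not the virtchnl definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the virtchnl structures (illustrative only). */
struct qpair_info_sketch {
	uint32_t txq[4];
	uint32_t rxq[6];
};

struct vsi_queue_config_sketch {
	uint16_t vsi_id;
	uint16_t num_queue_pairs;
	struct qpair_info_sketch qpair[]; /* flexible array; the header uses qpair[0] */
};

/* Same shape as I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(x, n). */
#define CONFIG_VSI_QUEUES_SIZE(x, n) \
	(sizeof(*(x)) + sizeof((x)->qpair[0]) * (n))

int
main(void)
{
	struct vsi_queue_config_sketch *cfg = NULL;
	uint16_t n = 4;
	size_t expected = CONFIG_VSI_QUEUES_SIZE(cfg, n);

	/* A message shorter than this would be rejected with I40E_ERR_PARAM. */
	printf("expected message length for %u queue pairs: %zu bytes\n",
	       n, expected);
	return 0;
}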
+static int
+i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
+					uint8_t *msg, uint16_t msglen)
+{
+	int ret = I40E_SUCCESS;
+	struct i40e_virtchnl_irq_map_info *irqmap =
+		(struct i40e_virtchnl_irq_map_info *)msg;
+
+	if (msg == NULL || msglen < sizeof(struct i40e_virtchnl_irq_map_info)) {
+		PMD_DRV_LOG(ERR, "buffer too short");
+		ret = I40E_ERR_PARAM;
+		goto send_msg;
+	}
+
+	/* Assume the VF has only 1 vector to bind all queues to */
+	if (irqmap->num_vectors != 1) {
+		PMD_DRV_LOG(ERR, "DPDK host only supports 1 vector");
+		ret = I40E_ERR_PARAM;
+		goto send_msg;
+	}
+
+	/* This MSI-X interrupt index is in the VF's own range */
+	vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
+	vf->vsi->nb_msix = irqmap->num_vectors;
+	vf->vsi->nb_used_qps = vf->vsi->nb_qps;
+
+	/* It doesn't matter how the TX/RX queues map to this vector; just
+	 * link all VF RX queues together and do only the mapping work here.
+	 * The VF can enable/disable the interrupt by itself.
+	 */
+	i40e_vsi_queues_bind_intr(vf->vsi);
+send_msg:
+	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+							ret, NULL, 0);
+
+	return ret;
+}
+
+static int
+i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
+			   struct i40e_virtchnl_queue_select *qsel,
+			   bool on)
+{
+	int ret = I40E_SUCCESS;
+	int i;
+	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+	uint16_t baseq = vf->vsi->base_queue;
+
+	if (qsel->rx_queues + qsel->tx_queues == 0)
+		return I40E_ERR_PARAM;
+
+	/* always enable RX first and disable it last */
+	/* Enable RX first when enabling */
+	if (on) {
+		for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
+			if (qsel->rx_queues & (1 << i)) {
+				ret = i40e_switch_rx_queue(hw, baseq + i, on);
+				if (ret != I40E_SUCCESS)
+					return ret;
+			}
+	}
+
+	/* Enable/Disable TX */
+	for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
+		if (qsel->tx_queues & (1 << i)) {
+			ret = i40e_switch_tx_queue(hw, baseq + i, on);
+			if (ret != I40E_SUCCESS)
+				return ret;
+		}
+
+	/* Disable RX last when disabling */
+	if (!on) {
+		/* disable RX */
+		for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
+			if (qsel->rx_queues & (1 << i)) {
+				ret = i40e_switch_rx_queue(hw, baseq + i, on);
+				if (ret != I40E_SUCCESS)
+					return ret;
+			}
+	}
+
+	return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_enable_queues(struct i40e_pf_vf *vf,
+					uint8_t *msg,
+					uint16_t msglen)
+{
+	int ret = I40E_SUCCESS;
+	struct i40e_virtchnl_queue_select *q_sel =
+		(struct i40e_virtchnl_queue_select *)msg;
+
+	if (msg == NULL || msglen != sizeof(*q_sel)) {
+		ret = I40E_ERR_PARAM;
+		goto send_msg;
+	}
+	ret = i40e_pf_host_switch_queues(vf, q_sel, true);
+
+send_msg:
+	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+							ret, NULL, 0);
+
+	return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf,
+					uint8_t *msg,
+					uint16_t msglen)
+{
+	int ret = I40E_SUCCESS;
+	struct i40e_virtchnl_queue_select *q_sel =
+		(struct i40e_virtchnl_queue_select *)msg;
+
+	if (msg == NULL || msglen != sizeof(*q_sel)) {
+		ret = I40E_ERR_PARAM;
+		goto send_msg;
+	}
+
+
ret = i40e_pf_host_switch_queues(vf, q_sel, false); + +send_msg: + i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES, + ret, NULL, 0); + + return ret; +} + + +static int +i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf, + uint8_t *msg, + uint16_t msglen) +{ + int ret = I40E_SUCCESS; + struct i40e_virtchnl_ether_addr_list *addr_list = + (struct i40e_virtchnl_ether_addr_list *)msg; + struct i40e_mac_filter_info filter; + int i; + struct ether_addr *mac; + + memset(&filter, 0 , sizeof(struct i40e_mac_filter_info)); + + if (msg == NULL || msglen <= sizeof(*addr_list)) { + PMD_DRV_LOG(ERR, "add_ether_address argument too short"); + ret = I40E_ERR_PARAM; + goto send_msg; + } + + for (i = 0; i < addr_list->num_elements; i++) { + mac = (struct ether_addr *)(addr_list->list[i].addr); + (void)rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN); + filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + if(!is_valid_assigned_ether_addr(mac) || + i40e_vsi_add_mac(vf->vsi, &filter)) { + ret = I40E_ERR_INVALID_MAC_ADDR; + goto send_msg; + } + } + +send_msg: + i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, + ret, NULL, 0); + + return ret; +} + +static int +i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf, + uint8_t *msg, + uint16_t msglen) +{ + int ret = I40E_SUCCESS; + struct i40e_virtchnl_ether_addr_list *addr_list = + (struct i40e_virtchnl_ether_addr_list *)msg; + int i; + struct ether_addr *mac; + + if (msg == NULL || msglen <= sizeof(*addr_list)) { + PMD_DRV_LOG(ERR, "delete_ether_address argument too short"); + ret = I40E_ERR_PARAM; + goto send_msg; + } + + for (i = 0; i < addr_list->num_elements; i++) { + mac = (struct ether_addr *)(addr_list->list[i].addr); + if(!is_valid_assigned_ether_addr(mac) || + i40e_vsi_delete_mac(vf->vsi, mac)) { + ret = I40E_ERR_INVALID_MAC_ADDR; + goto send_msg; + } + } + +send_msg: + i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, + ret, NULL, 0); + + return ret; +} + +static int +i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf, + uint8_t *msg, uint16_t msglen) +{ + int ret = I40E_SUCCESS; + struct i40e_virtchnl_vlan_filter_list *vlan_filter_list = + (struct i40e_virtchnl_vlan_filter_list *)msg; + int i; + uint16_t *vid; + + if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) { + PMD_DRV_LOG(ERR, "add_vlan argument too short"); + ret = I40E_ERR_PARAM; + goto send_msg; + } + + vid = vlan_filter_list->vlan_id; + + for (i = 0; i < vlan_filter_list->num_elements; i++) { + ret = i40e_vsi_add_vlan(vf->vsi, vid[i]); + if(ret != I40E_SUCCESS) + goto send_msg; + } + +send_msg: + i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, + ret, NULL, 0); + + return ret; +} + +static int +i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf, + uint8_t *msg, + uint16_t msglen) +{ + int ret = I40E_SUCCESS; + struct i40e_virtchnl_vlan_filter_list *vlan_filter_list = + (struct i40e_virtchnl_vlan_filter_list *)msg; + int i; + uint16_t *vid; + + if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) { + PMD_DRV_LOG(ERR, "delete_vlan argument too short"); + ret = I40E_ERR_PARAM; + goto send_msg; + } + + vid = vlan_filter_list->vlan_id; + for (i = 0; i < vlan_filter_list->num_elements; i++) { + ret = i40e_vsi_delete_vlan(vf->vsi, vid[i]); + if(ret != I40E_SUCCESS) + goto send_msg; + } + +send_msg: + i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, + ret, NULL, 0); + + return ret; +} + +static int +i40e_pf_host_process_cmd_config_promisc_mode( + struct i40e_pf_vf *vf, + uint8_t *msg, + 
uint16_t msglen) +{ + int ret = I40E_SUCCESS; + struct i40e_virtchnl_promisc_info *promisc = + (struct i40e_virtchnl_promisc_info *)msg; + struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); + bool unicast = FALSE, multicast = FALSE; + + if (msg == NULL || msglen != sizeof(*promisc)) { + ret = I40E_ERR_PARAM; + goto send_msg; + } + + if (promisc->flags & I40E_FLAG_VF_UNICAST_PROMISC) + unicast = TRUE; + ret = i40e_aq_set_vsi_unicast_promiscuous(hw, + vf->vsi->seid, unicast, NULL); + if (ret != I40E_SUCCESS) + goto send_msg; + + if (promisc->flags & I40E_FLAG_VF_MULTICAST_PROMISC) + multicast = TRUE; + ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi->seid, + multicast, NULL); + +send_msg: + i40e_pf_host_send_msg_to_vf(vf, + I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, ret, NULL, 0); + + return ret; +} + +static int +i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf) +{ + i40e_update_vsi_stats(vf->vsi); + + i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, + I40E_SUCCESS, (uint8_t *)&vf->vsi->eth_stats, + sizeof(vf->vsi->eth_stats)); + + return I40E_SUCCESS; +} + +static void +i40e_pf_host_process_cmd_get_link_status(struct i40e_pf_vf *vf) +{ + struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vf->pf->main_vsi); + + /* Update link status first to acquire latest link change */ + i40e_dev_link_update(dev, 1); + i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_LINK_STAT, + I40E_SUCCESS, (uint8_t *)&dev->data->dev_link, + sizeof(struct rte_eth_link)); +} + +static int +i40e_pf_host_process_cmd_cfg_vlan_offload( + struct i40e_pf_vf *vf, + uint8_t *msg, + uint16_t msglen) +{ + int ret = I40E_SUCCESS; + struct i40e_virtchnl_vlan_offload_info *offload = + (struct i40e_virtchnl_vlan_offload_info *)msg; + + if (msg == NULL || msglen != sizeof(*offload)) { + ret = I40E_ERR_PARAM; + goto send_msg; + } + + ret = i40e_vsi_config_vlan_stripping(vf->vsi, + !!offload->enable_vlan_strip); + if (ret != 0) + PMD_DRV_LOG(ERR, "Failed to configure vlan stripping"); + +send_msg: + i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD, + ret, NULL, 0); + + return ret; +} + +static int +i40e_pf_host_process_cmd_cfg_pvid(struct i40e_pf_vf *vf, + uint8_t *msg, + uint16_t msglen) +{ + int ret = I40E_SUCCESS; + struct i40e_virtchnl_pvid_info *tpid_info = + (struct i40e_virtchnl_pvid_info *)msg; + + if (msg == NULL || msglen != sizeof(*tpid_info)) { + ret = I40E_ERR_PARAM; + goto send_msg; + } + + ret = i40e_vsi_vlan_pvid_set(vf->vsi, &tpid_info->info); + +send_msg: + i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_PVID, + ret, NULL, 0); + + return ret; +} + +void +i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev, + uint16_t abs_vf_id, uint32_t opcode, + __rte_unused uint32_t retval, + uint8_t *msg, + uint16_t msglen) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf_vf *vf; + /* AdminQ will pass absolute VF id, transfer to internal vf id */ + uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id; + + if (!dev || vf_id > pf->vf_num - 1 || !pf->vfs) { + PMD_DRV_LOG(ERR, "invalid argument"); + return; + } + + vf = &pf->vfs[vf_id]; + if (!vf->vsi) { + PMD_DRV_LOG(ERR, "NO VSI associated with VF found"); + i40e_pf_host_send_msg_to_vf(vf, opcode, + I40E_ERR_NO_AVAILABLE_VSI, NULL, 0); + return; + } + + switch (opcode) { + case I40E_VIRTCHNL_OP_VERSION : + PMD_DRV_LOG(INFO, "OP_VERSION received"); + i40e_pf_host_process_cmd_version(vf); + break; + case I40E_VIRTCHNL_OP_RESET_VF : 
+ PMD_DRV_LOG(INFO, "OP_RESET_VF received"); + i40e_pf_host_process_cmd_reset_vf(vf); + break; + case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: + PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received"); + i40e_pf_host_process_cmd_get_vf_resource(vf); + break; + case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: + PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received"); + i40e_pf_host_process_cmd_config_vsi_queues(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT: + PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES_EXT received"); + i40e_pf_host_process_cmd_config_vsi_queues_ext(vf, msg, + msglen); + break; + case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: + PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received"); + i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_ENABLE_QUEUES: + PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received"); + i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_DISABLE_QUEUES: + PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUE received"); + i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: + PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received"); + i40e_pf_host_process_cmd_add_ether_address(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: + PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received"); + i40e_pf_host_process_cmd_del_ether_address(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_ADD_VLAN: + PMD_DRV_LOG(INFO, "OP_ADD_VLAN received"); + i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_DEL_VLAN: + PMD_DRV_LOG(INFO, "OP_DEL_VLAN received"); + i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received"); + i40e_pf_host_process_cmd_config_promisc_mode(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_GET_STATS: + PMD_DRV_LOG(INFO, "OP_GET_STATS received"); + i40e_pf_host_process_cmd_get_stats(vf); + break; + case I40E_VIRTCHNL_OP_GET_LINK_STAT: + PMD_DRV_LOG(INFO, "OP_GET_LINK_STAT received"); + i40e_pf_host_process_cmd_get_link_status(vf); + break; + case I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD: + PMD_DRV_LOG(INFO, "OP_CFG_VLAN_OFFLOAD received"); + i40e_pf_host_process_cmd_cfg_vlan_offload(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_CFG_VLAN_PVID: + PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received"); + i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen); + break; + /* Don't add command supported below, which will + * return an error code. + */ + case I40E_VIRTCHNL_OP_FCOE: + PMD_DRV_LOG(ERR, "OP_FCOE received, not supported"); + default: + PMD_DRV_LOG(ERR, "%u received, not supported", opcode); + i40e_pf_host_send_msg_to_vf(vf, opcode, I40E_ERR_PARAM, + NULL, 0); + break; + } +} + +int +i40e_pf_host_init(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + int ret, i; + uint32_t val; + + PMD_INIT_FUNC_TRACE(); + + /** + * return if SRIOV not enabled, VF number not configured or + * no queue assigned. 
+ */ + if(!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 || pf->vf_nb_qps == 0) + return I40E_SUCCESS; + + /* Allocate memory to store VF structure */ + pf->vfs = rte_zmalloc("i40e_pf_vf",sizeof(*pf->vfs) * pf->vf_num, 0); + if(pf->vfs == NULL) + return -ENOMEM; + + /* Disable irq0 for VFR event */ + i40e_pf_disable_irq0(hw); + + /* Disable VF link status interrupt */ + val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM); + val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; + I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val); + I40E_WRITE_FLUSH(hw); + + for (i = 0; i < pf->vf_num; i++) { + pf->vfs[i].pf = pf; + pf->vfs[i].state = I40E_VF_INACTIVE; + pf->vfs[i].vf_idx = i; + ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0); + if (ret != I40E_SUCCESS) + goto fail; + eth_random_addr(pf->vfs[i].mac_addr.addr_bytes); + } + + /* restore irq0 */ + i40e_pf_enable_irq0(hw); + + return I40E_SUCCESS; + +fail: + rte_free(pf->vfs); + i40e_pf_enable_irq0(hw); + + return ret; +} + +int +i40e_pf_host_uninit(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint32_t val; + + PMD_INIT_FUNC_TRACE(); + + /** + * return if SRIOV not enabled, VF number not configured or + * no queue assigned. + */ + if ((!hw->func_caps.sr_iov_1_1) || + (pf->vf_num == 0) || + (pf->vf_nb_qps == 0)) + return I40E_SUCCESS; + + /* free memory to store VF structure */ + rte_free(pf->vfs); + pf->vfs = NULL; + + /* Disable irq0 for VFR event */ + i40e_pf_disable_irq0(hw); + + /* Disable VF link status interrupt */ + val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM); + val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; + I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val); + I40E_WRITE_FLUSH(hw); + + return I40E_SUCCESS; +} diff --git a/drivers/net/i40e/i40e_pf.h b/drivers/net/i40e/i40e_pf.h new file mode 100644 index 00000000..9c01829a --- /dev/null +++ b/drivers/net/i40e/i40e_pf.h @@ -0,0 +1,128 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _I40E_PF_H_
+#define _I40E_PF_H_
+
+/* VERSION info exchanged between the VF and the PF host. When the VF works
+ * with the kernel PF driver, it reads I40E_VIRTCHNL_VERSION_MAJOR/MINOR.
+ * When it works with a DPDK PF host, it reads the version below. The VF
+ * then knows which host it is talking to and uses the proper protocol to
+ * communicate.
+ */
+#define I40E_DPDK_SIGNATURE     ('D' << 24 | 'P' << 16 | 'D' << 8 | 'K')
+#define I40E_DPDK_VERSION_MAJOR I40E_DPDK_SIGNATURE
+#define I40E_DPDK_VERSION_MINOR 0
+
+/* Default setting on the number of VSIs that a VF can contain */
+#define I40E_DEFAULT_VF_VSI_NUM 1
+
+#define I40E_DPDK_OFFSET 0x100
+
+enum i40e_pf_vfr_state {
+	I40E_PF_VFR_INPROGRESS = 0,
+	I40E_PF_VFR_COMPLETED = 1,
+};
+
+/* DPDK PF driver specific commands to the VF */
+enum i40e_virtchnl_ops_dpdk {
+	/*
+	 * Keep some gap between the Linux PF commands and
+	 * the DPDK PF extended commands.
+	 */
+	I40E_VIRTCHNL_OP_GET_LINK_STAT = I40E_VIRTCHNL_OP_VERSION +
+		I40E_DPDK_OFFSET,
+	I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
+	I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
+	I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+};
+
+/* A structure to support extended info of a receive queue. */
+struct i40e_virtchnl_rxq_ext_info {
+	uint8_t crcstrip;
+};
+
+/*
+ * A structure to support extended info of queue pairs; an additional field
+ * is added compared to the original 'struct i40e_virtchnl_queue_pair_info'.
+ */
+struct i40e_virtchnl_queue_pair_ext_info {
+	/* vsi_id and queue_id should be identical for both rx and tx queues.*/
+	struct i40e_virtchnl_txq_info txq;
+	struct i40e_virtchnl_rxq_info rxq;
+	struct i40e_virtchnl_rxq_ext_info rxq_ext;
+};
+
+/*
+ * A structure to support extended info of VSI queue pairs;
+ * 'struct i40e_virtchnl_queue_pair_ext_info' is used in place of the
+ * original 'struct i40e_virtchnl_queue_pair_info'.
+ */
+struct i40e_virtchnl_vsi_queue_config_ext_info {
+	uint16_t vsi_id;
+	uint16_t num_queue_pairs;
+	struct i40e_virtchnl_queue_pair_ext_info qpair[0];
+};
+
+struct i40e_virtchnl_vlan_offload_info {
+	uint16_t vsi_id;
+	uint8_t enable_vlan_strip;
+	uint8_t reserved;
+};
+
+/*
+ * Macro to calculate the memory size for configuring VSI queues
+ * via the virtual channel.
+ */
+#define I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(x, n) \
+	(sizeof(*(x)) + sizeof((x)->qpair[0]) * (n))
+
+/*
+ * I40E_VIRTCHNL_OP_CFG_VLAN_PVID
+ * The VF sends this message to enable or disable the PVID. For an enable
+ * operation, the PVID must be specified. The PF returns the status
+ * code in retval.
+ */ +struct i40e_virtchnl_pvid_info { + uint16_t vsi_id; + struct i40e_vsi_vlan_pvid_info info; +}; + +int i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset); +void i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev, + uint16_t abs_vf_id, uint32_t opcode, + __rte_unused uint32_t retval, + uint8_t *msg, uint16_t msglen); +int i40e_pf_host_init(struct rte_eth_dev *dev); +int i40e_pf_host_uninit(struct rte_eth_dev *dev); + +#endif /* _I40E_PF_H_ */ diff --git a/drivers/net/i40e/i40e_regs.h b/drivers/net/i40e/i40e_regs.h new file mode 100644 index 00000000..472c7a06 --- /dev/null +++ b/drivers/net/i40e/i40e_regs.h @@ -0,0 +1,997 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +struct i40e_reg_info { + uint32_t base_addr; + uint32_t count1; + uint32_t stride1; + uint32_t count2; + uint32_t stride2; + const char *name; +}; + +static const struct i40e_reg_info i40e_regs_adminq[] = { + {I40E_VFQF_HENA(0), 1, 4, 0, 0, "VFQF_HENA"}, + {I40E_VFQF_HKEY(0), 12, 4, 0, 0, "VFQF_HKEY"}, + {I40E_VFQF_HREGION(0), 7, 4, 0, 0, "VFQF_HREGION"}, + {I40E_VPQF_CTL(0), 127, 4, 0, 0, "VPQF_CTL"}, + {I40E_PFLAN_QALLOC, 0, 0, 0, 0, "PFLAN_QALLOC"}, + {I40E_PFQF_CTL_0, 0, 0, 0, 0, "PFQF_CTL_0"}, + {I40E_VSILAN_QTABLE(0, 0), 7, 2048, 383, 4, "VSILAN_QTABLE"}, + {I40E_VSIQF_TCREGION(0, 0), 3, 2048, 383, 4, "VSIQF_TCREGION"}, + {I40E_VSILAN_QBASE(0), 383, 4, 0, 0, "VSILAN_QBASE"}, + {I40E_VSIQF_CTL(0), 383, 4, 0, 0, "VSIQF_CTL"}, + {I40E_PFQF_HKEY(0), 12, 128, 0, 0, "PFQF_HKEY"}, + {I40E_PFQF_HREGION(0), 7, 128, 0, 0, "PFQF_HREGION"}, + {I40E_PFQF_HENA(0), 1, 128, 0, 0, "PFQF_HENA"}, + {I40E_PFQF_FDALLOC, 0, 0, 0, 0, "PFQF_FDALLOC"}, + {I40E_PRTQF_FD_INSET(0, 0), 63, 64, 1, 32, "PRTQF_FD_INSET"}, + {I40E_PRTQF_FD_INSET(0, 0), 63, 64, 1, 32, "PRTQF_FD_INSET"}, + {I40E_PRTQF_FD_MSK(0, 0), 63, 64, 1, 32, "PRTQF_FD_MSK"}, + {I40E_PRTQF_FD_FLXINSET(0), 63, 32, 0, 0, "PRTQF_FD_FLXINSET"}, + {I40E_PRTQF_CTL_0, 0, 0, 0, 0, "PRTQF_CTL_0"}, + {I40E_GLQF_FD_MSK(0, 0), 1, 4, 63, 8, "GLQF_FD_MSK"}, + {I40E_GLQF_HASH_INSET(0, 0), 1, 4, 63, 8, "GLQF_HASH_INSET"}, + {I40E_GLQF_HASH_MSK(0, 0), 1, 4, 63, 8, "GLQF_HASH_MSK"}, + {I40E_GLQF_SWAP(0, 0), 1, 4, 63, 8, "GLQF_SWAP"}, + {I40E_GLFCOE_RCTL, 0, 0, 0, 0, "GLFCOE_RCTL"}, + {I40E_GLQF_CTL, 0, 0, 0, 0, "GLQF_CTL"}, + {I40E_GLQF_HSYM(0), 63, 4, 0, 0, "GLQF_HSYM"}, + {0, 0, 0, 0, 0, NULL} +}; + +static const struct i40e_reg_info i40e_regs_others[] = { + {I40E_QTX_TAIL1(0), 15, 4, 0, 0, "QTX_TAIL1"}, + {I40E_VFPE_CQPDB(0), 127, 4, 0, 0, "VFPE_CQPDB"}, + {I40E_VFPE_CQPTAIL(0), 127, 4, 0, 0, "VFPE_CQPTAIL"}, + {I40E_VFPE_CCQPSTATUS(0), 127, 4, 0, 0, "VFPE_CCQPSTATUS"}, + {I40E_VFPE_CCQPLOW(0), 127, 4, 0, 0, "VFPE_CCQPLOW"}, + {I40E_VFPE_CCQPHIGH(0), 127, 4, 0, 0, "VFPE_CCQPHIGH"}, + {I40E_VFPE_IPCONFIG0(0), 127, 4, 0, 0, "VFPE_IPCONFIG0"}, + {I40E_VFPE_CQPERRCODES(0), 127, 4, 0, 0, "VFPE_CQPERRCODES"}, + {I40E_QRX_TAIL1(0), 15, 4, 0, 0, "QRX_TAIL1"}, + {I40E_VFINT_ITRN1(0, 0), 2, 64, 15, 4, "VFINT_ITRN1"}, + {I40E_VFPE_TCPNOWTIMER(0), 127, 4, 0, 0, "VFPE_TCPNOWTIMER"}, + {I40E_VFPE_MRTEIDXMASK(0), 127, 4, 0, 0, "VFPE_MRTEIDXMASK"}, + {I40E_VFPE_RCVUNEXPECTEDERROR(0), 127, 4, 0, 0, + "VFPE_RCVUNEXPECTEDERROR"}, + {I40E_VFINT_DYN_CTLN1(0), 15, 4, 0, 0, "VFINT_DYN_CTLN1"}, + {I40E_VFINT_ICR01, 0, 0, 0, 0, "VFINT_ICR01"}, + {I40E_VFINT_ITR01(0), 2, 4, 0, 0, "VFINT_ITR01"}, + {I40E_VFINT_ICR0_ENA1, 0, 0, 0, 0, "VFINT_ICR0_ENA1"}, + {I40E_VFINT_STAT_CTL01, 0, 0, 0, 0, "VFINT_STAT_CTL01"}, + {I40E_VFINT_DYN_CTL01, 0, 0, 0, 0, "VFINT_DYN_CTL01"}, + {I40E_VF_ARQBAH1, 0, 0, 0, 0, "VF_ARQBAH1"}, + {I40E_VF_ATQH1, 0, 0, 0, 0, "VF_ATQH1"}, + {I40E_VF_ATQLEN1, 0, 0, 0, 0, "VF_ATQLEN1"}, + {I40E_VF_ARQBAL1, 0, 0, 0, 0, "VF_ARQBAL1"}, + {I40E_VF_ARQT1, 0, 0, 0, 0, "VF_ARQT1"}, + {I40E_VF_ARQH1, 0, 0, 0, 0, "VF_ARQH1"}, + {I40E_VF_ATQBAH1, 0, 0, 0, 0, "VF_ATQBAH1"}, + {I40E_VF_ATQBAL1, 0, 0, 0, 0, "VF_ATQBAL1"}, + {I40E_VF_ARQLEN1, 0, 0, 0, 0, "VF_ARQLEN1"}, + {I40E_PFPE_CQPDB, 0, 0, 0, 0, "PFPE_CQPDB"}, + {I40E_PFPE_CQPTAIL, 0, 0, 0, 0, "PFPE_CQPTAIL"}, + {I40E_PFPE_CCQPSTATUS, 0, 0, 0, 0, "PFPE_CCQPSTATUS"}, + {I40E_PFPE_CCQPLOW, 0, 0, 0, 0, "PFPE_CCQPLOW"}, + {I40E_PFPE_CCQPHIGH, 0, 0, 0, 0, "PFPE_CCQPHIGH"}, + {I40E_PFPE_IPCONFIG0, 0, 0, 0, 0, "PFPE_IPCONFIG0"}, + 
{I40E_VF_ATQT1, 0, 0, 0, 0, "VF_ATQT1"}, + {I40E_PFPE_TCPNOWTIMER, 0, 0, 0, 0, "PFPE_TCPNOWTIMER"}, + {I40E_PFPE_MRTEIDXMASK, 0, 0, 0, 0, "PFPE_MRTEIDXMASK"}, + {I40E_PFPE_RCVUNEXPECTEDERROR, 0, 0, 0, 0, "PFPE_RCVUNEXPECTEDERROR"}, + {I40E_PFPE_UDACTRL, 0, 0, 0, 0, "PFPE_UDACTRL"}, + {I40E_PFPE_UDAUCFBQPN, 0, 0, 0, 0, "PFPE_UDAUCFBQPN"}, + {I40E_VFGEN_RSTAT, 0, 0, 0, 0, "VFGEN_RSTAT"}, + {I40E_PFPE_CQPERRCODES, 0, 0, 0, 0, "PFPE_CQPERRCODES"}, + {I40E_PFPE_FLMXMITALLOCERR, 0, 0, 0, 0, "PFPE_FLMXMITALLOCERR"}, + {I40E_PFPE_FLMQ1ALLOCERR, 0, 0, 0, 0, "PFPE_FLMQ1ALLOCERR"}, + {I40E_VFPE_IPCONFIG01, 0, 0, 0, 0, "VFPE_IPCONFIG01"}, + {I40E_VFPE_MRTEIDXMASK1, 0, 0, 0, 0, "VFPE_MRTEIDXMASK1"}, + {I40E_VFPE_RCVUNEXPECTEDERROR1, 0, 0, 0, 0, "VFPE_RCVUNEXPECTEDERROR1"}, + {I40E_VFPE_CCQPHIGH1, 0, 0, 0, 0, "VFPE_CCQPHIGH1"}, + {I40E_VFPE_CQPERRCODES1, 0, 0, 0, 0, "VFPE_CQPERRCODES1"}, + {I40E_VFPE_CQPTAIL1, 0, 0, 0, 0, "VFPE_CQPTAIL1"}, + {I40E_VFPE_AEQALLOC1, 0, 0, 0, 0, "VFPE_AEQALLOC1"}, + {I40E_VFPE_TCPNOWTIMER1, 0, 0, 0, 0, "VFPE_TCPNOWTIMER1"}, + {I40E_VFPE_CCQPLOW1, 0, 0, 0, 0, "VFPE_CCQPLOW1"}, + {I40E_VFPE_CQACK1, 0, 0, 0, 0, "VFPE_CQACK1"}, + {I40E_VFPE_CQARM1, 0, 0, 0, 0, "VFPE_CQARM1"}, + {I40E_VFPE_CCQPSTATUS1, 0, 0, 0, 0, "VFPE_CCQPSTATUS1"}, + {I40E_VFPE_CQPDB1, 0, 0, 0, 0, "VFPE_CQPDB1"}, + {I40E_GLPE_VFUDACTRL(0), 31, 4, 0, 0, "GLPE_VFUDACTRL"}, + {I40E_VFPE_WQEALLOC1, 0, 0, 0, 0, "VFPE_WQEALLOC1"}, + {I40E_GLPE_VFUDAUCFBQPN(0), 31, 4, 0, 0, "GLPE_VFUDAUCFBQPN"}, + {I40E_GLPE_VFFLMXMITALLOCERR(0), 31, 4, 0, 0, "GLPE_VFFLMXMITALLOCERR"}, + {I40E_GLPE_VFFLMQ1ALLOCERR(0), 31, 4, 0, 0, "GLPE_VFFLMQ1ALLOCERR"}, + {I40E_VFQF_HLUT(0), 15, 4, 0, 0, "VFQF_HLUT"}, + {I40E_GLPE_CPUSTATUS0, 0, 0, 0, 0, "GLPE_CPUSTATUS0"}, + {I40E_GLPE_CPUSTATUS1, 0, 0, 0, 0, "GLPE_CPUSTATUS1"}, + {I40E_GLPE_CPUSTATUS2, 0, 0, 0, 0, "GLPE_CPUSTATUS2"}, + {I40E_GLPE_CPUTRIG0, 0, 0, 0, 0, "GLPE_CPUTRIG0"}, + {I40E_GLPE_VFFLMOBJCTRL(0), 31, 4, 0, 0, "GLPE_VFFLMOBJCTRL"}, + {I40E_VFCM_PE_ERRINFO, 0, 0, 0, 0, "VFCM_PE_ERRINFO"}, + {I40E_GLPE_RUPM_GCTL, 0, 0, 0, 0, "GLPE_RUPM_GCTL"}, + {I40E_GLPE_DUAL40_RUPM, 0, 0, 0, 0, "GLPE_DUAL40_RUPM"}, + {I40E_GLPE_RUPM_TXHOST_EN, 0, 0, 0, 0, "GLPE_RUPM_TXHOST_EN"}, + {I40E_PRTPE_RUPM_THRES, 0, 0, 0, 0, "PRTPE_RUPM_THRES"}, + {I40E_PRTPE_RUPM_CTL, 0, 0, 0, 0, "PRTPE_RUPM_CTL"}, + {I40E_PRTPE_RUPM_PFCCTL, 0, 0, 0, 0, "PRTPE_RUPM_PFCCTL"}, + {I40E_PRTPE_RUPM_PFCPC, 0, 0, 0, 0, "PRTPE_RUPM_PFCPC"}, + {I40E_PRTPE_RUPM_PFCTCC, 0, 0, 0, 0, "PRTPE_RUPM_PFCTCC"}, + {I40E_GLPE_RUPM_PUSHPOOL, 0, 0, 0, 0, "GLPE_RUPM_PUSHPOOL"}, + {I40E_GLPE_RUPM_FLRPOOL, 0, 0, 0, 0, "GLPE_RUPM_FLRPOOL"}, + {I40E_GLPE_RUPM_PTXPOOL, 0, 0, 0, 0, "GLPE_RUPM_PTXPOOL"}, + {I40E_GLPE_RUPM_CQPPOOL, 0, 0, 0, 0, "GLPE_RUPM_CQPPOOL"}, + {I40E_PRTE_RUPM_TCCNTR03, 0, 0, 0, 0, "PRTE_RUPM_TCCNTR03"}, + {I40E_PRTPE_RUPM_TCCNTR47, 0, 0, 0, 0, "PRTPE_RUPM_TCCNTR47"}, + {I40E_PRTPE_RUPM_CNTR, 0, 0, 0, 0, "PRTPE_RUPM_CNTR"}, + {I40E_PRTPE_RUPM_PTXTCCNTR03, 0, 0, 0, 0, "PRTPE_RUPM_PTXTCCNTR03"}, + {I40E_PRTPE_RUPM_PTCTCCNTR47, 0, 0, 0, 0, "PRTPE_RUPM_PTCTCCNTR47"}, + {I40E_VFCM_PE_ERRDATA, 0, 0, 0, 0, "VFCM_PE_ERRDATA"}, + {I40E_PFPCI_VF_FLUSH_DONE, 0, 0, 0, 0, "PFPCI_VF_FLUSH_DONE"}, + {I40E_GLPES_PFRXVLANERR(0), 15, 4, 0, 0, "GLPES_PFRXVLANERR"}, + {I40E_GLPES_PFIP4RXOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4RXOCTSLO"}, + {I40E_GLPES_PFIP4RXOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4RXOCTSHI"}, + {I40E_GLPES_PFIP4RXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4RXPKTSLO"}, + {I40E_GLPES_PFIP4RXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4RXPKTSHI"}, + 
{I40E_GLPES_PFIP4RXDISCARD(0), 15, 4, 0, 0, "GLPES_PFIP4RXDISCARD"}, + {I40E_GLPES_PFIP4RXTRUNC(0), 15, 4, 0, 0, "GLPES_PFIP4RXTRUNC"}, + {I40E_GLPES_PFIP4RXFRAGSLO(0), 15, 8, 0, 0, "GLPES_PFIP4RXFRAGSLO"}, + {I40E_GLPES_PFIP4RXFRAGSHI(0), 15, 8, 0, 0, "GLPES_PFIP4RXFRAGSHI"}, + {I40E_GLPES_PFIP4RXMCOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4RXMCOCTSLO"}, + {I40E_GLPES_PFIP4RXMCOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4RXMCOCTSHI"}, + {I40E_GLPES_PFIP4RXMCPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4RXMCPKTSLO"}, + {I40E_GLPES_PFIP4RXMCPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4RXMCPKTSHI"}, + {I40E_GLPES_PFIP6RXOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6RXOCTSLO"}, + {I40E_GLPES_PFIP6RXOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6RXOCTSHI"}, + {I40E_GLPES_PFIP6RXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6RXPKTSLO"}, + {I40E_GLPES_PFIP6RXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6RXPKTSHI"}, + {I40E_GLPES_PFIP6RXDISCARD(0), 15, 4, 0, 0, "GLPES_PFIP6RXDISCARD"}, + {I40E_GLPES_PFIP6RXTRUNC(0), 15, 4, 0, 0, "GLPES_PFIP6RXTRUNC"}, + {I40E_GLPES_PFIP6RXFRAGSLO(0), 15, 8, 0, 0, "GLPES_PFIP6RXFRAGSLO"}, + {I40E_GLPES_PFIP6RXFRAGSHI(0), 15, 8, 0, 0, "GLPES_PFIP6RXFRAGSHI"}, + {I40E_GLPES_PFIP6RXMCOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6RXMCOCTSLO"}, + {I40E_GLPES_PFIP6RXMCOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6RXMCOCTSHI"}, + {I40E_GLPES_PFIP6RXMCPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6RXMCPKTSLO"}, + {I40E_GLPES_PFIP6RXMCPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6RXMCPKTSHI"}, + {I40E_GLPES_PFIP4TXOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4TXOCTSLO"}, + {I40E_GLPES_PFIP4TXOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4TXOCTSHI"}, + {I40E_GLPES_PFIP4TXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4TXPKTSLO"}, + {I40E_GLPES_PFIP4TXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4TXPKTSHI"}, + {I40E_GLPES_PFIP4TXFRAGSLO(0), 15, 8, 0, 0, "GLPES_PFIP4TXFRAGSLO"}, + {I40E_GLPES_PFIP4TXFRAGSHI(0), 15, 8, 0, 0, "GLPES_PFIP4TXFRAGSHI"}, + {I40E_GLPES_PFIP4TXMCOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4TXMCOCTSLO"}, + {I40E_GLPES_PFIP4TXMCOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4TXMCOCTSHI"}, + {I40E_GLPES_PFIP4TXMCPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP4TXMCPKTSLO"}, + {I40E_GLPES_PFIP4TXMCPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP4TXMCPKTSHI"}, + {I40E_GLPES_PFIP6TXOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6TXOCTSLO"}, + {I40E_GLPES_PFIP6TXOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6TXOCTSHI"}, + {I40E_GLPES_PFIP6TXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6TXPKTSLO"}, + {I40E_GLPES_PFIP6TXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6TXPKTSHI"}, + {I40E_GLPES_PFIP6TXFRAGSLO(0), 15, 8, 0, 0, "GLPES_PFIP6TXFRAGSLO"}, + {I40E_GLPES_PFIP6TXFRAGSHI(0), 15, 8, 0, 0, "GLPES_PFIP6TXFRAGSHI"}, + {I40E_GLPES_PFIP6TXMCOCTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6TXMCOCTSLO"}, + {I40E_GLPES_PFIP6TXMCOCTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6TXMCOCTSHI"}, + {I40E_GLPES_PFIP6TXMCPKTSLO(0), 15, 8, 0, 0, "GLPES_PFIP6TXMCPKTSLO"}, + {I40E_GLPES_PFIP6TXMCPKTSHI(0), 15, 8, 0, 0, "GLPES_PFIP6TXMCPKTSHI"}, + {I40E_GLPES_PFIP4TXNOROUTE(0), 15, 4, 0, 0, "GLPES_PFIP4TXNOROUTE"}, + {I40E_GLPES_PFIP6TXNOROUTE(0), 15, 4, 0, 0, "GLPES_PFIP6TXNOROUTE"}, + {I40E_GLPES_PFTCPRXSEGSLO(0), 15, 8, 0, 0, "GLPES_PFTCPRXSEGSLO"}, + {I40E_GLPES_PFTCPRXSEGSHI(0), 15, 8, 0, 0, "GLPES_PFTCPRXSEGSHI"}, + {I40E_GLPES_PFTCPRXOPTERR(0), 15, 4, 0, 0, "GLPES_PFTCPRXOPTERR"}, + {I40E_GLPES_PFTCPRXPROTOERR(0), 15, 4, 0, 0, "GLPES_PFTCPRXPROTOERR"}, + {I40E_GLPES_PFTCPTXSEGLO(0), 15, 8, 0, 0, "GLPES_PFTCPTXSEGLO"}, + {I40E_GLPES_PFTCPTXSEGHI(0), 15, 8, 0, 0, "GLPES_PFTCPTXSEGHI"}, + {I40E_GLPES_PFTCPRTXSEG(0), 15, 4, 0, 0, "GLPES_PFTCPRTXSEG"}, + {I40E_GLPES_PFUDPRXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFUDPRXPKTSLO"}, + 
{I40E_GLPES_PFUDPRXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFUDPRXPKTSHI"}, + {I40E_GLPES_PFUDPTXPKTSLO(0), 15, 8, 0, 0, "GLPES_PFUDPTXPKTSLO"}, + {I40E_GLPES_PFUDPTXPKTSHI(0), 15, 8, 0, 0, "GLPES_PFUDPTXPKTSHI"}, + {I40E_GLPES_PFRDMARXWRSLO(0), 15, 8, 0, 0, "GLPES_PFRDMARXWRSLO"}, + {I40E_GLPES_PFRDMARXWRSHI(0), 15, 8, 0, 0, "GLPES_PFRDMARXWRSHI"}, + {I40E_GLPES_PFRDMARXRDSLO(0), 15, 8, 0, 0, "GLPES_PFRDMARXRDSLO"}, + {I40E_GLPES_PFRDMARXRDSHI(0), 15, 8, 0, 0, "GLPES_PFRDMARXRDSHI"}, + {I40E_GLPES_PFRDMARXSNDSLO(0), 15, 8, 0, 0, "GLPES_PFRDMARXSNDSLO"}, + {I40E_GLPES_PFRDMARXSNDSHI(0), 15, 8, 0, 0, "GLPES_PFRDMARXSNDSHI"}, + {I40E_GLPES_PFRDMATXWRSLO(0), 15, 8, 0, 0, "GLPES_PFRDMATXWRSLO"}, + {I40E_GLPES_PFRDMATXWRSHI(0), 15, 8, 0, 0, "GLPES_PFRDMATXWRSHI"}, + {I40E_GLPES_PFRDMATXRDSLO(0), 15, 8, 0, 0, "GLPES_PFRDMATXRDSLO"}, + {I40E_GLPES_PFRDMATXRDSHI(0), 15, 8, 0, 0, "GLPES_PFRDMATXRDSHI"}, + {I40E_GLPES_PFRDMATXSNDSLO(0), 15, 8, 0, 0, "GLPES_PFRDMATXSNDSLO"}, + {I40E_GLPES_PFRDMATXSNDSHI(0), 15, 8, 0, 0, "GLPES_PFRDMATXSNDSHI"}, + {I40E_GLPES_PFRDMAVBNDLO(0), 15, 8, 0, 0, "GLPES_PFRDMAVBNDLO"}, + {I40E_GLPES_PFRDMAVBNDHI(0), 15, 8, 0, 0, "GLPES_PFRDMAVBNDHI"}, + {I40E_GLPES_PFRDMAVINVLO(0), 15, 8, 0, 0, "GLPES_PFRDMAVINVLO"}, + {I40E_GLPES_PFRDMAVINVHI(0), 15, 8, 0, 0, "GLPES_PFRDMAVINVHI"}, + {I40E_GLPES_VFRXVLANERR(0), 31, 4, 0, 0, "GLPES_VFRXVLANERR"}, + {I40E_GLPES_VFIP4RXOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4RXOCTSLO"}, + {I40E_GLPES_VFIP4RXOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4RXOCTSHI"}, + {I40E_GLPES_VFIP4RXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4RXPKTSLO"}, + {I40E_GLPES_VFIP4RXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4RXPKTSHI"}, + {I40E_GLPES_VFIP4RXDISCARD(0), 31, 4, 0, 0, "GLPES_VFIP4RXDISCARD"}, + {I40E_GLPES_VFIP4RXTRUNC(0), 31, 4, 0, 0, "GLPES_VFIP4RXTRUNC"}, + {I40E_GLPES_VFIP4RXFRAGSLO(0), 31, 8, 0, 0, "GLPES_VFIP4RXFRAGSLO"}, + {I40E_GLPES_VFIP4RXFRAGSHI(0), 31, 8, 0, 0, "GLPES_VFIP4RXFRAGSHI"}, + {I40E_GLPES_VFIP4RXMCOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4RXMCOCTSLO"}, + {I40E_GLPES_VFIP4RXMCOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4RXMCOCTSHI"}, + {I40E_GLPES_VFIP4RXMCPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4RXMCPKTSLO"}, + {I40E_GLPES_VFIP4RXMCPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4RXMCPKTSHI"}, + {I40E_GLPES_VFIP6RXOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6RXOCTSLO"}, + {I40E_GLPES_VFIP6RXOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6RXOCTSHI"}, + {I40E_GLPES_VFIP6RXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6RXPKTSLO"}, + {I40E_GLPES_VFIP6RXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6RXPKTSHI"}, + {I40E_GLPES_VFIP6RXDISCARD(0), 31, 4, 0, 0, "GLPES_VFIP6RXDISCARD"}, + {I40E_GLPES_VFIP6RXTRUNC(0), 31, 4, 0, 0, "GLPES_VFIP6RXTRUNC"}, + {I40E_GLPES_VFIP6RXFRAGSLO(0), 31, 8, 0, 0, "GLPES_VFIP6RXFRAGSLO"}, + {I40E_GLPES_VFIP6RXFRAGSHI(0), 31, 8, 0, 0, "GLPES_VFIP6RXFRAGSHI"}, + {I40E_GLPES_VFIP6RXMCOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6RXMCOCTSLO"}, + {I40E_GLPES_VFIP6RXMCOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6RXMCOCTSHI"}, + {I40E_GLPES_VFIP6RXMCPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6RXMCPKTSLO"}, + {I40E_GLPES_VFIP6RXMCPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6RXMCPKTSHI"}, + {I40E_GLPES_VFIP4TXOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4TXOCTSLO"}, + {I40E_GLPES_VFIP4TXOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4TXOCTSHI"}, + {I40E_GLPES_VFIP4TXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4TXPKTSLO"}, + {I40E_GLPES_VFIP4TXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4TXPKTSHI"}, + {I40E_GLPES_VFIP4TXFRAGSLO(0), 31, 8, 0, 0, "GLPES_VFIP4TXFRAGSLO"}, + {I40E_GLPES_VFIP4TXFRAGSHI(0), 31, 8, 0, 0, "GLPES_VFIP4TXFRAGSHI"}, + {I40E_GLPES_VFIP4TXMCOCTSLO(0), 31, 8, 0, 0, 
"GLPES_VFIP4TXMCOCTSLO"}, + {I40E_GLPES_VFIP4TXMCOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4TXMCOCTSHI"}, + {I40E_GLPES_VFIP4TXMCPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP4TXMCPKTSLO"}, + {I40E_GLPES_VFIP4TXMCPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP4TXMCPKTSHI"}, + {I40E_GLPES_VFIP6TXOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6TXOCTSLO"}, + {I40E_GLPES_VFIP6TXOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6TXOCTSHI"}, + {I40E_GLPES_VFIP6TXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6TXPKTSLO"}, + {I40E_GLPES_VFIP6TXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6TXPKTSHI"}, + {I40E_GLPES_VFIP6TXFRAGSLO(0), 31, 8, 0, 0, "GLPES_VFIP6TXFRAGSLO"}, + {I40E_GLPES_VFIP6TXFRAGSHI(0), 31, 8, 0, 0, "GLPES_VFIP6TXFRAGSHI"}, + {I40E_GLPES_VFIP6TXMCOCTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6TXMCOCTSLO"}, + {I40E_GLPES_VFIP6TXMCOCTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6TXMCOCTSHI"}, + {I40E_GLPES_VFIP6TXMCPKTSLO(0), 31, 8, 0, 0, "GLPES_VFIP6TXMCPKTSLO"}, + {I40E_GLPES_VFIP6TXMCPKTSHI(0), 31, 8, 0, 0, "GLPES_VFIP6TXMCPKTSHI"}, + {I40E_GLPES_VFIP4TXNOROUTE(0), 31, 4, 0, 0, "GLPES_VFIP4TXNOROUTE"}, + {I40E_GLPES_VFIP6TXNOROUTE(0), 31, 4, 0, 0, "GLPES_VFIP6TXNOROUTE"}, + {I40E_GLPES_VFTCPRXSEGSLO(0), 31, 8, 0, 0, "GLPES_VFTCPRXSEGSLO"}, + {I40E_GLPES_VFTCPRXSEGSHI(0), 31, 8, 0, 0, "GLPES_VFTCPRXSEGSHI"}, + {I40E_GLPES_VFTCPRXOPTERR(0), 31, 4, 0, 0, "GLPES_VFTCPRXOPTERR"}, + {I40E_GLPES_VFTCPRXPROTOERR(0), 31, 4, 0, 0, "GLPES_VFTCPRXPROTOERR"}, + {I40E_GLPES_VFTCPTXSEGLO(0), 31, 8, 0, 0, "GLPES_VFTCPTXSEGLO"}, + {I40E_GLPES_VFTCPTXSEGHI(0), 31, 8, 0, 0, "GLPES_VFTCPTXSEGHI"}, + {I40E_GLPES_VFTCPRTXSEG(0), 31, 4, 0, 0, "GLPES_VFTCPRTXSEG"}, + {I40E_GLPES_VFUDPRXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFUDPRXPKTSLO"}, + {I40E_GLPES_VFUDPRXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFUDPRXPKTSHI"}, + {I40E_GLPES_VFUDPTXPKTSLO(0), 31, 8, 0, 0, "GLPES_VFUDPTXPKTSLO"}, + {I40E_GLPES_VFUDPTXPKTSHI(0), 31, 8, 0, 0, "GLPES_VFUDPTXPKTSHI"}, + {I40E_GLPES_VFRDMARXWRSLO(0), 31, 8, 0, 0, "GLPES_VFRDMARXWRSLO"}, + {I40E_GLPES_VFRDMARXWRSHI(0), 31, 8, 0, 0, "GLPES_VFRDMARXWRSHI"}, + {I40E_GLPES_VFRDMARXRDSLO(0), 31, 8, 0, 0, "GLPES_VFRDMARXRDSLO"}, + {I40E_GLPES_VFRDMARXRDSHI(0), 31, 8, 0, 0, "GLPES_VFRDMARXRDSHI"}, + {I40E_GLPES_VFRDMARXSNDSLO(0), 31, 8, 0, 0, "GLPES_VFRDMARXSNDSLO"}, + {I40E_GLPES_VFRDMARXSNDSHI(0), 31, 8, 0, 0, "GLPES_VFRDMARXSNDSHI"}, + {I40E_GLPES_VFRDMATXWRSLO(0), 31, 8, 0, 0, "GLPES_VFRDMATXWRSLO"}, + {I40E_GLPES_VFRDMATXWRSHI(0), 31, 8, 0, 0, "GLPES_VFRDMATXWRSHI"}, + {I40E_GLPES_VFRDMATXRDSLO(0), 31, 8, 0, 0, "GLPES_VFRDMATXRDSLO"}, + {I40E_GLPES_VFRDMATXRDSHI(0), 31, 8, 0, 0, "GLPES_VFRDMATXRDSHI"}, + {I40E_GLPES_VFRDMATXSNDSLO(0), 31, 8, 0, 0, "GLPES_VFRDMATXSNDSLO"}, + {I40E_GLPES_VFRDMATXSNDSHI(0), 31, 8, 0, 0, "GLPES_VFRDMATXSNDSHI"}, + {I40E_GLPES_VFRDMAVBNDLO(0), 31, 8, 0, 0, "GLPES_VFRDMAVBNDLO"}, + {I40E_GLPES_VFRDMAVBNDHI(0), 31, 8, 0, 0, "GLPES_VFRDMAVBNDHI"}, + {I40E_GLPES_VFRDMAVINVLO(0), 31, 8, 0, 0, "GLPES_VFRDMAVINVLO"}, + {I40E_GLPES_VFRDMAVINVHI(0), 31, 8, 0, 0, "GLPES_VFRDMAVINVHI"}, + {I40E_GLPES_RDMARXUNALIGN, 0, 0, 0, 0, "GLPES_RDMARXUNALIGN"}, + {I40E_GLPES_RDMARXOOONOMARK, 0, 0, 0, 0, "GLPES_RDMARXOOONOMARK"}, + {I40E_GLPES_RDMARXMULTFPDUSLO, 0, 0, 0, 0, "GLPES_RDMARXMULTFPDUSLO"}, + {I40E_GLPES_RDMARXMULTFPDUSHI, 0, 0, 0, 0, "GLPES_RDMARXMULTFPDUSHI"}, + {I40E_GLPES_RDMARXOOODDPLO, 0, 0, 0, 0, "GLPES_RDMARXOOODDPLO"}, + {I40E_GLPES_RDMARXOOODDPHI, 0, 0, 0, 0, "GLPES_RDMARXOOODDPHI"}, + {I40E_GLPES_TCPRXPUREACKSLO, 0, 0, 0, 0, "GLPES_TCPRXPUREACKSLO"}, + {I40E_GLPES_TCPRXPUREACKHI, 0, 0, 0, 0, "GLPES_TCPRXPUREACKHI"}, + {I40E_GLPES_TCPRXONEHOLELO, 0, 0, 0, 0, 
"GLPES_TCPRXONEHOLELO"}, + {I40E_GLPES_TCPRXONEHOLEHI, 0, 0, 0, 0, "GLPES_TCPRXONEHOLEHI"}, + {I40E_GLPES_TCPRXTWOHOLELO, 0, 0, 0, 0, "GLPES_TCPRXTWOHOLELO"}, + {I40E_GLPES_TCPRXTWOHOLEHI, 0, 0, 0, 0, "GLPES_TCPRXTWOHOLEHI"}, + {I40E_GLPES_TCPRXTHREEHOLELO, 0, 0, 0, 0, "GLPES_TCPRXTHREEHOLELO"}, + {I40E_GLPES_TCPRXTHREEHOLEHI, 0, 0, 0, 0, "GLPES_TCPRXTHREEHOLEHI"}, + {I40E_GLPES_TCPRXFOURHOLELO, 0, 0, 0, 0, "GLPES_TCPRXFOURHOLELO"}, + {I40E_GLPES_TCPRXFOURHOLEHI, 0, 0, 0, 0, "GLPES_TCPRXFOURHOLEHI"}, + {I40E_GLPES_TCPTXRETRANSFASTLO, 0, 0, 0, 0, "GLPES_TCPTXRETRANSFASTLO"}, + {I40E_GLPES_TCPTXRETRANSFASTHI, 0, 0, 0, 0, "GLPES_TCPTXRETRANSFASTHI"}, + {I40E_GLPES_TCPTXTOUTSFASTLO, 0, 0, 0, 0, "GLPES_TCPTXTOUTSFASTLO"}, + {I40E_GLPES_TCPTXTOUTSFASTHI, 0, 0, 0, 0, "GLPES_TCPTXTOUTSFASTHI"}, + {I40E_GLPES_TCPTXTOUTSLO, 0, 0, 0, 0, "GLPES_TCPTXTOUTSLO"}, + {I40E_GLPES_TCPTXTOUTSHI, 0, 0, 0, 0, "GLPES_TCPTXTOUTSHI"}, + {I40E_PRTDCB_TCMSTC_RLPM(0), 7, 32, 0, 0, "PRTDCB_TCMSTC_RLPM"}, + {I40E_PRTDCB_RLPMC, 0, 0, 0, 0, "PRTDCB_RLPMC"}, + {I40E_PRTDCB_TCPMC_RLPM, 0, 0, 0, 0, "PRTDCB_TCPMC_RLPM"}, + {I40E_VFINT_ITRN(0, 0), 2, 2048, 511, 4, "VFINT_ITRN"}, + {I40E_VFINT_DYN_CTLN(0), 511, 4, 0, 0, "VFINT_DYN_CTLN"}, + {I40E_VPINT_LNKLSTN(0), 511, 4, 0, 0, "VPINT_LNKLSTN"}, + {I40E_VPINT_RATEN(0), 511, 4, 0, 0, "VPINT_RATEN"}, + {I40E_VPINT_CEQCTL(0), 511, 4, 0, 0, "VPINT_CEQCTL"}, + {I40E_VFINT_ITR0(0, 0), 2, 1024, 127, 4, "VFINT_ITR0"}, + {I40E_VFINT_STAT_CTL0(0), 127, 4, 0, 0, "VFINT_STAT_CTL0"}, + {I40E_VFINT_DYN_CTL0(0), 127, 4, 0, 0, "VFINT_DYN_CTL0"}, + {I40E_VPINT_LNKLST0(0), 127, 4, 0, 0, "VPINT_LNKLST0"}, + {I40E_VPINT_RATE0(0), 127, 4, 0, 0, "VPINT_RATE0"}, + {I40E_VPINT_AEQCTL(0), 127, 4, 0, 0, "VPINT_AEQCTL"}, + {I40E_VFINT_ICR0(0), 127, 4, 0, 0, "VFINT_ICR0"}, + {I40E_VFINT_ICR0_ENA(0), 127, 4, 0, 0, "VFINT_ICR0_ENA"}, + {I40E_PFINT_ITRN(0, 0), 2, 2048, 511, 4, "PFINT_ITRN"}, + {I40E_PFINT_DYN_CTLN(0), 511, 4, 0, 0, "PFINT_DYN_CTLN"}, + {I40E_PFINT_LNKLSTN(0), 511, 4, 0, 0, "PFINT_LNKLSTN"}, + {I40E_PFINT_RATEN(0), 511, 4, 0, 0, "PFINT_RATEN"}, + {I40E_PFINT_CEQCTL(0), 511, 4, 0, 0, "PFINT_CEQCTL"}, + {I40E_PFINT_ITR0(0), 2, 128, 0, 0, "PFINT_ITR0"}, + {I40E_PFINT_STAT_CTL0, 0, 0, 0, 0, "PFINT_STAT_CTL0"}, + {I40E_PFINT_DYN_CTL0, 0, 0, 0, 0, "PFINT_DYN_CTL0"}, + {I40E_PFINT_LNKLST0, 0, 0, 0, 0, "PFINT_LNKLST0"}, + {I40E_PFINT_RATE0, 0, 0, 0, 0, "PFINT_RATE0"}, + {I40E_PFINT_AEQCTL, 0, 0, 0, 0, "PFINT_AEQCTL"}, + {I40E_PFINT_ICR0, 0, 0, 0, 0, "PFINT_ICR0"}, + {I40E_PFINT_ICR0_ENA, 0, 0, 0, 0, "PFINT_ICR0_ENA"}, + {I40E_QINT_RQCTL(0), 1535, 4, 0, 0, "QINT_RQCTL"}, + {I40E_QINT_TQCTL(0), 1535, 4, 0, 0, "QINT_TQCTL"}, + {I40E_PFGEN_PORTMDIO_NUM, 0, 0, 0, 0, "PFGEN_PORTMDIO_NUM"}, + {I40E_GLINT_CTL, 0, 0, 0, 0, "GLINT_CTL"}, + {I40E_GLLAN_TSOMSK_F, 0, 0, 0, 0, "GLLAN_TSOMSK_F"}, + {I40E_GLLAN_TSOMSK_M, 0, 0, 0, 0, "GLLAN_TSOMSK_M"}, + {I40E_GLLAN_TSOMSK_L, 0, 0, 0, 0, "GLLAN_TSOMSK_L"}, + {I40E_GL_RDPU_CNTRL, 0, 0, 0, 0, "GL_RDPU_CNTRL"}, + {I40E_PFPM_FHFT_LENGTH(0), 7, 128, 0, 0, "PFPM_FHFT_LENGTH"}, + {I40E_PFPM_WUC, 0, 0, 0, 0, "PFPM_WUC"}, + {I40E_PFPM_WUFC, 0, 0, 0, 0, "PFPM_WUFC"}, + {I40E_PFPM_WUS, 0, 0, 0, 0, "PFPM_WUS"}, + {I40E_PRTPM_FHFHR, 0, 0, 0, 0, "PRTPM_FHFHR"}, + {I40E_GLPM_WUMC, 0, 0, 0, 0, "GLPM_WUMC"}, + {I40E_VPLAN_QTABLE(0, 0), 15, 1024, 127, 4, "VPLAN_QTABLE"}, + {I40E_VPLAN_MAPENA(0), 127, 4, 0, 0, "VPLAN_MAPENA"}, + {I40E_VFGEN_RSTAT1(0), 127, 4, 0, 0, "VFGEN_RSTAT1"}, + {I40E_VPLAN_QBASE(0), 127, 4, 0, 0, "VPLAN_QBASE"}, + {I40E_PF_ATQBAL, 0, 0, 0, 0, "PF_ATQBAL"}, + 
{I40E_GL_ATQBAL, 0, 0, 0, 0, "GL_ATQBAL"}, + {I40E_PF_ARQBAL, 0, 0, 0, 0, "PF_ARQBAL"}, + {I40E_GL_ARQBAL, 0, 0, 0, 0, "GL_ARQBAL"}, + {I40E_PF_ATQBAH, 0, 0, 0, 0, "PF_ATQBAH"}, + {I40E_GL_ATQBAH, 0, 0, 0, 0, "GL_ATQBAH"}, + {I40E_PF_ARQBAH, 0, 0, 0, 0, "PF_ARQBAH"}, + {I40E_GL_ARQBAH, 0, 0, 0, 0, "GL_ARQBAH"}, + {I40E_PF_ATQLEN, 0, 0, 0, 0, "PF_ATQLEN"}, + {I40E_GL_ATQLEN, 0, 0, 0, 0, "GL_ATQLEN"}, + {I40E_PF_ARQLEN, 0, 0, 0, 0, "PF_ARQLEN"}, + {I40E_PF_ATQH, 0, 0, 0, 0, "PF_ATQH"}, + {I40E_GL_ATQH, 0, 0, 0, 0, "GL_ATQH"}, + {I40E_PF_ARQH, 0, 0, 0, 0, "PF_ARQH"}, + {I40E_GL_ARQH, 0, 0, 0, 0, "GL_ARQH"}, + {I40E_PF_ATQT, 0, 0, 0, 0, "PF_ATQT"}, + {I40E_GL_ATQT, 0, 0, 0, 0, "GL_ATQT"}, + {I40E_PF_ARQT, 0, 0, 0, 0, "PF_ARQT"}, + {I40E_GL_ARQT, 0, 0, 0, 0, "GL_ARQT"}, + {I40E_VF_ATQBAL(0), 127, 4, 0, 0, "VF_ATQBAL"}, + {I40E_VF_ARQBAL(0), 127, 4, 0, 0, "VF_ARQBAL"}, + {I40E_VF_ATQBAH(0), 127, 4, 0, 0, "VF_ATQBAH"}, + {I40E_VF_ARQBAH(0), 127, 4, 0, 0, "VF_ARQBAH"}, + {I40E_VF_ATQLEN(0), 127, 4, 0, 0, "VF_ATQLEN"}, + {I40E_VF_ARQLEN(0), 127, 4, 0, 0, "VF_ARQLEN"}, + {I40E_VF_ATQH(0), 127, 4, 0, 0, "VF_ATQH"}, + {I40E_VF_ARQH(0), 127, 4, 0, 0, "VF_ARQH"}, + {I40E_VF_ATQT(0), 127, 4, 0, 0, "VF_ATQT"}, + {I40E_VF_ARQT(0), 127, 4, 0, 0, "VF_ARQT"}, + {I40E_PRTDCB_GENC, 0, 0, 0, 0, "PRTDCB_GENC"}, + {I40E_PRTDCB_GENS, 0, 0, 0, 0, "PRTDCB_GENS"}, + {I40E_GLDCB_GENC, 0, 0, 0, 0, "GLDCB_GENC"}, + {I40E_GL_FWSTS, 0, 0, 0, 0, "GL_FWSTS"}, + {I40E_GL_FWRESETCNT, 0, 0, 0, 0, "GL_FWRESETCNT"}, + {I40E_GL_VF_CTRL_TX(0), 127, 4, 0, 0, "GL_VF_CTRL_TX"}, + {I40E_GL_VF_CTRL_RX(0), 127, 4, 0, 0, "GL_VF_CTRL_RX"}, + {I40E_PRTTSYN_CTL1, 0, 0, 0, 0, "PRTTSYN_CTL1"}, + {I40E_PRTTSYN_RXTIME_H(0), 3, 32, 0, 0, "PRTTSYN_RXTIME_H"}, + {I40E_PRTTSYN_RXTIME_L(0), 3, 32, 0, 0, "PRTTSYN_RXTIME_L"}, + {I40E_PRTTSYN_STAT_1, 0, 0, 0, 0, "PRTTSYN_STAT_1"}, + {I40E_PRT_MNG_FTFT_MASK(0), 7, 32, 0, 0, "PRT_MNG_FTFT_MASK"}, + {I40E_PRT_MNG_FTFT_LENGTH, 0, 0, 0, 0, "PRT_MNG_FTFT_LENGTH"}, + {I40E_PRT_MNG_FTFT_DATA(0), 31, 32, 0, 0, "PRT_MNG_FTFT_DATA"}, + {I40E_GL_PPRS_SPARE, 0, 0, 0, 0, "GL_PPRS_SPARE"}, + {I40E_PFGEN_STATE, 0, 0, 0, 0, "PFGEN_STATE"}, + {I40E_PFINT_GPIO_ENA, 0, 0, 0, 0, "PFINT_GPIO_ENA"}, + {I40E_GLGEN_MISC_SPARE, 0, 0, 0, 0, "GLGEN_MISC_SPARE"}, + {I40E_GLGEN_GPIO_CTL(0), 29, 4, 0, 0, "GLGEN_GPIO_CTL"}, + {I40E_GLGEN_LED_CTL, 0, 0, 0, 0, "GLGEN_LED_CTL"}, + {I40E_GLGEN_GPIO_STAT, 0, 0, 0, 0, "GLGEN_GPIO_STAT"}, + {I40E_GLGEN_GPIO_TRANSIT, 0, 0, 0, 0, "GLGEN_GPIO_TRANSIT"}, + {I40E_GLGEN_GPIO_SET, 0, 0, 0, 0, "GLGEN_GPIO_SET"}, + {I40E_EMPINT_GPIO_ENA, 0, 0, 0, 0, "EMPINT_GPIO_ENA"}, + {I40E_GLGEN_MSCA(0), 3, 4, 0, 0, "GLGEN_MSCA"}, + {I40E_GLGEN_MSRWD(0), 3, 4, 0, 0, "GLGEN_MSRWD"}, + {I40E_GLGEN_I2CPARAMS(0), 3, 4, 0, 0, "GLGEN_I2CPARAMS"}, + {I40E_GLVFGEN_TIMER, 0, 0, 0, 0, "GLVFGEN_TIMER"}, + {I40E_GLGEN_MDIO_I2C_SEL(0), 3, 4, 0, 0, "GLGEN_MDIO_I2C_SEL"}, + {I40E_GLGEN_MDIO_CTRL(0), 3, 4, 0, 0, "GLGEN_MDIO_CTRL"}, + {I40E_GLGEN_I2CCMD(0), 3, 4, 0, 0, "GLGEN_I2CCMD"}, + {I40E_PRTMAC_PCS_XAUI_SWAP_A, 0, 0, 0, 0, "PRTMAC_PCS_XAUI_SWAP_A"}, + {I40E_PRTMAC_PCS_XAUI_SWAP_B, 0, 0, 0, 0, "PRTMAC_PCS_XAUI_SWAP_B"}, + {I40E_VSIGEN_RTRIG(0), 383, 4, 0, 0, "VSIGEN_RTRIG"}, + {I40E_VSIGEN_RSTAT(0), 383, 4, 0, 0, "VSIGEN_RSTAT"}, + {I40E_VPGEN_VFRTRIG(0), 127, 4, 0, 0, "VPGEN_VFRTRIG"}, + {I40E_VPGEN_VFRSTAT(0), 127, 4, 0, 0, "VPGEN_VFRSTAT"}, + {I40E_PFGEN_CTRL, 0, 0, 0, 0, "PFGEN_CTRL"}, + {I40E_PFGEN_DRUN, 0, 0, 0, 0, "PFGEN_DRUN"}, + {I40E_GLGEN_VFLRSTAT(0), 3, 4, 0, 0, "GLGEN_VFLRSTAT"}, + {I40E_GL_UFUSE, 0, 0, 0, 0, 
"GL_UFUSE"}, + {I40E_GL_GP_FUSE(0), 28, 4, 0, 0, "GL_GP_FUSE"}, + {I40E_PRTDCB_TETSC_TPB, 0, 0, 0, 0, "PRTDCB_TETSC_TPB"}, + {I40E_PF_FUNC_RID, 0, 0, 0, 0, "PF_FUNC_RID"}, + {I40E_PF_PCI_CIAA, 0, 0, 0, 0, "PF_PCI_CIAA"}, + {I40E_PF_PCI_CIAD, 0, 0, 0, 0, "PF_PCI_CIAD"}, + {I40E_PFPCI_FACTPS, 0, 0, 0, 0, "PFPCI_FACTPS"}, + {I40E_PFPCI_ICAUSE, 0, 0, 0, 0, "PFPCI_ICAUSE"}, + {I40E_PFPCI_IENA, 0, 0, 0, 0, "PFPCI_IENA"}, + {I40E_PFPCI_VMINDEX, 0, 0, 0, 0, "PFPCI_VMINDEX"}, + {I40E_PFPCI_VMPEND, 0, 0, 0, 0, "PFPCI_VMPEND"}, + {I40E_GLPCI_DREVID, 0, 0, 0, 0, "GLPCI_DREVID"}, + {I40E_GLPCI_BYTCTH, 0, 0, 0, 0, "GLPCI_BYTCTH"}, + {I40E_GLPCI_BYTCTL, 0, 0, 0, 0, "GLPCI_BYTCTL"}, + {I40E_GLPCI_GSCL_1, 0, 0, 0, 0, "GLPCI_GSCL_1"}, + {I40E_GLPCI_GSCL_2, 0, 0, 0, 0, "GLPCI_GSCL_2"}, + {I40E_GLPCI_GSCL_5_8(0), 3, 4, 0, 0, "GLPCI_GSCL_5_8"}, + {I40E_GLPCI_GSCN_0_3(0), 3, 4, 0, 0, "GLPCI_GSCN_0_3"}, + {I40E_GLPCI_PKTCT, 0, 0, 0, 0, "GLPCI_PKTCT"}, + {I40E_GLPCI_PQ_MAX_USED_SPC, 0, 0, 0, 0, "GLPCI_PQ_MAX_USED_SPC"}, + {I40E_GLPCI_PM_MUX_PFB, 0, 0, 0, 0, "GLPCI_PM_MUX_PFB"}, + {I40E_GLPCI_PM_MUX_NPQ, 0, 0, 0, 0, "GLPCI_PM_MUX_NPQ"}, + {I40E_GLPCI_SPARE_BITS_0, 0, 0, 0, 0, "GLPCI_SPARE_BITS_0"}, + {I40E_GLPCI_SPARE_BITS_1, 0, 0, 0, 0, "GLPCI_SPARE_BITS_1"}, + {I40E_GLPCI_CUR_RLAN_ALWD, 0, 0, 0, 0, "GLPCI_CUR_RLAN_ALWD"}, + {I40E_GLPCI_CUR_TLAN_ALWD, 0, 0, 0, 0, "GLPCI_CUR_TLAN_ALWD"}, + {I40E_GLPCI_CUR_RXPE_ALWD, 0, 0, 0, 0, "GLPCI_CUR_RXPE_ALWD"}, + {I40E_GLPCI_CUR_TXPE_ALWD, 0, 0, 0, 0, "GLPCI_CUR_TXPE_ALWD"}, + {I40E_GLPCI_CUR_PMAT_ALWD, 0, 0, 0, 0, "GLPCI_CUR_PMAT_ALWD"}, + {I40E_GLPCI_CUR_MNG_ALWD, 0, 0, 0, 0, "GLPCI_CUR_MNG_ALWD"}, + {I40E_GLPCI_CUR_TDPU_ALWD, 0, 0, 0, 0, "GLPCI_CUR_TDPU_ALWD"}, + {I40E_GLPCI_CUR_RLAN_RSVD, 0, 0, 0, 0, "GLPCI_CUR_RLAN_RSVD"}, + {I40E_GLPCI_CUR_TLAN_RSVD, 0, 0, 0, 0, "GLPCI_CUR_TLAN_RSVD"}, + {I40E_GLPCI_CUR_RXPE_RSVD, 0, 0, 0, 0, "GLPCI_CUR_RXPE_RSVD"}, + {I40E_GLPCI_CUR_TXPE_RSVD, 0, 0, 0, 0, "GLPCI_CUR_TXPE_RSVD"}, + {I40E_GLPCI_CUR_PMAT_RSVD, 0, 0, 0, 0, "GLPCI_CUR_PMAT_RSVD"}, + {I40E_GLPCI_CUR_MNG_RSVD, 0, 0, 0, 0, "GLPCI_CUR_MNG_RSVD"}, + {I40E_GLPCI_CUR_TDPU_RSVD, 0, 0, 0, 0, "GLPCI_CUR_TDPU_RSVD"}, + {I40E_PFPCI_VF_FLUSH_DONE1(0), 127, 4, 0, 0, "PFPCI_VF_FLUSH_DONE1"}, + {I40E_PFPCI_PF_FLUSH_DONE, 0, 0, 0, 0, "PFPCI_PF_FLUSH_DONE"}, + {I40E_PFPCI_VM_FLUSH_DONE, 0, 0, 0, 0, "PFPCI_VM_FLUSH_DONE"}, + {I40E_GLPCI_NPQ_CFG, 0, 0, 0, 0, "GLPCI_NPQ_CFG"}, + {I40E_GLPCI_CUR_CLNT_COMMON, 0, 0, 0, 0, "GLPCI_CUR_CLNT_COMMON"}, + {I40E_GLPCI_CUR_CLNT_PIPEMON, 0, 0, 0, 0, "GLPCI_CUR_CLNT_PIPEMON"}, + {I40E_GLPCI_CUR_WATMK_CLNT_COMMON, 0, 0, 0, 0, + "GLPCI_CUR_WATMK_CLNT_COMMON"}, + {I40E_GLPCI_WATMK_CLNT_PIPEMON, 0, 0, 0, 0, + "GLPCI_WATMK_CLNT_PIPEMON"}, + {I40E_GLPCI_WATMK_RLAN_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_RLAN_ALWD"}, + {I40E_GLPCI_WATMK_TLAN_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_TLAN_ALWD"}, + {I40E_GLPCI_WATMK_RXPE_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_RXPE_ALWD"}, + {I40E_GLPCI_WATMK_TXPE_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_TXPE_ALWD"}, + {I40E_GLPCI_WATMK_PMAT_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_PMAT_ALWD"}, + {I40E_GLPCI_WATMK_MNG_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_MNG_ALWD"}, + {I40E_GLPCI_WATMK_TPDU_ALWD, 0, 0, 0, 0, "GLPCI_WATMK_TPDU_ALWD"}, + {I40E_PRTDCB_TCMSTC(0), 7, 32, 0, 0, "PRTDCB_TCMSTC"}, + {I40E_PRTDCB_TFMSTC(0), 7, 32, 0, 0, "PRTDCB_TFMSTC"}, + {I40E_PRTDCB_TDPMC, 0, 0, 0, 0, "PRTDCB_TDPMC"}, + {I40E_PRTDCB_TCWSTC(0), 7, 32, 0, 0, "PRTDCB_TCWSTC"}, + {I40E_PRTDCB_TCPMC, 0, 0, 0, 0, "PRTDCB_TCPMC"}, + {I40E_GL_TUPM_SPARE, 0, 0, 0, 0, "GL_TUPM_SPARE"}, + {I40E_GLPEOC_CACHESIZE, 0, 
0, 0, 0, "GLPEOC_CACHESIZE"}, + {I40E_GLPBLOC_CACHESIZE, 0, 0, 0, 0, "GLPBLOC_CACHESIZE"}, + {I40E_GLFOC_CACHESIZE, 0, 0, 0, 0, "GLFOC_CACHESIZE"}, + {I40E_PRTRPB_DHW(0), 7, 32, 0, 0, "PRTRPB_DHW"}, + {I40E_PRTRPB_DLW(0), 7, 32, 0, 0, "PRTRPB_DLW"}, + {I40E_PRTRPB_DPS(0), 7, 32, 0, 0, "PRTRPB_DPS"}, + {I40E_PRTRPB_SHT(0), 7, 32, 0, 0, "PRTRPB_SHT"}, + {I40E_PRTRPB_SHW, 0, 0, 0, 0, "PRTRPB_SHW"}, + {I40E_PRTRPB_SLT(0), 7, 32, 0, 0, "PRTRPB_SLT"}, + {I40E_PRTRPB_SLW, 0, 0, 0, 0, "PRTRPB_SLW"}, + {I40E_PRTRPB_SPS, 0, 0, 0, 0, "PRTRPB_SPS"}, + {I40E_GLRPB_DPSS, 0, 0, 0, 0, "GLRPB_DPSS"}, + {I40E_GLRPB_GHW, 0, 0, 0, 0, "GLRPB_GHW"}, + {I40E_GLRPB_GLW, 0, 0, 0, 0, "GLRPB_GLW"}, + {I40E_GLRPB_PHW, 0, 0, 0, 0, "GLRPB_PHW"}, + {I40E_GLRPB_PLW, 0, 0, 0, 0, "GLRPB_PLW"}, + {I40E_PRTDCB_TETSC_TCB, 0, 0, 0, 0, "PRTDCB_TETSC_TCB"}, + {I40E_GLNVM_ULD, 0, 0, 0, 0, "GLNVM_ULD"}, + {I40E_GLNVM_ULD, 0, 0, 0, 0, "GLNVM_ULD"}, + {I40E_GLNVM_PROTCSR(0), 59, 4, 0, 0, "GLNVM_PROTCSR"}, + {I40E_GLNVM_GENS, 0, 0, 0, 0, "GLNVM_GENS"}, + {I40E_GLNVM_FLASHID, 0, 0, 0, 0, "GLNVM_FLASHID"}, + {I40E_GLNVM_FLA, 0, 0, 0, 0, "GLNVM_FLA"}, + {I40E_GLNVM_FLA, 0, 0, 0, 0, "GLNVM_FLA"}, + {I40E_GLNVM_SRCTL, 0, 0, 0, 0, "GLNVM_SRCTL"}, + {I40E_GLNVM_SRDATA, 0, 0, 0, 0, "GLNVM_SRDATA"}, + {I40E_GLGEN_STAT, 0, 0, 0, 0, "GLGEN_STAT"}, + {I40E_GL_MNG_HWARB_CTRL, 0, 0, 0, 0, "GL_MNG_HWARB_CTRL"}, + {I40E_GL_MNG_FWSM, 0, 0, 0, 0, "GL_MNG_FWSM"}, + {I40E_GLNVM_ALTIMERS, 0, 0, 0, 0, "GLNVM_ALTIMERS"}, + {I40E_GLNVM_ULT, 0, 0, 0, 0, "GLNVM_ULT"}, + {I40E_MEM_INIT_DONE_STAT, 0, 0, 0, 0, "MEM_INIT_DONE_STAT"}, + {I40E_GLNVM_AL_REQ, 0, 0, 0, 0, "GLNVM_AL_REQ"}, + {I40E_MNGSB_MSGCTL, 0, 0, 0, 0, "MNGSB_MSGCTL"}, + {I40E_MNGSB_RSPCTL, 0, 0, 0, 0, "MNGSB_RSPCTL"}, + {I40E_MNGSB_DADD, 0, 0, 0, 0, "MNGSB_DADD"}, + {I40E_MNGSB_DCNT, 0, 0, 0, 0, "MNGSB_DCNT"}, + {I40E_MNGSB_FDCS, 0, 0, 0, 0, "MNGSB_FDCS"}, + {I40E_MNGSB_FDS, 0, 0, 0, 0, "MNGSB_FDS"}, + {I40E_MNGSB_FDCRC, 0, 0, 0, 0, "MNGSB_FDCRC"}, + {I40E_MNGSB_WHDR0, 0, 0, 0, 0, "MNGSB_WHDR0"}, + {I40E_MNGSB_WHDR1, 0, 0, 0, 0, "MNGSB_WHDR1"}, + {I40E_MNGSB_WHDR2, 0, 0, 0, 0, "MNGSB_WHDR2"}, + {I40E_MNGSB_WDATA, 0, 0, 0, 0, "MNGSB_WDATA"}, + {I40E_MNGSB_RHDR0, 0, 0, 0, 0, "MNGSB_RHDR0"}, + {I40E_MNGSB_RDATA, 0, 0, 0, 0, "MNGSB_RDATA"}, + {I40E_PFPM_APM, 0, 0, 0, 0, "PFPM_APM"}, + {I40E_PRTGEN_STATUS, 0, 0, 0, 0, "PRTGEN_STATUS"}, + {I40E_PRTGEN_CNF, 0, 0, 0, 0, "PRTGEN_CNF"}, + {I40E_PRTPM_GC, 0, 0, 0, 0, "PRTPM_GC"}, + {I40E_PRTGEN_CNF2, 0, 0, 0, 0, "PRTGEN_CNF2"}, + {I40E_GLGEN_RSTCTL, 0, 0, 0, 0, "GLGEN_RSTCTL"}, + {I40E_GLGEN_CLKSTAT, 0, 0, 0, 0, "GLGEN_CLKSTAT"}, + {I40E_GLGEN_RSTAT, 0, 0, 0, 0, "GLGEN_RSTAT"}, + {I40E_GLGEN_RTRIG, 0, 0, 0, 0, "GLGEN_RTRIG"}, + {I40E_GLGEN_PME_TO, 0, 0, 0, 0, "GLGEN_PME_TO"}, + {I40E_GLGEN_CAR_DEBUG, 0, 0, 0, 0, "GLGEN_CAR_DEBUG"}, + {I40E_PFPCI_CNF, 0, 0, 0, 0, "PFPCI_CNF"}, + {I40E_PFPCI_DEVID, 0, 0, 0, 0, "PFPCI_DEVID"}, + {I40E_PFPCI_SUBSYSID, 0, 0, 0, 0, "PFPCI_SUBSYSID"}, + {I40E_PFPCI_FUNC2, 0, 0, 0, 0, "PFPCI_FUNC2"}, + {I40E_PFPCI_FUNC, 0, 0, 0, 0, "PFPCI_FUNC"}, + {I40E_PFPCI_STATUS1, 0, 0, 0, 0, "PFPCI_STATUS1"}, + {I40E_PFPCI_PM, 0, 0, 0, 0, "PFPCI_PM"}, + {I40E_PFPCI_CLASS, 0, 0, 0, 0, "PFPCI_CLASS"}, + {I40E_GLTPH_CTRL, 0, 0, 0, 0, "GLTPH_CTRL"}, + {I40E_GLPCI_LBARCTRL, 0, 0, 0, 0, "GLPCI_LBARCTRL"}, + {I40E_GLPCI_SUBVENID, 0, 0, 0, 0, "GLPCI_SUBVENID"}, + {I40E_GLPCI_PWRDATA, 0, 0, 0, 0, "GLPCI_PWRDATA"}, + {I40E_GLPCI_CNF2, 0, 0, 0, 0, "GLPCI_CNF2"}, + {I40E_GLPCI_SERL, 0, 0, 0, 0, "GLPCI_SERL"}, + {I40E_GLPCI_SERH, 0, 0, 0, 0, "GLPCI_SERH"}, 
+ {I40E_GLPCI_CAPCTRL, 0, 0, 0, 0, "GLPCI_CAPCTRL"}, + {I40E_GLPCI_CAPSUP, 0, 0, 0, 0, "GLPCI_CAPSUP"}, + {I40E_GLPCI_LINKCAP, 0, 0, 0, 0, "GLPCI_LINKCAP"}, + {I40E_GLPCI_PMSUP, 0, 0, 0, 0, "GLPCI_PMSUP"}, + {I40E_GLPCI_REVID, 0, 0, 0, 0, "GLPCI_REVID"}, + {I40E_GLPCI_VFSUP, 0, 0, 0, 0, "GLPCI_VFSUP"}, + {I40E_GLPCI_CNF, 0, 0, 0, 0, "GLPCI_CNF"}, + {I40E_GLPCI_UPADD, 0, 0, 0, 0, "GLPCI_UPADD"}, + {I40E_GLPCI_PCIERR, 0, 0, 0, 0, "GLPCI_PCIERR"}, + {I40E_GLPCI_VENDORID, 0, 0, 0, 0, "GLPCI_VENDORID"}, + {I40E_GL_UFUSE_SOC, 0, 0, 0, 0, "GL_UFUSE_SOC"}, + {I40E_PFHMC_SDCMD, 0, 0, 0, 0, "PFHMC_SDCMD"}, + {I40E_PFHMC_SDDATALOW, 0, 0, 0, 0, "PFHMC_SDDATALOW"}, + {I40E_PFHMC_SDDATAHIGH, 0, 0, 0, 0, "PFHMC_SDDATAHIGH"}, + {I40E_PFHMC_PDINV, 0, 0, 0, 0, "PFHMC_PDINV"}, + {I40E_PFHMC_ERRORINFO, 0, 0, 0, 0, "PFHMC_ERRORINFO"}, + {I40E_PFHMC_ERRORDATA, 0, 0, 0, 0, "PFHMC_ERRORDATA"}, + {I40E_GLHMC_SDPART(0), 15, 4, 0, 0, "GLHMC_SDPART"}, + {I40E_GLHMC_PFPESDPART(0), 15, 4, 0, 0, "GLHMC_PFPESDPART"}, + {I40E_GLHMC_PFASSIGN(0), 15, 4, 0, 0, "GLHMC_PFASSIGN"}, + {I40E_GLHMC_LANTXOBJSZ, 0, 0, 0, 0, "GLHMC_LANTXOBJSZ"}, + {I40E_GLHMC_LANQMAX, 0, 0, 0, 0, "GLHMC_LANQMAX"}, + {I40E_GLHMC_LANRXOBJSZ, 0, 0, 0, 0, "GLHMC_LANRXOBJSZ"}, + {I40E_GLHMC_FCOEDDPOBJSZ, 0, 0, 0, 0, "GLHMC_FCOEDDPOBJSZ"}, + {I40E_GLHMC_FCOEMAX, 0, 0, 0, 0, "GLHMC_FCOEMAX"}, + {I40E_GLHMC_FCOEFOBJSZ, 0, 0, 0, 0, "GLHMC_FCOEFOBJSZ"}, + {I40E_GLHMC_PEQPOBJSZ, 0, 0, 0, 0, "GLHMC_PEQPOBJSZ"}, + {I40E_GLHMC_PECQOBJSZ, 0, 0, 0, 0, "GLHMC_PECQOBJSZ"}, + {I40E_GLHMC_PESRQOBJSZ, 0, 0, 0, 0, "GLHMC_PESRQOBJSZ"}, + {I40E_GLHMC_PESRQMAX, 0, 0, 0, 0, "GLHMC_PESRQMAX"}, + {I40E_GLHMC_PEHTEOBJSZ, 0, 0, 0, 0, "GLHMC_PEHTEOBJSZ"}, + {I40E_GLHMC_PEHTMAX, 0, 0, 0, 0, "GLHMC_PEHTMAX"}, + {I40E_GLHMC_PEARPOBJSZ, 0, 0, 0, 0, "GLHMC_PEARPOBJSZ"}, + {I40E_GLHMC_PEARPMAX, 0, 0, 0, 0, "GLHMC_PEARPMAX"}, + {I40E_GLHMC_PEMROBJSZ, 0, 0, 0, 0, "GLHMC_PEMROBJSZ"}, + {I40E_GLHMC_PEMRMAX, 0, 0, 0, 0, "GLHMC_PEMRMAX"}, + {I40E_GLHMC_PEXFOBJSZ, 0, 0, 0, 0, "GLHMC_PEXFOBJSZ"}, + {I40E_GLHMC_PEXFMAX, 0, 0, 0, 0, "GLHMC_PEXFMAX"}, + {I40E_GLHMC_PEXFFLMAX, 0, 0, 0, 0, "GLHMC_PEXFFLMAX"}, + {I40E_GLHMC_PEQ1OBJSZ, 0, 0, 0, 0, "GLHMC_PEQ1OBJSZ"}, + {I40E_GLHMC_PEQ1MAX, 0, 0, 0, 0, "GLHMC_PEQ1MAX"}, + {I40E_GLHMC_PEQ1FLMAX, 0, 0, 0, 0, "GLHMC_PEQ1FLMAX"}, + {I40E_GLHMC_FSIMCOBJSZ, 0, 0, 0, 0, "GLHMC_FSIMCOBJSZ"}, + {I40E_GLHMC_FSIMCMAX, 0, 0, 0, 0, "GLHMC_FSIMCMAX"}, + {I40E_GLHMC_FSIAVOBJSZ, 0, 0, 0, 0, "GLHMC_FSIAVOBJSZ"}, + {I40E_GLHMC_FSIAVMAX, 0, 0, 0, 0, "GLHMC_FSIAVMAX"}, + {I40E_GLHMC_PEPBLMAX, 0, 0, 0, 0, "GLHMC_PEPBLMAX"}, + {I40E_GLHMC_PETIMEROBJSZ, 0, 0, 0, 0, "GLHMC_PETIMEROBJSZ"}, + {I40E_GLHMC_PETIMERMAX, 0, 0, 0, 0, "GLHMC_PETIMERMAX"}, + {I40E_GLHMC_FCOEFMAX, 0, 0, 0, 0, "GLHMC_FCOEFMAX"}, + {I40E_GLHMC_PEPFFIRSTSD, 0, 0, 0, 0, "GLHMC_PEPFFIRSTSD"}, + {I40E_GLHMC_DBQPMAX, 0, 0, 0, 0, "GLHMC_DBQPMAX"}, + {I40E_GLHMC_DBCQMAX, 0, 0, 0, 0, "GLHMC_DBCQMAX"}, + {I40E_GLHMC_PEQPBASE(0), 15, 4, 0, 0, "GLHMC_PEQPBASE"}, + {I40E_GLHMC_PEQPCNT(0), 15, 4, 0, 0, "GLHMC_PEQPCNT"}, + {I40E_GLHMC_PECQBASE(0), 15, 4, 0, 0, "GLHMC_PECQBASE"}, + {I40E_GLHMC_PECQCNT(0), 15, 4, 0, 0, "GLHMC_PECQCNT"}, + {I40E_GLHMC_PESRQBASE(0), 15, 4, 0, 0, "GLHMC_PESRQBASE"}, + {I40E_GLHMC_PESRQCNT(0), 15, 4, 0, 0, "GLHMC_PESRQCNT"}, + {I40E_GLHMC_PEHTEBASE(0), 15, 4, 0, 0, "GLHMC_PEHTEBASE"}, + {I40E_GLHMC_PEHTCNT(0), 15, 4, 0, 0, "GLHMC_PEHTCNT"}, + {I40E_GLHMC_PEARPBASE(0), 15, 4, 0, 0, "GLHMC_PEARPBASE"}, + {I40E_GLHMC_PEARPCNT(0), 15, 4, 0, 0, "GLHMC_PEARPCNT"}, + {I40E_GLHMC_APBVTINUSEBASE(0), 
15, 4, 0, 0, "GLHMC_APBVTINUSEBASE"}, + {I40E_GLHMC_PEMRBASE(0), 15, 4, 0, 0, "GLHMC_PEMRBASE"}, + {I40E_GLHMC_PEMRCNT(0), 15, 4, 0, 0, "GLHMC_PEMRCNT"}, + {I40E_GLHMC_PEXFBASE(0), 15, 4, 0, 0, "GLHMC_PEXFBASE"}, + {I40E_GLHMC_PEXFCNT(0), 15, 4, 0, 0, "GLHMC_PEXFCNT"}, + {I40E_GLHMC_PEXFFLBASE(0), 15, 4, 0, 0, "GLHMC_PEXFFLBASE"}, + {I40E_GLHMC_PEQ1BASE(0), 15, 4, 0, 0, "GLHMC_PEQ1BASE"}, + {I40E_GLHMC_PEQ1CNT(0), 15, 4, 0, 0, "GLHMC_PEQ1CNT"}, + {I40E_GLHMC_PEQ1FLBASE(0), 15, 4, 0, 0, "GLHMC_PEQ1FLBASE"}, + {I40E_GLHMC_FSIAVBASE(0), 15, 4, 0, 0, "GLHMC_FSIAVBASE"}, + {I40E_GLHMC_FSIAVCNT(0), 15, 4, 0, 0, "GLHMC_FSIAVCNT"}, + {I40E_GLHMC_PEPBLBASE(0), 15, 4, 0, 0, "GLHMC_PEPBLBASE"}, + {I40E_GLHMC_PEPBLCNT(0), 15, 4, 0, 0, "GLHMC_PEPBLCNT"}, + {I40E_GLHMC_PETIMERBASE(0), 15, 4, 0, 0, "GLHMC_PETIMERBASE"}, + {I40E_GLHMC_PETIMERCNT(0), 15, 4, 0, 0, "GLHMC_PETIMERCNT"}, + {I40E_GLHMC_FSIMCBASE(0), 15, 4, 0, 0, "GLHMC_FSIMCBASE"}, + {I40E_GLHMC_FSIMCCNT(0), 15, 4, 0, 0, "GLHMC_FSIMCCNT"}, + {I40E_GLHMC_LANTXBASE(0), 15, 4, 0, 0, "GLHMC_LANTXBASE"}, + {I40E_GLHMC_LANTXCNT(0), 15, 4, 0, 0, "GLHMC_LANTXCNT"}, + {I40E_GLHMC_LANRXBASE(0), 15, 4, 0, 0, "GLHMC_LANRXBASE"}, + {I40E_GLHMC_LANRXCNT(0), 15, 4, 0, 0, "GLHMC_LANRXCNT"}, + {I40E_GLHMC_FCOEDDPBASE(0), 15, 4, 0, 0, "GLHMC_FCOEDDPBASE"}, + {I40E_GLHMC_FCOEDDPCNT(0), 15, 4, 0, 0, "GLHMC_FCOEDDPCNT"}, + {I40E_GLHMC_FCOEFBASE(0), 15, 4, 0, 0, "GLHMC_FCOEFBASE"}, + {I40E_GLHMC_FCOEFCNT(0), 15, 4, 0, 0, "GLHMC_FCOEFCNT"}, + {I40E_GLHMC_VFPDINV(0), 31, 4, 0, 0, "GLHMC_VFPDINV"}, + {I40E_GLHMC_VFSDPART(0), 31, 4, 0, 0, "GLHMC_VFSDPART"}, + {I40E_GLHMC_VFPEQPBASE(0), 31, 4, 0, 0, "GLHMC_VFPEQPBASE"}, + {I40E_GLHMC_VFPEQPCNT(0), 31, 4, 0, 0, "GLHMC_VFPEQPCNT"}, + {I40E_GLHMC_VFPECQBASE(0), 31, 4, 0, 0, "GLHMC_VFPECQBASE"}, + {I40E_GLHMC_VFPECQCNT(0), 31, 4, 0, 0, "GLHMC_VFPECQCNT"}, + {I40E_GLHMC_VFPESRQBASE(0), 31, 4, 0, 0, "GLHMC_VFPESRQBASE"}, + {I40E_GLHMC_VFPESRQCNT(0), 31, 4, 0, 0, "GLHMC_VFPESRQCNT"}, + {I40E_GLHMC_VFPEHTEBASE(0), 31, 4, 0, 0, "GLHMC_VFPEHTEBASE"}, + {I40E_GLHMC_VFPEHTCNT(0), 31, 4, 0, 0, "GLHMC_VFPEHTCNT"}, + {I40E_GLHMC_VFPEARPBASE(0), 31, 4, 0, 0, "GLHMC_VFPEARPBASE"}, + {I40E_GLHMC_VFPEARPCNT(0), 31, 4, 0, 0, "GLHMC_VFPEARPCNT"}, + {I40E_GLHMC_VFAPBVTINUSEBASE(0), 31, 4, 0, 0, "GLHMC_VFAPBVTINUSEBASE"}, + {I40E_GLHMC_VFPEMRBASE(0), 31, 4, 0, 0, "GLHMC_VFPEMRBASE"}, + {I40E_GLHMC_VFPEMRCNT(0), 31, 4, 0, 0, "GLHMC_VFPEMRCNT"}, + {I40E_GLHMC_VFPEXFBASE(0), 31, 4, 0, 0, "GLHMC_VFPEXFBASE"}, + {I40E_GLHMC_VFPEXFCNT(0), 31, 4, 0, 0, "GLHMC_VFPEXFCNT"}, + {I40E_GLHMC_VFPEXFFLBASE(0), 31, 4, 0, 0, "GLHMC_VFPEXFFLBASE"}, + {I40E_GLHMC_VFPEQ1BASE(0), 31, 4, 0, 0, "GLHMC_VFPEQ1BASE"}, + {I40E_GLHMC_VFPEQ1CNT(0), 31, 4, 0, 0, "GLHMC_VFPEQ1CNT"}, + {I40E_GLHMC_VFPEQ1FLBASE(0), 31, 4, 0, 0, "GLHMC_VFPEQ1FLBASE"}, + {I40E_GLHMC_VFFSIAVBASE(0), 31, 4, 0, 0, "GLHMC_VFFSIAVBASE"}, + {I40E_GLHMC_VFFSIAVCNT(0), 31, 4, 0, 0, "GLHMC_VFFSIAVCNT"}, + {I40E_GLHMC_VFPEPBLBASE(0), 31, 4, 0, 0, "GLHMC_VFPEPBLBASE"}, + {I40E_GLHMC_VFPEPBLCNT(0), 31, 4, 0, 0, "GLHMC_VFPEPBLCNT"}, + {I40E_GLHMC_VFPETIMERBASE(0), 31, 4, 0, 0, "GLHMC_VFPETIMERBASE"}, + {I40E_GLHMC_VFPETIMERCNT(0), 31, 4, 0, 0, "GLHMC_VFPETIMERCNT"}, + {I40E_GLPDOC_CACHESIZE, 0, 0, 0, 0, "GLPDOC_CACHESIZE"}, + {I40E_QTX_HEAD(0), 1535, 4, 0, 0, "QTX_HEAD"}, + {I40E_VP_MDET_TX(0), 127, 4, 0, 0, "VP_MDET_TX"}, + {I40E_PF_MDET_TX, 0, 0, 0, 0, "PF_MDET_TX"}, + {I40E_GL_MDET_TX, 0, 0, 0, 0, "GL_MDET_TX"}, + {I40E_GL_TLAN_SPARE, 0, 0, 0, 0, "GL_TLAN_SPARE"}, + {I40E_GLLAN_TXPRE_QDIS(0), 11, 4, 0, 
0, "GLLAN_TXPRE_QDIS"}, + {I40E_QTX_ENA(0), 1535, 4, 0, 0, "QTX_ENA"}, + {I40E_QTX_CTL(0), 1535, 4, 0, 0, "QTX_CTL"}, + {I40E_QTX_TAIL(0), 1535, 4, 0, 0, "QTX_TAIL"}, + {I40E_PFCM_LAN_ERRINFO, 0, 0, 0, 0, "PFCM_LAN_ERRINFO"}, + {I40E_PFCM_LAN_ERRDATA, 0, 0, 0, 0, "PFCM_LAN_ERRDATA"}, + {I40E_PFCM_LANCTXDATA(0), 3, 128, 0, 0, "PFCM_LANCTXDATA"}, + {I40E_PFCM_LANCTXCTL, 0, 0, 0, 0, "PFCM_LANCTXCTL"}, + {I40E_PFCM_LANCTXSTAT, 0, 0, 0, 0, "PFCM_LANCTXSTAT"}, + {I40E_GLCM_LAN_CACHESIZE, 0, 0, 0, 0, "GLCM_LAN_CACHESIZE"}, + {I40E_QRX_ENA(0), 1535, 4, 0, 0, "QRX_ENA"}, + {I40E_PRTDCB_RETSTCC(0), 7, 32, 0, 0, "PRTDCB_RETSTCC"}, + {I40E_PRTDCB_RPPMC, 0, 0, 0, 0, "PRTDCB_RPPMC"}, + {I40E_PRTDCB_RETSC, 0, 0, 0, 0, "PRTDCB_RETSC"}, + {I40E_PRTDCB_RUPTQ(0), 7, 32, 0, 0, "PRTDCB_RUPTQ"}, + {I40E_GLDCB_RUPTI, 0, 0, 0, 0, "GLDCB_RUPTI"}, + {I40E_QRX_TAIL(0), 1535, 4, 0, 0, "QRX_TAIL"}, + {I40E_VP_MDET_RX(0), 127, 4, 0, 0, "VP_MDET_RX"}, + {I40E_PF_MDET_RX, 0, 0, 0, 0, "PF_MDET_RX"}, + {I40E_GLLAN_RCTL_0, 0, 0, 0, 0, "GLLAN_RCTL_0"}, + {I40E_GL_MDET_RX, 0, 0, 0, 0, "GL_MDET_RX"}, + {I40E_VFPE_CQARM(0), 127, 4, 0, 0, "VFPE_CQARM"}, + {I40E_VFPE_CQACK(0), 127, 4, 0, 0, "VFPE_CQACK"}, + {I40E_VFPE_AEQALLOC(0), 127, 4, 0, 0, "VFPE_AEQALLOC"}, + {I40E_PFPE_CQARM, 0, 0, 0, 0, "PFPE_CQARM"}, + {I40E_PFPE_CQACK, 0, 0, 0, 0, "PFPE_CQACK"}, + {I40E_PFPE_AEQALLOC, 0, 0, 0, 0, "PFPE_AEQALLOC"}, + {I40E_GLHMC_DBCQPART(0), 15, 4, 0, 0, "GLHMC_DBCQPART"}, + {I40E_GLHMC_CEQPART(0), 15, 4, 0, 0, "GLHMC_CEQPART"}, + {I40E_GLPE_PFCQEDROPCNT(0), 15, 4, 0, 0, "GLPE_PFCQEDROPCNT"}, + {I40E_GLPE_PFCEQEDROPCNT(0), 15, 4, 0, 0, "GLPE_PFCEQEDROPCNT"}, + {I40E_GLPE_PFAEQEDROPCNT(0), 15, 4, 0, 0, "GLPE_PFAEQEDROPCNT"}, + {I40E_GLHMC_VFDBCQPART(0), 31, 4, 0, 0, "GLHMC_VFDBCQPART"}, + {I40E_GLHMC_VFCEQPART(0), 31, 4, 0, 0, "GLHMC_VFCEQPART"}, + {I40E_GLPE_VFCQEDROPCNT(0), 31, 4, 0, 0, "GLPE_VFCQEDROPCNT"}, + {I40E_GLPE_VFCEQEDROPCNT(0), 31, 4, 0, 0, "GLPE_VFCEQEDROPCNT"}, + {I40E_GLPE_VFAEQEDROPCNT(0), 31, 4, 0, 0, "GLPE_VFAEQEDROPCNT"}, + {I40E_VFPE_WQEALLOC(0), 127, 4, 0, 0, "VFPE_WQEALLOC"}, + {I40E_VFCM_PE_ERRINFO1(0), 127, 4, 0, 0, "VFCM_PE_ERRINFO1"}, + {I40E_VFCM_PE_ERRDATA1(0), 127, 4, 0, 0, "VFCM_PE_ERRDATA1"}, + {I40E_PFPE_WQEALLOC, 0, 0, 0, 0, "PFPE_WQEALLOC"}, + {I40E_PFCM_PE_ERRINFO, 0, 0, 0, 0, "PFCM_PE_ERRINFO"}, + {I40E_PFCM_PE_ERRDATA, 0, 0, 0, 0, "PFCM_PE_ERRDATA"}, + {I40E_GLHMC_DBQPPART(0), 15, 4, 0, 0, "GLHMC_DBQPPART"}, + {I40E_GLHMC_VFDBQPPART(0), 31, 4, 0, 0, "GLHMC_VFDBQPPART"}, + {I40E_GLCM_PE_CACHESIZE, 0, 0, 0, 0, "GLCM_PE_CACHESIZE"}, + {I40E_PFGEN_PORTNUM, 0, 0, 0, 0, "PFGEN_PORTNUM"}, + {I40E_PF_VT_PFALLOC, 0, 0, 0, 0, "PF_VT_PFALLOC"}, + {I40E_PRTDCB_TC2PFC, 0, 0, 0, 0, "PRTDCB_TC2PFC"}, + {I40E_PRTDCB_RUP2TC, 0, 0, 0, 0, "PRTDCB_RUP2TC"}, + {I40E_GLGEN_PCIFCNCNT, 0, 0, 0, 0, "GLGEN_PCIFCNCNT"}, + {I40E_PRTDCB_RUP, 0, 0, 0, 0, "PRTDCB_RUP"}, + {I40E_PRT_L2TAGSEN, 0, 0, 0, 0, "PRT_L2TAGSEN"}, + {I40E_PRTGL_SAL, 0, 0, 0, 0, "PRTGL_SAL"}, + {I40E_PRTGL_SAH, 0, 0, 0, 0, "PRTGL_SAH"}, + {I40E_PRTDCB_MFLCN, 0, 0, 0, 0, "PRTDCB_MFLCN"}, + {I40E_PRTMAC_LINK_DOWN_COUNTER, 0, 0, 0, 0, + "PRTMAC_LINK_DOWN_COUNTER"}, + {I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE"}, + {I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE"}, + {I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_RX_ENABLE_GCP"}, + {I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1"}, + 
{I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2"}, + {I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1"}, + {I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2"}, + {I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_RX_ENABLE_GPP"}, + {I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_RX_ENABLE_PPP"}, + {I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL"}, + {I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(0), 8, 16, 0, 0, + "PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA"}, + {I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(0), 8, 16, 0, 0, + "PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER"}, + {I40E_PRTMAC_HSEC_CTL_TX_SA_PART1, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_TX_SA_PART1"}, + {I40E_PRTMAC_HSEC_CTL_TX_SA_PART2, 0, 0, 0, 0, + "PRTMAC_HSEC_CTL_TX_SA_PART2"}, + {I40E_PRTTSYN_INC_L, 0, 0, 0, 0, "PRTTSYN_INC_L"}, + {I40E_PRTTSYN_INC_H, 0, 0, 0, 0, "PRTTSYN_INC_H"}, + {I40E_PRTTSYN_EVNT_L(0), 1, 32, 0, 0, "PRTTSYN_EVNT_L"}, + {I40E_PRTTSYN_EVNT_H(0), 1, 32, 0, 0, "PRTTSYN_EVNT_H"}, + {I40E_PRTTSYN_TIME_L, 0, 0, 0, 0, "PRTTSYN_TIME_L"}, + {I40E_PRTTSYN_TIME_H, 0, 0, 0, 0, "PRTTSYN_TIME_H"}, + {I40E_PRTTSYN_TGT_L(0), 1, 32, 0, 0, "PRTTSYN_TGT_L"}, + {I40E_PRTTSYN_TGT_H(0), 1, 32, 0, 0, "PRTTSYN_TGT_H"}, + {I40E_PRTTSYN_TXTIME_L, 0, 0, 0, 0, "PRTTSYN_TXTIME_L"}, + {I40E_PRTTSYN_TXTIME_H, 0, 0, 0, 0, "PRTTSYN_TXTIME_H"}, + {I40E_PRTTSYN_CTL0, 0, 0, 0, 0, "PRTTSYN_CTL0"}, + {I40E_PRTTSYN_STAT_0, 0, 0, 0, 0, "PRTTSYN_STAT_0"}, + {I40E_PRTTSYN_CLKO(0), 1, 32, 0, 0, "PRTTSYN_CLKO"}, + {I40E_PRTTSYN_ADJ, 0, 0, 0, 0, "PRTTSYN_ADJ"}, + {I40E_PRTTSYN_AUX_0(0), 1, 32, 0, 0, "PRTTSYN_AUX_0"}, + {I40E_PRTTSYN_AUX_1(0), 1, 32, 0, 0, "PRTTSYN_AUX_1"}, + {I40E_PRTPM_EEE_STAT, 0, 0, 0, 0, "PRTPM_EEE_STAT"}, + {I40E_PRTPM_EEER, 0, 0, 0, 0, "PRTPM_EEER"}, + {I40E_PRTPM_EEEC, 0, 0, 0, 0, "PRTPM_EEEC"}, + {I40E_PRTPM_RLPIC, 0, 0, 0, 0, "PRTPM_RLPIC"}, + {I40E_PRTPM_TLPIC, 0, 0, 0, 0, "PRTPM_TLPIC"}, + {I40E_PRTPM_EEETXC, 0, 0, 0, 0, "PRTPM_EEETXC"}, + {I40E_PRTPM_EEEFWD, 0, 0, 0, 0, "PRTPM_EEEFWD"}, + {I40E_PRTPM_SAL(0), 3, 32, 0, 0, "PRTPM_SAL"}, + {I40E_PRTPM_SAH(0), 3, 32, 0, 0, "PRTPM_SAH"}, + {I40E_PRTDCB_TFCS, 0, 0, 0, 0, "PRTDCB_TFCS"}, + {I40E_PRTDCB_FCTTVN(0), 3, 32, 0, 0, "PRTDCB_FCTTVN"}, + {I40E_PRTDCB_FCRTV, 0, 0, 0, 0, "PRTDCB_FCRTV"}, + {I40E_PRTDCB_FCCFG, 0, 0, 0, 0, "PRTDCB_FCCFG"}, + {I40E_PRTDCB_TPFCTS(0), 7, 32, 0, 0, "PRTDCB_TPFCTS"}, + {I40E_VFQF_HLUT1(0, 0), 15, 1024, 127, 4, "VFQF_HLUT1"}, + {I40E_VSIQF_HLUT(0, 0), 15, 2048, 383, 4, "VSIQF_HLUT"}, + {I40E_VFQF_HKEY1(0, 0), 12, 1024, 127, 4, "VFQF_HKEY1"}, + {I40E_VFQF_HREGION1(0, 0), 7, 1024, 127, 4, "VFQF_HREGION1"}, + {I40E_VFQF_HENA1(0, 0), 1, 1024, 127, 4, "VFQF_HENA1"}, + {I40E_PFQF_HLUT(0), 127, 128, 0, 0, "PFQF_HLUT"}, + {I40E_X722_PFQF_HLUT(0), 127, 128, 0, 0, "X722_PFQF_HLUT"}, + {I40E_PFQF_CTL_1, 0, 0, 0, 0, "PFQF_CTL_1"}, + {I40E_PFQF_FDSTAT, 0, 0, 0, 0, "PFQF_FDSTAT"}, + {I40E_PRT_MNG_MIPAF6(0), 15, 32, 0, 0, "PRT_MNG_MIPAF6"}, + {I40E_PRT_MNG_MFUTP(0), 15, 32, 0, 0, "PRT_MNG_MFUTP"}, + {I40E_PRTQF_FLX_PIT(0), 8, 32, 0, 0, "PRTQF_FLX_PIT"}, + {I40E_PRT_MNG_MAVTV(0), 7, 32, 0, 0, "PRT_MNG_MAVTV"}, + {I40E_PRT_MNG_MDEF(0), 7, 32, 0, 0, "PRT_MNG_MDEF"}, + {I40E_PRT_MNG_MDEF_EXT(0), 7, 32, 0, 0, "PRT_MNG_MDEF_EXT"}, + {I40E_PRT_MNG_MIPAF4(0), 3, 32, 0, 0, "PRT_MNG_MIPAF4"}, + {I40E_PRT_MNG_MMAH(0), 3, 32, 0, 0, "PRT_MNG_MMAH"}, + {I40E_PRT_MNG_MMAL(0), 3, 32, 0, 0, 
"PRT_MNG_MMAL"}, + {I40E_PRT_MNG_MDEFVSI(0), 3, 32, 0, 0, "PRT_MNG_MDEFVSI"}, + {I40E_PRT_MNG_METF(0), 3, 32, 0, 0, "PRT_MNG_METF"}, + {I40E_PRT_MNG_MANC, 0, 0, 0, 0, "PRT_MNG_MANC"}, + {I40E_PRT_MNG_MNGONLY, 0, 0, 0, 0, "PRT_MNG_MNGONLY"}, + {I40E_PRT_MNG_MSFM, 0, 0, 0, 0, "PRT_MNG_MSFM"}, + {I40E_GLQF_APBVT(0), 2047, 4, 0, 0, "GLQF_APBVT"}, + {I40E_GLQF_PCNT(0), 511, 4, 0, 0, "GLQF_PCNT"}, + {I40E_GLQF_FD_PCTYPES(0), 63, 4, 0, 0, "GLQF_FD_PCTYPES"}, + {I40E_GLQF_ORT(0), 63, 4, 0, 0, "GLQF_ORT"}, + {I40E_GLQF_PIT(0), 23, 4, 0, 0, "GLQF_PIT"}, + {I40E_GL_PRS_FVBM(0), 3, 4, 0, 0, "GL_PRS_FVBM"}, + {I40E_GLQF_FDCNT_0, 0, 0, 0, 0, "GLQF_FDCNT_0"}, + {I40E_GL_MTG_FLU_MSK_H, 0, 0, 0, 0, "GL_MTG_FLU_MSK_H"}, + {I40E_GL_SWR_DEF_ACT_EN(0), 1, 4, 0, 0, "GL_SWR_DEF_ACT_EN"}, + {I40E_GLQF_HKEY(0), 12, 4, 0, 0, "GLQF_HKEY"}, + {I40E_GL_SWR_DEF_ACT(0), 35, 4, 0, 0, "GL_SWR_DEF_ACT"}, + {I40E_GLQF_FDEVICTFLAG, 0, 0, 0, 0, "GLQF_FDEVICTFLAG"}, + {I40E_PFQF_CTL_2, 0, 0, 0, 0, "PFQF_CTL_2"}, + {I40E_GLQF_FDEVICTENA(0), 1, 4, 0, 0, "GLQF_FDEVICTENA"}, + {I40E_VSIQF_HKEY(0, 0), 12, 2048, 383, 4, "VSIQF_HKEY"}, + {I40E_GLPRT_GORCL(0), 3, 8, 0, 0, "GLPRT_GORCL"}, + {I40E_GLPRT_GORCH(0), 3, 8, 0, 0, "GLPRT_GORCH"}, + {I40E_GLPRT_MLFC(0), 3, 8, 0, 0, "GLPRT_MLFC"}, + {I40E_GLPRT_MRFC(0), 3, 8, 0, 0, "GLPRT_MRFC"}, + {I40E_GLPRT_CRCERRS(0), 3, 8, 0, 0, "GLPRT_CRCERRS"}, + {I40E_GLPRT_RLEC(0), 3, 8, 0, 0, "GLPRT_RLEC"}, + {I40E_GLPRT_ILLERRC(0), 3, 8, 0, 0, "GLPRT_ILLERRC"}, + {I40E_GLPRT_RUC(0), 3, 8, 0, 0, "GLPRT_RUC"}, + {I40E_GLPRT_ROC(0), 3, 8, 0, 0, "GLPRT_ROC"}, + {I40E_GLPRT_LXONRXC(0), 3, 8, 0, 0, "GLPRT_LXONRXC"}, + {I40E_GLPRT_LXOFFRXC(0), 3, 8, 0, 0, "GLPRT_LXOFFRXC"}, + {I40E_GLPRT_PXONRXC(0, 0), 3, 8, 7, 32, "GLPRT_PXONRXC"}, + {I40E_GLPRT_PXOFFRXC(0, 0), 3, 8, 7, 32, "GLPRT_PXOFFRXC"}, + {I40E_GLPRT_RXON2OFFCNT(0, 0), 3, 8, 7, 32, "GLPRT_RXON2OFFCNT"}, + {I40E_GLPRT_PRC64L(0), 3, 8, 0, 0, "GLPRT_PRC64L"}, + {I40E_GLPRT_PRC64H(0), 3, 8, 0, 0, "GLPRT_PRC64H"}, + {I40E_GLPRT_PRC127L(0), 3, 8, 0, 0, "GLPRT_PRC127L"}, + {I40E_GLPRT_PRC127H(0), 3, 8, 0, 0, "GLPRT_PRC127H"}, + {I40E_GLPRT_PRC255L(0), 3, 8, 0, 0, "GLPRT_PRC255L"}, + {I40E_GLPRT_PRC255H(0), 3, 8, 0, 0, "GLPRT_PRC255H"}, + {I40E_GLPRT_PRC511L(0), 3, 8, 0, 0, "GLPRT_PRC511L"}, + {I40E_GLPRT_PRC511H(0), 3, 8, 0, 0, "GLPRT_PRC511H"}, + {I40E_GLPRT_PRC1023L(0), 3, 8, 0, 0, "GLPRT_PRC1023L"}, + {I40E_GLPRT_PRC1023H(0), 3, 8, 0, 0, "GLPRT_PRC1023H"}, + {I40E_GLPRT_PRC1522L(0), 3, 8, 0, 0, "GLPRT_PRC1522L"}, + {I40E_GLPRT_PRC1522H(0), 3, 8, 0, 0, "GLPRT_PRC1522H"}, + {I40E_GLPRT_PRC9522L(0), 3, 8, 0, 0, "GLPRT_PRC9522L"}, + {I40E_GLPRT_PRC9522H(0), 3, 8, 0, 0, "GLPRT_PRC9522H"}, + {I40E_GLPRT_RFC(0), 3, 8, 0, 0, "GLPRT_RFC"}, + {I40E_GLPRT_RJC(0), 3, 8, 0, 0, "GLPRT_RJC"}, + {I40E_GLPRT_UPRCL(0), 3, 8, 0, 0, "GLPRT_UPRCL"}, + {I40E_GLPRT_UPRCH(0), 3, 8, 0, 0, "GLPRT_UPRCH"}, + {I40E_GLPRT_MPRCL(0), 3, 8, 0, 0, "GLPRT_MPRCL"}, + {I40E_GLPRT_MPRCH(0), 3, 8, 0, 0, "GLPRT_MPRCH"}, + {I40E_GLPRT_BPRCL(0), 3, 8, 0, 0, "GLPRT_BPRCL"}, + {I40E_GLPRT_BPRCH(0), 3, 8, 0, 0, "GLPRT_BPRCH"}, + {I40E_GLPRT_RDPC(0), 3, 8, 0, 0, "GLPRT_RDPC"}, + {I40E_GLPRT_LDPC(0), 3, 8, 0, 0, "GLPRT_LDPC"}, + {I40E_GLPRT_RUPP(0), 3, 8, 0, 0, "GLPRT_RUPP"}, + {I40E_GLPRT_GOTCL(0), 3, 8, 0, 0, "GLPRT_GOTCL"}, + {I40E_GLPRT_GOTCH(0), 3, 8, 0, 0, "GLPRT_GOTCH"}, + {I40E_GLPRT_PTC64L(0), 3, 8, 0, 0, "GLPRT_PTC64L"}, + {I40E_GLPRT_PTC64H(0), 3, 8, 0, 0, "GLPRT_PTC64H"}, + {I40E_GLPRT_PTC127L(0), 3, 8, 0, 0, "GLPRT_PTC127L"}, + {I40E_GLPRT_PTC127H(0), 3, 8, 0, 0, "GLPRT_PTC127H"}, + 
{I40E_GLPRT_PTC255L(0), 3, 8, 0, 0, "GLPRT_PTC255L"}, + {I40E_GLPRT_PTC255H(0), 3, 8, 0, 0, "GLPRT_PTC255H"}, + {I40E_GLPRT_PTC511L(0), 3, 8, 0, 0, "GLPRT_PTC511L"}, + {I40E_GLPRT_PTC511H(0), 3, 8, 0, 0, "GLPRT_PTC511H"}, + {I40E_GLPRT_PTC1023L(0), 3, 8, 0, 0, "GLPRT_PTC1023L"}, + {I40E_GLPRT_PTC1023H(0), 3, 8, 0, 0, "GLPRT_PTC1023H"}, + {I40E_GLPRT_PTC1522L(0), 3, 8, 0, 0, "GLPRT_PTC1522L"}, + {I40E_GLPRT_PTC1522H(0), 3, 8, 0, 0, "GLPRT_PTC1522H"}, + {I40E_GLPRT_PTC9522L(0), 3, 8, 0, 0, "GLPRT_PTC9522L"}, + {I40E_GLPRT_PTC9522H(0), 3, 8, 0, 0, "GLPRT_PTC9522H"}, + {I40E_GLPRT_PXONTXC(0, 0), 3, 8, 7, 32, "GLPRT_PXONTXC"}, + {I40E_GLPRT_PXOFFTXC(0, 0), 3, 8, 7, 32, "GLPRT_PXOFFTXC"}, + {I40E_GLPRT_LXONTXC(0), 3, 8, 0, 0, "GLPRT_LXONTXC"}, + {I40E_GLPRT_LXOFFTXC(0), 3, 8, 0, 0, "GLPRT_LXOFFTXC"}, + {I40E_GLPRT_UPTCL(0), 3, 8, 0, 0, "GLPRT_UPTCL"}, + {I40E_GLPRT_UPTCH(0), 3, 8, 0, 0, "GLPRT_UPTCH"}, + {I40E_GLPRT_MPTCL(0), 3, 8, 0, 0, "GLPRT_MPTCL"}, + {I40E_GLPRT_MPTCH(0), 3, 8, 0, 0, "GLPRT_MPTCH"}, + {I40E_GLPRT_BPTCL(0), 3, 8, 0, 0, "GLPRT_BPTCL"}, + {I40E_GLPRT_BPTCH(0), 3, 8, 0, 0, "GLPRT_BPTCH"}, + {I40E_GLPRT_TDOLD(0), 3, 8, 0, 0, "GLPRT_TDOLD"}, + {I40E_GLV_RDPC(0), 383, 8, 0, 0, "GLV_RDPC"}, + {I40E_GL_FCOELAST(0), 143, 8, 0, 0, "GL_FCOELAST"}, + {I40E_GL_FCOEDDPC(0), 143, 8, 0, 0, "GL_FCOEDDPC"}, + {I40E_GL_FCOECRC(0), 143, 8, 0, 0, "GL_FCOECRC"}, + {I40E_GL_FCOEPRC(0), 143, 8, 0, 0, "GL_FCOEPRC"}, + {I40E_GL_RXERR1_L(0), 143, 8, 0, 0, "GL_RXERR1_L"}, + {I40E_GL_FCOEDIFEC(0), 143, 8, 0, 0, "GL_FCOEDIFEC"}, + {I40E_GL_RXERR2_L(0), 143, 8, 0, 0, "GL_RXERR2_L"}, + {I40E_GL_FCOEDWRCL(0), 143, 8, 0, 0, "GL_FCOEDWRCL"}, + {I40E_GL_FCOEDWRCH(0), 143, 8, 0, 0, "GL_FCOEDWRCH"}, + {I40E_GL_FCOERPDC(0), 143, 8, 0, 0, "GL_FCOERPDC"}, + {I40E_GLV_GOTCL(0), 383, 8, 0, 0, "GLV_GOTCL"}, + {I40E_GLV_GOTCH(0), 383, 8, 0, 0, "GLV_GOTCH"}, + {I40E_GLSW_GOTCL(0), 15, 8, 0, 0, "GLSW_GOTCL"}, + {I40E_GLSW_GOTCH(0), 15, 8, 0, 0, "GLSW_GOTCH"}, + {I40E_GLVEBVL_GOTCL(0), 127, 8, 0, 0, "GLVEBVL_GOTCL"}, + {I40E_GLVEBVL_GOTCH(0), 127, 8, 0, 0, "GLVEBVL_GOTCH"}, + {I40E_GLVEBTC_TBCL(0, 0), 7, 8, 15, 64, "GLVEBTC_TBCL"}, + {I40E_GLVEBTC_TBCH(0, 0), 7, 8, 15, 64, "GLVEBTC_TBCH"}, + {I40E_GLVEBTC_TPCL(0, 0), 7, 8, 15, 64, "GLVEBTC_TPCL"}, + {I40E_GLVEBTC_TPCH(0, 0), 7, 8, 15, 64, "GLVEBTC_TPCH"}, + {I40E_GLV_UPTCL(0), 383, 8, 0, 0, "GLV_UPTCL"}, + {I40E_GLV_UPTCH(0), 383, 8, 0, 0, "GLV_UPTCH"}, + {I40E_GLV_MPTCL(0), 383, 8, 0, 0, "GLV_MPTCL"}, + {I40E_GLV_MPTCH(0), 383, 8, 0, 0, "GLV_MPTCH"}, + {I40E_GLV_BPTCL(0), 383, 8, 0, 0, "GLV_BPTCL"}, + {I40E_GLV_BPTCH(0), 383, 8, 0, 0, "GLV_BPTCH"}, + {I40E_GLSW_UPTCL(0), 15, 8, 0, 0, "GLSW_UPTCL"}, + {I40E_GLSW_UPTCH(0), 15, 8, 0, 0, "GLSW_UPTCH"}, + {I40E_GLSW_MPTCL(0), 15, 8, 0, 0, "GLSW_MPTCL"}, + {I40E_GLSW_MPTCH(0), 15, 8, 0, 0, "GLSW_MPTCH"}, + {I40E_GLSW_BPTCL(0), 15, 8, 0, 0, "GLSW_BPTCL"}, + {I40E_GLSW_BPTCH(0), 15, 8, 0, 0, "GLSW_BPTCH"}, + {I40E_GLV_TEPC(0), 383, 4, 0, 0, "GLV_TEPC"}, + {I40E_GL_FCOEPTC(0), 143, 8, 0, 0, "GL_FCOEPTC"}, + {I40E_GLSW_TDPC(0), 15, 8, 0, 0, "GLSW_TDPC"}, + {I40E_GL_FCOEDWTCL(0), 143, 8, 0, 0, "GL_FCOEDWTCL"}, + {I40E_GL_FCOEDWTCH(0), 143, 8, 0, 0, "GL_FCOEDWTCH"}, + {I40E_GL_FCOEDIXEC(0), 143, 8, 0, 0, "GL_FCOEDIXEC"}, + {I40E_GL_FCOEDIXVC(0), 143, 8, 0, 0, "GL_FCOEDIXVC"}, + {I40E_GL_FCOEDIFTCL(0), 143, 8, 0, 0, "GL_FCOEDIFTCL"}, + {I40E_GLV_GORCL(0), 383, 8, 0, 0, "GLV_GORCL"}, + {I40E_GLV_GORCH(0), 383, 8, 0, 0, "GLV_GORCH"}, + {I40E_GLSW_GORCL(0), 15, 8, 0, 0, "GLSW_GORCL"}, + {I40E_GLSW_GORCH(0), 15, 8, 0, 0, "GLSW_GORCH"}, + 
{I40E_GLVEBVL_GORCL(0), 127, 8, 0, 0, "GLVEBVL_GORCL"}, + {I40E_GLVEBVL_GORCH(0), 127, 8, 0, 0, "GLVEBVL_GORCH"}, + {I40E_GLVEBTC_RBCL(0, 0), 7, 8, 15, 64, "GLVEBTC_RBCL"}, + {I40E_GLVEBTC_RBCH(0, 0), 7, 8, 15, 64, "GLVEBTC_RBCH"}, + {I40E_GLVEBTC_RPCL(0, 0), 7, 8, 15, 64, "GLVEBTC_RPCL"}, + {I40E_GLVEBTC_RPCH(0, 0), 7, 8, 15, 64, "GLVEBTC_RPCH"}, + {I40E_GLV_UPRCL(0), 383, 8, 0, 0, "GLV_UPRCL"}, + {I40E_GLV_UPRCH(0), 383, 8, 0, 0, "GLV_UPRCH"}, + {I40E_GLV_MPRCL(0), 383, 8, 0, 0, "GLV_MPRCL"}, + {I40E_GLV_MPRCH(0), 383, 8, 0, 0, "GLV_MPRCH"}, + {I40E_GLV_BPRCL(0), 383, 8, 0, 0, "GLV_BPRCL"}, + {I40E_GLV_BPRCH(0), 383, 8, 0, 0, "GLV_BPRCH"}, + {I40E_GLV_RUPP(0), 383, 8, 0, 0, "GLV_RUPP"}, + {I40E_GLSW_UPRCL(0), 15, 8, 0, 0, "GLSW_UPRCL"}, + {I40E_GLSW_UPRCH(0), 15, 8, 0, 0, "GLSW_UPRCH"}, + {I40E_GLSW_MPRCL(0), 15, 8, 0, 0, "GLSW_MPRCL"}, + {I40E_GLSW_MPRCH(0), 15, 8, 0, 0, "GLSW_MPRCH"}, + {I40E_GLSW_BPRCL(0), 15, 8, 0, 0, "GLSW_BPRCL"}, + {I40E_GLSW_BPRCH(0), 15, 8, 0, 0, "GLSW_BPRCH"}, + {I40E_GLSW_RUPP(0), 15, 8, 0, 0, "GLSW_RUPP"}, + {I40E_GLVEBVL_UPCL(0), 127, 8, 0, 0, "GLVEBVL_UPCL"}, + {I40E_GLVEBVL_UPCH(0), 127, 8, 0, 0, "GLVEBVL_UPCH"}, + {I40E_GLVEBVL_MPCL(0), 127, 8, 0, 0, "GLVEBVL_MPCL"}, + {I40E_GLVEBVL_MPCH(0), 127, 8, 0, 0, "GLVEBVL_MPCH"}, + {I40E_GLVEBVL_BPCL(0), 127, 8, 0, 0, "GLVEBVL_BPCL"}, + {I40E_GLVEBVL_BPCH(0), 127, 8, 0, 0, "GLVEBVL_BPCH"}, + {I40E_GLGEN_STAT_HALT, 0, 0, 0, 0, "GLGEN_STAT_HALT"}, + {I40E_GLGEN_STAT_CLEAR, 0, 0, 0, 0, "GLGEN_STAT_CLEAR"}, + {0, 0, 0, 0, 0, NULL} +}; diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c new file mode 100644 index 00000000..4d35d83f --- /dev/null +++ b/drivers/net/i40e/i40e_rxtx.c @@ -0,0 +1,3319 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <errno.h> +#include <stdint.h> +#include <stdarg.h> +#include <unistd.h> +#include <inttypes.h> +#include <sys/queue.h> + +#include <rte_string_fns.h> +#include <rte_memzone.h> +#include <rte_mbuf.h> +#include <rte_malloc.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_tcp.h> +#include <rte_sctp.h> +#include <rte_udp.h> + +#include "i40e_logs.h" +#include "base/i40e_prototype.h" +#include "base/i40e_type.h" +#include "i40e_ethdev.h" +#include "i40e_rxtx.h" + +#define DEFAULT_TX_RS_THRESH 32 +#define DEFAULT_TX_FREE_THRESH 32 +#define I40E_MAX_PKT_TYPE 256 + +#define I40E_TX_MAX_BURST 32 + +#define I40E_DMA_MEM_ALIGN 4096 + +/* Base address of the HW descriptor ring should be 128B aligned. */ +#define I40E_RING_BASE_ALIGN 128 + +#define I40E_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \ + ETH_TXQ_FLAGS_NOOFFLOADS) + +#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) + +#define I40E_TX_CKSUM_OFFLOAD_MASK ( \ + PKT_TX_IP_CKSUM | \ + PKT_TX_L4_MASK | \ + PKT_TX_TCP_SEG | \ + PKT_TX_OUTER_IP_CKSUM) + +static uint16_t i40e_xmit_pkts_simple(void *tx_queue, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +static inline void +i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp) +{ + if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) & + (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) { + mb->ol_flags |= PKT_RX_VLAN_PKT; + mb->vlan_tci = + rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1); + PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u", + rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1)); + } else { + mb->vlan_tci = 0; + } +#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC + if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) & + (1 << I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) { + mb->ol_flags |= PKT_RX_QINQ_PKT; + mb->vlan_tci_outer = mb->vlan_tci; + mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2); + PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u", + rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_1), + rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2)); + } else { + mb->vlan_tci_outer = 0; + } +#endif + PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u", + mb->vlan_tci, mb->vlan_tci_outer); +} + +/* Translate the rx descriptor status to pkt flags */ +static inline uint64_t +i40e_rxd_status_to_pkt_flags(uint64_t qword) +{ + uint64_t flags; + + /* Check if RSS_HASH */ + flags = (((qword >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) & + I40E_RX_DESC_FLTSTAT_RSS_HASH) == + I40E_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0; + + /* Check if FDIR Match */ + flags |= (qword & (1 << I40E_RX_DESC_STATUS_FLM_SHIFT) ? 
+ PKT_RX_FDIR : 0); + + return flags; +} + +static inline uint64_t +i40e_rxd_error_to_pkt_flags(uint64_t qword) +{ + uint64_t flags = 0; + uint64_t error_bits = (qword >> I40E_RXD_QW1_ERROR_SHIFT); + +#define I40E_RX_ERR_BITS 0x3f + if (likely((error_bits & I40E_RX_ERR_BITS) == 0)) + return flags; + /* If RXE bit set, all other status bits are meaningless */ + if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + flags |= PKT_RX_MAC_ERR; + return flags; + } + + /* If RECIPE bit set, all other status indications should be ignored */ + if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_RECIPE_SHIFT))) { + flags |= PKT_RX_RECIP_ERR; + return flags; + } + if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT))) + flags |= PKT_RX_HBUF_OVERFLOW; + if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT))) + flags |= PKT_RX_IP_CKSUM_BAD; + if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) + flags |= PKT_RX_L4_CKSUM_BAD; + if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) + flags |= PKT_RX_EIP_CKSUM_BAD; + if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_OVERSIZE_SHIFT))) + flags |= PKT_RX_OVERSIZE; + + return flags; +} + +/* Function to check and set the ieee1588 timesync index and get the + * appropriate flags. + */ +#ifdef RTE_LIBRTE_IEEE1588 +static inline uint64_t +i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword) +{ + uint64_t pkt_flags = 0; + uint16_t tsyn = (qword & (I40E_RXD_QW1_STATUS_TSYNVALID_MASK + | I40E_RXD_QW1_STATUS_TSYNINDX_MASK)) + >> I40E_RX_DESC_STATUS_TSYNINDX_SHIFT; + + if ((mb->packet_type & RTE_PTYPE_L2_MASK) + == RTE_PTYPE_L2_ETHER_TIMESYNC) + pkt_flags = PKT_RX_IEEE1588_PTP; + if (tsyn & 0x04) { + pkt_flags |= PKT_RX_IEEE1588_TMST; + mb->timesync = tsyn & 0x03; + } + + return pkt_flags; +} +#endif + +/* For each value it means, datasheet of hardware can tell more details + * + * @note: fix i40e_dev_supported_ptypes_get() if any change here. 
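+ *
+ * Each index below is the 8-bit PTYPE value reported in the Rx
+ * descriptor; the table translates it into the matching RTE_PTYPE_*
+ * flags. Reserved indexes are left at 0 and therefore resolve to
+ * RTE_PTYPE_UNKNOWN.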
+ */ +static inline uint32_t +i40e_rxd_pkt_type_mapping(uint8_t ptype) +{ + static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = { + /* L2 types */ + /* [0] reserved */ + [1] = RTE_PTYPE_L2_ETHER, + [2] = RTE_PTYPE_L2_ETHER_TIMESYNC, + /* [3] - [5] reserved */ + [6] = RTE_PTYPE_L2_ETHER_LLDP, + /* [7] - [10] reserved */ + [11] = RTE_PTYPE_L2_ETHER_ARP, + /* [12] - [21] reserved */ + + /* Non tunneled IPv4 */ + [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + /* [25] reserved */ + [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_SCTP, + [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_ICMP, + + /* IPv4 --> IPv4 */ + [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [32] reserved */ + [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> IPv6 */ + [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [39] reserved */ + [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN */ + [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT, + + /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */ + [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [47] reserved */ + [48] = 
RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */ + [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [54] reserved */ + [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC */ + [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ + [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [62] reserved */ + [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ + [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [69] reserved */ + [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER 
| + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */ + [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ + [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [77] reserved */ + [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ + [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [84] reserved */ + [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* Non tunneled IPv6 */ + [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + /* [91] reserved */ + [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_SCTP, + [94] = 
RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_ICMP, + + /* IPv6 --> IPv4 */ + [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [98] reserved */ + [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> IPv6 */ + [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [105] reserved */ + [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN */ + [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT, + + /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */ + [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [113] reserved */ + [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */ + [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [119] = RTE_PTYPE_L2_ETHER | 
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [120] reserved */ + [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC */ + [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ + [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [128] reserved */ + [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ + [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [135] reserved */ + [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */ + [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ + [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + 
RTE_PTYPE_INNER_L4_FRAG, + [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [143] reserved */ + [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ + [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [150] reserved */ + [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* All others reserved */ + }; + + return type_table[ptype]; +} + +#define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK 0x03 +#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID 0x01 +#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX 0x02 +#define I40E_RX_DESC_EXT_STATUS_FLEXBL_MASK 0x03 +#define I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX 0x01 + +static inline uint64_t +i40e_rxd_build_fdir(volatile union i40e_rx_desc *rxdp, struct rte_mbuf *mb) +{ + uint64_t flags = 0; +#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC + uint16_t flexbh, flexbl; + + flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >> + I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) & + I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK; + flexbl = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >> + I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT) & + I40E_RX_DESC_EXT_STATUS_FLEXBL_MASK; + + + if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) { + mb->hash.fdir.hi = + rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id); + flags |= PKT_RX_FDIR_ID; + } else if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX) { + mb->hash.fdir.hi = + rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.flex_bytes_hi); + flags |= PKT_RX_FDIR_FLX; + } + if (flexbl == I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX) { + mb->hash.fdir.lo = + rte_le_to_cpu_32(rxdp->wb.qword3.lo_dword.flex_bytes_lo); + flags |= PKT_RX_FDIR_FLX; + } +#else + 
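+ /* 16-byte Rx descriptors carry no extended status word, so no
+ * flexible-payload flags can be reported; the FD filter ID is read
+ * directly from qword0.
+ */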
mb->hash.fdir.hi = + rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id); + flags |= PKT_RX_FDIR_ID; +#endif + return flags; +} +static inline void +i40e_txd_enable_checksum(uint64_t ol_flags, + uint32_t *td_cmd, + uint32_t *td_offset, + union i40e_tx_offload tx_offload, + uint32_t *cd_tunneling) +{ + /* UDP tunneling packet TX checksum offload */ + if (ol_flags & PKT_TX_OUTER_IP_CKSUM) { + + *td_offset |= (tx_offload.outer_l2_len >> 1) + << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + + if (ol_flags & PKT_TX_OUTER_IP_CKSUM) + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; + else if (ol_flags & PKT_TX_OUTER_IPV4) + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; + else if (ol_flags & PKT_TX_OUTER_IPV6) + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; + + /* Now set the ctx descriptor fields */ + *cd_tunneling |= (tx_offload.outer_l3_len >> 2) << + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | + (tx_offload.l2_len >> 1) << + I40E_TXD_CTX_QW0_NATLEN_SHIFT; + + } else + *td_offset |= (tx_offload.l2_len >> 1) + << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + + /* Enable L3 checksum offloads */ + if (ol_flags & PKT_TX_IP_CKSUM) { + *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM; + *td_offset |= (tx_offload.l3_len >> 2) + << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + } else if (ol_flags & PKT_TX_IPV4) { + *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4; + *td_offset |= (tx_offload.l3_len >> 2) + << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + } else if (ol_flags & PKT_TX_IPV6) { + *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; + *td_offset |= (tx_offload.l3_len >> 2) + << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + } + + if (ol_flags & PKT_TX_TCP_SEG) { + *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; + *td_offset |= (tx_offload.l4_len >> 2) + << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + return; + } + + /* Enable L4 checksum offloads */ + switch (ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_TCP_CKSUM: + *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; + *td_offset |= (sizeof(struct tcp_hdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + break; + case PKT_TX_SCTP_CKSUM: + *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; + *td_offset |= (sizeof(struct sctp_hdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + break; + case PKT_TX_UDP_CKSUM: + *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; + *td_offset |= (sizeof(struct udp_hdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + break; + default: + break; + } +} + +static inline struct rte_mbuf * +rte_rxmbuf_alloc(struct rte_mempool *mp) +{ + struct rte_mbuf *m; + + m = __rte_mbuf_raw_alloc(mp); + __rte_mbuf_sanity_check_raw(m, 0); + + return m; +} + +/* Construct the tx flags */ +static inline uint64_t +i40e_build_ctob(uint32_t td_cmd, + uint32_t td_offset, + unsigned int size, + uint32_t td_tag) +{ + return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA | + ((uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT) | + ((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) | + ((uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | + ((uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); +} + +static inline int +i40e_xmit_cleanup(struct i40e_tx_queue *txq) +{ + struct i40e_tx_entry *sw_ring = txq->sw_ring; + volatile struct i40e_tx_desc *txd = txq->tx_ring; + uint16_t last_desc_cleaned = txq->last_desc_cleaned; + uint16_t nb_tx_desc = txq->nb_tx_desc; + uint16_t desc_to_clean_to; + uint16_t nb_tx_to_clean; + + desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh); + if (desc_to_clean_to >= nb_tx_desc) + desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc); + + desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; + if ((txd[desc_to_clean_to].cmd_type_offset_bsz & + 
rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) != + rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) { + PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done " + "(port=%d queue=%d)", desc_to_clean_to, + txq->port_id, txq->queue_id); + return -1; + } + + if (last_desc_cleaned > desc_to_clean_to) + nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) + + desc_to_clean_to); + else + nb_tx_to_clean = (uint16_t)(desc_to_clean_to - + last_desc_cleaned); + + txd[desc_to_clean_to].cmd_type_offset_bsz = 0; + + txq->last_desc_cleaned = desc_to_clean_to; + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean); + + return 0; +} + +static inline int +#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC +check_rx_burst_bulk_alloc_preconditions(struct i40e_rx_queue *rxq) +#else +check_rx_burst_bulk_alloc_preconditions(__rte_unused struct i40e_rx_queue *rxq) +#endif +{ + int ret = 0; + +#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC + if (!(rxq->rx_free_thresh >= RTE_PMD_I40E_RX_MAX_BURST)) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->rx_free_thresh=%d, " + "RTE_PMD_I40E_RX_MAX_BURST=%d", + rxq->rx_free_thresh, RTE_PMD_I40E_RX_MAX_BURST); + ret = -EINVAL; + } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->rx_free_thresh=%d, " + "rxq->nb_rx_desc=%d", + rxq->rx_free_thresh, rxq->nb_rx_desc); + ret = -EINVAL; + } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->nb_rx_desc=%d, " + "rxq->rx_free_thresh=%d", + rxq->nb_rx_desc, rxq->rx_free_thresh); + ret = -EINVAL; + } else if (!(rxq->nb_rx_desc < (I40E_MAX_RING_DESC - + RTE_PMD_I40E_RX_MAX_BURST))) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->nb_rx_desc=%d, " + "I40E_MAX_RING_DESC=%d, " + "RTE_PMD_I40E_RX_MAX_BURST=%d", + rxq->nb_rx_desc, I40E_MAX_RING_DESC, + RTE_PMD_I40E_RX_MAX_BURST); + ret = -EINVAL; + } +#else + ret = -EINVAL; +#endif + + return ret; +} + +#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC +#define I40E_LOOK_AHEAD 8 +#if (I40E_LOOK_AHEAD != 8) +#error "PMD I40E: I40E_LOOK_AHEAD must be 8\n" +#endif +static inline int +i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq) +{ + volatile union i40e_rx_desc *rxdp; + struct i40e_rx_entry *rxep; + struct rte_mbuf *mb; + uint16_t pkt_len; + uint64_t qword1; + uint32_t rx_status; + int32_t s[I40E_LOOK_AHEAD], nb_dd; + int32_t i, j, nb_rx = 0; + uint64_t pkt_flags; + + rxdp = &rxq->rx_ring[rxq->rx_tail]; + rxep = &rxq->sw_ring[rxq->rx_tail]; + + qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len); + rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + + /* Make sure there is at least 1 packet to receive */ + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + return 0; + + /** + * Scan LOOK_AHEAD descriptors at a time to determine which + * descriptors reference packets that are ready to be received. 
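 *
 * A minimal model of one scan step (illustrative, not from this
 * commit): the DD bits of a group of I40E_LOOK_AHEAD descriptors are
 * gathered into s[] and then summed, and scanning stops at the first
 * group that is not completely done:
 *
 *	int nb_dd = 0, j;
 *	for (j = 0; j < I40E_LOOK_AHEAD; j++)
 *		nb_dd += !!(s[j] & (1 << I40E_RX_DESC_STATUS_DD_SHIFT));
 *	// nb_dd == I40E_LOOK_AHEAD: whole group ready, keep scanning
 *	// nb_dd <  I40E_LOOK_AHEAD: translate nb_dd entries, then stop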
+ */ + for (i = 0; i < RTE_PMD_I40E_RX_MAX_BURST; i+=I40E_LOOK_AHEAD, + rxdp += I40E_LOOK_AHEAD, rxep += I40E_LOOK_AHEAD) { + /* Read desc statuses backwards to avoid race condition */ + for (j = I40E_LOOK_AHEAD - 1; j >= 0; j--) { + qword1 = rte_le_to_cpu_64(\ + rxdp[j].wb.qword1.status_error_len); + s[j] = (qword1 & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + } + + /* Compute how many status bits were set */ + for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++) + nb_dd += s[j] & (1 << I40E_RX_DESC_STATUS_DD_SHIFT); + + nb_rx += nb_dd; + + /* Translate descriptor info to mbuf parameters */ + for (j = 0; j < nb_dd; j++) { + mb = rxep[j].mbuf; + qword1 = rte_le_to_cpu_64(\ + rxdp[j].wb.qword1.status_error_len); + pkt_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len; + mb->data_len = pkt_len; + mb->pkt_len = pkt_len; + mb->ol_flags = 0; + i40e_rxd_to_vlan_tci(mb, &rxdp[j]); + pkt_flags = i40e_rxd_status_to_pkt_flags(qword1); + pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1); + mb->packet_type = + i40e_rxd_pkt_type_mapping((uint8_t)((qword1 & + I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT)); + if (pkt_flags & PKT_RX_RSS_HASH) + mb->hash.rss = rte_le_to_cpu_32(\ + rxdp[j].wb.qword0.hi_dword.rss); + if (pkt_flags & PKT_RX_FDIR) + pkt_flags |= i40e_rxd_build_fdir(&rxdp[j], mb); + +#ifdef RTE_LIBRTE_IEEE1588 + pkt_flags |= i40e_get_iee15888_flags(mb, qword1); +#endif + mb->ol_flags |= pkt_flags; + + } + + for (j = 0; j < I40E_LOOK_AHEAD; j++) + rxq->rx_stage[i + j] = rxep[j].mbuf; + + if (nb_dd != I40E_LOOK_AHEAD) + break; + } + + /* Clear software ring entries */ + for (i = 0; i < nb_rx; i++) + rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL; + + return nb_rx; +} + +static inline uint16_t +i40e_rx_fill_from_stage(struct i40e_rx_queue *rxq, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + uint16_t i; + struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail]; + + nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail); + + for (i = 0; i < nb_pkts; i++) + rx_pkts[i] = stage[i]; + + rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts); + rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts); + + return nb_pkts; +} + +static inline int +i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq) +{ + volatile union i40e_rx_desc *rxdp; + struct i40e_rx_entry *rxep; + struct rte_mbuf *mb; + uint16_t alloc_idx, i; + uint64_t dma_addr; + int diag; + + /* Allocate buffers in bulk */ + alloc_idx = (uint16_t)(rxq->rx_free_trigger - + (rxq->rx_free_thresh - 1)); + rxep = &(rxq->sw_ring[alloc_idx]); + diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep, + rxq->rx_free_thresh); + if (unlikely(diag != 0)) { + PMD_DRV_LOG(ERR, "Failed to get mbufs in bulk"); + return -ENOMEM; + } + + rxdp = &rxq->rx_ring[alloc_idx]; + for (i = 0; i < rxq->rx_free_thresh; i++) { + if (likely(i < (rxq->rx_free_thresh - 1))) + /* Prefetch next mbuf */ + rte_prefetch0(rxep[i + 1].mbuf); + + mb = rxep[i].mbuf; + rte_mbuf_refcnt_set(mb, 1); + mb->next = NULL; + mb->data_off = RTE_PKTMBUF_HEADROOM; + mb->nb_segs = 1; + mb->port = rxq->port_id; + dma_addr = rte_cpu_to_le_64(\ + rte_mbuf_data_dma_addr_default(mb)); + rxdp[i].read.hdr_addr = 0; + rxdp[i].read.pkt_addr = dma_addr; + } + + /* Update rx tail regsiter */ + rte_wmb(); + I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger); + + rxq->rx_free_trigger = + (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh); + if (rxq->rx_free_trigger >= rxq->nb_rx_desc) + rxq->rx_free_trigger = 
(uint16_t)(rxq->rx_free_thresh - 1); + + return 0; +} + +static inline uint16_t +rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct i40e_rx_queue *rxq = (struct i40e_rx_queue *)rx_queue; + uint16_t nb_rx = 0; + + if (!nb_pkts) + return 0; + + if (rxq->rx_nb_avail) + return i40e_rx_fill_from_stage(rxq, rx_pkts, nb_pkts); + + nb_rx = (uint16_t)i40e_rx_scan_hw_ring(rxq); + rxq->rx_next_avail = 0; + rxq->rx_nb_avail = nb_rx; + rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx); + + if (rxq->rx_tail > rxq->rx_free_trigger) { + if (i40e_rx_alloc_bufs(rxq) != 0) { + uint16_t i, j; + + PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for " + "port_id=%u, queue_id=%u", + rxq->port_id, rxq->queue_id); + rxq->rx_nb_avail = 0; + rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx); + for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++) + rxq->sw_ring[j].mbuf = rxq->rx_stage[i]; + + return 0; + } + } + + if (rxq->rx_tail >= rxq->nb_rx_desc) + rxq->rx_tail = 0; + + if (rxq->rx_nb_avail) + return i40e_rx_fill_from_stage(rxq, rx_pkts, nb_pkts); + + return 0; +} + +static uint16_t +i40e_recv_pkts_bulk_alloc(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_rx = 0, n, count; + + if (unlikely(nb_pkts == 0)) + return 0; + + if (likely(nb_pkts <= RTE_PMD_I40E_RX_MAX_BURST)) + return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts); + + while (nb_pkts) { + n = RTE_MIN(nb_pkts, RTE_PMD_I40E_RX_MAX_BURST); + count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n); + nb_rx = (uint16_t)(nb_rx + count); + nb_pkts = (uint16_t)(nb_pkts - count); + if (count < n) + break; + } + + return nb_rx; +} +#else +static uint16_t +i40e_recv_pkts_bulk_alloc(void __rte_unused *rx_queue, + struct rte_mbuf __rte_unused **rx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} +#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */ + +uint16_t +i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct i40e_rx_queue *rxq; + volatile union i40e_rx_desc *rx_ring; + volatile union i40e_rx_desc *rxdp; + union i40e_rx_desc rxd; + struct i40e_rx_entry *sw_ring; + struct i40e_rx_entry *rxe; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + uint16_t nb_rx; + uint32_t rx_status; + uint64_t qword1; + uint16_t rx_packet_len; + uint16_t rx_id, nb_hold; + uint64_t dma_addr; + uint64_t pkt_flags; + + nb_rx = 0; + nb_hold = 0; + rxq = rx_queue; + rx_id = rxq->rx_tail; + rx_ring = rxq->rx_ring; + sw_ring = rxq->sw_ring; + + while (nb_rx < nb_pkts) { + rxdp = &rx_ring[rx_id]; + qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len); + rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) + >> I40E_RXD_QW1_STATUS_SHIFT; + + /* Check the DD bit first */ + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + nmb = rte_rxmbuf_alloc(rxq->mp); + if (unlikely(!nmb)) + break; + rxd = *rxdp; + + nb_hold++; + rxe = &sw_ring[rx_id]; + rx_id++; + if (unlikely(rx_id == rxq->nb_rx_desc)) + rx_id = 0; + + /* Prefetch next mbuf */ + rte_prefetch0(sw_ring[rx_id].mbuf); + + /** + * When next RX descriptor is on a cache line boundary, + * prefetch the next 4 RX descriptors and next 8 pointers + * to mbufs. 
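 *
 * Equivalently (illustrative, not from this commit):
 *
 *	if (rx_id % 4 == 0)	// the & 0x3 form avoids a division
 *		prefetch_next_group();	// hypothetical helper name
 *
 * i.e. the prefetches below fire once per group of four descriptors
 * rather than on every iteration.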
+ */ + if ((rx_id & 0x3) == 0) { + rte_prefetch0(&rx_ring[rx_id]); + rte_prefetch0(&sw_ring[rx_id]); + } + rxm = rxe->mbuf; + rxe->mbuf = nmb; + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb)); + rxdp->read.hdr_addr = 0; + rxdp->read.pkt_addr = dma_addr; + + rx_packet_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len; + + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM)); + rxm->nb_segs = 1; + rxm->next = NULL; + rxm->pkt_len = rx_packet_len; + rxm->data_len = rx_packet_len; + rxm->port = rxq->port_id; + rxm->ol_flags = 0; + i40e_rxd_to_vlan_tci(rxm, &rxd); + pkt_flags = i40e_rxd_status_to_pkt_flags(qword1); + pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1); + rxm->packet_type = + i40e_rxd_pkt_type_mapping((uint8_t)((qword1 & + I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)); + if (pkt_flags & PKT_RX_RSS_HASH) + rxm->hash.rss = + rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss); + if (pkt_flags & PKT_RX_FDIR) + pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm); + +#ifdef RTE_LIBRTE_IEEE1588 + pkt_flags |= i40e_get_iee15888_flags(rxm, qword1); +#endif + rxm->ol_flags |= pkt_flags; + + rx_pkts[nb_rx++] = rxm; + } + rxq->rx_tail = rx_id; + + /** + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the receive tail register of queue. + * Update that register with the value of the last processed RX + * descriptor minus 1. + */ + nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold); + if (nb_hold > rxq->rx_free_thresh) { + rx_id = (uint16_t) ((rx_id == 0) ? + (rxq->nb_rx_desc - 1) : (rx_id - 1)); + I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + + return nb_rx; +} + +uint16_t +i40e_recv_scattered_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct i40e_rx_queue *rxq = rx_queue; + volatile union i40e_rx_desc *rx_ring = rxq->rx_ring; + volatile union i40e_rx_desc *rxdp; + union i40e_rx_desc rxd; + struct i40e_rx_entry *sw_ring = rxq->sw_ring; + struct i40e_rx_entry *rxe; + struct rte_mbuf *first_seg = rxq->pkt_first_seg; + struct rte_mbuf *last_seg = rxq->pkt_last_seg; + struct rte_mbuf *nmb, *rxm; + uint16_t rx_id = rxq->rx_tail; + uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len; + uint32_t rx_status; + uint64_t qword1; + uint64_t dma_addr; + uint64_t pkt_flags; + + while (nb_rx < nb_pkts) { + rxdp = &rx_ring[rx_id]; + qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len); + rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + + /* Check the DD bit */ + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + nmb = rte_rxmbuf_alloc(rxq->mp); + if (unlikely(!nmb)) + break; + rxd = *rxdp; + nb_hold++; + rxe = &sw_ring[rx_id]; + rx_id++; + if (rx_id == rxq->nb_rx_desc) + rx_id = 0; + + /* Prefetch next mbuf */ + rte_prefetch0(sw_ring[rx_id].mbuf); + + /** + * When next RX descriptor is on a cache line boundary, + * prefetch the next 4 RX descriptors and next 8 pointers + * to mbufs. 
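 *
 * (Scattered-path context, illustrative and not from this commit: a
 *  single large frame can span several of these descriptors. For
 *  example, a 9000-byte frame received into 2048-byte buffers arrives
 *  as five descriptors, and only the last one carries the EOF status
 *  bit, so the loop keeps chaining segments until EOF is seen.)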
+ */ + if ((rx_id & 0x3) == 0) { + rte_prefetch0(&rx_ring[rx_id]); + rte_prefetch0(&sw_ring[rx_id]); + } + + rxm = rxe->mbuf; + rxe->mbuf = nmb; + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb)); + + /* Set data buffer address and data length of the mbuf */ + rxdp->read.hdr_addr = 0; + rxdp->read.pkt_addr = dma_addr; + rx_packet_len = (qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + rxm->data_len = rx_packet_len; + rxm->data_off = RTE_PKTMBUF_HEADROOM; + + /** + * If this is the first buffer of the received packet, set the + * pointer to the first mbuf of the packet and initialize its + * context. Otherwise, update the total length and the number + * of segments of the current scattered packet, and update the + * pointer to the last mbuf of the current packet. + */ + if (!first_seg) { + first_seg = rxm; + first_seg->nb_segs = 1; + first_seg->pkt_len = rx_packet_len; + } else { + first_seg->pkt_len = + (uint16_t)(first_seg->pkt_len + + rx_packet_len); + first_seg->nb_segs++; + last_seg->next = rxm; + } + + /** + * If this is not the last buffer of the received packet, + * update the pointer to the last mbuf of the current scattered + * packet and continue to parse the RX ring. + */ + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT))) { + last_seg = rxm; + continue; + } + + /** + * This is the last buffer of the received packet. If the CRC + * is not stripped by the hardware: + * - Subtract the CRC length from the total packet length. + * - If the last buffer only contains the whole CRC or a part + * of it, free the mbuf associated to the last buffer. If part + * of the CRC is also contained in the previous mbuf, subtract + * the length of that CRC part from the data length of the + * previous mbuf. + */ + rxm->next = NULL; + if (unlikely(rxq->crc_len > 0)) { + first_seg->pkt_len -= ETHER_CRC_LEN; + if (rx_packet_len <= ETHER_CRC_LEN) { + rte_pktmbuf_free_seg(rxm); + first_seg->nb_segs--; + last_seg->data_len = + (uint16_t)(last_seg->data_len - + (ETHER_CRC_LEN - rx_packet_len)); + last_seg->next = NULL; + } else + rxm->data_len = (uint16_t)(rx_packet_len - + ETHER_CRC_LEN); + } + + first_seg->port = rxq->port_id; + first_seg->ol_flags = 0; + i40e_rxd_to_vlan_tci(first_seg, &rxd); + pkt_flags = i40e_rxd_status_to_pkt_flags(qword1); + pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1); + first_seg->packet_type = + i40e_rxd_pkt_type_mapping((uint8_t)((qword1 & + I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)); + if (pkt_flags & PKT_RX_RSS_HASH) + rxm->hash.rss = + rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss); + if (pkt_flags & PKT_RX_FDIR) + pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm); + +#ifdef RTE_LIBRTE_IEEE1588 + pkt_flags |= i40e_get_iee15888_flags(first_seg, qword1); +#endif + first_seg->ol_flags |= pkt_flags; + + /* Prefetch data of first segment, if configured to do so. */ + rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr, + first_seg->data_off)); + rx_pkts[nb_rx++] = first_seg; + first_seg = NULL; + } + + /* Record index of the next RX descriptor to probe. */ + rxq->rx_tail = rx_id; + rxq->pkt_first_seg = first_seg; + rxq->pkt_last_seg = last_seg; + + /** + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the Receive Descriptor Tail (RDT) + * register. 
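(For example,
 * illustrative and not from this commit: with a hypothetical
 * rx_free_thresh of 32, the tail write below happens roughly once per
 * 32 received packets instead of once per packet, batching the
 * doorbell cost.)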
Update the RDT with the value of the last processed RX + * descriptor minus 1, to guarantee that the RDT register is never + * equal to the RDH register, which creates a "full" ring situtation + * from the hardware point of view. + */ + nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold); + if (nb_hold > rxq->rx_free_thresh) { + rx_id = (uint16_t)(rx_id == 0 ? + (rxq->nb_rx_desc - 1) : (rx_id - 1)); + I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + + return nb_rx; +} + +/* Check if the context descriptor is needed for TX offloading */ +static inline uint16_t +i40e_calc_context_desc(uint64_t flags) +{ + static uint64_t mask = PKT_TX_OUTER_IP_CKSUM | + PKT_TX_TCP_SEG | + PKT_TX_QINQ_PKT; + +#ifdef RTE_LIBRTE_IEEE1588 + mask |= PKT_TX_IEEE1588_TMST; +#endif + + return (flags & mask) ? 1 : 0; +} + +/* set i40e TSO context descriptor */ +static inline uint64_t +i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload) +{ + uint64_t ctx_desc = 0; + uint32_t cd_cmd, hdr_len, cd_tso_len; + + if (!tx_offload.l4_len) { + PMD_DRV_LOG(DEBUG, "L4 length set to 0"); + return ctx_desc; + } + + /** + * in case of tunneling packet, the outer_l2_len and + * outer_l3_len must be 0. + */ + hdr_len = tx_offload.outer_l2_len + + tx_offload.outer_l3_len + + tx_offload.l2_len + + tx_offload.l3_len + + tx_offload.l4_len; + + cd_cmd = I40E_TX_CTX_DESC_TSO; + cd_tso_len = mbuf->pkt_len - hdr_len; + ctx_desc |= ((uint64_t)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | + ((uint64_t)cd_tso_len << + I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | + ((uint64_t)mbuf->tso_segsz << + I40E_TXD_CTX_QW1_MSS_SHIFT); + + return ctx_desc; +} + +uint16_t +i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct i40e_tx_queue *txq; + struct i40e_tx_entry *sw_ring; + struct i40e_tx_entry *txe, *txn; + volatile struct i40e_tx_desc *txd; + volatile struct i40e_tx_desc *txr; + struct rte_mbuf *tx_pkt; + struct rte_mbuf *m_seg; + uint32_t cd_tunneling_params; + uint16_t tx_id; + uint16_t nb_tx; + uint32_t td_cmd; + uint32_t td_offset; + uint32_t tx_flags; + uint32_t td_tag; + uint64_t ol_flags; + uint16_t nb_used; + uint16_t nb_ctx; + uint16_t tx_last; + uint16_t slen; + uint64_t buf_dma_addr; + union i40e_tx_offload tx_offload = {0}; + + txq = tx_queue; + sw_ring = txq->sw_ring; + txr = txq->tx_ring; + tx_id = txq->tx_tail; + txe = &sw_ring[tx_id]; + + /* Check if the descriptor ring needs to be cleaned. */ + if (txq->nb_tx_free < txq->tx_free_thresh) + i40e_xmit_cleanup(txq); + + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + td_cmd = 0; + td_tag = 0; + td_offset = 0; + tx_flags = 0; + + tx_pkt = *tx_pkts++; + RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf); + + ol_flags = tx_pkt->ol_flags; + tx_offload.l2_len = tx_pkt->l2_len; + tx_offload.l3_len = tx_pkt->l3_len; + tx_offload.outer_l2_len = tx_pkt->outer_l2_len; + tx_offload.outer_l3_len = tx_pkt->outer_l3_len; + tx_offload.l4_len = tx_pkt->l4_len; + tx_offload.tso_segsz = tx_pkt->tso_segsz; + + /* Calculate the number of context descriptors needed. */ + nb_ctx = i40e_calc_context_desc(ol_flags); + + /** + * The number of descriptors that must be allocated for + * a packet equals to the number of the segments of that + * packet plus 1 context descriptor if needed. 
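 *
 * Worked example (illustrative, not from this commit): a TSO packet
 * carried in three mbuf segments needs nb_used = 3 + 1 = 4
 * descriptors, so tx_last advances by four slots, modulo the ring
 * size.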
+ */ + nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx); + tx_last = (uint16_t)(tx_id + nb_used - 1); + + /* Circular ring */ + if (tx_last >= txq->nb_tx_desc) + tx_last = (uint16_t)(tx_last - txq->nb_tx_desc); + + if (nb_used > txq->nb_tx_free) { + if (i40e_xmit_cleanup(txq) != 0) { + if (nb_tx == 0) + return 0; + goto end_of_tx; + } + if (unlikely(nb_used > txq->tx_rs_thresh)) { + while (nb_used > txq->nb_tx_free) { + if (i40e_xmit_cleanup(txq) != 0) { + if (nb_tx == 0) + return 0; + goto end_of_tx; + } + } + } + } + + /* Descriptor based VLAN insertion */ + if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) { + tx_flags |= tx_pkt->vlan_tci << + I40E_TX_FLAG_L2TAG1_SHIFT; + tx_flags |= I40E_TX_FLAG_INSERT_VLAN; + td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; + td_tag = (tx_flags & I40E_TX_FLAG_L2TAG1_MASK) >> + I40E_TX_FLAG_L2TAG1_SHIFT; + } + + /* Always enable CRC offload insertion */ + td_cmd |= I40E_TX_DESC_CMD_ICRC; + + /* Enable checksum offloading */ + cd_tunneling_params = 0; + if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK) { + i40e_txd_enable_checksum(ol_flags, &td_cmd, &td_offset, + tx_offload, &cd_tunneling_params); + } + + if (nb_ctx) { + /* Setup TX context descriptor if required */ + volatile struct i40e_tx_context_desc *ctx_txd = + (volatile struct i40e_tx_context_desc *)\ + &txr[tx_id]; + uint16_t cd_l2tag2 = 0; + uint64_t cd_type_cmd_tso_mss = + I40E_TX_DESC_DTYPE_CONTEXT; + + txn = &sw_ring[txe->next_id]; + RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf); + if (txe->mbuf != NULL) { + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = NULL; + } + + /* TSO enabled means no timestamp */ + if (ol_flags & PKT_TX_TCP_SEG) + cd_type_cmd_tso_mss |= + i40e_set_tso_ctx(tx_pkt, tx_offload); + else { +#ifdef RTE_LIBRTE_IEEE1588 + if (ol_flags & PKT_TX_IEEE1588_TMST) + cd_type_cmd_tso_mss |= + ((uint64_t)I40E_TX_CTX_DESC_TSYN << + I40E_TXD_CTX_QW1_CMD_SHIFT); +#endif + } + + ctx_txd->tunneling_params = + rte_cpu_to_le_32(cd_tunneling_params); + if (ol_flags & PKT_TX_QINQ_PKT) { + cd_l2tag2 = tx_pkt->vlan_tci_outer; + cd_type_cmd_tso_mss |= + ((uint64_t)I40E_TX_CTX_DESC_IL2TAG2 << + I40E_TXD_CTX_QW1_CMD_SHIFT); + } + ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2); + ctx_txd->type_cmd_tso_mss = + rte_cpu_to_le_64(cd_type_cmd_tso_mss); + + PMD_TX_LOG(DEBUG, "mbuf: %p, TCD[%u]:\n" + "tunneling_params: %#x;\n" + "l2tag2: %#hx;\n" + "rsvd: %#hx;\n" + "type_cmd_tso_mss: %#"PRIx64";\n", + tx_pkt, tx_id, + ctx_txd->tunneling_params, + ctx_txd->l2tag2, + ctx_txd->rsvd, + ctx_txd->type_cmd_tso_mss); + + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + } + + m_seg = tx_pkt; + do { + txd = &txr[tx_id]; + txn = &sw_ring[txe->next_id]; + + if (txe->mbuf) + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = m_seg; + + /* Setup TX Descriptor */ + slen = m_seg->data_len; + buf_dma_addr = rte_mbuf_data_dma_addr(m_seg); + + PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n" + "buf_dma_addr: %#"PRIx64";\n" + "td_cmd: %#x;\n" + "td_offset: %#x;\n" + "td_len: %u;\n" + "td_tag: %#x;\n", + tx_pkt, tx_id, buf_dma_addr, + td_cmd, td_offset, slen, td_tag); + + txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr); + txd->cmd_type_offset_bsz = i40e_build_ctob(td_cmd, + td_offset, slen, td_tag); + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + m_seg = m_seg->next; + } while (m_seg != NULL); + + /* The last packet data descriptor needs End Of Packet (EOP) */ + td_cmd |= I40E_TX_DESC_CMD_EOP; + txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used); + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used); + + if (txq->nb_tx_used 
>= txq->tx_rs_thresh) { + PMD_TX_FREE_LOG(DEBUG, + "Setting RS bit on TXD id=" + "%4u (port=%d queue=%d)", + tx_last, txq->port_id, txq->queue_id); + + td_cmd |= I40E_TX_DESC_CMD_RS; + + /* Update txq RS bit counters */ + txq->nb_tx_used = 0; + } + + txd->cmd_type_offset_bsz |= + rte_cpu_to_le_64(((uint64_t)td_cmd) << + I40E_TXD_QW1_CMD_SHIFT); + } + +end_of_tx: + rte_wmb(); + + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u", + (unsigned) txq->port_id, (unsigned) txq->queue_id, + (unsigned) tx_id, (unsigned) nb_tx); + + I40E_PCI_REG_WRITE(txq->qtx_tail, tx_id); + txq->tx_tail = tx_id; + + return nb_tx; +} + +static inline int __attribute__((always_inline)) +i40e_tx_free_bufs(struct i40e_tx_queue *txq) +{ + struct i40e_tx_entry *txep; + uint16_t i; + + if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz & + rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) != + rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) + return 0; + + txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]); + + for (i = 0; i < txq->tx_rs_thresh; i++) + rte_prefetch0((txep + i)->mbuf); + + if (txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) { + for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) { + rte_mempool_put(txep->mbuf->pool, txep->mbuf); + txep->mbuf = NULL; + } + } else { + for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) { + rte_pktmbuf_free_seg(txep->mbuf); + txep->mbuf = NULL; + } + } + + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh); + txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh); + if (txq->tx_next_dd >= txq->nb_tx_desc) + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + + return txq->tx_rs_thresh; +} + +/* Populate 4 descriptors with data from 4 mbufs */ +static inline void +tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts) +{ + uint64_t dma_addr; + uint32_t i; + + for (i = 0; i < 4; i++, txdp++, pkts++) { + dma_addr = rte_mbuf_data_dma_addr(*pkts); + txdp->buffer_addr = rte_cpu_to_le_64(dma_addr); + txdp->cmd_type_offset_bsz = + i40e_build_ctob((uint32_t)I40E_TD_CMD, 0, + (*pkts)->data_len, 0); + } +} + +/* Populate 1 descriptor with data from 1 mbuf */ +static inline void +tx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts) +{ + uint64_t dma_addr; + + dma_addr = rte_mbuf_data_dma_addr(*pkts); + txdp->buffer_addr = rte_cpu_to_le_64(dma_addr); + txdp->cmd_type_offset_bsz = + i40e_build_ctob((uint32_t)I40E_TD_CMD, 0, + (*pkts)->data_len, 0); +} + +/* Fill hardware descriptor ring with mbuf data */ +static inline void +i40e_tx_fill_hw_ring(struct i40e_tx_queue *txq, + struct rte_mbuf **pkts, + uint16_t nb_pkts) +{ + volatile struct i40e_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]); + struct i40e_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]); + const int N_PER_LOOP = 4; + const int N_PER_LOOP_MASK = N_PER_LOOP - 1; + int mainpart, leftover; + int i, j; + + mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK)); + leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK)); + for (i = 0; i < mainpart; i += N_PER_LOOP) { + for (j = 0; j < N_PER_LOOP; ++j) { + (txep + i + j)->mbuf = *(pkts + i + j); + } + tx4(txdp + i, pkts + i); + } + if (unlikely(leftover > 0)) { + for (i = 0; i < leftover; ++i) { + (txep + mainpart + i)->mbuf = *(pkts + mainpart + i); + tx1(txdp + mainpart + i, pkts + mainpart + i); + } + } +} + +static inline uint16_t +tx_xmit_pkts(struct i40e_tx_queue *txq, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + volatile struct i40e_tx_desc *txr = txq->tx_ring; + uint16_t n = 0; + + /** + * Begin scanning 
the H/W ring for done descriptors when the number + * of available descriptors drops below tx_free_thresh. For each done + * descriptor, free the associated buffer. + */ + if (txq->nb_tx_free < txq->tx_free_thresh) + i40e_tx_free_bufs(txq); + + /* Use available descriptor only */ + nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); + if (unlikely(!nb_pkts)) + return 0; + + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); + if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) { + n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail); + i40e_tx_fill_hw_ring(txq, tx_pkts, n); + txr[txq->tx_next_rs].cmd_type_offset_bsz |= + rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) << + I40E_TXD_QW1_CMD_SHIFT); + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + txq->tx_tail = 0; + } + + /* Fill hardware descriptor ring with mbuf data */ + i40e_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n)); + txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n)); + + /* Determin if RS bit needs to be set */ + if (txq->tx_tail > txq->tx_next_rs) { + txr[txq->tx_next_rs].cmd_type_offset_bsz |= + rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) << + I40E_TXD_QW1_CMD_SHIFT); + txq->tx_next_rs = + (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh); + if (txq->tx_next_rs >= txq->nb_tx_desc) + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + } + + if (txq->tx_tail >= txq->nb_tx_desc) + txq->tx_tail = 0; + + /* Update the tx tail register */ + rte_wmb(); + I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); + + return nb_pkts; +} + +static uint16_t +i40e_xmit_pkts_simple(void *tx_queue, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_tx = 0; + + if (likely(nb_pkts <= I40E_TX_MAX_BURST)) + return tx_xmit_pkts((struct i40e_tx_queue *)tx_queue, + tx_pkts, nb_pkts); + + while (nb_pkts) { + uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts, + I40E_TX_MAX_BURST); + + ret = tx_xmit_pkts((struct i40e_tx_queue *)tx_queue, + &tx_pkts[nb_tx], num); + nb_tx = (uint16_t)(nb_tx + ret); + nb_pkts = (uint16_t)(nb_pkts - ret); + if (ret < num) + break; + } + + return nb_tx; +} + +/* + * Find the VSI the queue belongs to. 'queue_idx' is the queue index + * application used, which assume having sequential ones. But from driver's + * perspective, it's different. For example, q0 belongs to FDIR VSI, q1-q64 + * to MAIN VSI, , q65-96 to SRIOV VSIs, q97-128 to VMDQ VSIs. For application + * running on host, q1-64 and q97-128 can be used, total 96 queues. They can + * use queue_idx from 0 to 95 to access queues, while real queue would be + * different. This function will do a queue mapping to find VSI the queue + * belongs to. + */ +static struct i40e_vsi* +i40e_pf_get_vsi_by_qindex(struct i40e_pf *pf, uint16_t queue_idx) +{ + /* the queue in MAIN VSI range */ + if (queue_idx < pf->main_vsi->nb_qps) + return pf->main_vsi; + + queue_idx -= pf->main_vsi->nb_qps; + + /* queue_idx is greater than VMDQ VSIs range */ + if (queue_idx > pf->nb_cfg_vmdq_vsi * pf->vmdq_nb_qps - 1) { + PMD_INIT_LOG(ERR, "queue_idx out of range. 
VMDQ configured?"); + return NULL; + } + + return pf->vmdq[queue_idx / pf->vmdq_nb_qps].vsi; +} + +static uint16_t +i40e_get_queue_offset_by_qindex(struct i40e_pf *pf, uint16_t queue_idx) +{ + /* the queue in MAIN VSI range */ + if (queue_idx < pf->main_vsi->nb_qps) + return queue_idx; + + /* It's VMDQ queues */ + queue_idx -= pf->main_vsi->nb_qps; + + if (pf->nb_cfg_vmdq_vsi) + return queue_idx % pf->vmdq_nb_qps; + else { + PMD_INIT_LOG(ERR, "Fail to get queue offset"); + return (uint16_t)(-1); + } +} + +int +i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct i40e_rx_queue *rxq; + int err = -1; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + if (rx_queue_id < dev->data->nb_rx_queues) { + rxq = dev->data->rx_queues[rx_queue_id]; + + err = i40e_alloc_rx_queue_mbufs(rxq); + if (err) { + PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf"); + return err; + } + + rte_wmb(); + + /* Init the RX tail regieter. */ + I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); + + err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE); + + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on", + rx_queue_id); + + i40e_rx_queue_release_mbufs(rxq); + i40e_reset_rx_queue(rxq); + } else + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + } + + return err; +} + +int +i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct i40e_rx_queue *rxq; + int err; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (rx_queue_id < dev->data->nb_rx_queues) { + rxq = dev->data->rx_queues[rx_queue_id]; + + /* + * rx_queue_id is queue id aplication refers to, while + * rxq->reg_idx is the real queue index. + */ + err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE); + + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off", + rx_queue_id); + return err; + } + i40e_rx_queue_release_mbufs(rxq); + i40e_reset_rx_queue(rxq); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + } + + return 0; +} + +int +i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + int err = -1; + struct i40e_tx_queue *txq; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + if (tx_queue_id < dev->data->nb_tx_queues) { + txq = dev->data->tx_queues[tx_queue_id]; + + /* + * tx_queue_id is queue id aplication refers to, while + * rxq->reg_idx is the real queue index. + */ + err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE); + if (err) + PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on", + tx_queue_id); + else + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + } + + return err; +} + +int +i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct i40e_tx_queue *txq; + int err; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (tx_queue_id < dev->data->nb_tx_queues) { + txq = dev->data->tx_queues[tx_queue_id]; + + /* + * tx_queue_id is queue id aplication refers to, while + * txq->reg_idx is the real queue index. 
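 *
 * Worked mapping example (illustrative, hypothetical numbers): with
 * main_vsi->nb_qps = 64 and vmdq_nb_qps = 4, application queue 66
 * falls past the MAIN VSI, i40e_pf_get_vsi_by_qindex() returns the
 * first VMDQ VSI (index (66 - 64) / 4 = 0) and
 * i40e_get_queue_offset_by_qindex() returns (66 - 64) % 4 = 2, so at
 * setup time reg_idx was that VSI's base_queue + 2.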
+ */ + err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE); + + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of", + tx_queue_id); + return err; + } + + i40e_tx_queue_release_mbufs(txq); + i40e_reset_tx_queue(txq); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + } + + return 0; +} + +const uint32_t * +i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + /* refers to i40e_rxd_pkt_type_mapping() */ + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L2_ETHER_TIMESYNC, + RTE_PTYPE_L2_ETHER_LLDP, + RTE_PTYPE_L2_ETHER_ARP, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_L4_FRAG, + RTE_PTYPE_L4_ICMP, + RTE_PTYPE_L4_NONFRAG, + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_TUNNEL_GRENAT, + RTE_PTYPE_TUNNEL_IP, + RTE_PTYPE_INNER_L2_ETHER, + RTE_PTYPE_INNER_L2_ETHER_VLAN, + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_INNER_L4_FRAG, + RTE_PTYPE_INNER_L4_ICMP, + RTE_PTYPE_INNER_L4_NONFRAG, + RTE_PTYPE_INNER_L4_SCTP, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == i40e_recv_pkts || +#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC + dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc || +#endif + dev->rx_pkt_burst == i40e_recv_scattered_pkts) + return ptypes; + return NULL; +} + +int +i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + struct i40e_vsi *vsi; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct i40e_rx_queue *rxq; + const struct rte_memzone *rz; + uint32_t ring_size; + uint16_t len, i; + uint16_t base, bsf, tc_mapping; + int use_def_burst_func = 1; + + if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) { + struct i40e_vf *vf = + I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + vsi = &vf->vsi; + } else + vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx); + + if (vsi == NULL) { + PMD_DRV_LOG(ERR, "VSI not available or queue " + "index exceeds the maximum"); + return I40E_ERR_PARAM; + } + if (nb_desc % I40E_ALIGN_RING_DESC != 0 || + (nb_desc > I40E_MAX_RING_DESC) || + (nb_desc < I40E_MIN_RING_DESC)) { + PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is " + "invalid", nb_desc); + return I40E_ERR_PARAM; + } + + /* Free memory if needed */ + if (dev->data->rx_queues[queue_idx]) { + i40e_dev_rx_queue_release(dev->data->rx_queues[queue_idx]); + dev->data->rx_queues[queue_idx] = NULL; + } + + /* Allocate the rx queue data structure */ + rxq = rte_zmalloc_socket("i40e rx queue", + sizeof(struct i40e_rx_queue), + RTE_CACHE_LINE_SIZE, + socket_id); + if (!rxq) { + PMD_DRV_LOG(ERR, "Failed to allocate memory for " + "rx queue data structure"); + return -ENOMEM; + } + rxq->mp = mp; + rxq->nb_rx_desc = nb_desc; + rxq->rx_free_thresh = rx_conf->rx_free_thresh; + rxq->queue_id = queue_idx; + if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) + rxq->reg_idx = queue_idx; + else /* PF device */ + rxq->reg_idx = vsi->base_queue + + i40e_get_queue_offset_by_qindex(pf, queue_idx); + + rxq->port_id = dev->data->port_id; + rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 
+ 0 : ETHER_CRC_LEN); + rxq->drop_en = rx_conf->rx_drop_en; + rxq->vsi = vsi; + rxq->rx_deferred_start = rx_conf->rx_deferred_start; + + /* Allocate the maximun number of RX ring hardware descriptor. */ + ring_size = sizeof(union i40e_rx_desc) * I40E_MAX_RING_DESC; + ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN); + rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, + ring_size, I40E_RING_BASE_ALIGN, socket_id); + if (!rz) { + i40e_dev_rx_queue_release(rxq); + PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX"); + return -ENOMEM; + } + + /* Zero all the descriptors in the ring. */ + memset(rz->addr, 0, ring_size); + + rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr); + rxq->rx_ring = (union i40e_rx_desc *)rz->addr; + +#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC + len = (uint16_t)(nb_desc + RTE_PMD_I40E_RX_MAX_BURST); +#else + len = nb_desc; +#endif + + /* Allocate the software ring. */ + rxq->sw_ring = + rte_zmalloc_socket("i40e rx sw ring", + sizeof(struct i40e_rx_entry) * len, + RTE_CACHE_LINE_SIZE, + socket_id); + if (!rxq->sw_ring) { + i40e_dev_rx_queue_release(rxq); + PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring"); + return -ENOMEM; + } + + i40e_reset_rx_queue(rxq); + rxq->q_set = TRUE; + dev->data->rx_queues[queue_idx] = rxq; + + use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq); + + if (!use_def_burst_func) { +#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " + "satisfied. Rx Burst Bulk Alloc function will be " + "used on port=%d, queue=%d.", + rxq->port_id, rxq->queue_id); +#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */ + } else { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " + "not satisfied, Scattered Rx is requested, " + "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is " + "not enabled on port=%d, queue=%d.", + rxq->port_id, rxq->queue_id); + ad->rx_bulk_alloc_allowed = false; + } + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (!(vsi->enabled_tc & (1 << i))) + continue; + tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]); + base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >> + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT; + bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >> + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT; + + if (queue_idx >= base && queue_idx < (base + BIT(bsf))) + rxq->dcb_tc = i; + } + + return 0; +} + +void +i40e_dev_rx_queue_release(void *rxq) +{ + struct i40e_rx_queue *q = (struct i40e_rx_queue *)rxq; + + if (!q) { + PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL"); + return; + } + + i40e_rx_queue_release_mbufs(q); + rte_free(q->sw_ring); + rte_free(q); +} + +uint32_t +i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ +#define I40E_RXQ_SCAN_INTERVAL 4 + volatile union i40e_rx_desc *rxdp; + struct i40e_rx_queue *rxq; + uint16_t desc = 0; + + if (unlikely(rx_queue_id >= dev->data->nb_rx_queues)) { + PMD_DRV_LOG(ERR, "Invalid RX queue id %u", rx_queue_id); + return 0; + } + + rxq = dev->data->rx_queues[rx_queue_id]; + rxdp = &(rxq->rx_ring[rxq->rx_tail]); + while ((desc < rxq->nb_rx_desc) && + ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) & + I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT) & + (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) { + /** + * Check the DD bit of a rx descriptor of each 4 in a group, + * to avoid checking too frequently and downgrading performance + * too much. 
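 *
 * Illustrative consequence (not from this commit): only the leading
 * descriptor of each group of four is inspected, so if the first
 * not-yet-done descriptor sits 10 slots past rx_tail the loop still
 * returns 12; the count is an estimate rounded to the scan interval.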
+ */ + desc += I40E_RXQ_SCAN_INTERVAL; + rxdp += I40E_RXQ_SCAN_INTERVAL; + if (rxq->rx_tail + desc >= rxq->nb_rx_desc) + rxdp = &(rxq->rx_ring[rxq->rx_tail + + desc - rxq->nb_rx_desc]); + } + + return desc; +} + +int +i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset) +{ + volatile union i40e_rx_desc *rxdp; + struct i40e_rx_queue *rxq = rx_queue; + uint16_t desc; + int ret; + + if (unlikely(offset >= rxq->nb_rx_desc)) { + PMD_DRV_LOG(ERR, "Invalid RX queue id %u", offset); + return 0; + } + + desc = rxq->rx_tail + offset; + if (desc >= rxq->nb_rx_desc) + desc -= rxq->nb_rx_desc; + + rxdp = &(rxq->rx_ring[desc]); + + ret = !!(((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) & + I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT) & + (1 << I40E_RX_DESC_STATUS_DD_SHIFT)); + + return ret; +} + +int +i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + struct i40e_vsi *vsi; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_tx_queue *txq; + const struct rte_memzone *tz; + uint32_t ring_size; + uint16_t tx_rs_thresh, tx_free_thresh; + uint16_t i, base, bsf, tc_mapping; + + if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) { + struct i40e_vf *vf = + I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + vsi = &vf->vsi; + } else + vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx); + + if (vsi == NULL) { + PMD_DRV_LOG(ERR, "VSI is NULL, or queue index (%u) " + "exceeds the maximum", queue_idx); + return I40E_ERR_PARAM; + } + + if (nb_desc % I40E_ALIGN_RING_DESC != 0 || + (nb_desc > I40E_MAX_RING_DESC) || + (nb_desc < I40E_MIN_RING_DESC)) { + PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is " + "invalid", nb_desc); + return I40E_ERR_PARAM; + } + + /** + * The following two parameters control the setting of the RS bit on + * transmit descriptors. TX descriptors will have their RS bit set + * after txq->tx_rs_thresh descriptors have been used. The TX + * descriptor ring will be cleaned after txq->tx_free_thresh + * descriptors are used or if the number of descriptors required to + * transmit a packet is greater than the number of free TX descriptors. + * + * The following constraints must be satisfied: + * - tx_rs_thresh must be greater than 0. + * - tx_rs_thresh must be less than the size of the ring minus 2. + * - tx_rs_thresh must be less than or equal to tx_free_thresh. + * - tx_rs_thresh must be a divisor of the ring size. + * - tx_free_thresh must be greater than 0. + * - tx_free_thresh must be less than the size of the ring minus 3. + * + * One descriptor in the TX ring is used as a sentinel to avoid a H/W + * race condition, hence the maximum threshold constraints. When set + * to zero use default values. + */ + tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ? + tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH); + tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ? + tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH); + if (tx_rs_thresh >= (nb_desc - 2)) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the " + "number of TX descriptors minus 2. 
" + "(tx_rs_thresh=%u port=%d queue=%d)", + (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, + (int)queue_idx); + return I40E_ERR_PARAM; + } + if (tx_free_thresh >= (nb_desc - 3)) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the " + "tx_free_thresh must be less than the " + "number of TX descriptors minus 3. " + "(tx_free_thresh=%u port=%d queue=%d)", + (unsigned int)tx_free_thresh, + (int)dev->data->port_id, + (int)queue_idx); + return I40E_ERR_PARAM; + } + if (tx_rs_thresh > tx_free_thresh) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or " + "equal to tx_free_thresh. (tx_free_thresh=%u" + " tx_rs_thresh=%u port=%d queue=%d)", + (unsigned int)tx_free_thresh, + (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, + (int)queue_idx); + return I40E_ERR_PARAM; + } + if ((nb_desc % tx_rs_thresh) != 0) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the " + "number of TX descriptors. (tx_rs_thresh=%u" + " port=%d queue=%d)", + (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, + (int)queue_idx); + return I40E_ERR_PARAM; + } + if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) { + PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if " + "tx_rs_thresh is greater than 1. " + "(tx_rs_thresh=%u port=%d queue=%d)", + (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, + (int)queue_idx); + return I40E_ERR_PARAM; + } + + /* Free memory if needed. */ + if (dev->data->tx_queues[queue_idx]) { + i40e_dev_tx_queue_release(dev->data->tx_queues[queue_idx]); + dev->data->tx_queues[queue_idx] = NULL; + } + + /* Allocate the TX queue data structure. */ + txq = rte_zmalloc_socket("i40e tx queue", + sizeof(struct i40e_tx_queue), + RTE_CACHE_LINE_SIZE, + socket_id); + if (!txq) { + PMD_DRV_LOG(ERR, "Failed to allocate memory for " + "tx queue structure"); + return -ENOMEM; + } + + /* Allocate TX hardware ring descriptors. 
*/ + ring_size = sizeof(struct i40e_tx_desc) * I40E_MAX_RING_DESC; + ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN); + tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, + ring_size, I40E_RING_BASE_ALIGN, socket_id); + if (!tz) { + i40e_dev_tx_queue_release(txq); + PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX"); + return -ENOMEM; + } + + txq->nb_tx_desc = nb_desc; + txq->tx_rs_thresh = tx_rs_thresh; + txq->tx_free_thresh = tx_free_thresh; + txq->pthresh = tx_conf->tx_thresh.pthresh; + txq->hthresh = tx_conf->tx_thresh.hthresh; + txq->wthresh = tx_conf->tx_thresh.wthresh; + txq->queue_id = queue_idx; + if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) + txq->reg_idx = queue_idx; + else /* PF device */ + txq->reg_idx = vsi->base_queue + + i40e_get_queue_offset_by_qindex(pf, queue_idx); + + txq->port_id = dev->data->port_id; + txq->txq_flags = tx_conf->txq_flags; + txq->vsi = vsi; + txq->tx_deferred_start = tx_conf->tx_deferred_start; + + txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr); + txq->tx_ring = (struct i40e_tx_desc *)tz->addr; + + /* Allocate software ring */ + txq->sw_ring = + rte_zmalloc_socket("i40e tx sw ring", + sizeof(struct i40e_tx_entry) * nb_desc, + RTE_CACHE_LINE_SIZE, + socket_id); + if (!txq->sw_ring) { + i40e_dev_tx_queue_release(txq); + PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring"); + return -ENOMEM; + } + + i40e_reset_tx_queue(txq); + txq->q_set = TRUE; + dev->data->tx_queues[queue_idx] = txq; + + /* Use a simple TX queue without offloads or multi segs if possible */ + i40e_set_tx_function_flag(dev, txq); + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (!(vsi->enabled_tc & (1 << i))) + continue; + tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]); + base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >> + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT; + bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >> + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT; + + if (queue_idx >= base && queue_idx < (base + BIT(bsf))) + txq->dcb_tc = i; + } + + return 0; +} + +void +i40e_dev_tx_queue_release(void *txq) +{ + struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq; + + if (!q) { + PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL"); + return; + } + + i40e_tx_queue_release_mbufs(q); + rte_free(q->sw_ring); + rte_free(q); +} + +const struct rte_memzone * +i40e_memzone_reserve(const char *name, uint32_t len, int socket_id) +{ + const struct rte_memzone *mz; + + mz = rte_memzone_lookup(name); + if (mz) + return mz; + + if (rte_xen_dom0_supported()) + mz = rte_memzone_reserve_bounded(name, len, + socket_id, 0, I40E_RING_BASE_ALIGN, RTE_PGSIZE_2M); + else + mz = rte_memzone_reserve_aligned(name, len, + socket_id, 0, I40E_RING_BASE_ALIGN); + return mz; +} + +void +i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq) +{ + uint16_t i; + + /* SSE Vector driver has a different way of releasing mbufs. 
*/ + if (rxq->rx_using_sse) { + i40e_rx_queue_release_mbufs_vec(rxq); + return; + } + + if (!rxq || !rxq->sw_ring) { + PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL"); + return; + } + + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (rxq->sw_ring[i].mbuf) { + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + rxq->sw_ring[i].mbuf = NULL; + } + } +#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC + if (rxq->rx_nb_avail == 0) + return; + for (i = 0; i < rxq->rx_nb_avail; i++) { + struct rte_mbuf *mbuf; + + mbuf = rxq->rx_stage[rxq->rx_next_avail + i]; + rte_pktmbuf_free_seg(mbuf); + } + rxq->rx_nb_avail = 0; +#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */ +} + +void +i40e_reset_rx_queue(struct i40e_rx_queue *rxq) +{ + unsigned i; + uint16_t len; + + if (!rxq) { + PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL"); + return; + } + +#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC + if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0) + len = (uint16_t)(rxq->nb_rx_desc + RTE_PMD_I40E_RX_MAX_BURST); + else +#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */ + len = rxq->nb_rx_desc; + + for (i = 0; i < len * sizeof(union i40e_rx_desc); i++) + ((volatile char *)rxq->rx_ring)[i] = 0; + +#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC + memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf)); + for (i = 0; i < RTE_PMD_I40E_RX_MAX_BURST; ++i) + rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf; + + rxq->rx_nb_avail = 0; + rxq->rx_next_avail = 0; + rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); +#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */ + rxq->rx_tail = 0; + rxq->nb_rx_hold = 0; + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; + + rxq->rxrearm_start = 0; + rxq->rxrearm_nb = 0; +} + +void +i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq) +{ + uint16_t i; + + if (!txq || !txq->sw_ring) { + PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL"); + return; + } + + for (i = 0; i < txq->nb_tx_desc; i++) { + if (txq->sw_ring[i].mbuf) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + } +} + +void +i40e_reset_tx_queue(struct i40e_tx_queue *txq) +{ + struct i40e_tx_entry *txe; + uint16_t i, prev, size; + + if (!txq) { + PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL"); + return; + } + + txe = txq->sw_ring; + size = sizeof(struct i40e_tx_desc) * txq->nb_tx_desc; + for (i = 0; i < size; i++) + ((volatile char *)txq->tx_ring)[i] = 0; + + prev = (uint16_t)(txq->nb_tx_desc - 1); + for (i = 0; i < txq->nb_tx_desc; i++) { + volatile struct i40e_tx_desc *txd = &txq->tx_ring[i]; + + txd->cmd_type_offset_bsz = + rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE); + txe[i].mbuf = NULL; + txe[i].last_id = i; + txe[prev].next_id = i; + prev = i; + } + + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + + txq->tx_tail = 0; + txq->nb_tx_used = 0; + + txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1); + txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1); +} + +/* Init the TX queue in hardware */ +int +i40e_tx_queue_init(struct i40e_tx_queue *txq) +{ + enum i40e_status_code err = I40E_SUCCESS; + struct i40e_vsi *vsi = txq->vsi; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + uint16_t pf_q = txq->reg_idx; + struct i40e_hmc_obj_txq tx_ctx; + uint32_t qtx_ctl; + + /* clear the context structure first */ + memset(&tx_ctx, 0, sizeof(tx_ctx)); + tx_ctx.new_context = 1; + tx_ctx.base = txq->tx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT; + tx_ctx.qlen = txq->nb_tx_desc; + +#ifdef RTE_LIBRTE_IEEE1588 + tx_ctx.timesync_ena = 1; 
+#endif + tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[txq->dcb_tc]); + if (vsi->type == I40E_VSI_FDIR) + tx_ctx.fd_ena = TRUE; + + err = i40e_clear_lan_tx_queue_context(hw, pf_q); + if (err != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failure of clean lan tx queue context"); + return err; + } + + err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx); + if (err != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failure of set lan tx queue context"); + return err; + } + + /* Now associate this queue with this PCI function */ + qtx_ctl = I40E_QTX_CTL_PF_QUEUE; + qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & + I40E_QTX_CTL_PF_INDX_MASK); + I40E_WRITE_REG(hw, I40E_QTX_CTL(pf_q), qtx_ctl); + I40E_WRITE_FLUSH(hw); + + txq->qtx_tail = hw->hw_addr + I40E_QTX_TAIL(pf_q); + + return err; +} + +int +i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq) +{ + struct i40e_rx_entry *rxe = rxq->sw_ring; + uint64_t dma_addr; + uint16_t i; + + for (i = 0; i < rxq->nb_rx_desc; i++) { + volatile union i40e_rx_desc *rxd; + struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mp); + + if (unlikely(!mbuf)) { + PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX"); + return -ENOMEM; + } + + rte_mbuf_refcnt_set(mbuf, 1); + mbuf->next = NULL; + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + mbuf->nb_segs = 1; + mbuf->port = rxq->port_id; + + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf)); + + rxd = &rxq->rx_ring[i]; + rxd->read.pkt_addr = dma_addr; + rxd->read.hdr_addr = 0; +#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC + rxd->read.rsvd1 = 0; + rxd->read.rsvd2 = 0; +#endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */ + + rxe[i].mbuf = mbuf; + } + + return 0; +} + +/* + * Calculate the buffer length, and check the jumbo frame + * and maximum packet length. + */ +static int +i40e_rx_queue_config(struct i40e_rx_queue *rxq) +{ + struct i40e_pf *pf = I40E_VSI_TO_PF(rxq->vsi); + struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi); + struct rte_eth_dev_data *data = pf->dev_data; + uint16_t buf_size, len; + + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) - + RTE_PKTMBUF_HEADROOM); + + switch (pf->flags & (I40E_FLAG_HEADER_SPLIT_DISABLED | + I40E_FLAG_HEADER_SPLIT_ENABLED)) { + case I40E_FLAG_HEADER_SPLIT_ENABLED: /* Not supported */ + rxq->rx_hdr_len = RTE_ALIGN(I40E_RXBUF_SZ_1024, + (1 << I40E_RXQ_CTX_HBUFF_SHIFT)); + rxq->rx_buf_len = RTE_ALIGN(I40E_RXBUF_SZ_2048, + (1 << I40E_RXQ_CTX_DBUFF_SHIFT)); + rxq->hs_mode = i40e_header_split_enabled; + break; + case I40E_FLAG_HEADER_SPLIT_DISABLED: + default: + rxq->rx_hdr_len = 0; + rxq->rx_buf_len = RTE_ALIGN(buf_size, + (1 << I40E_RXQ_CTX_DBUFF_SHIFT)); + rxq->hs_mode = i40e_header_split_none; + break; + } + + len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len; + rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len); + if (data->dev_conf.rxmode.jumbo_frame == 1) { + if (rxq->max_pkt_len <= ETHER_MAX_LEN || + rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) { + PMD_DRV_LOG(ERR, "maximum packet length must " + "be larger than %u and smaller than %u," + "as jumbo frame is enabled", + (uint32_t)ETHER_MAX_LEN, + (uint32_t)I40E_FRAME_SIZE_MAX); + return I40E_ERR_CONFIG; + } + } else { + if (rxq->max_pkt_len < ETHER_MIN_LEN || + rxq->max_pkt_len > ETHER_MAX_LEN) { + PMD_DRV_LOG(ERR, "maximum packet length must be " + "larger than %u and smaller than %u, " + "as jumbo frame is disabled", + (uint32_t)ETHER_MIN_LEN, + (uint32_t)ETHER_MAX_LEN); + return I40E_ERR_CONFIG; + } + } + + return 0; +} + +/* Init the RX queue in hardware */ +int +i40e_rx_queue_init(struct i40e_rx_queue 
*rxq) +{ + int err = I40E_SUCCESS; + struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi); + struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(rxq->vsi); + uint16_t pf_q = rxq->reg_idx; + uint16_t buf_size; + struct i40e_hmc_obj_rxq rx_ctx; + + err = i40e_rx_queue_config(rxq); + if (err < 0) { + PMD_DRV_LOG(ERR, "Failed to config RX queue"); + return err; + } + + /* Clear the context structure first */ + memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq)); + rx_ctx.dbuff = rxq->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT; + rx_ctx.hbuff = rxq->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT; + + rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT; + rx_ctx.qlen = rxq->nb_rx_desc; +#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC + rx_ctx.dsize = 1; +#endif + rx_ctx.dtype = rxq->hs_mode; + if (rxq->hs_mode) + rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_ALL; + else + rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE; + rx_ctx.rxmax = rxq->max_pkt_len; + rx_ctx.tphrdesc_ena = 1; + rx_ctx.tphwdesc_ena = 1; + rx_ctx.tphdata_ena = 1; + rx_ctx.tphhead_ena = 1; + rx_ctx.lrxqthresh = 2; + rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0; + rx_ctx.l2tsel = 1; + rx_ctx.showiv = 1; + rx_ctx.prefena = 1; + + err = i40e_clear_lan_rx_queue_context(hw, pf_q); + if (err != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to clear LAN RX queue context"); + return err; + } + err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx); + if (err != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to set LAN RX queue context"); + return err; + } + + rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); + + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) - + RTE_PKTMBUF_HEADROOM); + + /* Check if scattered RX needs to be used. */ + if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) { + dev_data->scattered_rx = 1; + } + + /* Init the RX tail regieter. */ + I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); + + return 0; +} + +void +i40e_dev_clear_queues(struct rte_eth_dev *dev) +{ + uint16_t i; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + i40e_tx_queue_release_mbufs(dev->data->tx_queues[i]); + i40e_reset_tx_queue(dev->data->tx_queues[i]); + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + i40e_rx_queue_release_mbufs(dev->data->rx_queues[i]); + i40e_reset_rx_queue(dev->data->rx_queues[i]); + } +} + +void +i40e_dev_free_queues(struct rte_eth_dev *dev) +{ + uint16_t i; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + i40e_dev_rx_queue_release(dev->data->rx_queues[i]); + dev->data->rx_queues[i] = NULL; + } + dev->data->nb_rx_queues = 0; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + i40e_dev_tx_queue_release(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = NULL; + } + dev->data->nb_tx_queues = 0; +} + +#define I40E_FDIR_NUM_TX_DESC I40E_MIN_RING_DESC +#define I40E_FDIR_NUM_RX_DESC I40E_MIN_RING_DESC + +enum i40e_status_code +i40e_fdir_setup_tx_resources(struct i40e_pf *pf) +{ + struct i40e_tx_queue *txq; + const struct rte_memzone *tz = NULL; + uint32_t ring_size; + struct rte_eth_dev *dev = pf->adapter->eth_dev; + + if (!pf) { + PMD_DRV_LOG(ERR, "PF is not available"); + return I40E_ERR_BAD_PTR; + } + + /* Allocate the TX queue data structure. */ + txq = rte_zmalloc_socket("i40e fdir tx queue", + sizeof(struct i40e_tx_queue), + RTE_CACHE_LINE_SIZE, + SOCKET_ID_ANY); + if (!txq) { + PMD_DRV_LOG(ERR, "Failed to allocate memory for " + "tx queue structure."); + return I40E_ERR_NO_MEMORY; + } + + /* Allocate TX hardware ring descriptors. 
*/ + ring_size = sizeof(struct i40e_tx_desc) * I40E_FDIR_NUM_TX_DESC; + ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN); + + tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring", + I40E_FDIR_QUEUE_ID, ring_size, + I40E_RING_BASE_ALIGN, SOCKET_ID_ANY); + if (!tz) { + i40e_dev_tx_queue_release(txq); + PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX."); + return I40E_ERR_NO_MEMORY; + } + + txq->nb_tx_desc = I40E_FDIR_NUM_TX_DESC; + txq->queue_id = I40E_FDIR_QUEUE_ID; + txq->reg_idx = pf->fdir.fdir_vsi->base_queue; + txq->vsi = pf->fdir.fdir_vsi; + + txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr); + txq->tx_ring = (struct i40e_tx_desc *)tz->addr; + /* + * don't need to allocate software ring and reset for the fdir + * program queue just set the queue has been configured. + */ + txq->q_set = TRUE; + pf->fdir.txq = txq; + + return I40E_SUCCESS; +} + +enum i40e_status_code +i40e_fdir_setup_rx_resources(struct i40e_pf *pf) +{ + struct i40e_rx_queue *rxq; + const struct rte_memzone *rz = NULL; + uint32_t ring_size; + struct rte_eth_dev *dev = pf->adapter->eth_dev; + + if (!pf) { + PMD_DRV_LOG(ERR, "PF is not available"); + return I40E_ERR_BAD_PTR; + } + + /* Allocate the RX queue data structure. */ + rxq = rte_zmalloc_socket("i40e fdir rx queue", + sizeof(struct i40e_rx_queue), + RTE_CACHE_LINE_SIZE, + SOCKET_ID_ANY); + if (!rxq) { + PMD_DRV_LOG(ERR, "Failed to allocate memory for " + "rx queue structure."); + return I40E_ERR_NO_MEMORY; + } + + /* Allocate RX hardware ring descriptors. */ + ring_size = sizeof(union i40e_rx_desc) * I40E_FDIR_NUM_RX_DESC; + ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN); + + rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring", + I40E_FDIR_QUEUE_ID, ring_size, + I40E_RING_BASE_ALIGN, SOCKET_ID_ANY); + if (!rz) { + i40e_dev_rx_queue_release(rxq); + PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX."); + return I40E_ERR_NO_MEMORY; + } + + rxq->nb_rx_desc = I40E_FDIR_NUM_RX_DESC; + rxq->queue_id = I40E_FDIR_QUEUE_ID; + rxq->reg_idx = pf->fdir.fdir_vsi->base_queue; + rxq->vsi = pf->fdir.fdir_vsi; + + rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr); + rxq->rx_ring = (union i40e_rx_desc *)rz->addr; + + /* + * Don't need to allocate software ring and reset for the fdir + * rx queue, just set the queue has been configured. 
+ */ + rxq->q_set = TRUE; + pf->fdir.rxq = rxq; + + return I40E_SUCCESS; +} + +void +i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct i40e_rx_queue *rxq; + + rxq = dev->data->rx_queues[queue_id]; + + qinfo->mp = rxq->mp; + qinfo->scattered_rx = dev->data->scattered_rx; + qinfo->nb_desc = rxq->nb_rx_desc; + + qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; + qinfo->conf.rx_drop_en = rxq->drop_en; + qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; +} + +void +i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct i40e_tx_queue *txq; + + txq = dev->data->tx_queues[queue_id]; + + qinfo->nb_desc = txq->nb_tx_desc; + + qinfo->conf.tx_thresh.pthresh = txq->pthresh; + qinfo->conf.tx_thresh.hthresh = txq->hthresh; + qinfo->conf.tx_thresh.wthresh = txq->wthresh; + + qinfo->conf.tx_free_thresh = txq->tx_free_thresh; + qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh; + qinfo->conf.txq_flags = txq->txq_flags; + qinfo->conf.tx_deferred_start = txq->tx_deferred_start; +} + +void __attribute__((cold)) +i40e_set_rx_function(struct rte_eth_dev *dev) +{ + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + uint16_t rx_using_sse, i; + /* In order to allow Vector Rx there are a few configuration + * conditions to be met and Rx Bulk Allocation should be allowed. + */ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + if (i40e_rx_vec_dev_conf_condition_check(dev) || + !ad->rx_bulk_alloc_allowed) { + PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet" + " Vector Rx preconditions", + dev->data->port_id); + + ad->rx_vec_allowed = false; + } + if (ad->rx_vec_allowed) { + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct i40e_rx_queue *rxq = + dev->data->rx_queues[i]; + + if (i40e_rxq_vec_setup(rxq)) { + ad->rx_vec_allowed = false; + break; + } + } + } + } + + if (dev->data->scattered_rx) { + /* Set the non-LRO scattered callback: there are Vector and + * single allocation versions. + */ + if (ad->rx_vec_allowed) { + PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx " + "callback (port=%d).", + dev->data->port_id); + + dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec; + } else { + PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk " + "allocation callback (port=%d).", + dev->data->port_id); + dev->rx_pkt_burst = i40e_recv_scattered_pkts; + } + /* If parameters allow we are going to choose between the following + * callbacks: + * - Vector + * - Bulk Allocation + * - Single buffer allocation (the simplest one) + */ + } else if (ad->rx_vec_allowed) { + PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX " + "burst size no less than %d (port=%d).", + RTE_I40E_DESCS_PER_LOOP, + dev->data->port_id); + + dev->rx_pkt_burst = i40e_recv_pkts_vec; + } else if (ad->rx_bulk_alloc_allowed) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " + "satisfied. Rx Burst Bulk Alloc function " + "will be used on port=%d.", + dev->data->port_id); + + dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc; + } else { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not " + "satisfied, or Scattered Rx is requested " + "(port=%d).", + dev->data->port_id); + + dev->rx_pkt_burst = i40e_recv_pkts; + } + + /* Propagate information about RX function choice through all queues. 
*/ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + rx_using_sse = + (dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec || + dev->rx_pkt_burst == i40e_recv_pkts_vec); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct i40e_rx_queue *rxq = dev->data->rx_queues[i]; + + rxq->rx_using_sse = rx_using_sse; + } + } +} + +void __attribute__((cold)) +i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq) +{ + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + + /* Use a simple Tx queue (no offloads, no multi segs) if possible */ + if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS) + && (txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST)) { + if (txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ) { + PMD_INIT_LOG(DEBUG, "Vector tx" + " can be enabled on this txq."); + + } else { + ad->tx_vec_allowed = false; + } + } else { + ad->tx_simple_allowed = false; + } +} + +void __attribute__((cold)) +i40e_set_tx_function(struct rte_eth_dev *dev) +{ + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + int i; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + if (ad->tx_vec_allowed) { + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct i40e_tx_queue *txq = + dev->data->tx_queues[i]; + + if (i40e_txq_vec_setup(txq)) { + ad->tx_vec_allowed = false; + break; + } + } + } + } + + if (ad->tx_simple_allowed) { + if (ad->tx_vec_allowed) { + PMD_INIT_LOG(DEBUG, "Vector tx finally be used."); + dev->tx_pkt_burst = i40e_xmit_pkts_vec; + } else { + PMD_INIT_LOG(DEBUG, "Simple tx finally be used."); + dev->tx_pkt_burst = i40e_xmit_pkts_simple; + } + } else { + PMD_INIT_LOG(DEBUG, "Xmit tx finally be used."); + dev->tx_pkt_burst = i40e_xmit_pkts; + } +} + +/* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */ +int __attribute__((weak)) +i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev) +{ + return -1; +} + +uint16_t __attribute__((weak)) +i40e_recv_pkts_vec( + void __rte_unused *rx_queue, + struct rte_mbuf __rte_unused **rx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} + +uint16_t __attribute__((weak)) +i40e_recv_scattered_pkts_vec( + void __rte_unused *rx_queue, + struct rte_mbuf __rte_unused **rx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} + +int __attribute__((weak)) +i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq) +{ + return -1; +} + +int __attribute__((weak)) +i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq) +{ + return -1; +} + +void __attribute__((weak)) +i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq) +{ + return; +} + +uint16_t __attribute__((weak)) +i40e_xmit_pkts_vec(void __rte_unused *tx_queue, + struct rte_mbuf __rte_unused **tx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h new file mode 100644 index 00000000..98179f00 --- /dev/null +++ b/drivers/net/i40e/i40e_rxtx.h @@ -0,0 +1,258 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _I40E_RXTX_H_ +#define _I40E_RXTX_H_ + +/** + * 32 bits tx flags, high 16 bits for L2TAG1 (VLAN), + * low 16 bits for others. + */ +#define I40E_TX_FLAG_L2TAG1_SHIFT 16 +#define I40E_TX_FLAG_L2TAG1_MASK 0xffff0000 +#define I40E_TX_FLAG_CSUM ((uint32_t)(1 << 0)) +#define I40E_TX_FLAG_INSERT_VLAN ((uint32_t)(1 << 1)) +#define I40E_TX_FLAG_TSYN ((uint32_t)(1 << 2)) + +#define RTE_PMD_I40E_RX_MAX_BURST 32 +#define RTE_PMD_I40E_TX_MAX_BURST 32 + +#define RTE_I40E_VPMD_RX_BURST 32 +#define RTE_I40E_VPMD_TX_BURST 32 +#define RTE_I40E_RXQ_REARM_THRESH 32 +#define RTE_I40E_MAX_RX_BURST RTE_I40E_RXQ_REARM_THRESH +#define RTE_I40E_TX_MAX_FREE_BUF_SZ 64 +#define RTE_I40E_DESCS_PER_LOOP 4 + +#define I40E_RXBUF_SZ_1024 1024 +#define I40E_RXBUF_SZ_2048 2048 + +/* In none-PXE mode QLEN must be whole number of 32 descriptors. */ +#define I40E_ALIGN_RING_DESC 32 + +#define I40E_MIN_RING_DESC 64 +#define I40E_MAX_RING_DESC 4096 + +#undef container_of +#define container_of(ptr, type, member) ({ \ + typeof(((type *)0)->member)(*__mptr) = (ptr); \ + (type *)((char *)__mptr - offsetof(type, member)); }) + +#define I40E_TD_CMD (I40E_TX_DESC_CMD_ICRC |\ + I40E_TX_DESC_CMD_EOP) + +enum i40e_header_split_mode { + i40e_header_split_none = 0, + i40e_header_split_enabled = 1, + i40e_header_split_always = 2, + i40e_header_split_reserved +}; + +#define I40E_HEADER_SPLIT_NONE ((uint8_t)0) +#define I40E_HEADER_SPLIT_L2 ((uint8_t)(1 << 0)) +#define I40E_HEADER_SPLIT_IP ((uint8_t)(1 << 1)) +#define I40E_HEADER_SPLIT_UDP_TCP ((uint8_t)(1 << 2)) +#define I40E_HEADER_SPLIT_SCTP ((uint8_t)(1 << 3)) +#define I40E_HEADER_SPLIT_ALL (I40E_HEADER_SPLIT_L2 | \ + I40E_HEADER_SPLIT_IP | \ + I40E_HEADER_SPLIT_UDP_TCP | \ + I40E_HEADER_SPLIT_SCTP) + +/* HW desc structure, both 16-byte and 32-byte types are supported */ +#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC +#define i40e_rx_desc i40e_16byte_rx_desc +#else +#define i40e_rx_desc i40e_32byte_rx_desc +#endif + +struct i40e_rx_entry { + struct rte_mbuf *mbuf; +}; + +/* + * Structure associated with each RX queue. 
+ */ +struct i40e_rx_queue { + struct rte_mempool *mp; /**< mbuf pool to populate RX ring */ + volatile union i40e_rx_desc *rx_ring;/**< RX ring virtual address */ + uint64_t rx_ring_phys_addr; /**< RX ring DMA address */ + struct i40e_rx_entry *sw_ring; /**< address of RX soft ring */ + uint16_t nb_rx_desc; /**< number of RX descriptors */ + uint16_t rx_free_thresh; /**< max free RX desc to hold */ + uint16_t rx_tail; /**< current value of tail */ + uint16_t nb_rx_hold; /**< number of held free RX desc */ + struct rte_mbuf *pkt_first_seg; /**< first segment of current packet */ + struct rte_mbuf *pkt_last_seg; /**< last segment of current packet */ +#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC + uint16_t rx_nb_avail; /**< number of staged packets ready */ + uint16_t rx_next_avail; /**< index of next staged packets */ + uint16_t rx_free_trigger; /**< triggers rx buffer allocation */ + struct rte_mbuf fake_mbuf; /**< dummy mbuf */ + struct rte_mbuf *rx_stage[RTE_PMD_I40E_RX_MAX_BURST * 2]; +#endif + + uint16_t rxrearm_nb; /**< number of remaining to be re-armed */ + uint16_t rxrearm_start; /**< the idx we start the re-arming from */ + uint64_t mbuf_initializer; /**< value to init mbufs */ + + uint8_t port_id; /**< device port ID */ + uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise */ + uint16_t queue_id; /**< RX queue index */ + uint16_t reg_idx; /**< RX queue register index */ + uint8_t drop_en; /**< if not 0, set register bit */ + volatile uint8_t *qrx_tail; /**< register address of tail */ + struct i40e_vsi *vsi; /**< the VSI this queue belongs to */ + uint16_t rx_buf_len; /* The packet buffer size */ + uint16_t rx_hdr_len; /* The header buffer size */ + uint16_t max_pkt_len; /* Maximum packet length */ + uint8_t hs_mode; /* Header Split mode */ + bool q_set; /**< indicate if rx queue has been configured */ + bool rx_deferred_start; /**< don't start this queue in dev start */ + uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */ + uint8_t dcb_tc; /**< Traffic class of rx queue */ +}; + +struct i40e_tx_entry { + struct rte_mbuf *mbuf; + uint16_t next_id; + uint16_t last_id; +}; + +/* + * Structure associated with each TX queue. + */ +struct i40e_tx_queue { + uint16_t nb_tx_desc; /**< number of TX descriptors */ + uint64_t tx_ring_phys_addr; /**< TX ring DMA address */ + volatile struct i40e_tx_desc *tx_ring; /**< TX ring virtual address */ + struct i40e_tx_entry *sw_ring; /**< virtual address of SW ring */ + uint16_t tx_tail; /**< current value of tail register */ + volatile uint8_t *qtx_tail; /**< register address of tail */ + uint16_t nb_tx_used; /**< number of TX desc used since RS bit set */ + /**< index to last TX descriptor to have been cleaned */ + uint16_t last_desc_cleaned; + /**< Total number of TX descriptors ready to be allocated. */ + uint16_t nb_tx_free; + /**< Start freeing TX buffers if there are less free descriptors than + this value. */ + uint16_t tx_free_thresh; + /** Number of TX descriptors to use before RS bit is set. */ + uint16_t tx_rs_thresh; + uint8_t pthresh; /**< Prefetch threshold register. */ + uint8_t hthresh; /**< Host threshold register. */ + uint8_t wthresh; /**< Write-back threshold reg. */ + uint8_t port_id; /**< Device port identifier. */ + uint16_t queue_id; /**< TX queue index. 
*/ + uint16_t reg_idx; + uint32_t txq_flags; + struct i40e_vsi *vsi; /**< the VSI this queue belongs to */ + uint16_t tx_next_dd; + uint16_t tx_next_rs; + bool q_set; /**< indicate if tx queue has been configured */ + bool tx_deferred_start; /**< don't start this queue in dev start */ + uint8_t dcb_tc; /**< Traffic class of tx queue */ +}; + +/** Offload features */ +union i40e_tx_offload { + uint64_t data; + struct { + uint64_t l2_len:7; /**< L2 (MAC) Header Length. */ + uint64_t l3_len:9; /**< L3 (IP) Header Length. */ + uint64_t l4_len:8; /**< L4 Header Length. */ + uint64_t tso_segsz:16; /**< TCP TSO segment size */ + uint64_t outer_l2_len:8; /**< outer L2 Header Length */ + uint64_t outer_l3_len:16; /**< outer L3 Header Length */ + }; +}; + +int i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id); +int i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id); +int i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id); +int i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id); +const uint32_t *i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev); +int i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp); +int i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); +void i40e_dev_rx_queue_release(void *rxq); +void i40e_dev_tx_queue_release(void *txq); +uint16_t i40e_recv_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t i40e_recv_scattered_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t i40e_xmit_pkts(void *tx_queue, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +int i40e_tx_queue_init(struct i40e_tx_queue *txq); +int i40e_rx_queue_init(struct i40e_rx_queue *rxq); +void i40e_free_tx_resources(struct i40e_tx_queue *txq); +void i40e_free_rx_resources(struct i40e_rx_queue *rxq); +void i40e_dev_clear_queues(struct rte_eth_dev *dev); +void i40e_dev_free_queues(struct rte_eth_dev *dev); +void i40e_reset_rx_queue(struct i40e_rx_queue *rxq); +void i40e_reset_tx_queue(struct i40e_tx_queue *txq); +void i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq); +int i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq); +void i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq); + +uint32_t i40e_dev_rx_queue_count(struct rte_eth_dev *dev, + uint16_t rx_queue_id); +int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset); + +uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t i40e_recv_scattered_pkts_vec(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +int i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev); +int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq); +int i40e_txq_vec_setup(struct i40e_tx_queue *txq); +void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq); +uint16_t i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +void i40e_set_rx_function(struct rte_eth_dev *dev); +void i40e_set_tx_function_flag(struct rte_eth_dev *dev, + struct i40e_tx_queue *txq); +void i40e_set_tx_function(struct rte_eth_dev *dev); + +#endif /* _I40E_RXTX_H_ */ diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c new file mode 100644 index 00000000..047aff53 --- /dev/null +++ 
b/drivers/net/i40e/i40e_rxtx_vec.c @@ -0,0 +1,777 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <stdint.h> +#include <rte_ethdev.h> +#include <rte_malloc.h> + +#include "base/i40e_prototype.h" +#include "base/i40e_type.h" +#include "i40e_ethdev.h" +#include "i40e_rxtx.h" + +#include <tmmintrin.h> + +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +static inline void +i40e_rxq_rearm(struct i40e_rx_queue *rxq) +{ + int i; + uint16_t rx_id; + volatile union i40e_rx_desc *rxdp; + struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start]; + struct rte_mbuf *mb0, *mb1; + __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM, + RTE_PKTMBUF_HEADROOM); + __m128i dma_addr0, dma_addr1; + + rxdp = rxq->rx_ring + rxq->rxrearm_start; + + /* Pull 'n' more MBUFs into the software ring */ + if (rte_mempool_get_bulk(rxq->mp, + (void *)rxep, + RTE_I40E_RXQ_REARM_THRESH) < 0) { + if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >= + rxq->nb_rx_desc) { + dma_addr0 = _mm_setzero_si128(); + for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) { + rxep[i].mbuf = &rxq->fake_mbuf; + _mm_store_si128((__m128i *)&rxdp[i].read, + dma_addr0); + } + } + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += + RTE_I40E_RXQ_REARM_THRESH; + return; + } + + /* Initialize the mbufs in vector, process 2 mbufs in one loop */ + for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) { + __m128i vaddr0, vaddr1; + uintptr_t p0, p1; + + mb0 = rxep[0].mbuf; + mb1 = rxep[1].mbuf; + + /* Flush mbuf with pkt template. + * Data to be rearmed is 6 bytes long. + * Though, RX will overwrite ol_flags that are coming next + * anyway. So overwrite whole 8 bytes with one load: + * 6 bytes of rearm_data plus first 2 bytes of ol_flags. 
+ */ + p0 = (uintptr_t)&mb0->rearm_data; + *(uint64_t *)p0 = rxq->mbuf_initializer; + p1 = (uintptr_t)&mb1->rearm_data; + *(uint64_t *)p1 = rxq->mbuf_initializer; + + /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */ + vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr); + vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr); + + /* convert pa to dma_addr hdr/data */ + dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0); + dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1); + + /* add headroom to pa values */ + dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room); + dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room); + + /* flush desc with pa dma_addr */ + _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0); + _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1); + } + + rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH; + if (rxq->rxrearm_start >= rxq->nb_rx_desc) + rxq->rxrearm_start = 0; + + rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH; + + rx_id = (uint16_t)((rxq->rxrearm_start == 0) ? + (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1)); + + /* Update the tail pointer on the NIC */ + I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id); +} + +/* Handling the offload flags (olflags) field takes computation + * time when receiving packets. Therefore we provide a flag to disable + * the processing of the olflags field when they are not needed. This + * gives improved performance, at the cost of losing the offload info + * in the received packet + */ +#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE + +static inline void +desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) +{ + __m128i vlan0, vlan1, rss; + union { + uint16_t e[4]; + uint64_t dword; + } vol; + + /* mask everything except rss and vlan flags + *bit2 is for vlan tag, bits 13:12 for rss + */ + const __m128i rss_vlan_msk = _mm_set_epi16( + 0x0000, 0x0000, 0x0000, 0x0000, + 0x3004, 0x3004, 0x3004, 0x3004); + + /* map rss and vlan type to rss hash and vlan flag */ + const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, PKT_RX_VLAN_PKT, + 0, 0, 0, 0); + + const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + PKT_RX_FDIR, 0, PKT_RX_RSS_HASH, 0); + + vlan0 = _mm_unpackhi_epi16(descs[0], descs[1]); + vlan1 = _mm_unpackhi_epi16(descs[2], descs[3]); + vlan0 = _mm_unpacklo_epi32(vlan0, vlan1); + + vlan1 = _mm_and_si128(vlan0, rss_vlan_msk); + vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1); + + rss = _mm_srli_epi16(vlan1, 12); + rss = _mm_shuffle_epi8(rss_flags, rss); + + vlan0 = _mm_or_si128(vlan0, rss); + vol.dword = _mm_cvtsi128_si64(vlan0); + + rx_pkts[0]->ol_flags = vol.e[0]; + rx_pkts[1]->ol_flags = vol.e[1]; + rx_pkts[2]->ol_flags = vol.e[2]; + rx_pkts[3]->ol_flags = vol.e[3]; +} +#else +#define desc_to_olflags_v(desc, rx_pkts) do {} while (0) +#endif + +#define PKTLEN_SHIFT (6) +#define PKTLEN_MASK (0x3FFF) +/* Handling the pkt len field is not aligned with 1byte, so shift is + * needed to let it align + */ +static inline void +desc_pktlen_align(__m128i descs[4]) +{ + __m128i pktlen0, pktlen1, zero; + union { + uint16_t e[4]; + uint64_t dword; + } vol; + + /* mask everything except pktlen field*/ + const __m128i pktlen_msk = _mm_set_epi32(PKTLEN_MASK, PKTLEN_MASK, + PKTLEN_MASK, PKTLEN_MASK); + + pktlen0 = _mm_unpackhi_epi32(descs[0], descs[2]); + pktlen1 = _mm_unpackhi_epi32(descs[1], descs[3]); + pktlen0 = _mm_unpackhi_epi32(pktlen0, pktlen1); + + zero = _mm_xor_si128(pktlen0, pktlen0); + + pktlen0 = _mm_srli_epi32(pktlen0, PKTLEN_SHIFT); + pktlen0 = _mm_and_si128(pktlen0, pktlen_msk); + + pktlen0 = 
_mm_packs_epi32(pktlen0, zero); + vol.dword = _mm_cvtsi128_si64(pktlen0); + /* let the descriptor byte 15-14 store the pkt len */ + *((uint16_t *)&descs[0]+7) = vol.e[0]; + *((uint16_t *)&descs[1]+7) = vol.e[1]; + *((uint16_t *)&descs[2]+7) = vol.e[2]; + *((uint16_t *)&descs[3]+7) = vol.e[3]; +} + + /* + * Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet + * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST + * numbers of DD bits + */ +static inline uint16_t +_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, uint8_t *split_packet) +{ + volatile union i40e_rx_desc *rxdp; + struct i40e_rx_entry *sw_ring; + uint16_t nb_pkts_recd; + int pos; + uint64_t var; + __m128i shuf_msk; + + __m128i crc_adjust = _mm_set_epi16( + 0, 0, 0, /* ignore non-length fields */ + -rxq->crc_len, /* sub crc on data_len */ + 0, /* ignore high-16bits of pkt_len */ + -rxq->crc_len, /* sub crc on pkt_len */ + 0, 0 /* ignore pkt_type field */ + ); + __m128i dd_check, eop_check; + + /* nb_pkts shall be less equal than RTE_I40E_MAX_RX_BURST */ + nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST); + + /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP); + + /* Just the act of getting into the function from the application is + * going to cost about 7 cycles + */ + rxdp = rxq->rx_ring + rxq->rx_tail; + + _mm_prefetch((const void *)rxdp, _MM_HINT_T0); + + /* See if we need to rearm the RX queue - gives the prefetch a bit + * of time to act + */ + if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH) + i40e_rxq_rearm(rxq); + + /* Before we start moving massive data around, check to see if + * there is actually a packet available + */ + if (!(rxdp->wb.qword1.status_error_len & + rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + return 0; + + /* 4 packets DD mask */ + dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL); + + /* 4 packets EOP mask */ + eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL); + + /* mask to shuffle from desc. to mbuf */ + shuf_msk = _mm_set_epi8( + 7, 6, 5, 4, /* octet 4~7, 32bits rss */ + 3, 2, /* octet 2~3, low 16 bits vlan_macip */ + 15, 14, /* octet 15~14, 16 bits data_len */ + 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ + 15, 14, /* octet 15~14, low 16 bits pkt_len */ + 0xFF, 0xFF, /* pkt_type set as unknown */ + 0xFF, 0xFF /*pkt_type set as unknown */ + ); + + /* Cache is empty -> need to scan the buffer rings, but first move + * the next 'n' mbufs into the cache + */ + sw_ring = &rxq->sw_ring[rxq->rx_tail]; + + /* A. load 4 packet in one loop + * [A*. mask out 4 unused dirty field in desc] + * B. copy 4 mbuf point from swring to rx_pkts + * C. calc the number of DD bits among the 4 packets + * [C*. extract the end-of-packet bit, if requested] + * D. fill info. from desc to mbuf + */ + + for (pos = 0, nb_pkts_recd = 0; pos < RTE_I40E_VPMD_RX_BURST; + pos += RTE_I40E_DESCS_PER_LOOP, + rxdp += RTE_I40E_DESCS_PER_LOOP) { + __m128i descs[RTE_I40E_DESCS_PER_LOOP]; + __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4; + __m128i zero, staterr, sterr_tmp1, sterr_tmp2; + __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. 
*/ + + /* B.1 load 1 mbuf point */ + mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]); + /* Read desc statuses backwards to avoid race condition */ + /* A.1 load 4 pkts desc */ + descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3)); + + /* B.2 copy 2 mbuf point into rx_pkts */ + _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1); + + /* B.1 load 1 mbuf point */ + mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]); + + descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2)); + /* B.1 load 2 mbuf point */ + descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1)); + descs[0] = _mm_loadu_si128((__m128i *)(rxdp)); + + /* B.2 copy 2 mbuf point into rx_pkts */ + _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2); + + if (split_packet) { + rte_prefetch0(&rx_pkts[pos]->cacheline1); + rte_prefetch0(&rx_pkts[pos + 1]->cacheline1); + rte_prefetch0(&rx_pkts[pos + 2]->cacheline1); + rte_prefetch0(&rx_pkts[pos + 3]->cacheline1); + } + + /*shift the pktlen field*/ + desc_pktlen_align(descs); + + /* avoid compiler reorder optimization */ + rte_compiler_barrier(); + + /* D.1 pkt 3,4 convert format from desc to pktmbuf */ + pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk); + pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk); + + /* C.1 4=>2 filter staterr info only */ + sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]); + /* C.1 4=>2 filter staterr info only */ + sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]); + + desc_to_olflags_v(descs, &rx_pkts[pos]); + + /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */ + pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust); + pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust); + + /* D.1 pkt 1,2 convert format from desc to pktmbuf */ + pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk); + pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk); + + /* C.2 get 4 pkts staterr value */ + zero = _mm_xor_si128(dd_check, dd_check); + staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2); + + /* D.3 copy final 3,4 data to rx_pkts */ + _mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1, + pkt_mb4); + _mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1, + pkt_mb3); + + /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */ + pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust); + pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust); + + /* C* extract and record EOP bit */ + if (split_packet) { + __m128i eop_shuf_mask = _mm_set_epi8( + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, + 0x04, 0x0C, 0x00, 0x08 + ); + + /* and with mask to extract bits, flipping 1-0 */ + __m128i eop_bits = _mm_andnot_si128(staterr, eop_check); + /* the staterr values are not in order, as the count + * count of dd bits doesn't care. However, for end of + * packet tracking, we do care, so shuffle. 
This also + * compresses the 32-bit values to 8-bit + */ + eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask); + /* store the resulting 32-bit value */ + *(int *)split_packet = _mm_cvtsi128_si32(eop_bits); + split_packet += RTE_I40E_DESCS_PER_LOOP; + + /* zero-out next pointers */ + rx_pkts[pos]->next = NULL; + rx_pkts[pos + 1]->next = NULL; + rx_pkts[pos + 2]->next = NULL; + rx_pkts[pos + 3]->next = NULL; + } + + /* C.3 calc available number of desc */ + staterr = _mm_and_si128(staterr, dd_check); + staterr = _mm_packs_epi32(staterr, zero); + + /* D.3 copy final 1,2 data to rx_pkts */ + _mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1, + pkt_mb2); + _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1, + pkt_mb1); + /* C.4 calc avaialbe number of desc */ + var = __builtin_popcountll(_mm_cvtsi128_si64(staterr)); + nb_pkts_recd += var; + if (likely(var != RTE_I40E_DESCS_PER_LOOP)) + break; + } + + /* Update our internal tail pointer */ + rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd); + rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1)); + rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd); + + return nb_pkts_recd; +} + + /* + * Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet + * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST + * numbers of DD bits + */ +uint16_t +i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL); +} + +static inline uint16_t +reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs, + uint16_t nb_bufs, uint8_t *split_flags) +{ + struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST]; /*finished pkts*/ + struct rte_mbuf *start = rxq->pkt_first_seg; + struct rte_mbuf *end = rxq->pkt_last_seg; + unsigned pkt_idx, buf_idx; + + for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) { + if (end != NULL) { + /* processing a split packet */ + end->next = rx_bufs[buf_idx]; + rx_bufs[buf_idx]->data_len += rxq->crc_len; + + start->nb_segs++; + start->pkt_len += rx_bufs[buf_idx]->data_len; + end = end->next; + + if (!split_flags[buf_idx]) { + /* it's the last packet of the set */ + start->hash = end->hash; + start->ol_flags = end->ol_flags; + /* we need to strip crc for the whole packet */ + start->pkt_len -= rxq->crc_len; + if (end->data_len > rxq->crc_len) { + end->data_len -= rxq->crc_len; + } else { + /* free up last mbuf */ + struct rte_mbuf *secondlast = start; + + while (secondlast->next != end) + secondlast = secondlast->next; + secondlast->data_len -= (rxq->crc_len - + end->data_len); + secondlast->next = NULL; + rte_pktmbuf_free_seg(end); + end = secondlast; + } + pkts[pkt_idx++] = start; + start = end = NULL; + } + } else { + /* not processing a split packet */ + if (!split_flags[buf_idx]) { + /* not a split packet, save and skip */ + pkts[pkt_idx++] = rx_bufs[buf_idx]; + continue; + } + end = start = rx_bufs[buf_idx]; + rx_bufs[buf_idx]->data_len += rxq->crc_len; + rx_bufs[buf_idx]->pkt_len += rxq->crc_len; + } + } + + /* save the partial packet for next time */ + rxq->pkt_first_seg = start; + rxq->pkt_last_seg = end; + memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts))); + return pkt_idx; +} + + /* vPMD receive routine that reassembles scattered packets + * Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet + * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST + * numbers of DD bits + */ +uint16_t 
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + + struct i40e_rx_queue *rxq = rx_queue; + uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0}; + + /* get some new buffers */ + uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts, + split_flags); + if (nb_bufs == 0) + return 0; + + /* happy day case, full burst + no packets to be joined */ + const uint64_t *split_fl64 = (uint64_t *)split_flags; + + if (rxq->pkt_first_seg == NULL && + split_fl64[0] == 0 && split_fl64[1] == 0 && + split_fl64[2] == 0 && split_fl64[3] == 0) + return nb_bufs; + + /* reassemble any packets that need reassembly*/ + unsigned i = 0; + + if (rxq->pkt_first_seg == NULL) { + /* find the first split flag, and only reassemble then*/ + while (i < nb_bufs && !split_flags[i]) + i++; + if (i == nb_bufs) + return nb_bufs; + } + return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, + &split_flags[i]); +} + +static inline void +vtx1(volatile struct i40e_tx_desc *txdp, + struct rte_mbuf *pkt, uint64_t flags) +{ + uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA | + ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) | + ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)); + + __m128i descriptor = _mm_set_epi64x(high_qw, + pkt->buf_physaddr + pkt->data_off); + _mm_store_si128((__m128i *)txdp, descriptor); +} + +static inline void +vtx(volatile struct i40e_tx_desc *txdp, + struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags) +{ + int i; + + for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt) + vtx1(txdp, *pkt, flags); +} + +static inline int __attribute__((always_inline)) +i40e_tx_free_bufs(struct i40e_tx_queue *txq) +{ + struct i40e_tx_entry *txep; + uint32_t n; + uint32_t i; + int nb_free = 0; + struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ]; + + /* check DD bits on threshold descriptor */ + if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz & + rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) != + rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) + return 0; + + n = txq->tx_rs_thresh; + + /* first buffer to free from S/W ring is at index + * tx_next_dd - (tx_rs_thresh-1) + */ + txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)]; + m = __rte_pktmbuf_prefree_seg(txep[0].mbuf); + if (likely(m != NULL)) { + free[0] = m; + nb_free = 1; + for (i = 1; i < n; i++) { + m = __rte_pktmbuf_prefree_seg(txep[i].mbuf); + if (likely(m != NULL)) { + if (likely(m->pool == free[0]->pool)) { + free[nb_free++] = m; + } else { + rte_mempool_put_bulk(free[0]->pool, + (void *)free, + nb_free); + free[0] = m; + nb_free = 1; + } + } + } + rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free); + } else { + for (i = 1; i < n; i++) { + m = __rte_pktmbuf_prefree_seg(txep[i].mbuf); + if (m != NULL) + rte_mempool_put(m->pool, m); + } + } + + /* buffers were freed, update counters */ + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh); + txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh); + if (txq->tx_next_dd >= txq->nb_tx_desc) + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + + return txq->tx_rs_thresh; +} + +static inline void __attribute__((always_inline)) +tx_backlog_entry(struct i40e_tx_entry *txep, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + int i; + + for (i = 0; i < (int)nb_pkts; ++i) + txep[i].mbuf = tx_pkts[i]; +} + +uint16_t +i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue; + volatile struct i40e_tx_desc *txdp; + struct i40e_tx_entry *txep; + 
uint16_t n, nb_commit, tx_id; + uint64_t flags = I40E_TD_CMD; + uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD; + int i; + + /* cross rx_thresh boundary is not allowed */ + nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh); + + if (txq->nb_tx_free < txq->tx_free_thresh) + i40e_tx_free_bufs(txq); + + nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); + if (unlikely(nb_pkts == 0)) + return 0; + + tx_id = txq->tx_tail; + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); + + n = (uint16_t)(txq->nb_tx_desc - tx_id); + if (nb_commit >= n) { + tx_backlog_entry(txep, tx_pkts, n); + + for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp) + vtx1(txdp, *tx_pkts, flags); + + vtx1(txdp, *tx_pkts++, rs); + + nb_commit = (uint16_t)(nb_commit - n); + + tx_id = 0; + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + + /* avoid reach the end of ring */ + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + } + + tx_backlog_entry(txep, tx_pkts, nb_commit); + + vtx(txdp, tx_pkts, nb_commit, flags); + + tx_id = (uint16_t)(tx_id + nb_commit); + if (tx_id > txq->tx_next_rs) { + txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |= + rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) << + I40E_TXD_QW1_CMD_SHIFT); + txq->tx_next_rs = + (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh); + } + + txq->tx_tail = tx_id; + + I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); + + return nb_pkts; +} + +void __attribute__((cold)) +i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq) +{ + const unsigned mask = rxq->nb_rx_desc - 1; + unsigned i; + + if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc) + return; + + /* free all mbufs that are valid in the ring */ + for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask) + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + rxq->rxrearm_nb = rxq->nb_rx_desc; + + /* set all entries to NULL */ + memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc); +} + +int __attribute__((cold)) +i40e_rxq_vec_setup(struct i40e_rx_queue *rxq) +{ + uintptr_t p; + struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */ + + mb_def.nb_segs = 1; + mb_def.data_off = RTE_PKTMBUF_HEADROOM; + mb_def.port = rxq->port_id; + rte_mbuf_refcnt_set(&mb_def, 1); + + /* prevent compiler reordering: rearm_data covers previous fields */ + rte_compiler_barrier(); + p = (uintptr_t)&mb_def.rearm_data; + rxq->mbuf_initializer = *(uint64_t *)p; + return 0; +} + +int __attribute__((cold)) +i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq) +{ + return 0; +} + +int __attribute__((cold)) +i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) +{ +#ifndef RTE_LIBRTE_IEEE1588 + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf; + +#ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE + /* whithout rx ol_flags, no VP flag report */ + if (rxmode->hw_vlan_strip != 0 || + rxmode->hw_vlan_extend != 0) + return -1; +#endif + + /* no fdir support */ + if (fconf->mode != RTE_FDIR_MODE_NONE) + return -1; + + /* - no csum error report support + * - no header split support + */ + if (rxmode->hw_ip_checksum == 1 || + rxmode->header_split == 1) + return -1; + + return 0; +#else + RTE_SET_USED(dev); + return -1; +#endif +} diff --git a/drivers/net/i40e/rte_pmd_i40e_version.map b/drivers/net/i40e/rte_pmd_i40e_version.map new file mode 100644 index 00000000..ef353984 --- /dev/null +++ b/drivers/net/i40e/rte_pmd_i40e_version.map @@ -0,0 +1,4 
@@ +DPDK_2.0 { + + local: *; +}; diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile new file mode 100644 index 00000000..50bf51c9 --- /dev/null +++ b/drivers/net/ixgbe/Makefile @@ -0,0 +1,124 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2015 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_ixgbe.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +EXPORT_MAP := rte_pmd_ixgbe_version.map + +LIBABIVER := 1 + +ifeq ($(CC), icc) +# +# CFLAGS for icc +# +CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259 + +CFLAGS_ixgbe_rxtx.o += -wd3656 + +else ifeq ($(CC), clang) +# +# CFLAGS for clang +# +CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value +CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args + +else +# +# CFLAGS for gcc +# +ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1) +CFLAGS += -Wno-deprecated +CFLAGS_ixgbe_common.o += -Wno-unused-but-set-variable +CFLAGS_ixgbe_x550.o += -Wno-unused-but-set-variable +endif +CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value +CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args + +ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1) +CFLAGS_ixgbe_x550.o += -Wno-maybe-uninitialized +endif + +ifeq ($(shell test $(GCC_VERSION) -ge 50 && echo 1), 1) +CFLAGS_ixgbe_common.o += -Wno-logical-not-parentheses +endif + +endif + +# +# Add extra flags for base driver files (also known as shared code) +# to disable warnings in them +# +BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))) +$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER))) + +VPATH += $(SRCDIR)/base + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_common.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82598.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_x540.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_x550.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_phy.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_api.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_vf.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82599.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82598.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_mbx.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_fdir.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_pf.c +SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec.c + +ifeq ($(CONFIG_RTE_NIC_BYPASS),y) +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c +endif + + +# this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_eal lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_mempool lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_net + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/ixgbe/base/README b/drivers/net/ixgbe/base/README new file mode 100644 index 00000000..caa26640 --- /dev/null +++ b/drivers/net/ixgbe/base/README @@ -0,0 +1,61 @@ +.. + BSD LICENSE + + Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Intel® IXGBE driver +=================== + +This directory contains source code of FreeBSD ixgbe driver of version +cid-10g-shared-code.2016.01.07 released by ND. The sub-directory of base/ +contains the original source package. +This driver is valid for the product(s) listed below + +* Intel® 10 Gigabit AF DA Dual Port Server Adapter +* Intel® 10 Gigabit AT Server Adapter +* Intel® 10 Gigabit AT2 Server Adapter +* Intel® 10 Gigabit CX4 Dual Port Server Adapter +* Intel® 10 Gigabit XF LR Server Adapter +* Intel® 10 Gigabit XF SR Dual Port Server Adapter +* Intel® 10 Gigabit XF SR Server Adapter +* Intel® 82598 10 Gigabit Ethernet Controller +* Intel® 82599 10 Gigabit Ethernet Controller +* Intel® Ethernet Controller X540-AT2 +* Intel® Ethernet Server Adapter X520 Series +* Intel® Ethernet Server Adapter X520-T2 +* Intel® Ethernet Controller X550 Series + +Updating the driver +=================== + +NOTE: The source code in this directory should not be modified apart from +the following file(s): + + ixgbe_osdep.h diff --git a/drivers/net/ixgbe/base/ixgbe_82598.c b/drivers/net/ixgbe/base/ixgbe_82598.c new file mode 100644 index 00000000..9e65fffa --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_82598.c @@ -0,0 +1,1436 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#include "ixgbe_type.h" +#include "ixgbe_82598.h" +#include "ixgbe_api.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +#define IXGBE_82598_MAX_TX_QUEUES 32 +#define IXGBE_82598_MAX_RX_QUEUES 64 +#define IXGBE_82598_RAR_ENTRIES 16 +#define IXGBE_82598_MC_TBL_SIZE 128 +#define IXGBE_82598_VFT_TBL_SIZE 128 +#define IXGBE_82598_RX_PB_SIZE 512 + +STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg); +STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw); +STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete); +STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *link_up, + bool link_up_wait_to_complete); +STATIC s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +STATIC s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); +STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw); +STATIC void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, + u32 headroom, int strategy); +STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data); +/** + * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout + * @hw: pointer to the HW structure + * + * The defaults for 82598 should be in the range of 50us to 50ms, + * however the hardware default for these parts is 500us to 1ms which is less + * than the 10ms recommended by the pci-e spec. To address this we need to + * increase the value to either 10ms to 250ms for capability version 1 config, + * or 16ms to 55ms for version 2. 
+ **/ +void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) +{ + u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); + u16 pcie_devctl2; + + /* only take action if timeout value is defaulted to 0 */ + if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK) + goto out; + + /* + * if capababilities version is type 1 we can write the + * timeout of 10ms to 250ms through the GCR register + */ + if (!(gcr & IXGBE_GCR_CAP_VER2)) { + gcr |= IXGBE_GCR_CMPL_TMOUT_10ms; + goto out; + } + + /* + * for version 2 capabilities we need to write the config space + * directly in order to set the completion timeout value for + * 16ms to 55ms + */ + pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2); + pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; + IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); +out: + /* disable completion timeout resend */ + gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; + IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); +} + +/** + * ixgbe_init_ops_82598 - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for 82598. + * Does not touch the hardware. + **/ +s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_ops_82598"); + + ret_val = ixgbe_init_phy_ops_generic(hw); + ret_val = ixgbe_init_ops_generic(hw); + + /* PHY */ + phy->ops.init = ixgbe_init_phy_ops_82598; + + /* MAC */ + mac->ops.start_hw = ixgbe_start_hw_82598; + mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_82598; + mac->ops.reset_hw = ixgbe_reset_hw_82598; + mac->ops.get_media_type = ixgbe_get_media_type_82598; + mac->ops.get_supported_physical_layer = + ixgbe_get_supported_physical_layer_82598; + mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598; + mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598; + mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598; + mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598; + + /* RAR, Multicast, VLAN */ + mac->ops.set_vmdq = ixgbe_set_vmdq_82598; + mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598; + mac->ops.set_vfta = ixgbe_set_vfta_82598; + mac->ops.set_vlvf = NULL; + mac->ops.clear_vfta = ixgbe_clear_vfta_82598; + + /* Flow Control */ + mac->ops.fc_enable = ixgbe_fc_enable_82598; + + mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; + mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; + mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE; + mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + + /* SFP+ Module */ + phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598; + phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598; + + /* Link */ + mac->ops.check_link = ixgbe_check_mac_link_82598; + mac->ops.setup_link = ixgbe_setup_mac_link_82598; + mac->ops.flap_tx_laser = NULL; + mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82598; + mac->ops.setup_rxpba = ixgbe_set_rxpba_82598; + + /* Manageability interface */ + mac->ops.set_fw_drv_ver = NULL; + + mac->ops.get_rtrup2tc = NULL; + + return ret_val; +} + +/** + * ixgbe_init_phy_ops_82598 - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. 
+ * + **/ +s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val = IXGBE_SUCCESS; + u16 list_offset, data_offset; + + DEBUGFUNC("ixgbe_init_phy_ops_82598"); + + /* Identify the PHY */ + phy->ops.identify(hw); + + /* Overwrite the link function pointers if copper PHY */ + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { + mac->ops.setup_link = ixgbe_setup_copper_link_82598; + mac->ops.get_link_capabilities = + ixgbe_get_copper_link_capabilities_generic; + } + + switch (hw->phy.type) { + case ixgbe_phy_tn: + phy->ops.setup_link = ixgbe_setup_phy_link_tnx; + phy->ops.check_link = ixgbe_check_phy_link_tnx; + phy->ops.get_firmware_version = + ixgbe_get_phy_firmware_version_tnx; + break; + case ixgbe_phy_nl: + phy->ops.reset = ixgbe_reset_phy_nl; + + /* Call SFP+ identify routine to get the SFP+ module type */ + ret_val = phy->ops.identify_sfp(hw); + if (ret_val != IXGBE_SUCCESS) + goto out; + else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) { + ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + + /* Check to see if SFP+ module is supported */ + ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, + &list_offset, + &data_offset); + if (ret_val != IXGBE_SUCCESS) { + ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + break; + default: + break; + } + +out: + return ret_val; +} + +/** + * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware using the generic start_hw function. + * Disables relaxed ordering Then set pcie completion timeout + * + **/ +s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) +{ + u32 regval; + u32 i; + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_start_hw_82598"); + + ret_val = ixgbe_start_hw_generic(hw); + if (ret_val) + return ret_val; + + /* Disable relaxed ordering */ + for (i = 0; ((i < hw->mac.max_tx_queues) && + (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); + regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); + } + + for (i = 0; ((i < hw->mac.max_rx_queues) && + (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | + IXGBE_DCA_RXCTRL_HEAD_WRO_EN); + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); + } + + /* set the completion timeout for interface */ + ixgbe_set_pcie_completion_timeout(hw); + + return ret_val; +} + +/** + * ixgbe_get_link_capabilities_82598 - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + * + * Determines the link capabilities by reading the AUTOC register. + **/ +STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + s32 status = IXGBE_SUCCESS; + u32 autoc = 0; + + DEBUGFUNC("ixgbe_get_link_capabilities_82598"); + + /* + * Determine link capabilities based on the stored value of AUTOC, + * which represents EEPROM defaults. If AUTOC value has not been + * stored, use the current register value. 
+ */ + if (hw->mac.orig_link_settings_stored) + autoc = hw->mac.orig_autoc; + else + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_1G_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + case IXGBE_AUTOC_LMS_KX4_AN: + case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX_SUPP) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + default: + status = IXGBE_ERR_LINK_SETUP; + break; + } + + return status; +} + +/** + * ixgbe_get_media_type_82598 - Determines media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) +{ + enum ixgbe_media_type media_type; + + DEBUGFUNC("ixgbe_get_media_type_82598"); + + /* Detect if there is a copper PHY attached. */ + switch (hw->phy.type) { + case ixgbe_phy_cu_unknown: + case ixgbe_phy_tn: + media_type = ixgbe_media_type_copper; + goto out; + default: + break; + } + + /* Media type for I82598 is based on device ID */ + switch (hw->device_id) { + case IXGBE_DEV_ID_82598: + case IXGBE_DEV_ID_82598_BX: + /* Default device ID is mezzanine card KX/KX4 */ + media_type = ixgbe_media_type_backplane; + break; + case IXGBE_DEV_ID_82598AF_DUAL_PORT: + case IXGBE_DEV_ID_82598AF_SINGLE_PORT: + case IXGBE_DEV_ID_82598_DA_DUAL_PORT: + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: + case IXGBE_DEV_ID_82598EB_XF_LR: + case IXGBE_DEV_ID_82598EB_SFP_LOM: + media_type = ixgbe_media_type_fiber; + break; + case IXGBE_DEV_ID_82598EB_CX4: + case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: + media_type = ixgbe_media_type_cx4; + break; + case IXGBE_DEV_ID_82598AT: + case IXGBE_DEV_ID_82598AT2: + media_type = ixgbe_media_type_copper; + break; + default: + media_type = ixgbe_media_type_unknown; + break; + } +out: + return media_type; +} + +/** + * ixgbe_fc_enable_82598 - Enable flow control + * @hw: pointer to hardware structure + * + * Enable flow control according to the current settings. + **/ +s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + u32 fctrl_reg; + u32 rmcs_reg; + u32 reg; + u32 fcrtl, fcrth; + u32 link_speed = 0; + int i; + bool link_up; + + DEBUGFUNC("ixgbe_fc_enable_82598"); + + /* Validate the water mark configuration */ + if (!hw->fc.pause_time) { + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* Low water mark of zero causes XOFF floods */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + if (!hw->fc.low_water[i] || + hw->fc.low_water[i] >= hw->fc.high_water[i]) { + DEBUGOUT("Invalid water mark configuration\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + } + } + + /* + * On 82598 having Rx FC on causes resets while doing 1G + * so if it's on turn it off once we know link_speed. For + * more details see 82598 Specification update. 
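/*
 * Illustrative sketch, not part of the ixgbe sources: the comment above is
 * acted on by the code that follows in this diff -- once a 1G link is
 * detected, any requested mode that would enable Rx flow control is
 * downgraded.  A standalone restatement of that mapping (the enum names are
 * invented for this sketch):
 */
enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static enum fc_mode downgrade_fc_for_1g(enum fc_mode requested)
{
	switch (requested) {
	case FC_FULL:
		return FC_TX_PAUSE;	/* keep Tx pause, drop Rx pause      */
	case FC_RX_PAUSE:
		return FC_NONE;		/* Rx-only pause is dropped entirely */
	default:
		return requested;	/* FC_NONE / FC_TX_PAUSE unchanged   */
	}
}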
+ */ + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) { + switch (hw->fc.requested_mode) { + case ixgbe_fc_full: + hw->fc.requested_mode = ixgbe_fc_tx_pause; + break; + case ixgbe_fc_rx_pause: + hw->fc.requested_mode = ixgbe_fc_none; + break; + default: + /* no change */ + break; + } + } + + /* Negotiate the fc mode to use */ + ixgbe_fc_autoneg(hw); + + /* Disable any previous flow control settings */ + fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); + + rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); + rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); + + /* + * The possible values of fc.current_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.current_mode) { + case ixgbe_fc_none: + /* + * Flow control is disabled by software override or autoneg. + * The code below will actually disable it in the HW. + */ + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + fctrl_reg |= IXGBE_FCTRL_RFCE; + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; + break; + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + fctrl_reg |= IXGBE_FCTRL_RFCE; + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; + } + + /* Set 802.3x based flow control settings. */ + fctrl_reg |= IXGBE_FCTRL_DPF; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); + IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); + + /* Set up and enable Rx high/low water mark thresholds, enable XON. */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; + fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth); + } else { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); + } + + } + + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time * 0x00010001; + for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); + +out: + return ret_val; +} + +/** + * ixgbe_start_mac_link_82598 - Configures MAC link settings + * @hw: pointer to hardware structure + * + * Configures link settings based on values in the ixgbe_hw struct. + * Restarts the link. Performs autonegotiation if needed. 
+ **/ +STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete) +{ + u32 autoc_reg; + u32 links_reg; + u32 i; + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_start_mac_link_82598"); + + /* Restart link */ + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + + /* Only poll for autoneg to complete if specified to do so */ + if (autoneg_wait_to_complete) { + if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_AN || + (autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { + links_reg = 0; /* Just in case Autoneg time = 0 */ + for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + if (links_reg & IXGBE_LINKS_KX_AN_COMP) + break; + msec_delay(100); + } + if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { + status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; + DEBUGOUT("Autonegotiation did not complete.\n"); + } + } + } + + /* Add delay to filter out noises during initial link setup */ + msec_delay(50); + + return status; +} + +/** + * ixgbe_validate_link_ready - Function looks for phy link + * @hw: pointer to hardware structure + * + * Function indicates success when phy link is available. If phy is not ready + * within 5 seconds of MAC indicating link, the function returns error. + **/ +STATIC s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) +{ + u32 timeout; + u16 an_reg; + + if (hw->device_id != IXGBE_DEV_ID_82598AT2) + return IXGBE_SUCCESS; + + for (timeout = 0; + timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) { + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg); + + if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) && + (an_reg & IXGBE_MII_AUTONEG_LINK_UP)) + break; + + msec_delay(100); + } + + if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) { + DEBUGOUT("Link was indicated but link is down\n"); + return IXGBE_ERR_LINK_SETUP; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_check_mac_link_82598 - Get link/speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true is link is up, false otherwise + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *link_up, + bool link_up_wait_to_complete) +{ + u32 links_reg; + u32 i; + u16 link_reg, adapt_comp_reg; + + DEBUGFUNC("ixgbe_check_mac_link_82598"); + + /* + * SERDES PHY requires us to read link status from undocumented + * register 0xC79F. Bit 0 set indicates link is up/ready; clear + * indicates link down. OxC00C is read to check that the XAUI lanes + * are active. Bit 0 clear indicates active; set indicates inactive. 
+ */ + if (hw->phy.type == ixgbe_phy_nl) { + hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); + hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); + hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV, + &adapt_comp_reg); + if (link_up_wait_to_complete) { + for (i = 0; i < hw->mac.max_link_up_time; i++) { + if ((link_reg & 1) && + ((adapt_comp_reg & 1) == 0)) { + *link_up = true; + break; + } else { + *link_up = false; + } + msec_delay(100); + hw->phy.ops.read_reg(hw, 0xC79F, + IXGBE_TWINAX_DEV, + &link_reg); + hw->phy.ops.read_reg(hw, 0xC00C, + IXGBE_TWINAX_DEV, + &adapt_comp_reg); + } + } else { + if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0)) + *link_up = true; + else + *link_up = false; + } + + if (*link_up == false) + goto out; + } + + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + if (link_up_wait_to_complete) { + for (i = 0; i < hw->mac.max_link_up_time; i++) { + if (links_reg & IXGBE_LINKS_UP) { + *link_up = true; + break; + } else { + *link_up = false; + } + msec_delay(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + } + } else { + if (links_reg & IXGBE_LINKS_UP) + *link_up = true; + else + *link_up = false; + } + + if (links_reg & IXGBE_LINKS_SPEED) + *speed = IXGBE_LINK_SPEED_10GB_FULL; + else + *speed = IXGBE_LINK_SPEED_1GB_FULL; + + if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) && + (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS)) + *link_up = false; + +out: + return IXGBE_SUCCESS; +} + +/** + * ixgbe_setup_mac_link_82598 - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. + **/ +STATIC s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + bool autoneg = false; + s32 status = IXGBE_SUCCESS; + ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; + u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 autoc = curr_autoc; + u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; + + DEBUGFUNC("ixgbe_setup_mac_link_82598"); + + /* Check to see if speed passed in is supported. */ + ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); + speed &= link_capabilities; + + if (speed == IXGBE_LINK_SPEED_UNKNOWN) + status = IXGBE_ERR_LINK_SETUP; + + /* Set KX4/KX support according to speed requested */ + else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN || + link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { + autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK; + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + autoc |= IXGBE_AUTOC_KX4_SUPP; + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + autoc |= IXGBE_AUTOC_KX_SUPP; + if (autoc != curr_autoc) + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); + } + + if (status == IXGBE_SUCCESS) { + /* + * Setup and restart the link based on the new values in + * ixgbe_hw This will write the AUTOC register based on the new + * stored values + */ + status = ixgbe_start_mac_link_82598(hw, + autoneg_wait_to_complete); + } + + return status; +} + + +/** + * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true if waiting is needed to complete + * + * Sets the link speed in the AUTOC register in the MAC and restarts link. 
+ **/ +STATIC s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 status; + + DEBUGFUNC("ixgbe_setup_copper_link_82598"); + + /* Setup the PHY according to input speed */ + status = hw->phy.ops.setup_link_speed(hw, speed, + autoneg_wait_to_complete); + /* Set up MAC */ + ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); + + return status; +} + +/** + * ixgbe_reset_hw_82598 - Performs hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks and + * clears all interrupts, performing a PHY reset, and performing a link (MAC) + * reset. + **/ +STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + s32 phy_status = IXGBE_SUCCESS; + u32 ctrl; + u32 gheccr; + u32 i; + u32 autoc; + u8 analog_val; + + DEBUGFUNC("ixgbe_reset_hw_82598"); + + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status != IXGBE_SUCCESS) + goto reset_hw_out; + + /* + * Power up the Atlas Tx lanes if they are currently powered down. + * Atlas Tx lanes are powered down for MAC loopback tests, but + * they are not automatically restored on reset. + */ + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); + if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { + /* Enable Tx Atlas so packets can be transmitted again */ + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, + &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, + analog_val); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, + &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, + analog_val); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, + &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, + analog_val); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, + &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, + analog_val); + } + + /* Reset PHY */ + if (hw->phy.reset_disable == false) { + /* PHY ops must be identified and initialized prior to reset */ + + /* Init PHY and function pointers, perform SFP setup */ + phy_status = hw->phy.ops.init(hw); + if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto reset_hw_out; + if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT) + goto mac_reset_top; + + hw->phy.ops.reset(hw); + } + +mac_reset_top: + /* + * Issue global reset to the MAC. This needs to be a SW reset. + * If link reset is used, it might reset the MAC when mng is using it + */ + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST; + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + + /* Poll for reset bit to self-clear indicating reset is complete */ + for (i = 0; i < 10; i++) { + usec_delay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST)) + break; + } + if (ctrl & IXGBE_CTRL_RST) { + status = IXGBE_ERR_RESET_FAILED; + DEBUGOUT("Reset polling failed to complete.\n"); + } + + msec_delay(50); + + /* + * Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to allow time + * for any pending HW events to complete. 
+ */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); + gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6)); + IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr); + + /* + * Store the original AUTOC value if it has not been + * stored off yet. Otherwise restore the stored original + * AUTOC value since the reset operation sets back to deaults. + */ + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + if (hw->mac.orig_link_settings_stored == false) { + hw->mac.orig_autoc = autoc; + hw->mac.orig_link_settings_stored = true; + } else if (autoc != hw->mac.orig_autoc) { + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); + } + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table + */ + hw->mac.ops.init_rx_addrs(hw); + +reset_hw_out: + if (phy_status != IXGBE_SUCCESS) + status = phy_status; + + return status; +} + +/** + * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq set index + **/ +s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("ixgbe_set_vmdq_82598"); + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + DEBUGOUT1("RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); + rar_high &= ~IXGBE_RAH_VIND_MASK; + rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK); + IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); + return IXGBE_SUCCESS; +} + +/** + * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq clear index (not used in 82598, but elsewhere) + **/ +STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + UNREFERENCED_1PARAMETER(vmdq); + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + DEBUGOUT1("RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); + if (rar_high & IXGBE_RAH_VIND_MASK) { + rar_high &= ~IXGBE_RAH_VIND_MASK; + IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_vfta_82598 - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFTA + * @vlan_on: boolean flag to turn on/off VLAN in VFTA + * + * Turn on/off specified VLAN in the VLAN filter table. 
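/*
 * Illustrative sketch, not part of the ixgbe sources: the VFTA routine that
 * follows locates a VLAN id inside the filter tables with a little bit
 * arithmetic.  Restated standalone (the struct and field names are made up
 * for this sketch):
 */
#include <stdint.h>

struct vfta_position {
	uint32_t reg_index;	/* which 32-bit VFTA register: vlan / 32       */
	uint32_t bit_index;	/* which bit inside it:        vlan % 32       */
	uint32_t vind_byte;	/* which VFTAVIND byte array:  bits 4:3        */
	uint32_t vind_shift;	/* nibble offset for the queue index           */
};

static struct vfta_position locate_vlan(uint16_t vlan /* 0..4095 */)
{
	struct vfta_position p;

	p.reg_index  = (vlan >> 5) & 0x7F;	/* upper seven bits           */
	p.bit_index  = vlan & 0x1F;		/* lower five bits            */
	p.vind_byte  = (vlan >> 3) & 0x03;	/* bits 4:3 select byte array */
	p.vind_shift = (vlan & 0x7) << 2;	/* lower 3 bits pick a nibble */
	return p;
}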
+ **/ +s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on) +{ + u32 regindex; + u32 bitindex; + u32 bits; + u32 vftabyte; + + DEBUGFUNC("ixgbe_set_vfta_82598"); + + if (vlan > 4095) + return IXGBE_ERR_PARAM; + + /* Determine 32-bit word position in array */ + regindex = (vlan >> 5) & 0x7F; /* upper seven bits */ + + /* Determine the location of the (VMD) queue index */ + vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */ + bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */ + + /* Set the nibble for VMD queue index */ + bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex)); + bits &= (~(0x0F << bitindex)); + bits |= (vind << bitindex); + IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits); + + /* Determine the location of the bit for this VLAN id */ + bitindex = vlan & 0x1F; /* lower five bits */ + + bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); + if (vlan_on) + /* Turn on this VLAN id */ + bits |= (1 << bitindex); + else + /* Turn off this VLAN id */ + bits &= ~(1 << bitindex); + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_clear_vfta_82598 - Clear VLAN filter table + * @hw: pointer to hardware structure + * + * Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) +{ + u32 offset; + u32 vlanbyte; + + DEBUGFUNC("ixgbe_clear_vfta_82598"); + + for (offset = 0; offset < hw->mac.vft_size; offset++) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); + + for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) + for (offset = 0; offset < hw->mac.vft_size; offset++) + IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), + 0); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register + * @hw: pointer to hardware structure + * @reg: analog register to read + * @val: read value + * + * Performs read operation to Atlas analog register specified. + **/ +s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) +{ + u32 atlas_ctl; + + DEBUGFUNC("ixgbe_read_analog_reg8_82598"); + + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, + IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); + IXGBE_WRITE_FLUSH(hw); + usec_delay(10); + atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); + *val = (u8)atlas_ctl; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register + * @hw: pointer to hardware structure + * @reg: atlas register to write + * @val: value to write + * + * Performs write operation to Atlas analog register specified. + **/ +s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) +{ + u32 atlas_ctl; + + DEBUGFUNC("ixgbe_write_analog_reg8_82598"); + + atlas_ctl = (reg << 8) | val; + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); + IXGBE_WRITE_FLUSH(hw); + usec_delay(10); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface. + * @hw: pointer to hardware structure + * @dev_addr: address to read from + * @byte_offset: byte offset to read from dev_addr + * @eeprom_data: value read + * + * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. 
+ **/ +STATIC s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, + u8 byte_offset, u8 *eeprom_data) +{ + s32 status = IXGBE_SUCCESS; + u16 sfp_addr = 0; + u16 sfp_data = 0; + u16 sfp_stat = 0; + u16 gssr; + u32 i; + + DEBUGFUNC("ixgbe_read_i2c_phy_82598"); + + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) + gssr = IXGBE_GSSR_PHY1_SM; + else + gssr = IXGBE_GSSR_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS) + return IXGBE_ERR_SWFW_SYNC; + + if (hw->phy.type == ixgbe_phy_nl) { + /* + * NetLogic phy SDA/SCL registers are at addresses 0xC30A to + * 0xC30D. These registers are used to talk to the SFP+ + * module's EEPROM through the SDA/SCL (I2C) interface. + */ + sfp_addr = (dev_addr << 8) + byte_offset; + sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); + hw->phy.ops.write_reg_mdi(hw, + IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + sfp_addr); + + /* Poll status */ + for (i = 0; i < 100; i++) { + hw->phy.ops.read_reg_mdi(hw, + IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &sfp_stat); + sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK; + if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS) + break; + msec_delay(10); + } + + if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) { + DEBUGOUT("EEPROM read did not pass.\n"); + status = IXGBE_ERR_SFP_NOT_PRESENT; + goto out; + } + + /* Read data */ + hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data); + + *eeprom_data = (u8)(sfp_data >> 8); + } else { + status = IXGBE_ERR_PHY; + } + +out: + hw->mac.ops.release_swfw_sync(hw, gssr); + return status; +} + +/** + * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface. + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read + * + * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data) +{ + return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR, + byte_offset, eeprom_data); +} + +/** + * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface. + * @hw: pointer to hardware structure + * @byte_offset: byte offset at address 0xA2 + * @eeprom_data: value read + * + * Performs 8 byte read operation to SFP module's SFF-8472 data over I2C + **/ +STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data) +{ + return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2, + byte_offset, sff8472_data); +} + +/** + * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. 
+ **/ +u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) +{ + u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; + u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; + u16 ext_ability = 0; + + DEBUGFUNC("ixgbe_get_supported_physical_layer_82598"); + + hw->phy.ops.identify(hw); + + /* Copper PHY must be checked before AUTOC LMS to determine correct + * physical layer because 10GBase-T PHYs use LMS = KX4/KX */ + switch (hw->phy.type) { + case ixgbe_phy_tn: + case ixgbe_phy_cu_unknown: + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); + if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + goto out; + default: + break; + } + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_AN: + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + if (pma_pmd_1g == IXGBE_AUTOC_1G_KX) + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX; + else + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX; + break; + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; + else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + else /* XAUI */ + physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + break; + case IXGBE_AUTOC_LMS_KX4_AN: + case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: + if (autoc & IXGBE_AUTOC_KX_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + break; + default: + break; + } + + if (hw->phy.type == ixgbe_phy_nl) { + hw->phy.ops.identify_sfp(hw); + + switch (hw->phy.sfp_type) { + case ixgbe_sfp_type_da_cu: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; + break; + case ixgbe_sfp_type_sr: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + break; + case ixgbe_sfp_type_lr: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + break; + default: + physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + break; + } + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_82598_DA_DUAL_PORT: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; + break; + case IXGBE_DEV_ID_82598AF_DUAL_PORT: + case IXGBE_DEV_ID_82598AF_SINGLE_PORT: + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + break; + case IXGBE_DEV_ID_82598EB_XF_LR: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + break; + default: + break; + } + +out: + return physical_layer; +} + +/** + * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple + * port devices. + * @hw: pointer to the HW structure + * + * Calls common function and corrects issue with some single port devices + * that enable LAN1 but not LAN0. 
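/*
 * Illustrative sketch, not part of the ixgbe sources: the LAN-id workaround
 * that follows forces the function number to 0 only when the EEPROM control
 * word says LAN0 is disabled outright.  The predicate, restated with
 * stand-in bit masks (the values are placeholders, not the driver's macros):
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_LAN_DISABLE		0x0002u	/* placeholder for ..._LAN_DISABLE    */
#define EX_DUMMY_ENABLE		0x0008u	/* placeholder for ..._DUMMY_ENABLE   */
#define EX_DISABLE_SELECT	0x0010u	/* placeholder for ..._DISABLE_SELECT */

static bool lan0_completely_disabled(uint16_t pci_ctrl2)
{
	return (pci_ctrl2 & EX_LAN_DISABLE) &&
	       !(pci_ctrl2 & EX_DISABLE_SELECT) &&
	       !(pci_ctrl2 & EX_DUMMY_ENABLE);
}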
+ **/ +void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) +{ + struct ixgbe_bus_info *bus = &hw->bus; + u16 pci_gen = 0; + u16 pci_ctrl2 = 0; + + DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598"); + + ixgbe_set_lan_id_multi_port_pcie(hw); + + /* check if LAN0 is disabled */ + hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen); + if ((pci_gen != 0) && (pci_gen != 0xFFFF)) { + + hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2); + + /* if LAN0 is completely disabled force function to 0 */ + if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) && + !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) && + !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) { + + bus->func = 0; + } + } +} + +/** + * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering + * @hw: pointer to hardware structure + * + **/ +void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw) +{ + u32 regval; + u32 i; + + DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598"); + + /* Enable relaxed ordering */ + for (i = 0; ((i < hw->mac.max_tx_queues) && + (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); + regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); + } + + for (i = 0; ((i < hw->mac.max_rx_queues) && + (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN | + IXGBE_DCA_RXCTRL_HEAD_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); + } + +} + +/** + * ixgbe_set_rxpba_82598 - Initialize RX packet buffer + * @hw: pointer to hardware structure + * @num_pb: number of packet buffers to allocate + * @headroom: reserve n KB of headroom + * @strategy: packet buffer allocation strategy + **/ +STATIC void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, + u32 headroom, int strategy) +{ + u32 rxpktsize = IXGBE_RXPBSIZE_64KB; + u8 i = 0; + UNREFERENCED_1PARAMETER(headroom); + + if (!num_pb) + return; + + /* Setup Rx packet buffer sizes */ + switch (strategy) { + case PBA_STRATEGY_WEIGHTED: + /* Setup the first four at 80KB */ + rxpktsize = IXGBE_RXPBSIZE_80KB; + for (; i < 4; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + /* Setup the last four at 48KB...don't re-init i */ + rxpktsize = IXGBE_RXPBSIZE_48KB; + /* Fall Through */ + case PBA_STRATEGY_EQUAL: + default: + /* Divide the remaining Rx packet buffer evenly among the TCs */ + for (; i < IXGBE_MAX_PACKET_BUFFERS; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + break; + } + + /* Setup Tx packet buffer sizes */ + for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB); +} + +/** + * ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit + * @hw: pointer to hardware structure + * @regval: register value to write to RXCTRL + * + * Enables the Rx DMA unit + **/ +s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval) +{ + DEBUGFUNC("ixgbe_enable_rx_dma_82598"); + + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); + + return IXGBE_SUCCESS; +} diff --git a/drivers/net/ixgbe/base/ixgbe_82598.h b/drivers/net/ixgbe/base/ixgbe_82598.h new file mode 100644 index 00000000..89dd11a5 --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_82598.h @@ -0,0 +1,52 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _IXGBE_82598_H_ +#define _IXGBE_82598_H_ + +u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw); +s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw); +void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw); +s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on); +s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val); +s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val); +s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data); +u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw); +s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw); +void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw); +void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval); +#endif /* _IXGBE_82598_H_ */ diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c b/drivers/net/ixgbe/base/ixgbe_82599.c new file mode 100644 index 00000000..154c1f10 --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_82599.c @@ -0,0 +1,2608 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. 
Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#include "ixgbe_type.h" +#include "ixgbe_82599.h" +#include "ixgbe_api.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +#define IXGBE_82599_MAX_TX_QUEUES 128 +#define IXGBE_82599_MAX_RX_QUEUES 128 +#define IXGBE_82599_RAR_ENTRIES 128 +#define IXGBE_82599_MC_TBL_SIZE 128 +#define IXGBE_82599_VFT_TBL_SIZE 128 +#define IXGBE_82599_RX_PB_SIZE 512 + +STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); +STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, + u16 offset, u16 *data); +STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); + +void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + DEBUGFUNC("ixgbe_init_mac_link_ops_82599"); + + /* + * enable the laser control functions for SFP+ fiber + * and MNG not enabled + */ + if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) && + !ixgbe_mng_enabled(hw)) { + mac->ops.disable_tx_laser = + ixgbe_disable_tx_laser_multispeed_fiber; + mac->ops.enable_tx_laser = + ixgbe_enable_tx_laser_multispeed_fiber; + mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber; + + } else { + mac->ops.disable_tx_laser = NULL; + mac->ops.enable_tx_laser = NULL; + mac->ops.flap_tx_laser = NULL; + } + + if (hw->phy.multispeed_fiber) { + /* Set up dual speed SFP+ support */ + mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; + mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599; + mac->ops.set_rate_select_speed = + ixgbe_set_hard_rate_select_speed; + } else { + if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) && + (hw->phy.smart_speed == ixgbe_smart_speed_auto || + hw->phy.smart_speed == ixgbe_smart_speed_on) && + !ixgbe_verify_lesm_fw_enabled_82599(hw)) { + mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed; + } else { + mac->ops.setup_link = ixgbe_setup_mac_link_82599; + } + } +} + +/** + * ixgbe_init_phy_ops_82599 - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. 
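/*
 * Illustrative sketch, not part of the ixgbe sources: the 82599 link-ops
 * init above picks one of three setup_link strategies depending on the
 * module and firmware state.  A standalone restatement of that choice (the
 * enum and parameter names are invented for this sketch):
 */
#include <stdbool.h>

enum link_setup_strategy {
	SETUP_MULTISPEED_FIBER,	/* dual-speed SFP+ modules             */
	SETUP_SMARTSPEED,	/* backplane with SmartSpeed, no LESM  */
	SETUP_DEFAULT_82599	/* plain 82599 MAC link setup          */
};

static enum link_setup_strategy
choose_setup_link(bool multispeed_fiber, bool backplane,
		  bool smart_speed_auto_or_on, bool lesm_fw_enabled)
{
	if (multispeed_fiber)
		return SETUP_MULTISPEED_FIBER;
	if (backplane && smart_speed_auto_or_on && !lesm_fw_enabled)
		return SETUP_SMARTSPEED;
	return SETUP_DEFAULT_82599;
}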
+ * + **/ +s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val = IXGBE_SUCCESS; + u32 esdp; + + DEBUGFUNC("ixgbe_init_phy_ops_82599"); + + if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) { + /* Store flag indicating I2C bus access control unit. */ + hw->phy.qsfp_shared_i2c_bus = TRUE; + + /* Initialize access to QSFP+ I2C bus */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp |= IXGBE_ESDP_SDP0_DIR; + esdp &= ~IXGBE_ESDP_SDP1_DIR; + esdp &= ~IXGBE_ESDP_SDP0; + esdp &= ~IXGBE_ESDP_SDP0_NATIVE; + esdp &= ~IXGBE_ESDP_SDP1_NATIVE; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + + phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599; + phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599; + } + /* Identify the PHY or SFP module */ + ret_val = phy->ops.identify(hw); + if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto init_phy_ops_out; + + /* Setup function pointers based on detected SFP module and speeds */ + ixgbe_init_mac_link_ops_82599(hw); + if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) + hw->phy.ops.reset = NULL; + + /* If copper media, overwrite with copper function pointers */ + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { + mac->ops.setup_link = ixgbe_setup_copper_link_82599; + mac->ops.get_link_capabilities = + ixgbe_get_copper_link_capabilities_generic; + } + + /* Set necessary function pointers based on PHY type */ + switch (hw->phy.type) { + case ixgbe_phy_tn: + phy->ops.setup_link = ixgbe_setup_phy_link_tnx; + phy->ops.check_link = ixgbe_check_phy_link_tnx; + phy->ops.get_firmware_version = + ixgbe_get_phy_firmware_version_tnx; + break; + default: + break; + } +init_phy_ops_out: + return ret_val; +} + +s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + u16 list_offset, data_offset, data_value; + + DEBUGFUNC("ixgbe_setup_sfp_modules_82599"); + + if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { + ixgbe_init_mac_link_ops_82599(hw); + + hw->phy.ops.reset = NULL; + + ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, + &data_offset); + if (ret_val != IXGBE_SUCCESS) + goto setup_sfp_out; + + /* PHY config will finish before releasing the semaphore */ + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (ret_val != IXGBE_SUCCESS) { + ret_val = IXGBE_ERR_SWFW_SYNC; + goto setup_sfp_out; + } + + if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) + goto setup_sfp_err; + while (data_value != 0xffff) { + IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value); + IXGBE_WRITE_FLUSH(hw); + if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) + goto setup_sfp_err; + } + + /* Release the semaphore */ + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + /* Delay obtaining semaphore again to allow FW access + * prot_autoc_write uses the semaphore too. 
+ */ + msec_delay(hw->eeprom.semaphore_delay); + + /* Restart DSP and set SFI mode */ + ret_val = hw->mac.ops.prot_autoc_write(hw, + hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL, + false); + + if (ret_val) { + DEBUGOUT("sfp module setup not complete\n"); + ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; + goto setup_sfp_out; + } + + } + +setup_sfp_out: + return ret_val; + +setup_sfp_err: + /* Release the semaphore */ + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + /* Delay obtaining semaphore again to allow FW access */ + msec_delay(hw->eeprom.semaphore_delay); + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", data_offset); + return IXGBE_ERR_PHY; +} + +/** + * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read + * @hw: pointer to hardware structure + * @locked: Return the if we locked for this read. + * @reg_val: Value we read from AUTOC + * + * For this part (82599) we need to wrap read-modify-writes with a possible + * FW/SW lock. It is assumed this lock will be freed with the next + * prot_autoc_write_82599(). + */ +s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) +{ + s32 ret_val; + + *locked = false; + /* If LESM is on then we need to hold the SW/FW semaphore. */ + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (ret_val != IXGBE_SUCCESS) + return IXGBE_ERR_SWFW_SYNC; + + *locked = true; + } + + *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); + return IXGBE_SUCCESS; +} + +/** + * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write + * @hw: pointer to hardware structure + * @reg_val: value to write to AUTOC + * @locked: bool to indicate whether the SW/FW lock was already taken by + * previous proc_autoc_read_82599. + * + * This part (82599) may need to hold the SW/FW lock around all writes to + * AUTOC. Likewise after a write we need to do a pipeline reset. + */ +s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked) +{ + s32 ret_val = IXGBE_SUCCESS; + + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + goto out; + + /* We only need to get the lock if: + * - We didn't do it already (in the read part of a read-modify-write) + * - LESM is enabled. + */ + if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) { + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (ret_val != IXGBE_SUCCESS) + return IXGBE_ERR_SWFW_SYNC; + + locked = true; + } + + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); + ret_val = ixgbe_reset_pipeline_82599(hw); + +out: + /* Free the SW/FW semaphore as we either grabbed it here or + * already had it when this function was called. + */ + if (locked) + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + + return ret_val; +} + +/** + * ixgbe_init_ops_82599 - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for 82599. + * Does not touch the hardware. 
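/*
 * Illustrative sketch, not part of the ixgbe sources: the prot_autoc_read /
 * prot_autoc_write pair above implements an "acquire only if needed,
 * release whenever held" guard around a protected register write.  The
 * shape of that pattern, with hypothetical lock hooks:
 */
#include <stdbool.h>

struct guarded_ctx {
	bool needs_lock;		/* e.g. LESM firmware is active     */
	int  (*acquire)(void *hw);
	void (*release)(void *hw);
	void (*do_write)(void *hw);	/* the protected operation          */
};

static int guarded_write(struct guarded_ctx *c, void *hw, bool already_locked)
{
	bool locked_here = false;

	if (!already_locked && c->needs_lock) {
		if (c->acquire(hw) != 0)
			return -1;	/* could not take the semaphore     */
		locked_here = true;
	}

	c->do_write(hw);

	/* Release whether the lock came from us or from the earlier read. */
	if (locked_here || already_locked)
		c->release(hw);
	return 0;
}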
+ **/ + +s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_ops_82599"); + + ixgbe_init_phy_ops_generic(hw); + ret_val = ixgbe_init_ops_generic(hw); + + /* PHY */ + phy->ops.identify = ixgbe_identify_phy_82599; + phy->ops.init = ixgbe_init_phy_ops_82599; + + /* MAC */ + mac->ops.reset_hw = ixgbe_reset_hw_82599; + mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2; + mac->ops.get_media_type = ixgbe_get_media_type_82599; + mac->ops.get_supported_physical_layer = + ixgbe_get_supported_physical_layer_82599; + mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic; + mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic; + mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599; + mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599; + mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599; + mac->ops.start_hw = ixgbe_start_hw_82599; + mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic; + mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic; + mac->ops.get_device_caps = ixgbe_get_device_caps_generic; + mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic; + mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic; + mac->ops.prot_autoc_read = prot_autoc_read_82599; + mac->ops.prot_autoc_write = prot_autoc_write_82599; + + /* RAR, Multicast, VLAN */ + mac->ops.set_vmdq = ixgbe_set_vmdq_generic; + mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic; + mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic; + mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic; + mac->rar_highwater = 1; + mac->ops.set_vfta = ixgbe_set_vfta_generic; + mac->ops.set_vlvf = ixgbe_set_vlvf_generic; + mac->ops.clear_vfta = ixgbe_clear_vfta_generic; + mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic; + mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599; + mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing; + mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing; + + /* Link */ + mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599; + mac->ops.check_link = ixgbe_check_mac_link_generic; + mac->ops.setup_rxpba = ixgbe_set_rxpba_generic; + ixgbe_init_mac_link_ops_82599(hw); + + mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; + mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; + mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE; + mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + + mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)) + & IXGBE_FWSM_MODE_MASK); + + hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf; + + /* EEPROM */ + eeprom->ops.read = ixgbe_read_eeprom_82599; + eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599; + + /* Manageability interface */ + mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic; + + mac->ops.get_thermal_sensor_data = + ixgbe_get_thermal_sensor_data_generic; + mac->ops.init_thermal_sensor_thresh = + ixgbe_init_thermal_sensor_thresh_generic; + + mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; + + return ret_val; +} + +/** + * ixgbe_get_link_capabilities_82599 - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + * + * Determines the 
link capabilities by reading the AUTOC register. + **/ +s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + s32 status = IXGBE_SUCCESS; + u32 autoc = 0; + + DEBUGFUNC("ixgbe_get_link_capabilities_82599"); + + + /* Check if 1G SFP module. */ + if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + goto out; + } + + /* + * Determine link capabilities based on the stored value of AUTOC, + * which represents EEPROM defaults. If AUTOC value has not + * been stored, use the current register values. + */ + if (hw->mac.orig_link_settings_stored) + autoc = hw->mac.orig_autoc; + else + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_1G_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + case IXGBE_AUTOC_LMS_10G_SERIAL: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_KX4_KX_KR: + case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + if (autoc & IXGBE_AUTOC_KR_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX_SUPP) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: + *speed = IXGBE_LINK_SPEED_100_FULL; + if (autoc & IXGBE_AUTOC_KR_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX_SUPP) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + case IXGBE_AUTOC_LMS_SGMII_1G_100M: + *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL; + *autoneg = false; + break; + + default: + status = IXGBE_ERR_LINK_SETUP; + goto out; + break; + } + + if (hw->phy.multispeed_fiber) { + *speed |= IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + + /* QSFP must not enable full auto-negotiation + * Limited autoneg is enabled at 1G + */ + if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp) + *autoneg = false; + else + *autoneg = true; + } + +out: + return status; +} + +/** + * ixgbe_get_media_type_82599 - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) +{ + enum ixgbe_media_type media_type; + + DEBUGFUNC("ixgbe_get_media_type_82599"); + + /* Detect if there is a copper PHY attached. 
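/*
 * Illustrative sketch, not part of the ixgbe sources: in the AUTOC decoding
 * above, the reported speed is a bitmask that is built up from the
 * individual KR/KX4/KX support bits rather than a single value.  Standalone
 * restatement with placeholder speed flags:
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_SPEED_1GB	0x0020u	/* placeholder speed flags */
#define EX_SPEED_10GB	0x0080u

static uint32_t decode_kx_speeds(bool kr_supp, bool kx4_supp, bool kx_supp)
{
	uint32_t speed = 0;		/* "unknown" until a bit is set */

	if (kr_supp)
		speed |= EX_SPEED_10GB;
	if (kx4_supp)
		speed |= EX_SPEED_10GB;	/* same 10G flag as KR */
	if (kx_supp)
		speed |= EX_SPEED_1GB;
	return speed;
}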
*/ + switch (hw->phy.type) { + case ixgbe_phy_cu_unknown: + case ixgbe_phy_tn: + media_type = ixgbe_media_type_copper; + goto out; + default: + break; + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_82599_KX4: + case IXGBE_DEV_ID_82599_KX4_MEZZ: + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: + case IXGBE_DEV_ID_82599_KR: + case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: + case IXGBE_DEV_ID_82599_XAUI_LOM: + /* Default device ID is mezzanine card KX/KX4 */ + media_type = ixgbe_media_type_backplane; + break; + case IXGBE_DEV_ID_82599_SFP: + case IXGBE_DEV_ID_82599_SFP_FCOE: + case IXGBE_DEV_ID_82599_SFP_EM: + case IXGBE_DEV_ID_82599_SFP_SF2: + case IXGBE_DEV_ID_82599_SFP_SF_QP: + case IXGBE_DEV_ID_82599EN_SFP: + media_type = ixgbe_media_type_fiber; + break; + case IXGBE_DEV_ID_82599_CX4: + media_type = ixgbe_media_type_cx4; + break; + case IXGBE_DEV_ID_82599_T3_LOM: + media_type = ixgbe_media_type_copper; + break; + case IXGBE_DEV_ID_82599_LS: + media_type = ixgbe_media_type_fiber_lco; + break; + case IXGBE_DEV_ID_82599_QSFP_SF_QP: + media_type = ixgbe_media_type_fiber_qsfp; + break; + default: + media_type = ixgbe_media_type_unknown; + break; + } +out: + return media_type; +} + +/** + * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3 + * @hw: pointer to hardware structure + * + * Disables link during D3 power down sequence. + * + **/ +void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) +{ + u32 autoc2_reg; + u16 ee_ctrl_2 = 0; + + DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599"); + ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); + + if (!ixgbe_mng_present(hw) && !hw->wol_enabled && + ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) { + autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg); + } +} + +/** + * ixgbe_start_mac_link_82599 - Setup MAC link settings + * @hw: pointer to hardware structure + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Configures link settings based on values in the ixgbe_hw struct. + * Restarts the link. Performs autonegotiation if needed. + **/ +s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete) +{ + u32 autoc_reg; + u32 links_reg; + u32 i; + s32 status = IXGBE_SUCCESS; + bool got_lock = false; + + DEBUGFUNC("ixgbe_start_mac_link_82599"); + + + /* reset_pipeline requires us to hold this lock as it writes to + * AUTOC. 
+ */ + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { + status = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (status != IXGBE_SUCCESS) + goto out; + + got_lock = true; + } + + /* Restart link */ + ixgbe_reset_pipeline_82599(hw); + + if (got_lock) + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + + /* Only poll for autoneg to complete if specified to do so */ + if (autoneg_wait_to_complete) { + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_KX_KR || + (autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || + (autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { + links_reg = 0; /* Just in case Autoneg time = 0 */ + for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + if (links_reg & IXGBE_LINKS_KX_AN_COMP) + break; + msec_delay(100); + } + if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { + status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; + DEBUGOUT("Autoneg did not complete.\n"); + } + } + } + + /* Add delay to filter out noises during initial link setup */ + msec_delay(50); + +out: + return status; +} + +/** + * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser + * @hw: pointer to hardware structure + * + * The base drivers may require better control over SFP+ module + * PHY states. This includes selectively shutting down the Tx + * laser on the PHY, effectively halting physical link. + **/ +void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +{ + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + return; + + /* Disable Tx laser; allow 100us to go dark per spec */ + esdp_reg |= IXGBE_ESDP_SDP3; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + usec_delay(100); +} + +/** + * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser + * @hw: pointer to hardware structure + * + * The base drivers may require better control over SFP+ module + * PHY states. This includes selectively turning on the Tx + * laser on the PHY, effectively starting physical link. + **/ +void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +{ + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + + /* Enable Tx laser; allow 100ms to light up */ + esdp_reg &= ~IXGBE_ESDP_SDP3; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + msec_delay(100); +} + +/** + * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser + * @hw: pointer to hardware structure + * + * When the driver changes the link speeds that it can support, + * it sets autotry_restart to true to indicate that we need to + * initiate a new autotry session with the link partner. To do + * so, we set the speed then disable and re-enable the Tx laser, to + * alert the link partner that it also needs to restart autotry on its + * end. This is consistent with true clause 37 autoneg, which also + * involves a loss of signal. 
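+ * The flap is performed with the disable/enable helpers above, which
+ * allow 100 us for the laser to go dark and 100 ms for it to light up,
+ * and only when autotry_restart is set and manageability firmware has
+ * not blocked the reset.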
+ **/ +void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +{ + DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber"); + + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + return; + + if (hw->mac.autotry_restart) { + ixgbe_disable_tx_laser_multispeed_fiber(hw); + ixgbe_enable_tx_laser_multispeed_fiber(hw); + hw->mac.autotry_restart = false; + } +} + +/** + * ixgbe_set_hard_rate_select_speed - Set module link speed + * @hw: pointer to hardware structure + * @speed: link speed to set + * + * Set module link speed via RS0/RS1 rate select pins. + */ +void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed) +{ + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + + switch (speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); + break; + case IXGBE_LINK_SPEED_1GB_FULL: + esdp_reg &= ~IXGBE_ESDP_SDP5; + esdp_reg |= IXGBE_ESDP_SDP5_DIR; + break; + default: + DEBUGOUT("Invalid fixed module speed\n"); + return; + } + + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Implements the Intel SmartSpeed algorithm. + **/ +s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 status = IXGBE_SUCCESS; + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; + s32 i, j; + bool link_up = false; + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + + DEBUGFUNC("ixgbe_setup_mac_link_smartspeed"); + + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + if (speed & IXGBE_LINK_SPEED_100_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; + + /* + * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the + * autoneg advertisement if link is unable to be established at the + * highest negotiated rate. This can sometimes happen due to integrity + * issues with the physical media connection. + */ + + /* First, try to get link with full advertisement */ + hw->phy.smart_speed_active = false; + for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { + status = ixgbe_setup_mac_link_82599(hw, speed, + autoneg_wait_to_complete); + if (status != IXGBE_SUCCESS) + goto out; + + /* + * Wait for the controller to acquire link. Per IEEE 802.3ap, + * Section 73.10.2, we may have to wait up to 500ms if KR is + * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per + * Table 9 in the AN MAS. + */ + for (i = 0; i < 5; i++) { + msec_delay(100); + + /* If we have link, just jump out */ + status = ixgbe_check_link(hw, &link_speed, &link_up, + false); + if (status != IXGBE_SUCCESS) + goto out; + + if (link_up) + goto out; + } + } + + /* + * We didn't get link. If we advertised KR plus one of KX4/KX + * (or BX4/BX), then disable KR and try again. 
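+	 * Setting smart_speed_active below causes ixgbe_setup_mac_link_82599()
+	 * to leave IXGBE_AUTOC_KR_SUPP cleared, so the retry advertises only
+	 * the KX4/KX modes.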
+ */ + if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) || + ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0)) + goto out; + + /* Turn SmartSpeed on to disable KR support */ + hw->phy.smart_speed_active = true; + status = ixgbe_setup_mac_link_82599(hw, speed, + autoneg_wait_to_complete); + if (status != IXGBE_SUCCESS) + goto out; + + /* + * Wait for the controller to acquire link. 600ms will allow for + * the AN link_fail_inhibit_timer as well for multiple cycles of + * parallel detect, both 10g and 1g. This allows for the maximum + * connect attempts as defined in the AN MAS table 73-7. + */ + for (i = 0; i < 6; i++) { + msec_delay(100); + + /* If we have link, just jump out */ + status = ixgbe_check_link(hw, &link_speed, &link_up, false); + if (status != IXGBE_SUCCESS) + goto out; + + if (link_up) + goto out; + } + + /* We didn't get link. Turn SmartSpeed back off. */ + hw->phy.smart_speed_active = false; + status = ixgbe_setup_mac_link_82599(hw, speed, + autoneg_wait_to_complete); + +out: + if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) + DEBUGOUT("Smartspeed has downgraded the link speed " + "from the maximum advertised\n"); + return status; +} + +/** + * ixgbe_setup_mac_link_82599 - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. + **/ +s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + bool autoneg = false; + s32 status = IXGBE_SUCCESS; + u32 pma_pmd_1g, link_mode; + u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* holds the value of AUTOC register at this current point in time */ + u32 orig_autoc = 0; /* holds the cached value of AUTOC register */ + u32 autoc = current_autoc; /* Temporary variable used for comparison purposes */ + u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; + u32 links_reg; + u32 i; + ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; + + DEBUGFUNC("ixgbe_setup_mac_link_82599"); + + /* Check to see if speed passed in is supported. 
*/ + status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); + if (status) + goto out; + + speed &= link_capabilities; + + if (speed == IXGBE_LINK_SPEED_UNKNOWN) { + status = IXGBE_ERR_LINK_SETUP; + goto out; + } + + /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ + if (hw->mac.orig_link_settings_stored) + orig_autoc = hw->mac.orig_autoc; + else + orig_autoc = autoc; + + link_mode = autoc & IXGBE_AUTOC_LMS_MASK; + pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; + + if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || + link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || + link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { + /* Set KX4/KX/KR support according to speed requested */ + autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) + autoc |= IXGBE_AUTOC_KX4_SUPP; + if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) && + (hw->phy.smart_speed_active == false)) + autoc |= IXGBE_AUTOC_KR_SUPP; + } + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + autoc |= IXGBE_AUTOC_KX_SUPP; + } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && + (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN || + link_mode == IXGBE_AUTOC_LMS_1G_AN)) { + /* Switch from 1G SFI to 10G SFI if requested */ + if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && + (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) { + autoc &= ~IXGBE_AUTOC_LMS_MASK; + autoc |= IXGBE_AUTOC_LMS_10G_SERIAL; + } + } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) && + (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) { + /* Switch from 10G SFI to 1G SFI if requested */ + if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && + (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { + autoc &= ~IXGBE_AUTOC_LMS_MASK; + if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel) + autoc |= IXGBE_AUTOC_LMS_1G_AN; + else + autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN; + } + } + + if (autoc != current_autoc) { + /* Restart link */ + status = hw->mac.ops.prot_autoc_write(hw, autoc, false); + if (status != IXGBE_SUCCESS) + goto out; + + /* Only poll for autoneg to complete if specified to do so */ + if (autoneg_wait_to_complete) { + if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || + link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || + link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { + links_reg = 0; /*Just in case Autoneg time=0*/ + for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { + links_reg = + IXGBE_READ_REG(hw, IXGBE_LINKS); + if (links_reg & IXGBE_LINKS_KX_AN_COMP) + break; + msec_delay(100); + } + if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { + status = + IXGBE_ERR_AUTONEG_NOT_COMPLETE; + DEBUGOUT("Autoneg did not complete.\n"); + } + } + } + + /* Add delay to filter out noises during initial link setup */ + msec_delay(50); + } + +out: + return status; +} + +/** + * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true if waiting is needed to complete + * + * Restarts link on PHY and MAC based on settings passed in. 
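+ * The PHY is configured first via phy.ops.setup_link_speed(), then the
+ * MAC link is restarted with ixgbe_start_mac_link_82599(); the status
+ * returned to the caller is that of the PHY setup.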
+ **/ +STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 status; + + DEBUGFUNC("ixgbe_setup_copper_link_82599"); + + /* Setup the PHY according to input speed */ + status = hw->phy.ops.setup_link_speed(hw, speed, + autoneg_wait_to_complete); + /* Set up MAC */ + ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete); + + return status; +} + +/** + * ixgbe_reset_hw_82599 - Perform hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, perform a PHY reset, and perform a link (MAC) + * reset. + **/ +s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) +{ + ixgbe_link_speed link_speed; + s32 status; + u32 ctrl = 0; + u32 i, autoc, autoc2; + u32 curr_lms; + bool link_up = false; + + DEBUGFUNC("ixgbe_reset_hw_82599"); + + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status != IXGBE_SUCCESS) + goto reset_hw_out; + + /* flush pending Tx transactions */ + ixgbe_clear_tx_pending(hw); + + /* PHY ops must be identified and initialized prior to reset */ + + /* Identify PHY and related function pointers */ + status = hw->phy.ops.init(hw); + + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto reset_hw_out; + + /* Setup SFP module if there is one present. */ + if (hw->phy.sfp_setup_needed) { + status = hw->mac.ops.setup_sfp(hw); + hw->phy.sfp_setup_needed = false; + } + + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto reset_hw_out; + + /* Reset PHY */ + if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL) + hw->phy.ops.reset(hw); + + /* remember AUTOC from before we reset */ + curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK; + +mac_reset_top: + /* + * Issue global reset to the MAC. Needs to be SW reset if link is up. + * If link reset is used when link is up, it might reset the PHY when + * mng is using it. If link is down or the flag to force full link + * reset is set, then perform link reset. + */ + ctrl = IXGBE_CTRL_LNK_RST; + if (!hw->force_full_reset) { + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + if (link_up) + ctrl = IXGBE_CTRL_RST; + } + + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + + /* Poll for reset bit to self-clear meaning reset is complete */ + for (i = 0; i < 10; i++) { + usec_delay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST_MASK)) + break; + } + + if (ctrl & IXGBE_CTRL_RST_MASK) { + status = IXGBE_ERR_RESET_FAILED; + DEBUGOUT("Reset polling failed to complete.\n"); + } + + msec_delay(50); + + /* + * Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to + * allow time for any pending HW events to complete. + */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + /* + * Store the original AUTOC/AUTOC2 values if they have not been + * stored off yet. Otherwise restore the stored original + * values since the reset operation sets back to defaults. 
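+	 * When manageability firmware is running on a multispeed fiber part,
+	 * or when WoL is enabled, the pre-reset LMS value (curr_lms) is kept
+	 * and only the remaining AUTOC fields are restored.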
+ */ + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + + /* Enable link if disabled in NVM */ + if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) { + autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); + IXGBE_WRITE_FLUSH(hw); + } + + if (hw->mac.orig_link_settings_stored == false) { + hw->mac.orig_autoc = autoc; + hw->mac.orig_autoc2 = autoc2; + hw->mac.orig_link_settings_stored = true; + } else { + + /* If MNG FW is running on a multi-speed device that + * doesn't autoneg with out driver support we need to + * leave LMS in the state it was before we MAC reset. + * Likewise if we support WoL we don't want change the + * LMS state. + */ + if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) || + hw->wol_enabled) + hw->mac.orig_autoc = + (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) | + curr_lms; + + if (autoc != hw->mac.orig_autoc) { + status = hw->mac.ops.prot_autoc_write(hw, + hw->mac.orig_autoc, + false); + if (status != IXGBE_SUCCESS) + goto reset_hw_out; + } + + if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != + (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { + autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; + autoc2 |= (hw->mac.orig_autoc2 & + IXGBE_AUTOC2_UPPER_MASK); + IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); + } + } + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = 128; + hw->mac.ops.init_rx_addrs(hw); + + /* Store the permanent SAN mac address */ + hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); + + /* Add the SAN MAC address to the RAR only if it's a valid address */ + if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { + hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, + hw->mac.san_addr, 0, IXGBE_RAH_AV); + + /* Save the SAN MAC RAR index */ + hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; + + /* Reserve the last RAR for the SAN MAC address */ + hw->mac.num_rar_entries--; + } + + /* Store the alternative WWNN/WWPN prefix */ + hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, + &hw->mac.wwpn_prefix); + +reset_hw_out: + return status; +} + +/** + * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete + * @hw: pointer to hardware structure + * @fdircmd: current value of FDIRCMD register + */ +STATIC s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd) +{ + int i; + + for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { + *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); + if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK)) + return IXGBE_SUCCESS; + usec_delay(10); + } + + return IXGBE_ERR_FDIR_CMD_INCOMPLETE; +} + +/** + * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) +{ + s32 err; + int i; + u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); + u32 fdircmd; + fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; + + DEBUGFUNC("ixgbe_reinit_fdir_tables_82599"); + + /* + * Before starting reinitialization process, + * FDIRCMD.CMD must be zero. 
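+	 * ixgbe_fdir_check_cmd_complete() polls FDIRCMD up to
+	 * IXGBE_FDIRCMD_CMD_POLL times with a 10 us delay between reads
+	 * before reporting IXGBE_ERR_FDIR_CMD_INCOMPLETE.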
+ */ + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n"); + return err; + } + + IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0); + IXGBE_WRITE_FLUSH(hw); + /* + * 82599 adapters flow director init flow cannot be restarted, + * Workaround 82599 silicon errata by performing the following steps + * before re-writing the FDIRCTRL control register with the same value. + * - write 1 to bit 8 of FDIRCMD register & + * - write 0 to bit 8 of FDIRCMD register + */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | + IXGBE_FDIRCMD_CLEARHT)); + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & + ~IXGBE_FDIRCMD_CLEARHT)); + IXGBE_WRITE_FLUSH(hw); + /* + * Clear FDIR Hash register to clear any leftover hashes + * waiting to be programmed. + */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00); + IXGBE_WRITE_FLUSH(hw); + + IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); + IXGBE_WRITE_FLUSH(hw); + + /* Poll init-done after we write FDIRCTRL register */ + for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { + if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & + IXGBE_FDIRCTRL_INIT_DONE) + break; + msec_delay(1); + } + if (i >= IXGBE_FDIR_INIT_DONE_POLL) { + DEBUGOUT("Flow Director Signature poll time exceeded!\n"); + return IXGBE_ERR_FDIR_REINIT_FAILED; + } + + /* Clear FDIR statistics registers (read to clear) */ + IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT); + IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT); + IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); + IXGBE_READ_REG(hw, IXGBE_FDIRMISS); + IXGBE_READ_REG(hw, IXGBE_FDIRLEN); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register + **/ +STATIC void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) +{ + int i; + + DEBUGFUNC("ixgbe_fdir_enable_82599"); + + /* Prime the keys for hashing */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); + + /* + * Poll init-done after we write the register. Estimated times: + * 10G: PBALLOC = 11b, timing is 60us + * 1G: PBALLOC = 11b, timing is 600us + * 100M: PBALLOC = 11b, timing is 6ms + * + * Multiple these timings by 4 if under full Rx load + * + * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for + * 1 msec per poll time. If we're at line rate and drop to 100M, then + * this might not finish in our poll time, but we can live with that + * for now. 
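+	 * The loop below therefore polls FDIRCTRL.INIT_DONE up to
+	 * IXGBE_FDIR_INIT_DONE_POLL times, sleeping 1 ms per iteration, and
+	 * only logs a message if the bit never sets.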
+ */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); + IXGBE_WRITE_FLUSH(hw); + for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { + if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & + IXGBE_FDIRCTRL_INIT_DONE) + break; + msec_delay(1); + } + + if (i >= IXGBE_FDIR_INIT_DONE_POLL) + DEBUGOUT("Flow Director poll time exceeded!\n"); +} + +/** + * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register, initially + * contains just the value of the Rx packet buffer allocation + **/ +s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl) +{ + DEBUGFUNC("ixgbe_init_fdir_signature_82599"); + + /* + * Continue setup of fdirctrl register bits: + * Move the flexible bytes to use the ethertype - shift 6 words + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 filters are left + */ + fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | + (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | + (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); + + /* write hashes and fdirctrl register, poll for completion */ + ixgbe_fdir_enable_82599(hw, fdirctrl); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register, initially + * contains just the value of the Rx packet buffer allocation + * @cloud_mode: true - cloud mode, false - other mode + **/ +s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl, + bool cloud_mode) +{ + DEBUGFUNC("ixgbe_init_fdir_perfect_82599"); + + /* + * Continue setup of fdirctrl register bits: + * Turn perfect match filtering on + * Report hash in RSS field of Rx wb descriptor + * Initialize the drop queue to queue 127 + * Move the flexible bytes to use the ethertype - shift 6 words + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 (0x4 * 16) filters are left + */ + fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH | + IXGBE_FDIRCTRL_REPORT_STATUS | + (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) | + (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | + (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | + (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); + + if (cloud_mode) + fdirctrl |=(IXGBE_FDIRCTRL_FILTERMODE_CLOUD << + IXGBE_FDIRCTRL_FILTERMODE_SHIFT); + + /* write hashes and fdirctrl register, poll for completion */ + ixgbe_fdir_enable_82599(hw, fdirctrl); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue + * @hw: pointer to hardware structure + * @dropqueue: Rx queue index used for the dropped packets + **/ +void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue) +{ + u32 fdirctrl; + + DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599"); + /* Clear init done bit and drop queue field */ + fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); + fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE); + + /* Set drop queue */ + fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT); + if ((hw->mac.type == ixgbe_mac_X550) || + (hw->mac.type == ixgbe_mac_X550EM_x) || + (hw->mac.type == ixgbe_mac_X550EM_a)) + fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH; + + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | + IXGBE_FDIRCMD_CLEARHT)); + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & + ~IXGBE_FDIRCMD_CLEARHT)); + IXGBE_WRITE_FLUSH(hw); + + 
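+	/*
+	 * As in ixgbe_reinit_fdir_tables_82599() above, FDIRCMD.CLEARHT is
+	 * toggled on and back off before FDIRCTRL is rewritten, working
+	 * around the 82599 restriction that the flow director init flow
+	 * cannot simply be restarted.
+	 */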
/* write hashes and fdirctrl register, poll for completion */ + ixgbe_fdir_enable_82599(hw, fdirctrl); +} + +/* + * These defines allow us to quickly generate all of the necessary instructions + * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION + * for values 0 through 15 + */ +#define IXGBE_ATR_COMMON_HASH_KEY \ + (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY) +#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ + common_hash ^= lo_hash_dword >> n; \ + else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ + sig_hash ^= lo_hash_dword << (16 - n); \ + if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ + common_hash ^= hi_hash_dword >> n; \ + else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ + else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ + sig_hash ^= hi_hash_dword << (16 - n); \ +} while (0) + +/** + * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash + * @stream: input bitstream to compute the hash on + * + * This function is almost identical to the function above but contains + * several optimizations such as unwinding all of the loops, letting the + * compiler work out all of the conditional ifs since the keys are static + * defines, and computing two keys at once since the hashed dword stream + * will be the same for both keys. + **/ +u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common) +{ + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; + + /* record the flow_vm_vlan bits as they are a key part to the hash */ + flow_vm_vlan = IXGBE_NTOHL(input.dword); + + /* generate common hash dword */ + hi_hash_dword = IXGBE_NTOHL(common.dword); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + + /* apply flow ID/VM pool/VLAN ID bits to hash words */ + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + + /* Process bits 0 and 16 */ + IXGBE_COMPUTE_SIG_HASH_ITERATION(0); + + /* + * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed + * so we do not add the VLAN until after bit 0 was processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + + /* Process remaining 30 bit of the key */ + IXGBE_COMPUTE_SIG_HASH_ITERATION(1); + IXGBE_COMPUTE_SIG_HASH_ITERATION(2); + IXGBE_COMPUTE_SIG_HASH_ITERATION(3); + IXGBE_COMPUTE_SIG_HASH_ITERATION(4); + IXGBE_COMPUTE_SIG_HASH_ITERATION(5); + IXGBE_COMPUTE_SIG_HASH_ITERATION(6); + IXGBE_COMPUTE_SIG_HASH_ITERATION(7); + IXGBE_COMPUTE_SIG_HASH_ITERATION(8); + IXGBE_COMPUTE_SIG_HASH_ITERATION(9); + IXGBE_COMPUTE_SIG_HASH_ITERATION(10); + IXGBE_COMPUTE_SIG_HASH_ITERATION(11); + IXGBE_COMPUTE_SIG_HASH_ITERATION(12); + IXGBE_COMPUTE_SIG_HASH_ITERATION(13); + IXGBE_COMPUTE_SIG_HASH_ITERATION(14); + IXGBE_COMPUTE_SIG_HASH_ITERATION(15); + + /* combine common_hash result with signature and bucket hashes */ + bucket_hash ^= common_hash; + bucket_hash &= IXGBE_ATR_HASH_MASK; + + sig_hash ^= common_hash << 16; + sig_hash &= IXGBE_ATR_HASH_MASK << 16; + + /* return completed signature hash */ + return sig_hash ^ bucket_hash; +} + +/** + * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter + * @hw: pointer to 
hardware structure + * @input: unique input dword + * @common: compressed common input dword + * @queue: queue index to direct traffic to + * + * Note that the tunnel bit in input must not be set when the hardware + * tunneling support does not exist. + **/ +void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common, + u8 queue) +{ + u64 fdirhashcmd; + u8 flow_type; + bool tunnel; + u32 fdircmd; + + DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599"); + + /* + * Get the flow_type in order to program FDIRCMD properly + * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 + * fifth is FDIRCMD.TUNNEL_FILTER + */ + tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK); + flow_type = input.formatted.flow_type & + (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1); + switch (flow_type) { + case IXGBE_ATR_FLOW_TYPE_TCPV4: + case IXGBE_ATR_FLOW_TYPE_UDPV4: + case IXGBE_ATR_FLOW_TYPE_SCTPV4: + case IXGBE_ATR_FLOW_TYPE_TCPV6: + case IXGBE_ATR_FLOW_TYPE_UDPV6: + case IXGBE_ATR_FLOW_TYPE_SCTPV6: + break; + default: + DEBUGOUT(" Error on flow type input\n"); + return; + } + + /* configure FDIRCMD register */ + fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; + fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; + fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; + if (tunnel) + fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER; + + /* + * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits + * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. + */ + fdirhashcmd = (u64)fdircmd << 32; + fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common); + IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); + + DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); + + return; +} + +#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ +} while (0) + +/** + * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash + * @atr_input: input bitstream to compute the hash on + * @input_mask: mask for the input bitstream + * + * This function serves two main purposes. First it applies the input_mask + * to the atr_input resulting in a cleaned up atr_input data stream. + * Secondly it computes the hash and stores it in the bkt_hash field at + * the end of the input byte stream. This way it will be available for + * future use without needing to recompute the hash. 
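+ * Only the bucket hash key is used here; the result is truncated to
+ * 13 bits since the bucket table holds at most 8K entries.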
+ **/ +void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, + union ixgbe_atr_input *input_mask) +{ + + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 bucket_hash = 0; + u32 hi_dword = 0; + u32 i = 0; + + /* Apply masks to input data */ + for (i = 0; i < 14; i++) + input->dword_stream[i] &= input_mask->dword_stream[i]; + + /* record the flow_vm_vlan bits as they are a key part to the hash */ + flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]); + + /* generate common hash dword */ + for (i = 1; i <= 13; i++) + hi_dword ^= input->dword_stream[i]; + hi_hash_dword = IXGBE_NTOHL(hi_dword); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + + /* apply flow ID/VM pool/VLAN ID bits to hash words */ + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + + /* Process bits 0 and 16 */ + IXGBE_COMPUTE_BKT_HASH_ITERATION(0); + + /* + * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed + * so we do not add the VLAN until after bit 0 was processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + + /* Process remaining 30 bit of the key */ + for (i = 1; i <= 15; i++) + IXGBE_COMPUTE_BKT_HASH_ITERATION(i); + + /* + * Limit hash to 13 bits since max bucket count is 8K. + * Store result at the end of the input stream. + */ + input->formatted.bkt_hash = bucket_hash & 0x1FFF; +} + +/** + * ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks + * @input_mask: mask to be bit swapped + * + * The source and destination port masks for flow director are bit swapped + * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to + * generate a correctly swapped value we need to bit swap the mask and that + * is what is accomplished by this function. + **/ +STATIC u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) +{ + u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port); + mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; + mask |= IXGBE_NTOHS(input_mask->formatted.src_port); + mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); + mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); + mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); + return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8); +} + +/* + * These two macros are meant to address the fact that we have registers + * that are either all or in part big-endian. As a result on big-endian + * systems we will end up byte swapping the value to little-endian before + * it is byte swapped again and written to the hardware in the original + * big-endian format. + */ +#define IXGBE_STORE_AS_BE32(_value) \ + (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \ + (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24)) + +#define IXGBE_WRITE_REG_BE32(a, reg, value) \ + IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value))) + +#define IXGBE_STORE_AS_BE16(_value) \ + IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8)) + +s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input_mask, bool cloud_mode) +{ + /* mask IPv6 since it is currently not supported */ + u32 fdirm = IXGBE_FDIRM_DIPv6; + u32 fdirtcpm; + u32 fdirip6m; + DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599"); + + /* + * Program the relevant mask registers. If src/dst_port or src/dst_addr + * are zero, then assume a full mask for that field. 
Also assume that + * a VLAN of 0 is unspecified, so mask that out as well. L4type + * cannot be masked out in this implementation. + * + * This also assumes IPv4 only. IPv6 masking isn't supported at this + * point in time. + */ + + /* verify bucket hash is cleared on hash generation */ + if (input_mask->formatted.bkt_hash) + DEBUGOUT(" bucket hash should always be 0 in mask\n"); + + /* Program FDIRM and verify partial masks */ + switch (input_mask->formatted.vm_pool & 0x7F) { + case 0x0: + fdirm |= IXGBE_FDIRM_POOL; + case 0x7F: + break; + default: + DEBUGOUT(" Error on vm pool mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) { + case 0x0: + fdirm |= IXGBE_FDIRM_L4P; + if (input_mask->formatted.dst_port || + input_mask->formatted.src_port) { + DEBUGOUT(" Error on src/dst port mask\n"); + return IXGBE_ERR_CONFIG; + } + case IXGBE_ATR_L4TYPE_MASK: + break; + default: + DEBUGOUT(" Error on flow type mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) { + case 0x0000: + /* mask VLAN ID, fall through to mask VLAN priority */ + fdirm |= IXGBE_FDIRM_VLANID; + case 0x0FFF: + /* mask VLAN priority */ + fdirm |= IXGBE_FDIRM_VLANP; + break; + case 0xE000: + /* mask VLAN ID only, fall through */ + fdirm |= IXGBE_FDIRM_VLANID; + case 0xEFFF: + /* no VLAN fields masked */ + break; + default: + DEBUGOUT(" Error on VLAN mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (input_mask->formatted.flex_bytes & 0xFFFF) { + case 0x0000: + /* Mask Flex Bytes, fall through */ + fdirm |= IXGBE_FDIRM_FLEX; + case 0xFFFF: + break; + default: + DEBUGOUT(" Error on flexible byte mask\n"); + return IXGBE_ERR_CONFIG; + } + + if (cloud_mode) { + fdirm |= IXGBE_FDIRM_L3P; + fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT); + fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK; + + switch (input_mask->formatted.inner_mac[0] & 0xFF) { + case 0x00: + /* Mask inner MAC, fall through */ + fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC; + case 0xFF: + break; + default: + DEBUGOUT(" Error on inner_mac byte mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) { + case 0x0: + /* Mask vxlan id */ + fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI; + break; + case 0x00FFFFFF: + fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24; + break; + case 0xFFFFFFFF: + break; + default: + DEBUGOUT(" Error on TNI/VNI byte mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (input_mask->formatted.tunnel_type & 0xFFFF) { + case 0x0: + /* Mask turnnel type, fall through */ + fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE; + case 0xFFFF: + break; + default: + DEBUGOUT(" Error on tunnel type byte mask\n"); + return IXGBE_ERR_CONFIG; + } + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m); + + /* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSIP4M and + * FDIRDIP4M in cloud mode to allow L3/L3 packets to + * tunnel. 
+ */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF); + } + + /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); + + if (!cloud_mode) { + /* store the TCP/UDP port masks, bit reversed from port + * layout */ + fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask); + + /* write both the same so that UDP and TCP use the same mask */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); + IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); + /* also use it for SCTP */ + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm); + break; + default: + break; + } + + /* store source and destination IP masks (big-enian) */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, + ~input_mask->formatted.src_ip[0]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, + ~input_mask->formatted.dst_ip[0]); + } + return IXGBE_SUCCESS; +} + +s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id, u8 queue, bool cloud_mode) +{ + u32 fdirport, fdirvlan, fdirhash, fdircmd; + u32 addr_low, addr_high; + u32 cloud_type = 0; + s32 err; + + DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599"); + if (!cloud_mode) { + /* currently IPv6 is not supported, must be programmed with 0 */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), + input->formatted.src_ip[0]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), + input->formatted.src_ip[1]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), + input->formatted.src_ip[2]); + + /* record the source address (big-endian) */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, + input->formatted.src_ip[0]); + + /* record the first 32 bits of the destination address + * (big-endian) */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, + input->formatted.dst_ip[0]); + + /* record source and destination port (little-endian)*/ + fdirport = IXGBE_NTOHS(input->formatted.dst_port); + fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; + fdirport |= IXGBE_NTOHS(input->formatted.src_port); + IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); + } + + /* record VLAN (little-endian) and flex_bytes(big-endian) */ + fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes); + fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; + fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id); + IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); + + if (cloud_mode) { + if (input->formatted.tunnel_type != 0) + cloud_type = 0x80000000; + + addr_low = ((u32)input->formatted.inner_mac[0] | + ((u32)input->formatted.inner_mac[1] << 8) | + ((u32)input->formatted.inner_mac[2] << 16) | + ((u32)input->formatted.inner_mac[3] << 24)); + addr_high = ((u32)input->formatted.inner_mac[4] | + ((u32)input->formatted.inner_mac[5] << 8)); + cloud_type |= addr_high; + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni); + } + + /* configure FDIRHASH register */ + fdirhash = input->formatted.bkt_hash; + fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + + /* + * flush all previous writes to make certain registers are + * programmed prior to issuing the command + */ + IXGBE_WRITE_FLUSH(hw); + + /* configure FDIRCMD register */ + fdircmd = 
IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; + if (queue == IXGBE_FDIR_DROP_QUEUE) + fdircmd |= IXGBE_FDIRCMD_DROP; + if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK) + fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER; + fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; + fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; + fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT; + + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + DEBUGOUT("Flow Director command did not complete!\n"); + return err; + } + + return IXGBE_SUCCESS; +} + +s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id) +{ + u32 fdirhash; + u32 fdircmd; + s32 err; + + /* configure FDIRHASH register */ + fdirhash = input->formatted.bkt_hash; + fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + + /* flush hash to HW */ + IXGBE_WRITE_FLUSH(hw); + + /* Query if filter is present */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT); + + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + DEBUGOUT("Flow Director command did not complete!\n"); + return err; + } + + /* if filter exists in hardware then remove it */ + if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) { + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + IXGBE_FDIRCMD_CMD_REMOVE_FLOW); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter + * @hw: pointer to hardware structure + * @input: input bitstream + * @input_mask: mask for the input bitstream + * @soft_id: software index for the filters + * @queue: queue index to direct traffic to + * + * Note that the caller to this function must lock before calling, since the + * hardware writes must be protected from one another. 
+ **/ +s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + union ixgbe_atr_input *input_mask, + u16 soft_id, u8 queue, bool cloud_mode) +{ + s32 err = IXGBE_ERR_CONFIG; + + DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599"); + + /* + * Check flow_type formatting, and bail out before we touch the hardware + * if there's a configuration issue + */ + switch (input->formatted.flow_type) { + case IXGBE_ATR_FLOW_TYPE_IPV4: + case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4: + input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK; + if (input->formatted.dst_port || input->formatted.src_port) { + DEBUGOUT(" Error on src/dst port\n"); + return IXGBE_ERR_CONFIG; + } + break; + case IXGBE_ATR_FLOW_TYPE_SCTPV4: + case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4: + if (input->formatted.dst_port || input->formatted.src_port) { + DEBUGOUT(" Error on src/dst port\n"); + return IXGBE_ERR_CONFIG; + } + case IXGBE_ATR_FLOW_TYPE_TCPV4: + case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4: + case IXGBE_ATR_FLOW_TYPE_UDPV4: + case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4: + input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | + IXGBE_ATR_L4TYPE_MASK; + break; + default: + DEBUGOUT(" Error on flow type input\n"); + return err; + } + + /* program input mask into the HW */ + err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode); + if (err) + return err; + + /* apply mask and compute/store hash */ + ixgbe_atr_compute_perfect_hash_82599(input, input_mask); + + /* program filters to filter memory */ + return ixgbe_fdir_write_perfect_filter_82599(hw, input, + soft_id, queue, cloud_mode); +} + +/** + * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register + * @hw: pointer to hardware structure + * @reg: analog register to read + * @val: read value + * + * Performs read operation to Omer analog register specified. + **/ +s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) +{ + u32 core_ctl; + + DEBUGFUNC("ixgbe_read_analog_reg8_82599"); + + IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | + (reg << 8)); + IXGBE_WRITE_FLUSH(hw); + usec_delay(10); + core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL); + *val = (u8)core_ctl; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register + * @hw: pointer to hardware structure + * @reg: atlas register to write + * @val: value to write + * + * Performs write operation to Omer analog register specified. + **/ +s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) +{ + u32 core_ctl; + + DEBUGFUNC("ixgbe_write_analog_reg8_82599"); + + core_ctl = (reg << 8) | val; + IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl); + IXGBE_WRITE_FLUSH(hw); + usec_delay(10); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware using the generic start_hw function + * and the generation start_hw function. + * Then performs revision-specific operations, if any. 
+ **/ +s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_start_hw_82599"); + + ret_val = ixgbe_start_hw_generic(hw); + if (ret_val != IXGBE_SUCCESS) + goto out; + + ret_val = ixgbe_start_hw_gen2(hw); + if (ret_val != IXGBE_SUCCESS) + goto out; + + /* We need to run link autotry after the driver loads */ + hw->mac.autotry_restart = true; + + if (ret_val == IXGBE_SUCCESS) + ret_val = ixgbe_verify_fw_version_82599(hw); +out: + return ret_val; +} + +/** + * ixgbe_identify_phy_82599 - Get physical layer module + * @hw: pointer to hardware structure + * + * Determines the physical layer module found on the current adapter. + * If PHY already detected, maintains current PHY type in hw struct, + * otherwise executes the PHY detection routine. + **/ +s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) +{ + s32 status; + + DEBUGFUNC("ixgbe_identify_phy_82599"); + + /* Detect PHY if not unknown - returns success if already detected. */ + status = ixgbe_identify_phy_generic(hw); + if (status != IXGBE_SUCCESS) { + /* 82599 10GBASE-T requires an external PHY */ + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) + return status; + else + status = ixgbe_identify_module_generic(hw); + } + + /* Set PHY type none if no PHY detected */ + if (hw->phy.type == ixgbe_phy_unknown) { + hw->phy.type = ixgbe_phy_none; + return IXGBE_SUCCESS; + } + + /* Return error if SFP module has been detected but is not supported */ + if (hw->phy.type == ixgbe_phy_sfp_unsupported) + return IXGBE_ERR_SFP_NOT_SUPPORTED; + + return status; +} + +/** + * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. 
+ **/ +u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) +{ + u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; + u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; + u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; + u16 ext_ability = 0; + + DEBUGFUNC("ixgbe_get_support_physical_layer_82599"); + + hw->phy.ops.identify(hw); + + switch (hw->phy.type) { + case ixgbe_phy_tn: + case ixgbe_phy_cu_unknown: + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); + if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + goto out; + default: + break; + } + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_AN: + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) { + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX | + IXGBE_PHYSICAL_LAYER_1000BASE_BX; + goto out; + } else + /* SFI mode so read SFP module */ + goto sfp_check; + break; + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; + else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI; + goto out; + break; + case IXGBE_AUTOC_LMS_10G_SERIAL: + if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) { + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR; + goto out; + } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) + goto sfp_check; + break; + case IXGBE_AUTOC_LMS_KX4_KX_KR: + case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: + if (autoc & IXGBE_AUTOC_KX_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + if (autoc & IXGBE_AUTOC_KR_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR; + goto out; + break; + default: + goto out; + break; + } + +sfp_check: + /* SFP check must be done last since DA modules are sometimes used to + * test KR mode - we need to id KR mode correctly before SFP module. + * Call identify_sfp because the pluggable module may have changed */ + physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw); +out: + return physical_layer; +} + +/** + * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599 + * @hw: pointer to hardware structure + * @regval: register value to write to RXCTRL + * + * Enables the Rx DMA unit for 82599 + **/ +s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) +{ + + DEBUGFUNC("ixgbe_enable_rx_dma_82599"); + + /* + * Workaround for 82599 silicon errata when enabling the Rx datapath. + * If traffic is incoming before we enable the Rx unit, it could hang + * the Rx DMA unit. Therefore, make sure the security engine is + * completely disabled prior to enabling the Rx unit. 
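+	 * The security engine is re-enabled once RXCTRL has been updated, so
+	 * the workaround only brackets the enable/disable of the Rx unit below.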
+ */ + + hw->mac.ops.disable_sec_rx_path(hw); + + if (regval & IXGBE_RXCTRL_RXEN) + ixgbe_enable_rx(hw); + else + ixgbe_disable_rx(hw); + + hw->mac.ops.enable_sec_rx_path(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_verify_fw_version_82599 - verify FW version for 82599 + * @hw: pointer to hardware structure + * + * Verifies that installed the firmware version is 0.6 or higher + * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. + * + * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or + * if the FW version is not supported. + **/ +STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_EEPROM_VERSION; + u16 fw_offset, fw_ptp_cfg_offset; + u16 fw_version; + + DEBUGFUNC("ixgbe_verify_fw_version_82599"); + + /* firmware check is only necessary for SFI devices */ + if (hw->phy.media_type != ixgbe_media_type_fiber) { + status = IXGBE_SUCCESS; + goto fw_version_out; + } + + /* get the offset to the Firmware Module block */ + if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", IXGBE_FW_PTR); + return IXGBE_ERR_EEPROM_VERSION; + } + + if ((fw_offset == 0) || (fw_offset == 0xFFFF)) + goto fw_version_out; + + /* get the offset to the Pass Through Patch Configuration block */ + if (hw->eeprom.ops.read(hw, (fw_offset + + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), + &fw_ptp_cfg_offset)) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", + fw_offset + + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR); + return IXGBE_ERR_EEPROM_VERSION; + } + + if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) + goto fw_version_out; + + /* get the firmware version */ + if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + + IXGBE_FW_PATCH_VERSION_4), &fw_version)) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", + fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4); + return IXGBE_ERR_EEPROM_VERSION; + } + + if (fw_version > 0x5) + status = IXGBE_SUCCESS; + +fw_version_out: + return status; +} + +/** + * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state. + * @hw: pointer to hardware structure + * + * Returns true if the LESM FW module is present and enabled. Otherwise + * returns false. Smart Speed must be disabled if LESM FW module is enabled. 
+ **/ +bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) +{ + bool lesm_enabled = false; + u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; + s32 status; + + DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599"); + + /* get the offset to the Firmware Module block */ + status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); + + if ((status != IXGBE_SUCCESS) || + (fw_offset == 0) || (fw_offset == 0xFFFF)) + goto out; + + /* get the offset to the LESM Parameters block */ + status = hw->eeprom.ops.read(hw, (fw_offset + + IXGBE_FW_LESM_PARAMETERS_PTR), + &fw_lesm_param_offset); + + if ((status != IXGBE_SUCCESS) || + (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF)) + goto out; + + /* get the LESM state word */ + status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + + IXGBE_FW_LESM_STATE_1), + &fw_lesm_state); + + if ((status == IXGBE_SUCCESS) && + (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) + lesm_enabled = true; + +out: + return lesm_enabled; +} + +/** + * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of word in EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Retrieves 16 bit word(s) read from EEPROM + **/ +STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val = IXGBE_ERR_CONFIG; + + DEBUGFUNC("ixgbe_read_eeprom_buffer_82599"); + + /* + * If EEPROM is detected and can be addressed using 14 bits, + * use EERD otherwise use bit bang + */ + if ((eeprom->type == ixgbe_eeprom_spi) && + (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)) + ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words, + data); + else + ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, + words, + data); + + return ret_val; +} + +/** + * ixgbe_read_eeprom_82599 - Read EEPROM word using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM + **/ +STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, + u16 offset, u16 *data) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val = IXGBE_ERR_CONFIG; + + DEBUGFUNC("ixgbe_read_eeprom_82599"); + + /* + * If EEPROM is detected and can be addressed using 14 bits, + * use EERD otherwise use bit bang + */ + if ((eeprom->type == ixgbe_eeprom_spi) && + (offset <= IXGBE_EERD_MAX_ADDR)) + ret_val = ixgbe_read_eerd_generic(hw, offset, data); + else + ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data); + + return ret_val; +} + +/** + * ixgbe_reset_pipeline_82599 - perform pipeline reset + * + * @hw: pointer to hardware structure + * + * Reset pipeline by asserting Restart_AN together with LMS change to ensure + * full pipeline reset. This function assumes the SW/FW lock is held. 
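+ * AUTOC is written with bit 2 of the LMS field inverted and Restart_AN
+ * set, the AN state machine is polled until it leaves state 0 (up to
+ * ten 4 ms intervals), and the original AUTOC value is then written
+ * back, again with Restart_AN set.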
+ **/ +s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) +{ + s32 ret_val; + u32 anlp1_reg = 0; + u32 i, autoc_reg, autoc2_reg; + + /* Enable link if disabled in NVM */ + autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) { + autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg); + IXGBE_WRITE_FLUSH(hw); + } + + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, + autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT)); + /* Wait for AN to leave state 0 */ + for (i = 0; i < 10; i++) { + msec_delay(4); + anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); + if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK) + break; + } + + if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) { + DEBUGOUT("auto negotiation not completed\n"); + ret_val = IXGBE_ERR_RESET_FAILED; + goto reset_pipeline_out; + } + + ret_val = IXGBE_SUCCESS; + +reset_pipeline_out: + /* Write AUTOC register with original LMS field and Restart_AN */ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + IXGBE_WRITE_FLUSH(hw); + + return ret_val; +} + +/** + * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + u32 esdp; + s32 status; + s32 timeout = 200; + + DEBUGFUNC("ixgbe_read_i2c_byte_82599"); + + if (hw->phy.qsfp_shared_i2c_bus == TRUE) { + /* Acquire I2C bus ownership. */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp |= IXGBE_ESDP_SDP0; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + + while (timeout) { + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (esdp & IXGBE_ESDP_SDP1) + break; + + msec_delay(5); + timeout--; + } + + if (!timeout) { + DEBUGOUT("Driver can't access resource," + " acquiring I2C bus timeout.\n"); + status = IXGBE_ERR_I2C; + goto release_i2c_access; + } + } + + status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data); + +release_i2c_access: + + if (hw->phy.qsfp_shared_i2c_bus == TRUE) { + /* Release I2C bus ownership. */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp &= ~IXGBE_ESDP_SDP0; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + } + + return status; +} + +/** + * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + u32 esdp; + s32 status; + s32 timeout = 200; + + DEBUGFUNC("ixgbe_write_i2c_byte_82599"); + + if (hw->phy.qsfp_shared_i2c_bus == TRUE) { + /* Acquire I2C bus ownership. 
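+	 * SDP0 is driven high to request the shared QSFP I2C bus and SDP1 is
+	 * polled for the grant, for up to 200 iterations of 5 ms before timing
+	 * out with IXGBE_ERR_I2C.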
*/ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp |= IXGBE_ESDP_SDP0; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + + while (timeout) { + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (esdp & IXGBE_ESDP_SDP1) + break; + + msec_delay(5); + timeout--; + } + + if (!timeout) { + DEBUGOUT("Driver can't access resource," + " acquiring I2C bus timeout.\n"); + status = IXGBE_ERR_I2C; + goto release_i2c_access; + } + } + + status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data); + +release_i2c_access: + + if (hw->phy.qsfp_shared_i2c_bus == TRUE) { + /* Release I2C bus ownership. */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp &= ~IXGBE_ESDP_SDP0; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + } + + return status; +} diff --git a/drivers/net/ixgbe/base/ixgbe_82599.h b/drivers/net/ixgbe/base/ixgbe_82599.h new file mode 100644 index 00000000..c034d3d9 --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_82599.h @@ -0,0 +1,64 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#ifndef _IXGBE_82599_H_ +#define _IXGBE_82599_H_ + +s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *autoneg); +enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw); +void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed); +s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete); +s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw); +void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw); +s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw); +s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val); +s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val); +s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw); +s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw); +s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw); +u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval); +s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val); +s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 reg_val, bool locked); +#endif /* _IXGBE_82599_H_ */ diff --git a/drivers/net/ixgbe/base/ixgbe_api.c b/drivers/net/ixgbe/base/ixgbe_api.c new file mode 100644 index 00000000..cf1e5169 --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_api.c @@ -0,0 +1,1664 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#include "ixgbe_api.h" +#include "ixgbe_common.h" + +#define IXGBE_EMPTY_PARAM + +static const u32 ixgbe_mvals_base[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(IXGBE_EMPTY_PARAM) +}; + +static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(_X540) +}; + +static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(_X550) +}; + +static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(_X550EM_x) +}; + +static const u32 ixgbe_mvals_X550EM_a[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(_X550EM_a) +}; + +/** + * ixgbe_dcb_get_rtrup2tc - read rtrup2tc reg + * @hw: pointer to hardware structure + * @map: pointer to u8 arr for returning map + * + * Read the rtrup2tc HW register and resolve its content into map + **/ +void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map) +{ + if (hw->mac.ops.get_rtrup2tc) + hw->mac.ops.get_rtrup2tc(hw, map); +} + +/** + * ixgbe_init_shared_code - Initialize the shared code + * @hw: pointer to hardware structure + * + * This will assign function pointers and assign the MAC type and PHY code. + * Does not touch the hardware. This function must be called prior to any + * other function in the shared code. The ixgbe_hw structure should be + * memset to 0 prior to calling this function. The following fields in + * hw structure should be filled in prior to calling this function: + * hw_addr, back, device_id, vendor_id, subsystem_device_id, + * subsystem_vendor_id, and revision_id + **/ +s32 ixgbe_init_shared_code(struct ixgbe_hw *hw) +{ + s32 status; + + DEBUGFUNC("ixgbe_init_shared_code"); + + /* + * Set the mac type + */ + ixgbe_set_mac_type(hw); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + status = ixgbe_init_ops_82598(hw); + break; + case ixgbe_mac_82599EB: + status = ixgbe_init_ops_82599(hw); + break; + case ixgbe_mac_X540: + status = ixgbe_init_ops_X540(hw); + break; + case ixgbe_mac_X550: + status = ixgbe_init_ops_X550(hw); + break; + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + status = ixgbe_init_ops_X550EM(hw); + break; + case ixgbe_mac_82599_vf: + case ixgbe_mac_X540_vf: + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: + status = ixgbe_init_ops_vf(hw); + break; + default: + status = IXGBE_ERR_DEVICE_NOT_SUPPORTED; + break; + } + hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME; + + return status; +} + +/** + * ixgbe_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the mac type of the adapter based on the + * vendor ID and device ID stored in the hw structure. 
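+ *
+ * Editor's note: a minimal bring-up sketch, not part of the base code.
+ * bar0_va, pci_dev_id, pci_sub_dev and pci_sub_ven are placeholders for
+ * values the caller reads from PCI config/BAR mapping; only the identifiers
+ * defined in these sources (IXGBE_INTEL_VENDOR_ID, IXGBE_SUCCESS, the
+ * ixgbe_* entry points) are taken from the file itself:
+ *
+ *     struct ixgbe_hw hw;
+ *
+ *     memset(&hw, 0, sizeof(hw));
+ *     hw.hw_addr = bar0_va;               /* mapped BAR0 (placeholder) */
+ *     hw.vendor_id = IXGBE_INTEL_VENDOR_ID;
+ *     hw.device_id = pci_dev_id;          /* e.g. IXGBE_DEV_ID_82599_SFP */
+ *     hw.subsystem_device_id = pci_sub_dev;
+ *     hw.subsystem_vendor_id = pci_sub_ven;
+ *     if (ixgbe_init_shared_code(&hw) == IXGBE_SUCCESS)
+ *         ixgbe_init_hw(&hw);             /* reset + start the MAC */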
+ **/ +s32 ixgbe_set_mac_type(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_set_mac_type\n"); + + if (hw->vendor_id != IXGBE_INTEL_VENDOR_ID) { + ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, + "Unsupported vendor id: %x", hw->vendor_id); + return IXGBE_ERR_DEVICE_NOT_SUPPORTED; + } + + hw->mvals = ixgbe_mvals_base; + + switch (hw->device_id) { + case IXGBE_DEV_ID_82598: + case IXGBE_DEV_ID_82598_BX: + case IXGBE_DEV_ID_82598AF_SINGLE_PORT: + case IXGBE_DEV_ID_82598AF_DUAL_PORT: + case IXGBE_DEV_ID_82598AT: + case IXGBE_DEV_ID_82598AT2: + case IXGBE_DEV_ID_82598EB_CX4: + case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: + case IXGBE_DEV_ID_82598_DA_DUAL_PORT: + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: + case IXGBE_DEV_ID_82598EB_XF_LR: + case IXGBE_DEV_ID_82598EB_SFP_LOM: + hw->mac.type = ixgbe_mac_82598EB; + break; + case IXGBE_DEV_ID_82599_KX4: + case IXGBE_DEV_ID_82599_KX4_MEZZ: + case IXGBE_DEV_ID_82599_XAUI_LOM: + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: + case IXGBE_DEV_ID_82599_KR: + case IXGBE_DEV_ID_82599_SFP: + case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: + case IXGBE_DEV_ID_82599_SFP_FCOE: + case IXGBE_DEV_ID_82599_SFP_EM: + case IXGBE_DEV_ID_82599_SFP_SF2: + case IXGBE_DEV_ID_82599_SFP_SF_QP: + case IXGBE_DEV_ID_82599_QSFP_SF_QP: + case IXGBE_DEV_ID_82599EN_SFP: + case IXGBE_DEV_ID_82599_CX4: + case IXGBE_DEV_ID_82599_LS: + case IXGBE_DEV_ID_82599_T3_LOM: + hw->mac.type = ixgbe_mac_82599EB; + break; + case IXGBE_DEV_ID_82599_VF: + case IXGBE_DEV_ID_82599_VF_HV: + hw->mac.type = ixgbe_mac_82599_vf; + break; + case IXGBE_DEV_ID_X540_VF: + case IXGBE_DEV_ID_X540_VF_HV: + hw->mac.type = ixgbe_mac_X540_vf; + hw->mvals = ixgbe_mvals_X540; + break; + case IXGBE_DEV_ID_X540T: + case IXGBE_DEV_ID_X540T1: + hw->mac.type = ixgbe_mac_X540; + hw->mvals = ixgbe_mvals_X540; + break; + case IXGBE_DEV_ID_X550T: + case IXGBE_DEV_ID_X550T1: + hw->mac.type = ixgbe_mac_X550; + hw->mvals = ixgbe_mvals_X550; + break; + case IXGBE_DEV_ID_X550EM_X_KX4: + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_X_1G_T: + case IXGBE_DEV_ID_X550EM_X_SFP: + hw->mac.type = ixgbe_mac_X550EM_x; + hw->mvals = ixgbe_mvals_X550EM_x; + break; + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + case IXGBE_DEV_ID_X550EM_A_SFP_N: + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + case IXGBE_DEV_ID_X550EM_A_10G_T: + case IXGBE_DEV_ID_X550EM_A_QSFP: + case IXGBE_DEV_ID_X550EM_A_QSFP_N: + case IXGBE_DEV_ID_X550EM_A_SFP: + hw->mac.type = ixgbe_mac_X550EM_a; + hw->mvals = ixgbe_mvals_X550EM_a; + break; + case IXGBE_DEV_ID_X550_VF: + case IXGBE_DEV_ID_X550_VF_HV: + hw->mac.type = ixgbe_mac_X550_vf; + hw->mvals = ixgbe_mvals_X550; + break; + case IXGBE_DEV_ID_X550EM_X_VF: + case IXGBE_DEV_ID_X550EM_X_VF_HV: + hw->mac.type = ixgbe_mac_X550EM_x_vf; + hw->mvals = ixgbe_mvals_X550EM_x; + break; + case IXGBE_DEV_ID_X550EM_A_VF: + case IXGBE_DEV_ID_X550EM_A_VF_HV: + hw->mac.type = ixgbe_mac_X550EM_a_vf; + hw->mvals = ixgbe_mvals_X550EM_a; + break; + default: + ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED; + ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, + "Unsupported device id: %x", + hw->device_id); + break; + } + + DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n", + hw->mac.type, ret_val); + return ret_val; +} + +/** + * ixgbe_init_hw - Initialize the hardware + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting and then starting the hardware + **/ +s32 ixgbe_init_hw(struct ixgbe_hw *hw) +{ + return 
ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_reset_hw - Performs a hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks and + * clears all interrupts, performs a PHY reset, and performs a MAC reset + **/ +s32 ixgbe_reset_hw(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_start_hw - Prepares hardware for Rx/Tx + * @hw: pointer to hardware structure + * + * Starts the hardware by filling the bus info structure and media type, + * clears all on chip counters, initializes receive address registers, + * multicast table, VLAN filter table, calls routine to setup link and + * flow control settings, and leaves transmit and receive units disabled + * and uninitialized. + **/ +s32 ixgbe_start_hw(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_enable_relaxed_ordering - Enables tx relaxed ordering, + * which is disabled by default in ixgbe_start_hw(); + * + * @hw: pointer to hardware structure + * + * Enable relaxed ordering; + **/ +void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.enable_relaxed_ordering) + hw->mac.ops.enable_relaxed_ordering(hw); +} + +/** + * ixgbe_clear_hw_cntrs - Clear hardware counters + * @hw: pointer to hardware structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. + **/ +s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_media_type - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw), + ixgbe_media_type_unknown); +} + +/** + * ixgbe_get_mac_addr - Get MAC address + * @hw: pointer to hardware structure + * @mac_addr: Adapter MAC address + * + * Reads the adapter's MAC address from the first Receive Address Register + * (RAR0) A reset of the adapter must have been performed prior to calling + * this function in order for the MAC address to have been loaded from the + * EEPROM into RAR0 + **/ +s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr, + (hw, mac_addr), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_san_mac_addr - Get SAN MAC address + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Reads the SAN MAC address from the EEPROM, if it's available. This is + * per-port, so set_lan_id() must be called before reading the addresses. + **/ +s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_san_mac_addr, + (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_san_mac_addr - Write a SAN MAC address + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Writes A SAN MAC address to the EEPROM. 
+ **/ +s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_san_mac_addr, + (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_device_caps - Get additional device capabilities + * @hw: pointer to hardware structure + * @device_caps: the EEPROM word for device capabilities + * + * Reads the extra device capabilities from the EEPROM + **/ +s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_device_caps, + (hw, device_caps), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from the EEPROM + * @hw: pointer to hardware structure + * @wwnn_prefix: the alternative WWNN prefix + * @wwpn_prefix: the alternative WWPN prefix + * + * This function will read the EEPROM from the alternative SAN MAC address + * block to check the support for the alternative WWNN/WWPN prefix support. + **/ +s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_wwn_prefix, + (hw, wwnn_prefix, wwpn_prefix), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_fcoe_boot_status - Get FCOE boot status from EEPROM + * @hw: pointer to hardware structure + * @bs: the fcoe boot status + * + * This function will read the FCOE boot status from the iSCSI FCOE block + **/ +s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_fcoe_boot_status, + (hw, bs), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_bus_info - Set PCI bus info + * @hw: pointer to hardware structure + * + * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure + **/ +s32 ixgbe_get_bus_info(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_num_of_tx_queues - Get Tx queues + * @hw: pointer to hardware structure + * + * Returns the number of transmit queues for the given adapter. + **/ +u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw) +{ + return hw->mac.max_tx_queues; +} + +/** + * ixgbe_get_num_of_rx_queues - Get Rx queues + * @hw: pointer to hardware structure + * + * Returns the number of receive queues for the given adapter. + **/ +u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw) +{ + return hw->mac.max_rx_queues; +} + +/** + * ixgbe_stop_adapter - Disable Rx/Tx units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, + * disables transmit and receive units. The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. + **/ +s32 ixgbe_stop_adapter(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.stop_adapter, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_pba_string - Reads part number string from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number string from the EEPROM + * @pba_num_size: part number string buffer length + * + * Reads the part number string from the EEPROM. + **/ +s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size) +{ + return ixgbe_read_pba_string_generic(hw, pba_num, pba_num_size); +} + +/** + * ixgbe_read_pba_num - Reads part number from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number from the EEPROM + * + * Reads the part number from the EEPROM. 
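+ *
+ * Editor's note: an illustrative caller sketch, not part of the base code:
+ *
+ *     u32 pba;
+ *
+ *     if (ixgbe_read_pba_num(hw, &pba) == IXGBE_SUCCESS)
+ *         DEBUGOUT("PBA number read from EEPROM\n");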
+ **/ +s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num) +{ + return ixgbe_read_pba_num_generic(hw, pba_num); +} + +/** + * ixgbe_identify_phy - Get PHY type + * @hw: pointer to hardware structure + * + * Determines the physical layer module found on the current adapter. + **/ +s32 ixgbe_identify_phy(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + + if (hw->phy.type == ixgbe_phy_unknown) { + status = ixgbe_call_func(hw, hw->phy.ops.identify, (hw), + IXGBE_NOT_IMPLEMENTED); + } + + return status; +} + +/** + * ixgbe_reset_phy - Perform a PHY reset + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + + if (hw->phy.type == ixgbe_phy_unknown) { + if (ixgbe_identify_phy(hw) != IXGBE_SUCCESS) + status = IXGBE_ERR_PHY; + } + + if (status == IXGBE_SUCCESS) { + status = ixgbe_call_func(hw, hw->phy.ops.reset, (hw), + IXGBE_NOT_IMPLEMENTED); + } + return status; +} + +/** + * ixgbe_get_phy_firmware_version - + * @hw: pointer to hardware structure + * @firmware_version: pointer to firmware version + **/ +s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version) +{ + s32 status = IXGBE_SUCCESS; + + status = ixgbe_call_func(hw, hw->phy.ops.get_firmware_version, + (hw, firmware_version), + IXGBE_NOT_IMPLEMENTED); + return status; +} + +/** + * ixgbe_read_phy_reg - Read PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + * + * Reads a value from a specified PHY register + **/ +s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data) +{ + if (hw->phy.id == 0) + ixgbe_identify_phy(hw); + + return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr, + device_type, phy_data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_phy_reg - Write PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @phy_data: Data to write to the PHY register + * + * Writes a value to specified PHY register + **/ +s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 phy_data) +{ + if (hw->phy.id == 0) + ixgbe_identify_phy(hw); + + return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr, + device_type, phy_data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_phy_link - Restart PHY autoneg + * @hw: pointer to hardware structure + * + * Restart autonegotiation and PHY and waits for completion. + **/ +s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->phy.ops.setup_link, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_internal_phy - Configure integrated PHY + * @hw: pointer to hardware structure + * + * Reconfigure the integrated PHY in order to enable talk to the external PHY. + * Returns success if not implemented, since nothing needs to be done in this + * case. + */ +s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->phy.ops.setup_internal_link, (hw), + IXGBE_SUCCESS); +} + +/** + * ixgbe_check_phy_link - Determine link and speed status + * @hw: pointer to hardware structure + * + * Reads a PHY register to determine if link is up and the current speed for + * the PHY. 
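+ *
+ * Editor's note: an illustrative caller sketch, not part of the base code;
+ * IXGBE_LINK_SPEED_* values are defined in ixgbe_type.h (assumption):
+ *
+ *     ixgbe_link_speed speed;
+ *     bool link_up;
+ *
+ *     if (ixgbe_check_phy_link(hw, &speed, &link_up) == IXGBE_SUCCESS &&
+ *         link_up)
+ *         DEBUGOUT("PHY link up\n");  /* speed holds IXGBE_LINK_SPEED_* */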
+ **/ +s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up) +{ + return ixgbe_call_func(hw, hw->phy.ops.check_link, (hw, speed, + link_up), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_phy_link_speed - Set auto advertise + * @hw: pointer to hardware structure + * @speed: new link speed + * + * Sets the auto advertised capabilities + **/ +s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed, + autoneg_wait_to_complete), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_phy_power - Control the phy power state + * @hw: pointer to hardware structure + * @on: true for on, false for off + */ +s32 ixgbe_set_phy_power(struct ixgbe_hw *hw, bool on) +{ + return ixgbe_call_func(hw, hw->phy.ops.set_phy_power, (hw, on), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_check_link - Get link and speed status + * @hw: pointer to hardware structure + * + * Reads the links register to determine if link is up and the current speed + **/ +s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + return ixgbe_call_func(hw, hw->mac.ops.check_link, (hw, speed, + link_up, link_up_wait_to_complete), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_disable_tx_laser - Disable Tx laser + * @hw: pointer to hardware structure + * + * If the driver needs to disable the laser on SFI optics. + **/ +void ixgbe_disable_tx_laser(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.disable_tx_laser) + hw->mac.ops.disable_tx_laser(hw); +} + +/** + * ixgbe_enable_tx_laser - Enable Tx laser + * @hw: pointer to hardware structure + * + * If the driver needs to enable the laser on SFI optics. + **/ +void ixgbe_enable_tx_laser(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.enable_tx_laser) + hw->mac.ops.enable_tx_laser(hw); +} + +/** + * ixgbe_flap_tx_laser - flap Tx laser to start autotry process + * @hw: pointer to hardware structure + * + * When the driver changes the link speeds that it can support then + * flap the tx laser to alert the link partner to start autotry + * process on its end. + **/ +void ixgbe_flap_tx_laser(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.flap_tx_laser) + hw->mac.ops.flap_tx_laser(hw); +} + +/** + * ixgbe_setup_link - Set link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * + * Configures link settings. Restarts the link. + * Performs autonegotiation if needed. + **/ +s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw, speed, + autoneg_wait_to_complete), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_mac_link - Set link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * + * Configures link settings. Restarts the link. + * Performs autonegotiation if needed. + **/ +s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + return ixgbe_call_func(hw, hw->mac.ops.setup_mac_link, (hw, speed, + autoneg_wait_to_complete), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_link_capabilities - Returns link capabilities + * @hw: pointer to hardware structure + * + * Determines the link capabilities of the current configuration. 
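+ *
+ * Editor's note: an illustrative caller sketch, not part of the base code.
+ * It advertises everything the current configuration supports and restarts
+ * the link, waiting for autonegotiation to complete:
+ *
+ *     ixgbe_link_speed speed;
+ *     bool autoneg;
+ *
+ *     if (ixgbe_get_link_capabilities(hw, &speed, &autoneg) == IXGBE_SUCCESS)
+ *         ixgbe_setup_link(hw, speed, true);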
+ **/ +s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *autoneg) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_link_capabilities, (hw, + speed, autoneg), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_led_on - Turn on LEDs + * @hw: pointer to hardware structure + * @index: led number to turn on + * + * Turns on the software controllable LEDs. + **/ +s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_led_off - Turn off LEDs + * @hw: pointer to hardware structure + * @index: led number to turn off + * + * Turns off the software controllable LEDs. + **/ +s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.led_off, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_blink_led_start - Blink LEDs + * @hw: pointer to hardware structure + * @index: led number to blink + * + * Blink LED based on index. + **/ +s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.blink_led_start, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_blink_led_stop - Stop blinking LEDs + * @hw: pointer to hardware structure + * + * Stop blinking LED based on index. + **/ +s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.blink_led_stop, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_init_eeprom_params - Initialize EEPROM parameters + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. + **/ +s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.init_params, (hw), + IXGBE_NOT_IMPLEMENTED); +} + + +/** + * ixgbe_write_eeprom - Write word to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @data: 16 bit word to be written to the EEPROM + * + * Writes 16 bit value to EEPROM. If ixgbe_eeprom_update_checksum is not + * called after this function, the EEPROM will most likely contain an + * invalid checksum. + **/ +s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.write, (hw, offset, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_eeprom_buffer - Write word(s) to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @data: 16 bit word(s) to be written to the EEPROM + * @words: number of words + * + * Writes 16 bit word(s) to EEPROM. If ixgbe_eeprom_update_checksum is not + * called after this function, the EEPROM will most likely contain an + * invalid checksum. 
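+ *
+ * Editor's note: an illustrative caller sketch, not part of the base code;
+ * the offset and data values are arbitrary. It pairs the write with the
+ * checksum update described above:
+ *
+ *     u16 words[2] = { 0x1234, 0x5678 };
+ *
+ *     if (ixgbe_write_eeprom_buffer(hw, 0x2F, 2, words) == IXGBE_SUCCESS)
+ *         ixgbe_update_eeprom_checksum(hw);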
+ **/
+s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.write_buffer,
+ (hw, offset, words, data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_eeprom - Read word from EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @data: read 16 bit value from EEPROM
+ *
+ * Reads 16 bit value from EEPROM
+ **/
+s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.read, (hw, offset, data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_eeprom_buffer - Read word(s) from EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @data: read 16 bit word(s) from EEPROM
+ * @words: number of words
+ *
+ * Reads 16 bit word(s) from EEPROM
+ **/
+s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.read_buffer,
+ (hw, offset, words, data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum
+ **/
+s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.validate_checksum,
+ (hw, checksum_val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_update_eeprom_checksum - Updates the EEPROM checksum
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.update_checksum, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_insert_mac_addr - Find a RAR for this mac address
+ * @hw: pointer to hardware structure
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq pool to assign
+ *
+ * Puts an ethernet address into a receive address register, or
+ * finds the rar that it is already in; adds to the pool list
+ **/
+s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.insert_mac_addr,
+ (hw, addr, vmdq),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_rar - Set Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq "set"
+ * @enable_addr: set flag that address is active
+ *
+ * Puts an ethernet address into a receive address register.
+ **/
+s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_rar, (hw, index, addr, vmdq,
+ enable_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_clear_rar - Clear Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to clear
+ *
+ * Clears an ethernet address from a receive address register.

+ **/ +s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.clear_rar, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_vmdq - Associate a VMDq index with a receive address + * @hw: pointer to hardware structure + * @rar: receive address register index to associate with VMDq index + * @vmdq: VMDq set or pool index + **/ +s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq), + IXGBE_NOT_IMPLEMENTED); + +} + +/** + * ixgbe_set_vmdq_san_mac - Associate VMDq index 127 with a receive address + * @hw: pointer to hardware structure + * @vmdq: VMDq default pool index + **/ +s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_vmdq_san_mac, + (hw, vmdq), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address + * @hw: pointer to hardware structure + * @rar: receive address register index to disassociate with VMDq index + * @vmdq: VMDq set or pool index + **/ +s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + return ixgbe_call_func(hw, hw->mac.ops.clear_vmdq, (hw, rar, vmdq), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_init_rx_addrs - Initializes receive address filters. + * @hw: pointer to hardware structure + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + **/ +s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.init_rx_addrs, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_num_rx_addrs - Returns the number of RAR entries. + * @hw: pointer to hardware structure + **/ +u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw) +{ + return hw->mac.num_rar_entries; +} + +/** + * ixgbe_update_uc_addr_list - Updates the MAC's list of secondary addresses + * @hw: pointer to hardware structure + * @addr_list: the list of new multicast addresses + * @addr_count: number of addresses + * @func: iterator function to walk the multicast address list + * + * The given list replaces any existing list. Clears the secondary addrs from + * receive address registers. Uses unused receive address registers for the + * first secondary addresses, and falls back to promiscuous mode as needed. + **/ +s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr func) +{ + return ixgbe_call_func(hw, hw->mac.ops.update_uc_addr_list, (hw, + addr_list, addr_count, func), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_update_mc_addr_list - Updates the MAC's list of multicast addresses + * @hw: pointer to hardware structure + * @mc_addr_list: the list of new multicast addresses + * @mc_addr_count: number of addresses + * @func: iterator function to walk the multicast address list + * + * The given list replaces any existing list. Clears the MC addrs from receive + * address registers and the multicast table. Uses unused receive address + * registers for the first multicast addresses, and hashes the rest into the + * multicast table. 
+ **/ +s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr func, + bool clear) +{ + return ixgbe_call_func(hw, hw->mac.ops.update_mc_addr_list, (hw, + mc_addr_list, mc_addr_count, func, clear), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_enable_mc - Enable multicast address in RAR + * @hw: pointer to hardware structure + * + * Enables multicast address in RAR and the use of the multicast hash table. + **/ +s32 ixgbe_enable_mc(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.enable_mc, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_disable_mc - Disable multicast address in RAR + * @hw: pointer to hardware structure + * + * Disables multicast address in RAR and the use of the multicast hash table. + **/ +s32 ixgbe_disable_mc(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.disable_mc, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_clear_vfta - Clear VLAN filter table + * @hw: pointer to hardware structure + * + * Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +s32 ixgbe_clear_vfta(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.clear_vfta, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_vfta - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFTA + * @vlan_on: boolean flag to turn on/off VLAN in VFTA + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind, + vlan_on), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_vlvf - Set VLAN Pool Filter + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFVFB + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * @vfta_changed: pointer to boolean flag which indicates whether VFTA + * should be changed + * + * Turn on/off specified bit in VLVF table. + **/ +s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, + bool *vfta_changed) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_vlvf, (hw, vlan, vind, + vlan_on, vfta_changed), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_fc_enable - Enable flow control + * @hw: pointer to hardware structure + * + * Configures the flow control settings based on SW configuration. + **/ +s32 ixgbe_fc_enable(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_fc - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. 
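+ *
+ * Editor's note: an illustrative init-time sketch, not part of the base
+ * code; hw->fc.requested_mode and ixgbe_fc_full are assumed to come from
+ * ixgbe_type.h:
+ *
+ *     hw->fc.requested_mode = ixgbe_fc_full;
+ *     if (ixgbe_setup_fc(hw) == IXGBE_SUCCESS)
+ *         ixgbe_fc_enable(hw);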
+ **/
+s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.setup_fc, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_fw_drv_ver - Try to send the driver version number to FW
+ * @hw: pointer to hardware structure
+ * @maj: driver major number to be sent to firmware
+ * @min: driver minor number to be sent to firmware
+ * @build: driver build number to be sent to firmware
+ * @ver: driver version number to be sent to firmware
+ **/
+s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
+ u8 ver)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_fw_drv_ver, (hw, maj, min,
+ build, ver), IXGBE_NOT_IMPLEMENTED);
+}
+
+
+/**
+ * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
+ * @hw: pointer to hardware structure
+ *
+ * Updates the temperatures in mac.thermal_sensor_data
+ **/
+s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_thermal_sensor_data, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ *
+ * Inits the thermal sensor thresholds according to the NVM map
+ **/
+s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.init_thermal_sensor_thresh, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_dmac_config - Configure DMA Coalescing registers.
+ * @hw: pointer to hardware structure
+ *
+ * Configure DMA coalescing. If enabling dmac, dmac is activated.
+ * When disabling dmac, the dmac enable bit is cleared.
+ **/
+s32 ixgbe_dmac_config(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.dmac_config, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_dmac_update_tcs - Configure DMA Coalescing registers.
+ * @hw: pointer to hardware structure
+ *
+ * Disables dmac, updates per TC settings, and then enables dmac.
+ **/
+s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.dmac_update_tcs, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_dmac_config_tcs - Configure DMA Coalescing registers.
+ * @hw: pointer to hardware structure
+ *
+ * Configure DMA coalescing threshold per TC and set high priority bit for
+ * FCOE TC. The dmac enable bit must be cleared before configuring.
+ **/
+s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.dmac_config_tcs, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_setup_eee - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @enable_eee: boolean flag to enable EEE
+ *
+ * Enable/disable EEE based on enable_eee flag.
+ * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C
+ * are modified.
+ * + **/ +s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee) +{ + return ixgbe_call_func(hw, hw->mac.ops.setup_eee, (hw, enable_eee), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_source_address_pruning - Enable/Disable source address pruning + * @hw: pointer to hardware structure + * @enbale: enable or disable source address pruning + * @pool: Rx pool - Rx pool to toggle source address pruning + **/ +void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable, + unsigned int pool) +{ + if (hw->mac.ops.set_source_address_pruning) + hw->mac.ops.set_source_address_pruning(hw, enable, pool); +} + +/** + * ixgbe_set_ethertype_anti_spoofing - Enable/Disable Ethertype anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for Ethertype anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing + * + **/ +void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) +{ + if (hw->mac.ops.set_ethertype_anti_spoofing) + hw->mac.ops.set_ethertype_anti_spoofing(hw, enable, vf); +} + +/** + * ixgbe_read_iosf_sb_reg - Read 32 bit PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @device_type: type of device you want to communicate with + * @phy_data: Pointer to read data from PHY register + * + * Reads a value from a specified PHY register + **/ +s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *phy_data) +{ + return ixgbe_call_func(hw, hw->mac.ops.read_iosf_sb_reg, (hw, reg_addr, + device_type, phy_data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_iosf_sb_reg - Write 32 bit register through IOSF Sideband + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: type of device you want to communicate with + * @phy_data: Data to write to the PHY register + * + * Writes a value to specified PHY register + **/ +s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 phy_data) +{ + return ixgbe_call_func(hw, hw->mac.ops.write_iosf_sb_reg, (hw, reg_addr, + device_type, phy_data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_disable_mdd - Disable malicious driver detection + * @hw: pointer to hardware structure + * + **/ +void ixgbe_disable_mdd(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.disable_mdd) + hw->mac.ops.disable_mdd(hw); +} + +/** + * ixgbe_enable_mdd - Enable malicious driver detection + * @hw: pointer to hardware structure + * + **/ +void ixgbe_enable_mdd(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.enable_mdd) + hw->mac.ops.enable_mdd(hw); +} + +/** + * ixgbe_mdd_event - Handle malicious driver detection event + * @hw: pointer to hardware structure + * @vf_bitmap: vf bitmap of malicious vfs + * + **/ +void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap) +{ + if (hw->mac.ops.mdd_event) + hw->mac.ops.mdd_event(hw, vf_bitmap); +} + +/** + * ixgbe_restore_mdd_vf - Restore VF that was disabled during malicious driver + * detection event + * @hw: pointer to hardware structure + * @vf: vf index + * + **/ +void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf) +{ + if (hw->mac.ops.restore_mdd_vf) + hw->mac.ops.restore_mdd_vf(hw, vf); +} + +/** + * ixgbe_enter_lplu - Transition to low power states + * @hw: pointer to hardware structure + * + * Configures Low Power Link Up on transition to low power states + * (from D0 to non-D0). 
+ **/ +s32 ixgbe_enter_lplu(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->phy.ops.enter_lplu, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_handle_lasi - Handle external Base T PHY interrupt + * @hw: pointer to hardware structure + * + * Handle external Base T PHY interrupt. If high temperature + * failure alarm then return error, else if link status change + * then setup internal/external PHY link + * + * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature + * failure alarm, else return PHY access status. + */ +s32 ixgbe_handle_lasi(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->phy.ops.handle_lasi, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_analog_reg8 - Reads 8 bit analog register + * @hw: pointer to hardware structure + * @reg: analog register to read + * @val: read value + * + * Performs write operation to analog register specified. + **/ +s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val) +{ + return ixgbe_call_func(hw, hw->mac.ops.read_analog_reg8, (hw, reg, + val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_analog_reg8 - Writes 8 bit analog register + * @hw: pointer to hardware structure + * @reg: analog register to write + * @val: value to write + * + * Performs write operation to Atlas analog register specified. + **/ +s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val) +{ + return ixgbe_call_func(hw, hw->mac.ops.write_analog_reg8, (hw, reg, + val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_init_uta_tables - Initializes Unicast Table Arrays. + * @hw: pointer to hardware structure + * + * Initializes the Unicast Table Arrays to zero on device load. This + * is part of the Rx init addr execution path. + **/ +s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.init_uta_tables, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_i2c_byte - Reads 8 bit word over I2C at specified device address + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: I2C bus address to read from + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, + u8 *data) +{ + return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte, (hw, byte_offset, + dev_addr, data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_i2c_byte_unlocked - Reads 8 bit word via I2C from device address + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: I2C bus address to read from + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_read_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte_unlocked, + (hw, byte_offset, dev_addr, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_link - Perform read operation on link device + * @hw: pointer to the hardware structure + * @addr: bus address to read from + * @reg: device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. 
+ */ +s32 ixgbe_read_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val) +{ + return ixgbe_call_func(hw, hw->link.ops.read_link, (hw, addr, + reg, val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_link_unlocked - Perform read operation on link device + * @hw: pointer to the hardware structure + * @addr: bus address to read from + * @reg: device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. + **/ +s32 ixgbe_read_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val) +{ + return ixgbe_call_func(hw, hw->link.ops.read_link_unlocked, + (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_i2c_byte - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: I2C bus address to write to + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface + * at a specified device address. + **/ +s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, + u8 data) +{ + return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte, (hw, byte_offset, + dev_addr, data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_i2c_byte_unlocked - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: I2C bus address to write to + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface + * at a specified device address. + **/ +s32 ixgbe_write_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte_unlocked, + (hw, byte_offset, dev_addr, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_link - Perform write operation on link device + * @hw: pointer to the hardware structure + * @addr: bus address to write to + * @reg: device register to write to + * @val: value to write + * + * Returns an error code on error. + */ +s32 ixgbe_write_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val) +{ + return ixgbe_call_func(hw, hw->link.ops.write_link, + (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_link_unlocked - Perform write operation on link device + * @hw: pointer to the hardware structure + * @addr: bus address to write to + * @reg: device register to write to + * @val: value to write + * + * Returns an error code on error. + **/ +s32 ixgbe_write_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val) +{ + return ixgbe_call_func(hw, hw->link.ops.write_link_unlocked, + (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to write + * @eeprom_data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, + u8 byte_offset, u8 eeprom_data) +{ + return ixgbe_call_func(hw, hw->phy.ops.write_i2c_eeprom, + (hw, byte_offset, eeprom_data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. 
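+ *
+ * Editor's note: an illustrative caller sketch, not part of the base code;
+ * byte offset 0 is assumed to be the SFF identifier byte of the module
+ * EEPROM:
+ *
+ *     u8 id;
+ *
+ *     if (ixgbe_read_i2c_eeprom(hw, 0, &id) == IXGBE_SUCCESS)
+ *         DEBUGOUT("SFP identifier byte read\n");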
+ **/ +s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data) +{ + return ixgbe_call_func(hw, hw->phy.ops.read_i2c_eeprom, + (hw, byte_offset, eeprom_data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_supported_physical_layer - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. + **/ +u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer, + (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN); +} + +/** + * ixgbe_enable_rx_dma - Enables Rx DMA unit, dependent on device specifics + * @hw: pointer to hardware structure + * @regval: bitfield to write to the Rx DMA register + * + * Enables the Rx DMA unit of the device. + **/ +s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval) +{ + return ixgbe_call_func(hw, hw->mac.ops.enable_rx_dma, + (hw, regval), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_disable_sec_rx_path - Stops the receive data path + * @hw: pointer to hardware structure + * + * Stops the receive data path. + **/ +s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.disable_sec_rx_path, + (hw), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_enable_sec_rx_path - Enables the receive data path + * @hw: pointer to hardware structure + * + * Enables the receive data path. + **/ +s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.enable_sec_rx_path, + (hw), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_acquire_swfw_semaphore - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore through SW_FW_SYNC register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask) +{ + return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync, + (hw, mask), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_release_swfw_semaphore - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore through SW_FW_SYNC register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask) +{ + if (hw->mac.ops.release_swfw_sync) + hw->mac.ops.release_swfw_sync(hw, mask); +} + + +void ixgbe_disable_rx(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.disable_rx) + hw->mac.ops.disable_rx(hw); +} + +void ixgbe_enable_rx(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.enable_rx) + hw->mac.ops.enable_rx(hw); +} + +/** + * ixgbe_set_rate_select_speed - Set module link speed + * @hw: pointer to hardware structure + * @speed: link speed to set + * + * Set module link speed via the rate select. + */ +void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) +{ + if (hw->mac.ops.set_rate_select_speed) + hw->mac.ops.set_rate_select_speed(hw, speed); +} diff --git a/drivers/net/ixgbe/base/ixgbe_api.h b/drivers/net/ixgbe/base/ixgbe_api.h new file mode 100644 index 00000000..ae26a6ac --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_api.h @@ -0,0 +1,219 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _IXGBE_API_H_ +#define _IXGBE_API_H_ + +#include "ixgbe_type.h" + +void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map); + +s32 ixgbe_init_shared_code(struct ixgbe_hw *hw); + +extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw); + +s32 ixgbe_set_mac_type(struct ixgbe_hw *hw); +s32 ixgbe_init_hw(struct ixgbe_hw *hw); +s32 ixgbe_reset_hw(struct ixgbe_hw *hw); +s32 ixgbe_start_hw(struct ixgbe_hw *hw); +void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw); +s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw); +enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw); +s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr); +s32 ixgbe_get_bus_info(struct ixgbe_hw *hw); +u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw); +u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw); +s32 ixgbe_stop_adapter(struct ixgbe_hw *hw); +s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num); +s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size); + +s32 ixgbe_identify_phy(struct ixgbe_hw *hw); +s32 ixgbe_reset_phy(struct ixgbe_hw *hw); +s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data); +s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 phy_data); + +s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw); +s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw); +s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up); +s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_set_phy_power(struct ixgbe_hw *, bool on); +void ixgbe_disable_tx_laser(struct ixgbe_hw *hw); +void ixgbe_enable_tx_laser(struct ixgbe_hw *hw); +void 
ixgbe_flap_tx_laser(struct ixgbe_hw *hw); +s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete); +s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *autoneg); +s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index); + +s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw); +s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data); +s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); + +s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val); +s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw); + +s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); +s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr); +s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq); +s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw); +u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw); +s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr func); +s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr func, + bool clear); +void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr_list, u32 vmdq); +s32 ixgbe_enable_mc(struct ixgbe_hw *hw); +s32 ixgbe_disable_mc(struct ixgbe_hw *hw); +s32 ixgbe_clear_vfta(struct ixgbe_hw *hw); +s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, + u32 vind, bool vlan_on); +s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool *vfta_changed); +s32 ixgbe_fc_enable(struct ixgbe_hw *hw); +s32 ixgbe_setup_fc(struct ixgbe_hw *hw); +s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, + u8 ver); +s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw); +s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw); +void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr); +s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, + u16 *firmware_version); +s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val); +s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val); +s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw); +s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); +u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval); +s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw); +s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw); +s32 ixgbe_mng_fw_enabled(struct ixgbe_hw *hw); +s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); +s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl); +s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl, + bool cloud_mode); +void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, + 
union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common, + u8 queue); +s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input_mask, bool cloud_mode); +s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id, u8 queue, bool cloud_mode); +s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id); +s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + union ixgbe_atr_input *mask, + u16 soft_id, + u8 queue, + bool cloud_mode); +void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, + union ixgbe_atr_input *mask); +u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common); +bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); +s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, + u8 *data); +s32 ixgbe_read_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 ixgbe_read_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val); +s32 ixgbe_read_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val); +s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, + u8 data); +void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue); +s32 ixgbe_write_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +s32 ixgbe_write_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val); +s32 ixgbe_write_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val); +s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data); +s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr); +s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr); +s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps); +s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask); +s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix); +s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs); +s32 ixgbe_dmac_config(struct ixgbe_hw *hw); +s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw); +s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw); +s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee); +void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable, + unsigned int vf); +void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable, + int vf); +s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *phy_data); +s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 phy_data); +void ixgbe_disable_mdd(struct ixgbe_hw *hw); +void ixgbe_enable_mdd(struct ixgbe_hw *hw); +void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap); +void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf); +s32 ixgbe_enter_lplu(struct ixgbe_hw *hw); +s32 ixgbe_handle_lasi(struct ixgbe_hw *hw); +void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed); +void ixgbe_disable_rx(struct ixgbe_hw *hw); +void ixgbe_enable_rx(struct ixgbe_hw *hw); + +#endif /* _IXGBE_API_H_ */ diff --git a/drivers/net/ixgbe/base/ixgbe_common.c b/drivers/net/ixgbe/base/ixgbe_common.c new file mode 100644 index 00000000..ec61408d --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_common.c @@ -0,0 +1,5142 @@ 
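The ixgbe_api.h surface above is a thin dispatch layer over per-MAC function tables. As a rough orientation aid, the sketch below shows how a consumer might drive it; it is not part of this patch, the helper name is hypothetical, and it assumes the device registers are already mapped and hw->device_id has been set by the caller, as the PMD glue does before entering the shared code.

    /*
     * Illustrative sketch, not part of the ixgbe sources: a typical
     * bring-up path through the dispatch API declared above.
     */
    #include "ixgbe_api.h"

    static s32 example_bring_up(struct ixgbe_hw *hw)
    {
        ixgbe_link_speed speed;
        bool link_up;
        s32 err;

        /* selects the 82598/82599/X540/X550 ops tables */
        err = ixgbe_init_shared_code(hw);
        if (err != IXGBE_SUCCESS)
            return err;

        /* the generic path runs reset_hw() followed by start_hw() */
        err = ixgbe_init_hw(hw);
        if (err != IXGBE_SUCCESS)
            return err;

        /* one non-blocking link poll */
        return ixgbe_check_link(hw, &speed, &link_up, false);
    }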
+/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#include "ixgbe_common.h" +#include "ixgbe_phy.h" +#include "ixgbe_dcb.h" +#include "ixgbe_dcb_82599.h" +#include "ixgbe_api.h" + +STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); +STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); +STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); +STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); +STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw); +STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, + u16 count); +STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); +STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); +STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); +STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw); + +STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); +STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, + u16 *san_mac_offset); +STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, + u16 offset); + +/** + * ixgbe_init_ops_generic - Inits function ptrs + * @hw: pointer to the hardware structure + * + * Initialize the function pointers. 
+ **/ +s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + struct ixgbe_mac_info *mac = &hw->mac; + u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + DEBUGFUNC("ixgbe_init_ops_generic"); + + /* EEPROM */ + eeprom->ops.init_params = ixgbe_init_eeprom_params_generic; + /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */ + if (eec & IXGBE_EEC_PRES) { + eeprom->ops.read = ixgbe_read_eerd_generic; + eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic; + } else { + eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic; + eeprom->ops.read_buffer = + ixgbe_read_eeprom_buffer_bit_bang_generic; + } + eeprom->ops.write = ixgbe_write_eeprom_generic; + eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic; + eeprom->ops.validate_checksum = + ixgbe_validate_eeprom_checksum_generic; + eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic; + eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic; + + /* MAC */ + mac->ops.init_hw = ixgbe_init_hw_generic; + mac->ops.reset_hw = NULL; + mac->ops.start_hw = ixgbe_start_hw_generic; + mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic; + mac->ops.get_media_type = NULL; + mac->ops.get_supported_physical_layer = NULL; + mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic; + mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic; + mac->ops.stop_adapter = ixgbe_stop_adapter_generic; + mac->ops.get_bus_info = ixgbe_get_bus_info_generic; + mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie; + mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync; + mac->ops.release_swfw_sync = ixgbe_release_swfw_sync; + mac->ops.prot_autoc_read = prot_autoc_read_generic; + mac->ops.prot_autoc_write = prot_autoc_write_generic; + + /* LEDs */ + mac->ops.led_on = ixgbe_led_on_generic; + mac->ops.led_off = ixgbe_led_off_generic; + mac->ops.blink_led_start = ixgbe_blink_led_start_generic; + mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic; + + /* RAR, Multicast, VLAN */ + mac->ops.set_rar = ixgbe_set_rar_generic; + mac->ops.clear_rar = ixgbe_clear_rar_generic; + mac->ops.insert_mac_addr = NULL; + mac->ops.set_vmdq = NULL; + mac->ops.clear_vmdq = NULL; + mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic; + mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic; + mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic; + mac->ops.enable_mc = ixgbe_enable_mc_generic; + mac->ops.disable_mc = ixgbe_disable_mc_generic; + mac->ops.clear_vfta = NULL; + mac->ops.set_vfta = NULL; + mac->ops.set_vlvf = NULL; + mac->ops.init_uta_tables = NULL; + mac->ops.enable_rx = ixgbe_enable_rx_generic; + mac->ops.disable_rx = ixgbe_disable_rx_generic; + + /* Flow Control */ + mac->ops.fc_enable = ixgbe_fc_enable_generic; + mac->ops.setup_fc = ixgbe_setup_fc_generic; + + /* Link */ + mac->ops.get_link_capabilities = NULL; + mac->ops.setup_link = NULL; + mac->ops.check_link = NULL; + mac->ops.dmac_config = NULL; + mac->ops.dmac_update_tcs = NULL; + mac->ops.dmac_config_tcs = NULL; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation + * of flow control + * @hw: pointer to hardware structure + * + * This function returns true if the device supports flow control + * autonegotiation, and false if it does not. 
+ * + **/ +bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) +{ + bool supported = false; + ixgbe_link_speed speed; + bool link_up; + + DEBUGFUNC("ixgbe_device_supports_autoneg_fc"); + + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber_qsfp: + case ixgbe_media_type_fiber: + hw->mac.ops.check_link(hw, &speed, &link_up, false); + /* if link is down, assume supported */ + if (link_up) + supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? + true : false; + else + supported = true; + break; + case ixgbe_media_type_backplane: + supported = true; + break; + case ixgbe_media_type_copper: + /* only some copper devices support flow control autoneg */ + switch (hw->device_id) { + case IXGBE_DEV_ID_82599_T3_LOM: + case IXGBE_DEV_ID_X540T: + case IXGBE_DEV_ID_X540T1: + case IXGBE_DEV_ID_X550T: + case IXGBE_DEV_ID_X550T1: + case IXGBE_DEV_ID_X550EM_X_10G_T: + supported = true; + break; + default: + supported = false; + } + default: + break; + } + + ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, + "Device %x does not support flow control autoneg", + hw->device_id); + return supported; +} + +/** + * ixgbe_setup_fc_generic - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + **/ +s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + u32 reg = 0, reg_bp = 0; + u16 reg_cu = 0; + bool locked = false; + + DEBUGFUNC("ixgbe_setup_fc_generic"); + + /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* + * 10gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. + */ + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + /* + * Set up the 1G and 10G flow control advertisement registers so the + * HW will be able to do fc autoneg once the cable is plugged in. If + * we link at 10G, the 1G advertisement is harmless and vice versa. + */ + switch (hw->phy.media_type) { + case ixgbe_media_type_backplane: + /* some MAC's need RMW protection on AUTOC */ + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, ®_bp); + if (ret_val != IXGBE_SUCCESS) + goto out; + + /* only backplane uses autoc so fall though */ + case ixgbe_media_type_fiber_qsfp: + case ixgbe_media_type_fiber: + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + + break; + case ixgbe_media_type_copper: + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu); + break; + default: + break; + } + + /* + * The possible values of fc.requested_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.requested_mode) { + case ixgbe_fc_none: + /* Flow control completely disabled by software override. 
*/ + reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); + if (hw->phy.media_type == ixgbe_media_type_backplane) + reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE | + IXGBE_AUTOC_ASM_PAUSE); + else if (hw->phy.media_type == ixgbe_media_type_copper) + reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + reg |= IXGBE_PCS1GANA_ASM_PAUSE; + reg &= ~IXGBE_PCS1GANA_SYM_PAUSE; + if (hw->phy.media_type == ixgbe_media_type_backplane) { + reg_bp |= IXGBE_AUTOC_ASM_PAUSE; + reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE; + } else if (hw->phy.media_type == ixgbe_media_type_copper) { + reg_cu |= IXGBE_TAF_ASM_PAUSE; + reg_cu &= ~IXGBE_TAF_SYM_PAUSE; + } + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE, as such we fall + * through to the fc_full statement. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE; + if (hw->phy.media_type == ixgbe_media_type_backplane) + reg_bp |= IXGBE_AUTOC_SYM_PAUSE | + IXGBE_AUTOC_ASM_PAUSE; + else if (hw->phy.media_type == ixgbe_media_type_copper) + reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE; + break; + default: + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; + } + + if (hw->mac.type < ixgbe_mac_X540) { + /* + * Enable auto-negotiation between the MAC & PHY; + * the MAC will advertise clause 37 flow control. + */ + IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); + + /* Disable AN timeout */ + if (hw->fc.strict_ieee) + reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; + + IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); + DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg); + } + + /* + * AUTOC restart handles negotiation of 1G and 10G on backplane + * and copper. There is no need to set the PCS1GCTL register. 
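The requested_mode values enumerated in ixgbe_setup_fc_generic() above correspond to the ixgbe_fc_* constants used throughout this file, and ixgbe_setup_fc() resolves through hw->mac.ops.setup_fc, which ixgbe_init_ops_generic() points at the routine shown here. A hypothetical caller asking for symmetric PAUSE would look roughly like this sketch (illustrative only):

    /*
     * Illustrative sketch, not part of the ixgbe sources. The hw->fc
     * fields set here are the ones consumed by ixgbe_setup_fc_generic().
     */
    #include "ixgbe_api.h"

    static s32 example_request_symmetric_pause(struct ixgbe_hw *hw)
    {
        hw->fc.requested_mode = ixgbe_fc_full; /* advertise Rx and Tx PAUSE */
        hw->fc.strict_ieee = false;            /* leave the AN timeout enabled */

        /* resolves through hw->mac.ops.setup_fc */
        return ixgbe_setup_fc(hw);
    }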
+ * + */ + if (hw->phy.media_type == ixgbe_media_type_backplane) { + reg_bp |= IXGBE_AUTOC_AN_RESTART; + ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked); + if (ret_val) + goto out; + } else if ((hw->phy.media_type == ixgbe_media_type_copper) && + (ixgbe_device_supports_autoneg_fc(hw))) { + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu); + } + + DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg); +out: + return ret_val; +} + +/** + * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware by filling the bus info structure and media type, clears + * all on chip counters, initializes receive address registers, multicast + * table, VLAN filter table, calls routine to set up link and flow control + * settings, and leaves transmit and receive units disabled and uninitialized + **/ +s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) +{ + s32 ret_val; + u32 ctrl_ext; + + DEBUGFUNC("ixgbe_start_hw_generic"); + + /* Set the media type */ + hw->phy.media_type = hw->mac.ops.get_media_type(hw); + + /* PHY ops initialization must be done in reset_hw() */ + + /* Clear the VLAN filter table */ + hw->mac.ops.clear_vfta(hw); + + /* Clear statistics registers */ + hw->mac.ops.clear_hw_cntrs(hw); + + /* Set No Snoop Disable */ + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS; + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + IXGBE_WRITE_FLUSH(hw); + + /* Setup flow control */ + ret_val = ixgbe_setup_fc(hw); + if (ret_val != IXGBE_SUCCESS) + goto out; + + /* Clear adapter stopped flag */ + hw->adapter_stopped = false; + +out: + return ret_val; +} + +/** + * ixgbe_start_hw_gen2 - Init sequence for common device family + * @hw: pointer to hw structure + * + * Performs the init sequence common to the second generation + * of 10 GbE devices. 
+ * Devices in the second generation: + * 82599 + * X540 + **/ +s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) +{ + u32 i; + u32 regval; + + /* Clear the rate limiters */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0); + } + IXGBE_WRITE_FLUSH(hw); + + /* Disable relaxed ordering */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); + regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); + } + + for (i = 0; i < hw->mac.max_rx_queues; i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | + IXGBE_DCA_RXCTRL_HEAD_WRO_EN); + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_hw_generic - Generic hardware initialization + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting the hardware, filling the bus info + * structure and media type, clears all on chip counters, initializes receive + * address registers, multicast table, VLAN filter table, calls routine to set + * up link and flow control settings, and leaves transmit and receive units + * disabled and uninitialized + **/ +s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) +{ + s32 status; + + DEBUGFUNC("ixgbe_init_hw_generic"); + + /* Reset the hardware */ + status = hw->mac.ops.reset_hw(hw); + + if (status == IXGBE_SUCCESS) { + /* Start the HW */ + status = hw->mac.ops.start_hw(hw); + } + + return status; +} + +/** + * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters + * @hw: pointer to hardware structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. 
+ **/ +s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) +{ + u16 i = 0; + + DEBUGFUNC("ixgbe_clear_hw_cntrs_generic"); + + IXGBE_READ_REG(hw, IXGBE_CRCERRS); + IXGBE_READ_REG(hw, IXGBE_ILLERRC); + IXGBE_READ_REG(hw, IXGBE_ERRBC); + IXGBE_READ_REG(hw, IXGBE_MSPDC); + for (i = 0; i < 8; i++) + IXGBE_READ_REG(hw, IXGBE_MPC(i)); + + IXGBE_READ_REG(hw, IXGBE_MLFC); + IXGBE_READ_REG(hw, IXGBE_MRFC); + IXGBE_READ_REG(hw, IXGBE_RLEC); + IXGBE_READ_REG(hw, IXGBE_LXONTXC); + IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); + if (hw->mac.type >= ixgbe_mac_82599EB) { + IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); + IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); + } else { + IXGBE_READ_REG(hw, IXGBE_LXONRXC); + IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + } + + for (i = 0; i < 8; i++) { + IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); + if (hw->mac.type >= ixgbe_mac_82599EB) { + IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); + } else { + IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + } + } + if (hw->mac.type >= ixgbe_mac_82599EB) + for (i = 0; i < 8; i++) + IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); + IXGBE_READ_REG(hw, IXGBE_PRC64); + IXGBE_READ_REG(hw, IXGBE_PRC127); + IXGBE_READ_REG(hw, IXGBE_PRC255); + IXGBE_READ_REG(hw, IXGBE_PRC511); + IXGBE_READ_REG(hw, IXGBE_PRC1023); + IXGBE_READ_REG(hw, IXGBE_PRC1522); + IXGBE_READ_REG(hw, IXGBE_GPRC); + IXGBE_READ_REG(hw, IXGBE_BPRC); + IXGBE_READ_REG(hw, IXGBE_MPRC); + IXGBE_READ_REG(hw, IXGBE_GPTC); + IXGBE_READ_REG(hw, IXGBE_GORCL); + IXGBE_READ_REG(hw, IXGBE_GORCH); + IXGBE_READ_REG(hw, IXGBE_GOTCL); + IXGBE_READ_REG(hw, IXGBE_GOTCH); + if (hw->mac.type == ixgbe_mac_82598EB) + for (i = 0; i < 8; i++) + IXGBE_READ_REG(hw, IXGBE_RNBC(i)); + IXGBE_READ_REG(hw, IXGBE_RUC); + IXGBE_READ_REG(hw, IXGBE_RFC); + IXGBE_READ_REG(hw, IXGBE_ROC); + IXGBE_READ_REG(hw, IXGBE_RJC); + IXGBE_READ_REG(hw, IXGBE_MNGPRC); + IXGBE_READ_REG(hw, IXGBE_MNGPDC); + IXGBE_READ_REG(hw, IXGBE_MNGPTC); + IXGBE_READ_REG(hw, IXGBE_TORL); + IXGBE_READ_REG(hw, IXGBE_TORH); + IXGBE_READ_REG(hw, IXGBE_TPR); + IXGBE_READ_REG(hw, IXGBE_TPT); + IXGBE_READ_REG(hw, IXGBE_PTC64); + IXGBE_READ_REG(hw, IXGBE_PTC127); + IXGBE_READ_REG(hw, IXGBE_PTC255); + IXGBE_READ_REG(hw, IXGBE_PTC511); + IXGBE_READ_REG(hw, IXGBE_PTC1023); + IXGBE_READ_REG(hw, IXGBE_PTC1522); + IXGBE_READ_REG(hw, IXGBE_MPTC); + IXGBE_READ_REG(hw, IXGBE_BPTC); + for (i = 0; i < 16; i++) { + IXGBE_READ_REG(hw, IXGBE_QPRC(i)); + IXGBE_READ_REG(hw, IXGBE_QPTC(i)); + if (hw->mac.type >= ixgbe_mac_82599EB) { + IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); + IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); + IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); + } else { + IXGBE_READ_REG(hw, IXGBE_QBRC(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC(i)); + } + } + + if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) { + if (hw->phy.id == 0) + ixgbe_identify_phy(hw); + hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, + IXGBE_MDIO_PCS_DEV_TYPE, &i); + hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, + IXGBE_MDIO_PCS_DEV_TYPE, &i); + hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, + IXGBE_MDIO_PCS_DEV_TYPE, &i); + hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, + IXGBE_MDIO_PCS_DEV_TYPE, &i); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_read_pba_string_generic - Reads part number string from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number string from the EEPROM + * @pba_num_size: part number string buffer length + * + 
* Reads the part number string from the EEPROM. + **/ +s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + s32 ret_val; + u16 data; + u16 pba_ptr; + u16 offset; + u16 length; + + DEBUGFUNC("ixgbe_read_pba_string_generic"); + + if (pba_num == NULL) { + DEBUGOUT("PBA string buffer was null\n"); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + /* + * if data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (data != IXGBE_PBANUM_PTR_GUARD) { + DEBUGOUT("NVM PBA number is not stored as string\n"); + + /* we will need 11 characters to store the PBA */ + if (pba_num_size < 11) { + DEBUGOUT("PBA string buffer too small\n"); + return IXGBE_ERR_NO_SPACE; + } + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (data >> 12) & 0xF; + pba_num[1] = (data >> 8) & 0xF; + pba_num[2] = (data >> 4) & 0xF; + pba_num[3] = data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + return IXGBE_SUCCESS; + } + + ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + DEBUGOUT("NVM PBA number section invalid length\n"); + return IXGBE_ERR_PBA_SECTION; + } + + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + DEBUGOUT("PBA string buffer too small\n"); + return IXGBE_ERR_NO_SPACE; + } + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + pba_num[offset * 2] = (u8)(data >> 8); + pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); + } + pba_num[offset * 2] = '\0'; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_read_pba_num_generic - Reads part number from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number from the EEPROM + * + * Reads the part number from the EEPROM. 
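The 11-byte minimum enforced in ixgbe_read_pba_string_generic() above follows from the legacy decode: four nibbles of the first word, the top two nibbles of the second word, a '-', then the remaining digits and a terminating NUL. By that decode, EEPROM words 0x1234 and 0xAB56, for example, come back as the string "1234AB-056". A sketch of the usual call through the dispatch API (illustrative only; the helper name and buffer size are arbitrary):

    /*
     * Illustrative sketch, not part of the ixgbe sources. 32 bytes is an
     * arbitrary size comfortably above the 11-byte legacy minimum; the
     * string-format PBA can be longer, so real callers size generously.
     */
    #include "ixgbe_api.h"
    #include <stdio.h>

    static void example_print_pba(struct ixgbe_hw *hw)
    {
        u8 pba[32];

        if (ixgbe_read_pba_string(hw, pba, sizeof(pba)) == IXGBE_SUCCESS)
            printf("PBA: %s\n", (char *)pba);
    }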
+ **/ +s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num) +{ + s32 ret_val; + u16 data; + + DEBUGFUNC("ixgbe_read_pba_num_generic"); + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } else if (data == IXGBE_PBANUM_PTR_GUARD) { + DEBUGOUT("NVM Not supported\n"); + return IXGBE_NOT_IMPLEMENTED; + } + *pba_num = (u32)(data << 16); + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + *pba_num |= data; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_read_pba_raw + * @hw: pointer to the HW structure + * @eeprom_buf: optional pointer to EEPROM image + * @eeprom_buf_size: size of EEPROM image in words + * @max_pba_block_size: PBA block size limit + * @pba: pointer to output PBA structure + * + * Reads PBA from EEPROM image when eeprom_buf is not NULL. + * Reads PBA from physical EEPROM device when eeprom_buf is NULL. + * + **/ +s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, u16 max_pba_block_size, + struct ixgbe_pba *pba) +{ + s32 ret_val; + u16 pba_block_size; + + if (pba == NULL) + return IXGBE_ERR_PARAM; + + if (eeprom_buf == NULL) { + ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2, + &pba->word[0]); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > IXGBE_PBANUM1_PTR) { + pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR]; + pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR]; + } else { + return IXGBE_ERR_PARAM; + } + } + + if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) { + if (pba->pba_block == NULL) + return IXGBE_ERR_PARAM; + + ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf, + eeprom_buf_size, + &pba_block_size); + if (ret_val) + return ret_val; + + if (pba_block_size > max_pba_block_size) + return IXGBE_ERR_PARAM; + + if (eeprom_buf == NULL) { + ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1], + pba_block_size, + pba->pba_block); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > (u32)(pba->word[1] + + pba_block_size)) { + memcpy(pba->pba_block, + &eeprom_buf[pba->word[1]], + pba_block_size * sizeof(u16)); + } else { + return IXGBE_ERR_PARAM; + } + } + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_write_pba_raw + * @hw: pointer to the HW structure + * @eeprom_buf: optional pointer to EEPROM image + * @eeprom_buf_size: size of EEPROM image in words + * @pba: pointer to PBA structure + * + * Writes PBA to EEPROM image when eeprom_buf is not NULL. + * Writes PBA to physical EEPROM device when eeprom_buf is NULL. 
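For the raw PBA accessors above, the caller owns the block storage referenced by pba_block, and the first word of a pointer-format block is its length in words. A sketch of reading from the physical EEPROM (illustrative only; it assumes ixgbe_read_pba_raw() and struct ixgbe_pba are visible through the common/type headers, and the 64-word bound is arbitrary):

    /*
     * Illustrative sketch, not part of the ixgbe sources. A NULL
     * eeprom_buf makes ixgbe_read_pba_raw() read the physical EEPROM,
     * as described in its comment above.
     */
    #include "ixgbe_common.h"

    static s32 example_read_raw_pba(struct ixgbe_hw *hw)
    {
        u16 block[64];          /* caller-supplied PBA block storage */
        struct ixgbe_pba pba;

        pba.pba_block = block;
        return ixgbe_read_pba_raw(hw, NULL, 0, 64, &pba);
    }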
+ * + **/ +s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, struct ixgbe_pba *pba) +{ + s32 ret_val; + + if (pba == NULL) + return IXGBE_ERR_PARAM; + + if (eeprom_buf == NULL) { + ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2, + &pba->word[0]); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > IXGBE_PBANUM1_PTR) { + eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0]; + eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1]; + } else { + return IXGBE_ERR_PARAM; + } + } + + if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) { + if (pba->pba_block == NULL) + return IXGBE_ERR_PARAM; + + if (eeprom_buf == NULL) { + ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1], + pba->pba_block[0], + pba->pba_block); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > (u32)(pba->word[1] + + pba->pba_block[0])) { + memcpy(&eeprom_buf[pba->word[1]], + pba->pba_block, + pba->pba_block[0] * sizeof(u16)); + } else { + return IXGBE_ERR_PARAM; + } + } + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_pba_block_size + * @hw: pointer to the HW structure + * @eeprom_buf: optional pointer to EEPROM image + * @eeprom_buf_size: size of EEPROM image in words + * @pba_data_size: pointer to output variable + * + * Returns the size of the PBA block in words. Function operates on EEPROM + * image if the eeprom_buf pointer is not NULL otherwise it accesses physical + * EEPROM device. + * + **/ +s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, u16 *pba_block_size) +{ + s32 ret_val; + u16 pba_word[2]; + u16 length; + + DEBUGFUNC("ixgbe_get_pba_block_size"); + + if (eeprom_buf == NULL) { + ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2, + &pba_word[0]); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > IXGBE_PBANUM1_PTR) { + pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR]; + pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR]; + } else { + return IXGBE_ERR_PARAM; + } + } + + if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) { + if (eeprom_buf == NULL) { + ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0, + &length); + if (ret_val) + return ret_val; + } else { + if (eeprom_buf_size > pba_word[1]) + length = eeprom_buf[pba_word[1] + 0]; + else + return IXGBE_ERR_PARAM; + } + + if (length == 0xFFFF || length == 0) + return IXGBE_ERR_PBA_SECTION; + } else { + /* PBA number in legacy format, there is no PBA Block. 
*/ + length = 0; + } + + if (pba_block_size != NULL) + *pba_block_size = length; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_mac_addr_generic - Generic get MAC address + * @hw: pointer to hardware structure + * @mac_addr: Adapter MAC address + * + * Reads the adapter's MAC address from first Receive Address Register (RAR0) + * A reset of the adapter must be performed prior to calling this function + * in order for the MAC address to have been loaded from the EEPROM into RAR0 + **/ +s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + DEBUGFUNC("ixgbe_get_mac_addr_generic"); + + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0)); + rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0)); + + for (i = 0; i < 4; i++) + mac_addr[i] = (u8)(rar_low >> (i*8)); + + for (i = 0; i < 2; i++) + mac_addr[i+4] = (u8)(rar_high >> (i*8)); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_pci_config_data_generic - Generic store PCI bus info + * @hw: pointer to hardware structure + * @link_status: the link status returned by the PCI config space + * + * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure + **/ +void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + if (hw->bus.type == ixgbe_bus_type_unknown) + hw->bus.type = ixgbe_bus_type_pci_express; + + switch (link_status & IXGBE_PCI_LINK_WIDTH) { + case IXGBE_PCI_LINK_WIDTH_1: + hw->bus.width = ixgbe_bus_width_pcie_x1; + break; + case IXGBE_PCI_LINK_WIDTH_2: + hw->bus.width = ixgbe_bus_width_pcie_x2; + break; + case IXGBE_PCI_LINK_WIDTH_4: + hw->bus.width = ixgbe_bus_width_pcie_x4; + break; + case IXGBE_PCI_LINK_WIDTH_8: + hw->bus.width = ixgbe_bus_width_pcie_x8; + break; + default: + hw->bus.width = ixgbe_bus_width_unknown; + break; + } + + switch (link_status & IXGBE_PCI_LINK_SPEED) { + case IXGBE_PCI_LINK_SPEED_2500: + hw->bus.speed = ixgbe_bus_speed_2500; + break; + case IXGBE_PCI_LINK_SPEED_5000: + hw->bus.speed = ixgbe_bus_speed_5000; + break; + case IXGBE_PCI_LINK_SPEED_8000: + hw->bus.speed = ixgbe_bus_speed_8000; + break; + default: + hw->bus.speed = ixgbe_bus_speed_unknown; + break; + } + + mac->ops.set_lan_id(hw); +} + +/** + * ixgbe_get_bus_info_generic - Generic set PCI bus info + * @hw: pointer to hardware structure + * + * Gets the PCI bus info (speed, width, type) then calls helper function to + * store this data within the ixgbe_hw structure. + **/ +s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) +{ + u16 link_status; + + DEBUGFUNC("ixgbe_get_bus_info_generic"); + + /* Get the negotiated link width and speed from PCI config space */ + link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS); + + ixgbe_set_pci_config_data_generic(hw, link_status); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading memory-mapped registers + * and swaps the port value if requested. 
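The packing in ixgbe_get_mac_addr_generic() above implies RAL(0) carries bytes 0-3 of the address and the low 16 bits of RAH(0) carry bytes 4-5; for 00:1b:21:a0:b1:c2 that works out to RAL = 0xa0211b00 and RAH[15:0] = 0xc2b1. A call sketch through the dispatch API (illustrative only, hypothetical helper name):

    /*
     * Illustrative sketch, not part of the ixgbe sources: fetch RAR0
     * through the dispatch layer and print it in the usual notation.
     */
    #include "ixgbe_api.h"
    #include <stdio.h>

    static void example_print_mac(struct ixgbe_hw *hw)
    {
        u8 mac[6];

        if (ixgbe_get_mac_addr(hw, mac) != IXGBE_SUCCESS)
            return;

        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    }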
+ **/ +void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) +{ + struct ixgbe_bus_info *bus = &hw->bus; + u32 reg; + + DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie"); + + reg = IXGBE_READ_REG(hw, IXGBE_STATUS); + bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT; + bus->lan_id = bus->func; + + /* check for a port swap */ + reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw)); + if (reg & IXGBE_FACTPS_LFS) + bus->func ^= 0x1; +} + +/** + * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, + * disables transmit and receive units. The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. + **/ +s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) +{ + u32 reg_val; + u16 i; + + DEBUGFUNC("ixgbe_stop_adapter_generic"); + + /* + * Set the adapter_stopped flag so other driver functions stop touching + * the hardware + */ + hw->adapter_stopped = true; + + /* Disable the receive unit */ + ixgbe_disable_rx(hw); + + /* Clear interrupt mask to stop interrupts from being generated */ + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); + + /* Clear any pending interrupts, flush previous writes */ + IXGBE_READ_REG(hw, IXGBE_EICR); + + /* Disable the transmit unit. Each queue must be disabled. */ + for (i = 0; i < hw->mac.max_tx_queues; i++) + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH); + + /* Disable the receive unit by stopping each queue */ + for (i = 0; i < hw->mac.max_rx_queues; i++) { + reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + reg_val &= ~IXGBE_RXDCTL_ENABLE; + reg_val |= IXGBE_RXDCTL_SWFLSH; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val); + } + + /* flush all queues disables */ + IXGBE_WRITE_FLUSH(hw); + msec_delay(2); + + /* + * Prevent the PCI-E bus from hanging by disabling PCI-E master + * access and verify no pending requests + */ + return ixgbe_disable_pcie_master(hw); +} + +/** + * ixgbe_led_on_generic - Turns on the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn on + **/ +s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + DEBUGFUNC("ixgbe_led_on_generic"); + + /* To turn on the LED, set mode to ON. */ + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_led_off_generic - Turns off the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn off + **/ +s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + DEBUGFUNC("ixgbe_led_off_generic"); + + /* To turn off the LED, set mode to OFF. */ + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_eeprom_params_generic - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. 
+ **/ +s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + + DEBUGFUNC("ixgbe_init_eeprom_params_generic"); + + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->type = ixgbe_eeprom_none; + /* Set default semaphore delay to 10ms which is a well + * tested value */ + eeprom->semaphore_delay = 10; + /* Clear EEPROM page size, it will be initialized as needed */ + eeprom->word_page_size = 0; + + /* + * Check for EEPROM present first. + * If not present leave as none + */ + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + if (eec & IXGBE_EEC_PRES) { + eeprom->type = ixgbe_eeprom_spi; + + /* + * SPI EEPROM is assumed here. This code would need to + * change if a future EEPROM is not SPI. + */ + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + } + + if (eec & IXGBE_EEC_ADDR_SIZE) + eeprom->address_bits = 16; + else + eeprom->address_bits = 8; + DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: " + "%d\n", eeprom->type, eeprom->word_size, + eeprom->address_bits); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to write + * @words: number of word(s) + * @data: 16 bit word(s) to write to EEPROM + * + * Reads 16 bit word(s) from EEPROM through bit-bang method + **/ +s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status = IXGBE_SUCCESS; + u16 i, count; + + DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic"); + + hw->eeprom.ops.init_params(hw); + + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + goto out; + } + + if (offset + words > hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + /* + * The EEPROM page size cannot be queried from the chip. We do lazy + * initialization. It is worth to do that when we write large buffer. + */ + if ((hw->eeprom.word_page_size == 0) && + (words > IXGBE_EEPROM_PAGE_SIZE_MAX)) + ixgbe_detect_eeprom_page_size_generic(hw, offset); + + /* + * We cannot hold synchronization semaphores for too long + * to avoid other entity starvation. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { + count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? + IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); + status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i, + count, &data[i]); + + if (status != IXGBE_SUCCESS) + break; + } + +out: + return status; +} + +/** + * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @words: number of word(s) + * @data: 16 bit word(s) to be written to the EEPROM + * + * If ixgbe_eeprom_update_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. 
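Both bit-bang write paths carry the same warning about the checksum, so in practice a write is paired with ixgbe_update_eeprom_checksum(), roughly as sketched below (illustrative only, hypothetical helper):

    /*
     * Illustrative sketch, not part of the ixgbe sources: a single-word
     * NVM update followed by the checksum refresh the comments above
     * call for.
     */
    #include "ixgbe_api.h"

    static s32 example_write_word(struct ixgbe_hw *hw, u16 offset, u16 value)
    {
        s32 err = ixgbe_write_eeprom(hw, offset, value);

        if (err == IXGBE_SUCCESS)
            err = ixgbe_update_eeprom_checksum(hw);

        return err;
    }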
+ **/ +STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status; + u16 word; + u16 page_size; + u16 i; + u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; + + DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang"); + + /* Prepare the EEPROM for writing */ + status = ixgbe_acquire_eeprom(hw); + + if (status == IXGBE_SUCCESS) { + if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) { + ixgbe_release_eeprom(hw); + status = IXGBE_ERR_EEPROM; + } + } + + if (status == IXGBE_SUCCESS) { + for (i = 0; i < words; i++) { + ixgbe_standby_eeprom(hw); + + /* Send the WRITE ENABLE command (8 bit opcode ) */ + ixgbe_shift_out_eeprom_bits(hw, + IXGBE_EEPROM_WREN_OPCODE_SPI, + IXGBE_EEPROM_OPCODE_BITS); + + ixgbe_standby_eeprom(hw); + + /* + * Some SPI eeproms use the 8th address bit embedded + * in the opcode + */ + if ((hw->eeprom.address_bits == 8) && + ((offset + i) >= 128)) + write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + ixgbe_shift_out_eeprom_bits(hw, write_opcode, + IXGBE_EEPROM_OPCODE_BITS); + ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), + hw->eeprom.address_bits); + + page_size = hw->eeprom.word_page_size; + + /* Send the data in burst via SPI*/ + do { + word = data[i]; + word = (word >> 8) | (word << 8); + ixgbe_shift_out_eeprom_bits(hw, word, 16); + + if (page_size == 0) + break; + + /* do not wrap around page */ + if (((offset + i) & (page_size - 1)) == + (page_size - 1)) + break; + } while (++i < words); + + ixgbe_standby_eeprom(hw); + msec_delay(10); + } + /* Done with writing - release the EEPROM */ + ixgbe_release_eeprom(hw); + } + + return status; +} + +/** + * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @data: 16 bit word to be written to the EEPROM + * + * If ixgbe_eeprom_update_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + s32 status; + + DEBUGFUNC("ixgbe_write_eeprom_generic"); + + hw->eeprom.ops.init_params(hw); + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data); + +out: + return status; +} + +/** + * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @data: read 16 bit words(s) from EEPROM + * @words: number of word(s) + * + * Reads 16 bit word(s) from EEPROM through bit-bang method + **/ +s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status = IXGBE_SUCCESS; + u16 i, count; + + DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic"); + + hw->eeprom.ops.init_params(hw); + + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + goto out; + } + + if (offset + words > hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + /* + * We cannot hold synchronization semaphores for too long + * to avoid other entity starvation. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { + count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? 
+ IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); + + status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i, + count, &data[i]); + + if (status != IXGBE_SUCCESS) + break; + } + +out: + return status; +} + +/** + * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @words: number of word(s) + * @data: read 16 bit word(s) from EEPROM + * + * Reads 16 bit word(s) from EEPROM through bit-bang method + **/ +STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status; + u16 word_in; + u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; + u16 i; + + DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang"); + + /* Prepare the EEPROM for reading */ + status = ixgbe_acquire_eeprom(hw); + + if (status == IXGBE_SUCCESS) { + if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) { + ixgbe_release_eeprom(hw); + status = IXGBE_ERR_EEPROM; + } + } + + if (status == IXGBE_SUCCESS) { + for (i = 0; i < words; i++) { + ixgbe_standby_eeprom(hw); + /* + * Some SPI eeproms use the 8th address bit embedded + * in the opcode + */ + if ((hw->eeprom.address_bits == 8) && + ((offset + i) >= 128)) + read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + ixgbe_shift_out_eeprom_bits(hw, read_opcode, + IXGBE_EEPROM_OPCODE_BITS); + ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), + hw->eeprom.address_bits); + + /* Read the data. */ + word_in = ixgbe_shift_in_eeprom_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + + /* End this read operation */ + ixgbe_release_eeprom(hw); + } + + return status; +} + +/** + * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @data: read 16 bit value from EEPROM + * + * Reads 16 bit value from EEPROM through bit-bang method + **/ +s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 *data) +{ + s32 status; + + DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic"); + + hw->eeprom.ops.init_params(hw); + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); + +out: + return status; +} + +/** + * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of word(s) + * @data: 16 bit word(s) from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the EERD register. 
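Which reader the dispatch layer lands in is decided once in ixgbe_init_ops_generic(): EERD when the EEC presence bit is set, bit-bang otherwise; either way the bounds check against hw->eeprom.word_size applies. A read-and-verify sketch (illustrative only, hypothetical helper):

    /*
     * Illustrative sketch, not part of the ixgbe sources: a bounded
     * buffer read followed by an NVM checksum validation.
     */
    #include "ixgbe_api.h"

    static s32 example_read_and_validate(struct ixgbe_hw *hw)
    {
        u16 header[8];
        u16 checksum;
        s32 err;

        /* fails with IXGBE_ERR_EEPROM if offset + words > word_size */
        err = ixgbe_read_eeprom_buffer(hw, 0, 8, header);
        if (err != IXGBE_SUCCESS)
            return err;

        return ixgbe_validate_eeprom_checksum(hw, &checksum);
    }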
+ **/ +s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + u32 eerd; + s32 status = IXGBE_SUCCESS; + u32 i; + + DEBUGFUNC("ixgbe_read_eerd_buffer_generic"); + + hw->eeprom.ops.init_params(hw); + + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words"); + goto out; + } + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset"); + goto out; + } + + for (i = 0; i < words; i++) { + eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | + IXGBE_EEPROM_RW_REG_START; + + IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ); + + if (status == IXGBE_SUCCESS) { + data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >> + IXGBE_EEPROM_RW_REG_DATA); + } else { + DEBUGOUT("Eeprom read timed out\n"); + goto out; + } + } +out: + return status; +} + +/** + * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be used as a scratch pad + * + * Discover EEPROM page size by writing marching data at given offset. + * This function is called only when we are writing a new large buffer + * at given offset so the data would be overwritten anyway. + **/ +STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, + u16 offset) +{ + u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX]; + s32 status = IXGBE_SUCCESS; + u16 i; + + DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic"); + + for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++) + data[i] = i; + + hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX; + status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, + IXGBE_EEPROM_PAGE_SIZE_MAX, data); + hw->eeprom.word_page_size = 0; + if (status != IXGBE_SUCCESS) + goto out; + + status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); + if (status != IXGBE_SUCCESS) + goto out; + + /* + * When writing in burst more than the actual page size + * EEPROM address wraps around current page. + */ + hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]; + + DEBUGOUT1("Detected EEPROM page size = %d words.", + hw->eeprom.word_page_size); +out: + return status; +} + +/** + * ixgbe_read_eerd_generic - Read EEPROM word using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data); +} + +/** + * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of word(s) + * @data: word(s) write to the EEPROM + * + * Write a 16 bit word(s) to the EEPROM using the EEWR register. 
+ **/ +s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + u32 eewr; + s32 status = IXGBE_SUCCESS; + u16 i; + + DEBUGFUNC("ixgbe_write_eewr_generic"); + + hw->eeprom.ops.init_params(hw); + + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words"); + goto out; + } + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset"); + goto out; + } + + for (i = 0; i < words; i++) { + eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | + (data[i] << IXGBE_EEPROM_RW_REG_DATA) | + IXGBE_EEPROM_RW_REG_START; + + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); + if (status != IXGBE_SUCCESS) { + DEBUGOUT("Eeprom write EEWR timed out\n"); + goto out; + } + + IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); + + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); + if (status != IXGBE_SUCCESS) { + DEBUGOUT("Eeprom write EEWR timed out\n"); + goto out; + } + } + +out: + return status; +} + +/** + * ixgbe_write_eewr_generic - Write EEPROM word using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the EEWR register. + **/ +s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data); +} + +/** + * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status + * @hw: pointer to hardware structure + * @ee_reg: EEPROM flag for polling + * + * Polls the status bit (bit 1) of the EERD or EEWR to determine when the + * read or write is done respectively. + **/ +s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) +{ + u32 i; + u32 reg; + s32 status = IXGBE_ERR_EEPROM; + + DEBUGFUNC("ixgbe_poll_eerd_eewr_done"); + + for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) { + if (ee_reg == IXGBE_NVM_POLL_READ) + reg = IXGBE_READ_REG(hw, IXGBE_EERD); + else + reg = IXGBE_READ_REG(hw, IXGBE_EEWR); + + if (reg & IXGBE_EEPROM_RW_REG_DONE) { + status = IXGBE_SUCCESS; + break; + } + usec_delay(5); + } + + if (i == IXGBE_EERD_EEWR_ATTEMPTS) + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "EEPROM read/write done polling timed out"); + + return status; +} + +/** + * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang + * @hw: pointer to hardware structure + * + * Prepares EEPROM for access using bit-bang method. This function should + * be called before issuing a command to the EEPROM. 
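ixgbe_acquire_eeprom() below layers the EEC request/grant handshake on top of the SWFW semaphore; code outside the shared layer that needs the same serialization can use the public pair directly, roughly as sketched here (illustrative only, hypothetical helper):

    /*
     * Illustrative sketch, not part of the ixgbe sources: bracket
     * EEPROM-related register access with the EEP_SM semaphore, the
     * same mask ixgbe_acquire_eeprom() takes below.
     */
    #include "ixgbe_api.h"

    static s32 example_with_eeprom_semaphore(struct ixgbe_hw *hw)
    {
        s32 err = ixgbe_acquire_swfw_semaphore(hw, IXGBE_GSSR_EEP_SM);

        if (err != IXGBE_SUCCESS)
            return err;

        /* ... serialized EEPROM-related work goes here ... */

        ixgbe_release_swfw_semaphore(hw, IXGBE_GSSR_EEP_SM);
        return IXGBE_SUCCESS;
    }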
+ **/ +STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u32 eec; + u32 i; + + DEBUGFUNC("ixgbe_acquire_eeprom"); + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) + != IXGBE_SUCCESS) + status = IXGBE_ERR_SWFW_SYNC; + + if (status == IXGBE_SUCCESS) { + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + /* Request EEPROM Access */ + eec |= IXGBE_EEC_REQ; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + + for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + if (eec & IXGBE_EEC_GNT) + break; + usec_delay(5); + } + + /* Release if grant not acquired */ + if (!(eec & IXGBE_EEC_GNT)) { + eec &= ~IXGBE_EEC_REQ; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + DEBUGOUT("Could not acquire EEPROM grant\n"); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + status = IXGBE_ERR_EEPROM; + } + + /* Setup EEPROM for Read/Write */ + if (status == IXGBE_SUCCESS) { + /* Clear CS and SK */ + eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + IXGBE_WRITE_FLUSH(hw); + usec_delay(1); + } + } + return status; +} + +/** + * ixgbe_get_eeprom_semaphore - Get hardware semaphore + * @hw: pointer to hardware structure + * + * Sets the hardware semaphores so EEPROM access can occur for bit-bang method + **/ +STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_EEPROM; + u32 timeout = 2000; + u32 i; + u32 swsm; + + DEBUGFUNC("ixgbe_get_eeprom_semaphore"); + + + /* Get SMBI software semaphore between device drivers first */ + for (i = 0; i < timeout; i++) { + /* + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + if (!(swsm & IXGBE_SWSM_SMBI)) { + status = IXGBE_SUCCESS; + break; + } + usec_delay(50); + } + + if (i == timeout) { + DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore " + "not granted.\n"); + /* + * this release is particularly important because our attempts + * above to get the semaphore may have succeeded, and if there + * was a timeout, we should unconditionally clear the semaphore + * bits to free the driver to make progress + */ + ixgbe_release_eeprom_semaphore(hw); + + usec_delay(50); + /* + * one last try + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + if (!(swsm & IXGBE_SWSM_SMBI)) + status = IXGBE_SUCCESS; + } + + /* Now get the semaphore between SW/FW through the SWESMBI bit */ + if (status == IXGBE_SUCCESS) { + for (i = 0; i < timeout; i++) { + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + + /* Set the SW EEPROM semaphore bit to request access */ + swsm |= IXGBE_SWSM_SWESMBI; + IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm); + + /* + * If we set the bit successfully then we got the + * semaphore. 
+ */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + if (swsm & IXGBE_SWSM_SWESMBI) + break; + + usec_delay(50); + } + + /* + * Release semaphores and return error if SW EEPROM semaphore + * was not granted because we don't have access to the EEPROM + */ + if (i >= timeout) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "SWESMBI Software EEPROM semaphore not granted.\n"); + ixgbe_release_eeprom_semaphore(hw); + status = IXGBE_ERR_EEPROM; + } + } else { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "Software semaphore SMBI between device drivers " + "not granted.\n"); + } + + return status; +} + +/** + * ixgbe_release_eeprom_semaphore - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function clears hardware semaphore bits. + **/ +STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) +{ + u32 swsm; + + DEBUGFUNC("ixgbe_release_eeprom_semaphore"); + + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + + /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */ + swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI); + IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_ready_eeprom - Polls for EEPROM ready + * @hw: pointer to hardware structure + **/ +STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u16 i; + u8 spi_stat_reg; + + DEBUGFUNC("ixgbe_ready_eeprom"); + + /* + * Read "Status Register" repeatedly until the LSB is cleared. The + * EEPROM will signal that the command has been completed by clearing + * bit 0 of the internal status register. If it's not cleared within + * 5 milliseconds, then error out. + */ + for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) { + ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI, + IXGBE_EEPROM_OPCODE_BITS); + spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8); + if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) + break; + + usec_delay(5); + ixgbe_standby_eeprom(hw); + }; + + /* + * On some parts, SPI write time could vary from 0-20mSec on 3.3V + * devices (and only 0-5mSec on 5V devices) + */ + if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { + DEBUGOUT("SPI EEPROM Status error\n"); + status = IXGBE_ERR_EEPROM; + } + + return status; +} + +/** + * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state + * @hw: pointer to hardware structure + **/ +STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw) +{ + u32 eec; + + DEBUGFUNC("ixgbe_standby_eeprom"); + + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + /* Toggle CS to flush commands */ + eec |= IXGBE_EEC_CS; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + IXGBE_WRITE_FLUSH(hw); + usec_delay(1); + eec &= ~IXGBE_EEC_CS; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + IXGBE_WRITE_FLUSH(hw); + usec_delay(1); +} + +/** + * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM. + * @hw: pointer to hardware structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out + **/ +STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, + u16 count) +{ + u32 eec; + u32 mask; + u32 i; + + DEBUGFUNC("ixgbe_shift_out_eeprom_bits"); + + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + /* + * Mask is used to shift "count" bits of "data" out to the EEPROM + * one bit at a time. 
Determine the starting bit based on count + */ + mask = 0x01 << (count - 1); + + for (i = 0; i < count; i++) { + /* + * A "1" is shifted out to the EEPROM by setting bit "DI" to a + * "1", and then raising and then lowering the clock (the SK + * bit controls the clock input to the EEPROM). A "0" is + * shifted out to the EEPROM by setting "DI" to "0" and then + * raising and then lowering the clock. + */ + if (data & mask) + eec |= IXGBE_EEC_DI; + else + eec &= ~IXGBE_EEC_DI; + + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + IXGBE_WRITE_FLUSH(hw); + + usec_delay(1); + + ixgbe_raise_eeprom_clk(hw, &eec); + ixgbe_lower_eeprom_clk(hw, &eec); + + /* + * Shift mask to signify next bit of data to shift in to the + * EEPROM + */ + mask = mask >> 1; + }; + + /* We leave the "DI" bit set to "0" when we leave this routine. */ + eec &= ~IXGBE_EEC_DI; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM + * @hw: pointer to hardware structure + **/ +STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) +{ + u32 eec; + u32 i; + u16 data = 0; + + DEBUGFUNC("ixgbe_shift_in_eeprom_bits"); + + /* + * In order to read a register from the EEPROM, we need to shift + * 'count' bits in from the EEPROM. Bits are "shifted in" by raising + * the clock input to the EEPROM (setting the SK bit), and then reading + * the value of the "DO" bit. During this "shifting in" process the + * "DI" bit should always be clear. + */ + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); + + for (i = 0; i < count; i++) { + data = data << 1; + ixgbe_raise_eeprom_clk(hw, &eec); + + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + eec &= ~(IXGBE_EEC_DI); + if (eec & IXGBE_EEC_DO) + data |= 1; + + ixgbe_lower_eeprom_clk(hw, &eec); + } + + return data; +} + +/** + * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input. + * @hw: pointer to hardware structure + * @eec: EEC register's current value + **/ +STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) +{ + DEBUGFUNC("ixgbe_raise_eeprom_clk"); + + /* + * Raise the clock input to the EEPROM + * (setting the SK bit), then delay + */ + *eec = *eec | IXGBE_EEC_SK; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec); + IXGBE_WRITE_FLUSH(hw); + usec_delay(1); +} + +/** + * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input. 
+ * @hw: pointer to hardware structure + * @eecd: EECD's current value + **/ +STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) +{ + DEBUGFUNC("ixgbe_lower_eeprom_clk"); + + /* + * Lower the clock input to the EEPROM (clearing the SK bit), then + * delay + */ + *eec = *eec & ~IXGBE_EEC_SK; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec); + IXGBE_WRITE_FLUSH(hw); + usec_delay(1); +} + +/** + * ixgbe_release_eeprom - Release EEPROM, release semaphores + * @hw: pointer to hardware structure + **/ +STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw) +{ + u32 eec; + + DEBUGFUNC("ixgbe_release_eeprom"); + + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + eec |= IXGBE_EEC_CS; /* Pull CS high */ + eec &= ~IXGBE_EEC_SK; /* Lower SCK */ + + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + IXGBE_WRITE_FLUSH(hw); + + usec_delay(1); + + /* Stop requesting EEPROM access */ + eec &= ~IXGBE_EEC_REQ; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + + /* Delay before attempt to obtain semaphore again to allow FW access */ + msec_delay(hw->eeprom.semaphore_delay); +} + +/** + * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum + * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) +{ + u16 i; + u16 j; + u16 checksum = 0; + u16 length = 0; + u16 pointer = 0; + u16 word = 0; + + DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic"); + + /* Include 0x0-0x3F in the checksum */ + for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { + if (hw->eeprom.ops.read(hw, i, &word)) { + DEBUGOUT("EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + checksum += word; + } + + /* Include all data from pointers except for the fw pointer */ + for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { + if (hw->eeprom.ops.read(hw, i, &pointer)) { + DEBUGOUT("EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + + /* If the pointer seems invalid */ + if (pointer == 0xFFFF || pointer == 0) + continue; + + if (hw->eeprom.ops.read(hw, pointer, &length)) { + DEBUGOUT("EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + + if (length == 0xFFFF || length == 0) + continue; + + for (j = pointer + 1; j <= pointer + length; j++) { + if (hw->eeprom.ops.read(hw, j, &word)) { + DEBUGOUT("EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + checksum += word; + } + } + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + + return (s32)checksum; +} + +/** + * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, + u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic"); + + /* Read the first word from the EEPROM. 
If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) + status = IXGBE_ERR_EEPROM_CHECKSUM; + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + + return status; +} + +/** + * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum + * @hw: pointer to hardware structure + **/ +s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) +{ + s32 status; + u16 checksum; + + DEBUGFUNC("ixgbe_update_eeprom_checksum_generic"); + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum); + + return status; +} + +/** + * ixgbe_validate_mac_addr - Validate MAC address + * @mac_addr: pointer to MAC address. + * + * Tests a MAC address to ensure it is a valid Individual Address. + **/ +s32 ixgbe_validate_mac_addr(u8 *mac_addr) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_validate_mac_addr"); + + /* Make sure it is not a multicast address */ + if (IXGBE_IS_MULTICAST(mac_addr)) { + status = IXGBE_ERR_INVALID_MAC_ADDR; + /* Not a broadcast address */ + } else if (IXGBE_IS_BROADCAST(mac_addr)) { + status = IXGBE_ERR_INVALID_MAC_ADDR; + /* Reject the zero address */ + } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && + mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { + status = IXGBE_ERR_INVALID_MAC_ADDR; + } + return status; +} + +/** + * ixgbe_set_rar_generic - Set Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * @vmdq: VMDq "set" or "pool" index + * @enable_addr: set flag that address is active + * + * Puts an ethernet address into a receive address register. 
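+ *
+ * Worked example (hypothetical address): for 00:1B:21:AA:BB:CC the
+ * code below writes RAL = 0xAA211B00 (bytes 0-3, little endian) and
+ * puts 0xCCBB into the low 16 bits of RAH (bytes 4-5); RAH.AV is set
+ * only when enable_addr is non-zero.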
+ **/ +s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr) +{ + u32 rar_low, rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("ixgbe_set_rar_generic"); + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", index); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + /* setup VMDq pool selection before this RAR gets enabled */ + hw->mac.ops.set_vmdq(hw, index, vmdq); + + /* + * HW expects these in little endian so we reverse the byte + * order from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | + ((u32)addr[3] << 24)); + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); + rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); + rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8)); + + if (enable_addr != 0) + rar_high |= IXGBE_RAH_AV; + + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_clear_rar_generic - Remove Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * + * Clears an ethernet address from a receive address register. + **/ +s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("ixgbe_clear_rar_generic"); + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", index); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); + rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); + + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); + + /* clear VMDq pool/queue selection for this RAR */ + hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_rx_addrs_generic - Initializes receive address filters. + * @hw: pointer to hardware structure + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + **/ +s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) +{ + u32 i; + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("ixgbe_init_rx_addrs_generic"); + + /* + * If the current mac address is valid, assume it is a software override + * to the permanent address. + * Otherwise, use the permanent address from the eeprom. + */ + if (ixgbe_validate_mac_addr(hw->mac.addr) == + IXGBE_ERR_INVALID_MAC_ADDR) { + /* Get the MAC address from the RAR0 for later reference */ + hw->mac.ops.get_mac_addr(hw, hw->mac.addr); + + DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ", + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2]); + DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3], + hw->mac.addr[4], hw->mac.addr[5]); + } else { + /* Setup the receive address. 
*/ + DEBUGOUT("Overriding MAC Address in RAR[0]\n"); + DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ", + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2]); + DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3], + hw->mac.addr[4], hw->mac.addr[5]); + + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); + + /* clear VMDq pool/queue selection for RAR 0 */ + hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); + } + hw->addr_ctrl.overflow_promisc = 0; + + hw->addr_ctrl.rar_used_count = 1; + + /* Zero out the other receive addresses. */ + DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1); + for (i = 1; i < rar_entries; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); + } + + /* Clear the MTA */ + hw->addr_ctrl.mta_in_use = 0; + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); + + DEBUGOUT(" Clearing MTA\n"); + for (i = 0; i < hw->mac.mcft_size; i++) + IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); + + ixgbe_init_uta_tables(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_add_uc_addr - Adds a secondary unicast address. + * @hw: pointer to hardware structure + * @addr: new address + * + * Adds it to unused receive address register or goes into promiscuous mode. + **/ +void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) +{ + u32 rar_entries = hw->mac.num_rar_entries; + u32 rar; + + DEBUGFUNC("ixgbe_add_uc_addr"); + + DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + + /* + * Place this address in the RAR if there is room, + * else put the controller into promiscuous mode + */ + if (hw->addr_ctrl.rar_used_count < rar_entries) { + rar = hw->addr_ctrl.rar_used_count; + hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); + DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar); + hw->addr_ctrl.rar_used_count++; + } else { + hw->addr_ctrl.overflow_promisc++; + } + + DEBUGOUT("ixgbe_add_uc_addr Complete\n"); +} + +/** + * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses + * @hw: pointer to hardware structure + * @addr_list: the list of new addresses + * @addr_count: number of addresses + * @next: iterator function to walk the address list + * + * The given list replaces any existing list. Clears the secondary addrs from + * receive address registers. Uses unused receive address registers for the + * first secondary addresses, and falls back to promiscuous mode as needed. + * + * Drivers using secondary unicast addresses must set user_set_promisc when + * manually putting the device into promiscuous mode. 
+ **/ +s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr next) +{ + u8 *addr; + u32 i; + u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; + u32 uc_addr_in_use; + u32 fctrl; + u32 vmdq; + + DEBUGFUNC("ixgbe_update_uc_addr_list_generic"); + + /* + * Clear accounting of old secondary address list, + * don't count RAR[0] + */ + uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1; + hw->addr_ctrl.rar_used_count -= uc_addr_in_use; + hw->addr_ctrl.overflow_promisc = 0; + + /* Zero out the other receive addresses */ + DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1); + for (i = 0; i < uc_addr_in_use; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0); + } + + /* Add the new addresses */ + for (i = 0; i < addr_count; i++) { + DEBUGOUT(" Adding the secondary addresses:\n"); + addr = next(hw, &addr_list, &vmdq); + ixgbe_add_uc_addr(hw, addr, vmdq); + } + + if (hw->addr_ctrl.overflow_promisc) { + /* enable promisc if not already in overflow or set by user */ + if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { + DEBUGOUT(" Entering address overflow promisc mode\n"); + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_UPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + } + } else { + /* only disable if set by overflow, not by user */ + if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { + DEBUGOUT(" Leaving address overflow promisc mode\n"); + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl &= ~IXGBE_FCTRL_UPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + } + } + + DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n"); + return IXGBE_SUCCESS; +} + +/** + * ixgbe_mta_vector - Determines bit-vector in multicast table to set + * @hw: pointer to hardware structure + * @mc_addr: the multicast address + * + * Extracts the 12 bits, from a multicast address, to determine which + * bit-vector to set in the multicast table. The hardware uses 12 bits, from + * incoming rx multicast addresses, to determine the bit-vector to check in + * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set + * by the MO field of the MCSTCTRL. The MO field is set during initialization + * to mc_filter_type. + **/ +STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) +{ + u32 vector = 0; + + DEBUGFUNC("ixgbe_mta_vector"); + + switch (hw->mac.mc_filter_type) { + case 0: /* use bits [47:36] of the address */ + vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: /* use bits [46:35] of the address */ + vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: /* use bits [45:34] of the address */ + vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: /* use bits [43:32] of the address */ + vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + default: /* Invalid mc_filter_type */ + DEBUGOUT("MC filter type param set incorrectly\n"); + ASSERT(0); + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +/** + * ixgbe_set_mta - Set bit-vector in multicast table + * @hw: pointer to hardware structure + * @hash_value: Multicast address hash value + * + * Sets the bit-vector in the multicast table. 
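+ *
+ * Worked example (hypothetical hash): vector 0x5A3 selects shadow
+ * register 0x5A3 >> 5 = 0x2D and bit 0x5A3 & 0x1F = 3 within it.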
+ **/ +void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) +{ + u32 vector; + u32 vector_bit; + u32 vector_reg; + + DEBUGFUNC("ixgbe_set_mta"); + + hw->addr_ctrl.mta_in_use++; + + vector = ixgbe_mta_vector(hw, mc_addr); + DEBUGOUT1(" bit-vector = 0x%03X\n", vector); + + /* + * The MTA is a register array of 128 32-bit registers. It is treated + * like an array of 4096 bits. We want to set bit + * BitArray[vector_value]. So we figure out what register the bit is + * in, read it, OR in the new bit, then write back the new value. The + * register is determined by the upper 7 bits of the vector value and + * the bit within that register are determined by the lower 5 bits of + * the value. + */ + vector_reg = (vector >> 5) & 0x7F; + vector_bit = vector & 0x1F; + hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); +} + +/** + * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses + * @hw: pointer to hardware structure + * @mc_addr_list: the list of new multicast addresses + * @mc_addr_count: number of addresses + * @next: iterator function to walk the multicast address list + * @clear: flag, when set clears the table beforehand + * + * When the clear flag is set, the given list replaces any existing list. + * Hashes the given addresses into the multicast table. + **/ +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr next, + bool clear) +{ + u32 i; + u32 vmdq; + + DEBUGFUNC("ixgbe_update_mc_addr_list_generic"); + + /* + * Set the new number of MC addresses that we are being requested to + * use. + */ + hw->addr_ctrl.num_mc_addrs = mc_addr_count; + hw->addr_ctrl.mta_in_use = 0; + + /* Clear mta_shadow */ + if (clear) { + DEBUGOUT(" Clearing MTA\n"); + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + } + + /* Update mta_shadow */ + for (i = 0; i < mc_addr_count; i++) { + DEBUGOUT(" Adding the multicast addresses:\n"); + ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq)); + } + + /* Enable mta */ + for (i = 0; i < hw->mac.mcft_size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i, + hw->mac.mta_shadow[i]); + + if (hw->addr_ctrl.mta_in_use > 0) + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, + IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); + + DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n"); + return IXGBE_SUCCESS; +} + +/** + * ixgbe_enable_mc_generic - Enable multicast address in RAR + * @hw: pointer to hardware structure + * + * Enables multicast address in RAR and the use of the multicast hash table. + **/ +s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; + + DEBUGFUNC("ixgbe_enable_mc_generic"); + + if (a->mta_in_use > 0) + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | + hw->mac.mc_filter_type); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_disable_mc_generic - Disable multicast address in RAR + * @hw: pointer to hardware structure + * + * Disables multicast address in RAR and the use of the multicast hash table. + **/ +s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; + + DEBUGFUNC("ixgbe_disable_mc_generic"); + + if (a->mta_in_use > 0) + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_fc_enable_generic - Enable flow control + * @hw: pointer to hardware structure + * + * Enable flow control according to the current settings. 
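+ *
+ * In outline: validate the water marks, let ixgbe_fc_autoneg()
+ * resolve fc.current_mode, program MFLCN/FCCFG for the chosen Rx/Tx
+ * pause combination, then write the per-TC FCRTL/FCRTH thresholds,
+ * the FCTTV pause timers and the FCRTV refresh value.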
+ **/ +s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + u32 mflcn_reg, fccfg_reg; + u32 reg; + u32 fcrtl, fcrth; + int i; + + DEBUGFUNC("ixgbe_fc_enable_generic"); + + /* Validate the water mark configuration */ + if (!hw->fc.pause_time) { + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* Low water mark of zero causes XOFF floods */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + if (!hw->fc.low_water[i] || + hw->fc.low_water[i] >= hw->fc.high_water[i]) { + DEBUGOUT("Invalid water mark configuration\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + } + } + + /* Negotiate the fc mode to use */ + ixgbe_fc_autoneg(hw); + + /* Disable any previous flow control settings */ + mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); + mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); + + fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); + fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); + + /* + * The possible values of fc.current_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.current_mode) { + case ixgbe_fc_none: + /* + * Flow control is disabled by software override or autoneg. + * The code below will actually disable it in the HW. + */ + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + mflcn_reg |= IXGBE_MFLCN_RFCE; + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; + break; + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + mflcn_reg |= IXGBE_MFLCN_RFCE; + fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; + break; + default: + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; + } + + /* Set 802.3x based flow control settings. */ + mflcn_reg |= IXGBE_MFLCN_DPF; + IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); + IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); + + + /* Set up and enable Rx high/low water mark thresholds, enable XON. */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); + fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; + } else { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); + /* + * In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the Rx packet buffer size - 24KB. This allows + * the Tx switch to function even under heavy Rx + * workloads. 
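+ * (24 KB corresponds to the 24576 bytes subtracted from RXPBSIZE
+ * just below.)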
+ */ + fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576; + } + + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth); + } + + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time * 0x00010001; + for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); + +out: + return ret_val; +} + +/** + * ixgbe_negotiate_fc - Negotiate flow control + * @hw: pointer to hardware structure + * @adv_reg: flow control advertised settings + * @lp_reg: link partner's flow control settings + * @adv_sym: symmetric pause bit in advertisement + * @adv_asm: asymmetric pause bit in advertisement + * @lp_sym: symmetric pause bit in link partner advertisement + * @lp_asm: asymmetric pause bit in link partner advertisement + * + * Find the intersection between advertised settings and link partner's + * advertised settings + **/ +STATIC s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, + u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) +{ + if ((!(adv_reg)) || (!(lp_reg))) { + ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED, + "Local or link partner's advertised flow control " + "settings are NULL. Local: %x, link partner: %x\n", + adv_reg, lp_reg); + return IXGBE_ERR_FC_NOT_NEGOTIATED; + } + + if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { + /* + * Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == ixgbe_fc_full) { + hw->fc.current_mode = ixgbe_fc_full; + DEBUGOUT("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = ixgbe_fc_rx_pause; + DEBUGOUT("Flow Control=RX PAUSE frames only\n"); + } + } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && + (lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = ixgbe_fc_tx_pause; + DEBUGOUT("Flow Control = TX PAUSE frames only.\n"); + } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && + !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = ixgbe_fc_rx_pause; + DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); + } else { + hw->fc.current_mode = ixgbe_fc_none; + DEBUGOUT("Flow Control = NONE.\n"); + } + return IXGBE_SUCCESS; +} + +/** + * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber + * @hw: pointer to hardware structure + * + * Enable flow control according on 1 gig fiber. 
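+ *
+ * The PCS1GANA/PCS1GANLP pause bits are resolved by
+ * ixgbe_negotiate_fc() above, roughly: both sides symmetric -> full
+ * (or rx_pause if only Rx was requested locally); local asymmetric
+ * only with partner sym+asym -> tx_pause; local sym+asym with partner
+ * asymmetric only -> rx_pause; anything else -> none.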
+ **/ +STATIC s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) +{ + u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; + s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; + + /* + * On multispeed fiber at 1g, bail out if + * - link is up but AN did not complete, or if + * - link is up and AN completed but timed out + */ + + linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); + if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || + (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { + DEBUGOUT("Auto-Negotiation did not complete or timed out\n"); + goto out; + } + + pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); + + ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg, + pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, + IXGBE_PCS1GANA_ASM_PAUSE, + IXGBE_PCS1GANA_SYM_PAUSE, + IXGBE_PCS1GANA_ASM_PAUSE); + +out: + return ret_val; +} + +/** + * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. + **/ +STATIC s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) +{ + u32 links2, anlp1_reg, autoc_reg, links; + s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; + + /* + * On backplane, bail out if + * - backplane autoneg was not completed, or if + * - we are 82599 and link partner is not AN enabled + */ + links = IXGBE_READ_REG(hw, IXGBE_LINKS); + if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) { + DEBUGOUT("Auto-Negotiation did not complete\n"); + goto out; + } + + if (hw->mac.type == ixgbe_mac_82599EB) { + links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); + if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { + DEBUGOUT("Link partner is not AN enabled\n"); + goto out; + } + } + /* + * Read the 10g AN autoc and LP ability registers and resolve + * local flow control settings accordingly + */ + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); + + ret_val = ixgbe_negotiate_fc(hw, autoc_reg, + anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE, + IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE); + +out: + return ret_val; +} + +/** + * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. + **/ +STATIC s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) +{ + u16 technology_ability_reg = 0; + u16 lp_technology_ability_reg = 0; + + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &technology_ability_reg); + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &lp_technology_ability_reg); + + return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg, + (u32)lp_technology_ability_reg, + IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE, + IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE); +} + +/** + * ixgbe_fc_autoneg - Configure flow control + * @hw: pointer to hardware structure + * + * Compares our advertised flow control capabilities to those advertised by + * our link partner, and determines the proper flow control mode to use. + **/ +void ixgbe_fc_autoneg(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; + ixgbe_link_speed speed; + bool link_up; + + DEBUGFUNC("ixgbe_fc_autoneg"); + + /* + * AN should have completed when the cable was plugged in. + * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up. 
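+ * The switch below dispatches to the media-specific helper
+ * (PCS1GANA/PCS1GANLP for fiber, AUTOC/ANLP1 for backplane, the
+ * clause 37 MDIO registers for copper), which in turn hands the
+ * advertisement bits to ixgbe_negotiate_fc().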
+ */ + if (hw->fc.disable_fc_autoneg) { + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "Flow control autoneg is disabled"); + goto out; + } + + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (!link_up) { + ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down"); + goto out; + } + + switch (hw->phy.media_type) { + /* Autoneg flow control on fiber adapters */ + case ixgbe_media_type_fiber_qsfp: + case ixgbe_media_type_fiber: + if (speed == IXGBE_LINK_SPEED_1GB_FULL) + ret_val = ixgbe_fc_autoneg_fiber(hw); + break; + + /* Autoneg flow control on backplane adapters */ + case ixgbe_media_type_backplane: + ret_val = ixgbe_fc_autoneg_backplane(hw); + break; + + /* Autoneg flow control on copper adapters */ + case ixgbe_media_type_copper: + if (ixgbe_device_supports_autoneg_fc(hw)) + ret_val = ixgbe_fc_autoneg_copper(hw); + break; + + default: + break; + } + +out: + if (ret_val == IXGBE_SUCCESS) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + } +} + +/* + * ixgbe_pcie_timeout_poll - Return number of times to poll for completion + * @hw: pointer to hardware structure + * + * System-wide timeout range is encoded in PCIe Device Control2 register. + * + * Add 10% to specified maximum and return the number of times to poll for + * completion timeout, in units of 100 microsec. Never return less than + * 800 = 80 millisec. + */ +STATIC u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) +{ + s16 devctl2; + u32 pollcnt; + + devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2); + devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK; + + switch (devctl2) { + case IXGBE_PCIDEVCTRL2_65_130ms: + pollcnt = 1300; /* 130 millisec */ + break; + case IXGBE_PCIDEVCTRL2_260_520ms: + pollcnt = 5200; /* 520 millisec */ + break; + case IXGBE_PCIDEVCTRL2_1_2s: + pollcnt = 20000; /* 2 sec */ + break; + case IXGBE_PCIDEVCTRL2_4_8s: + pollcnt = 80000; /* 8 sec */ + break; + case IXGBE_PCIDEVCTRL2_17_34s: + pollcnt = 34000; /* 34 sec */ + break; + case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */ + case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */ + case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */ + case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */ + default: + pollcnt = 800; /* 80 millisec minimum */ + break; + } + + /* add 10% to spec maximum */ + return (pollcnt * 11) / 10; +} + +/** + * ixgbe_disable_pcie_master - Disable PCI-express master access + * @hw: pointer to hardware structure + * + * Disables PCI-Express master access and verifies there are no pending + * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable + * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS + * is returned signifying master requests disabled. + **/ +s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u32 i, poll; + u16 value; + + DEBUGFUNC("ixgbe_disable_pcie_master"); + + /* Always set this bit to ensure any future transactions are blocked */ + IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS); + + /* Exit if master requests are blocked */ + if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) || + IXGBE_REMOVED(hw->hw_addr)) + goto out; + + /* Poll for master request bit to clear */ + for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { + usec_delay(100); + if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) + goto out; + } + + /* + * Two consecutive resets are required via CTRL.RST per datasheet + * 5.2.5.3.2 Master Disable. 
We set a flag to inform the reset routine + * of this need. The first reset prevents new master requests from + * being issued by our device. We then must wait 1usec or more for any + * remaining completions from the PCIe bus to trickle in, and then reset + * again to clear out any effects they may have had on our device. + */ + DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n"); + hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + + if (hw->mac.type >= ixgbe_mac_X550) + goto out; + + /* + * Before proceeding, make sure that the PCIe block does not have + * transactions pending. + */ + poll = ixgbe_pcie_timeout_poll(hw); + for (i = 0; i < poll; i++) { + usec_delay(100); + value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS); + if (IXGBE_REMOVED(hw->hw_addr)) + goto out; + if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) + goto out; + } + + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "PCIe transaction pending bit also did not clear.\n"); + status = IXGBE_ERR_MASTER_REQUESTS_PENDING; + +out: + return status; +} + +/** + * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore through the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask) +{ + u32 gssr = 0; + u32 swmask = mask; + u32 fwmask = mask << 5; + u32 timeout = 200; + u32 i; + + DEBUGFUNC("ixgbe_acquire_swfw_sync"); + + for (i = 0; i < timeout; i++) { + /* + * SW NVM semaphore bit is used for access to all + * SW_FW_SYNC bits (not just NVM) + */ + if (ixgbe_get_eeprom_semaphore(hw)) + return IXGBE_ERR_SWFW_SYNC; + + gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); + if (!(gssr & (fwmask | swmask))) { + gssr |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); + ixgbe_release_eeprom_semaphore(hw); + return IXGBE_SUCCESS; + } else { + /* Resource is currently in use by FW or SW */ + ixgbe_release_eeprom_semaphore(hw); + msec_delay(5); + } + } + + /* If time expired clear the bits holding the lock and retry */ + if (gssr & (fwmask | swmask)) + ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask)); + + msec_delay(5); + return IXGBE_ERR_SWFW_SYNC; +} + +/** + * ixgbe_release_swfw_sync - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore through the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask) +{ + u32 gssr; + u32 swmask = mask; + + DEBUGFUNC("ixgbe_release_swfw_sync"); + + ixgbe_get_eeprom_semaphore(hw); + + gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); + gssr &= ~swmask; + IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); + + ixgbe_release_eeprom_semaphore(hw); +} + +/** + * ixgbe_disable_sec_rx_path_generic - Stops the receive data path + * @hw: pointer to hardware structure + * + * Stops the receive data path and waits for the HW to internally empty + * the Rx security block + **/ +s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw) +{ +#define IXGBE_MAX_SECRX_POLL 40 + + int i; + int secrxreg; + + DEBUGFUNC("ixgbe_disable_sec_rx_path_generic"); + + + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + secrxreg |= IXGBE_SECRXCTRL_RX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); + for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); + if (secrxreg & 
IXGBE_SECRXSTAT_SECRX_RDY) + break; + else + /* Use interrupt-safe sleep just in case */ + usec_delay(1000); + } + + /* For informational purposes only */ + if (i >= IXGBE_MAX_SECRX_POLL) + DEBUGOUT("Rx unit being enabled before security " + "path fully disabled. Continuing with init.\n"); + + return IXGBE_SUCCESS; +} + +/** + * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read + * @hw: pointer to hardware structure + * @reg_val: Value we read from AUTOC + * + * The default case requires no protection so just to the register read. + */ +s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) +{ + *locked = false; + *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); + return IXGBE_SUCCESS; +} + +/** + * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write + * @hw: pointer to hardware structure + * @reg_val: value to write to AUTOC + * @locked: bool to indicate whether the SW/FW lock was already taken by + * previous read. + * + * The default case requires no protection so just to the register write. + */ +s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) +{ + UNREFERENCED_1PARAMETER(locked); + + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val); + return IXGBE_SUCCESS; +} + +/** + * ixgbe_enable_sec_rx_path_generic - Enables the receive data path + * @hw: pointer to hardware structure + * + * Enables the receive data path. + **/ +s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw) +{ + int secrxreg; + + DEBUGFUNC("ixgbe_enable_sec_rx_path_generic"); + + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); + IXGBE_WRITE_FLUSH(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit + * @hw: pointer to hardware structure + * @regval: register value to write to RXCTRL + * + * Enables the Rx DMA unit + **/ +s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) +{ + DEBUGFUNC("ixgbe_enable_rx_dma_generic"); + + if (regval & IXGBE_RXCTRL_RXEN) + ixgbe_enable_rx(hw); + else + ixgbe_disable_rx(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_blink_led_start_generic - Blink LED based on index. + * @hw: pointer to hardware structure + * @index: led number to blink + **/ +s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) +{ + ixgbe_link_speed speed = 0; + bool link_up = 0; + u32 autoc_reg = 0; + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + s32 ret_val = IXGBE_SUCCESS; + bool locked = false; + + DEBUGFUNC("ixgbe_blink_led_start_generic"); + + /* + * Link must be up to auto-blink the LEDs; + * Force it if link is down. + */ + hw->mac.ops.check_link(hw, &speed, &link_up, false); + + if (!link_up) { + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); + if (ret_val != IXGBE_SUCCESS) + goto out; + + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + autoc_reg |= IXGBE_AUTOC_FLU; + + ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); + if (ret_val != IXGBE_SUCCESS) + goto out; + + IXGBE_WRITE_FLUSH(hw); + msec_delay(10); + } + + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_BLINK(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + +out: + return ret_val; +} + +/** + * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. 
+ * @hw: pointer to hardware structure + * @index: led number to stop blinking + **/ +s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 autoc_reg = 0; + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + s32 ret_val = IXGBE_SUCCESS; + bool locked = false; + + DEBUGFUNC("ixgbe_blink_led_stop_generic"); + + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); + if (ret_val != IXGBE_SUCCESS) + goto out; + + autoc_reg &= ~IXGBE_AUTOC_FLU; + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + + ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); + if (ret_val != IXGBE_SUCCESS) + goto out; + + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg &= ~IXGBE_LED_BLINK(index); + led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + +out: + return ret_val; +} + +/** + * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM + * @hw: pointer to hardware structure + * @san_mac_offset: SAN MAC address offset + * + * This function will read the EEPROM location for the SAN MAC address + * pointer, and returns the value at that location. This is used in both + * get and set mac_addr routines. + **/ +STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, + u16 *san_mac_offset) +{ + s32 ret_val; + + DEBUGFUNC("ixgbe_get_san_mac_addr_offset"); + + /* + * First read the EEPROM pointer to see if the MAC addresses are + * available. + */ + ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, + san_mac_offset); + if (ret_val) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom at offset %d failed", + IXGBE_SAN_MAC_ADDR_PTR); + } + + return ret_val; +} + +/** + * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Reads the SAN MAC address from the EEPROM, if it's available. This is + * per-port, so set_lan_id() must be called before reading the addresses. + * set_lan_id() is called by identify_sfp(), but this cannot be relied + * upon for non-SFP connections, so we must call it here. + **/ +s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) +{ + u16 san_mac_data, san_mac_offset; + u8 i; + s32 ret_val; + + DEBUGFUNC("ixgbe_get_san_mac_addr_generic"); + + /* + * First read the EEPROM pointer to see if the MAC addresses are + * available. If they're not, no point in calling set_lan_id() here. + */ + ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); + if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) + goto san_mac_addr_out; + + /* make sure we know which port we need to program */ + hw->mac.ops.set_lan_id(hw); + /* apply the port offset to the address offset */ + (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : + (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); + for (i = 0; i < 3; i++) { + ret_val = hw->eeprom.ops.read(hw, san_mac_offset, + &san_mac_data); + if (ret_val) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", + san_mac_offset); + goto san_mac_addr_out; + } + san_mac_addr[i * 2] = (u8)(san_mac_data); + san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); + san_mac_offset++; + } + return IXGBE_SUCCESS; + +san_mac_addr_out: + /* + * No addresses available in this EEPROM. It's not an + * error though, so just wipe the local address and return. 
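+ * The address is filled with 0xFF below, presumably so callers can
+ * recognize an unprogrammed SAN MAC.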
+ */ + for (i = 0; i < 6; i++) + san_mac_addr[i] = 0xFF; + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Write a SAN MAC address to the EEPROM. + **/ +s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) +{ + s32 ret_val; + u16 san_mac_data, san_mac_offset; + u8 i; + + DEBUGFUNC("ixgbe_set_san_mac_addr_generic"); + + /* Look for SAN mac address pointer. If not defined, return */ + ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); + if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) + return IXGBE_ERR_NO_SAN_ADDR_PTR; + + /* Make sure we know which port we need to write */ + hw->mac.ops.set_lan_id(hw); + /* Apply the port offset to the address offset */ + (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : + (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); + + for (i = 0; i < 3; i++) { + san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8); + san_mac_data |= (u16)(san_mac_addr[i * 2]); + hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data); + san_mac_offset++; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count + * @hw: pointer to hardware structure + * + * Read PCIe configuration space, and get the MSI-X vector count from + * the capabilities table. + **/ +u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) +{ + u16 msix_count = 1; + u16 max_msix_count; + u16 pcie_offset; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS; + max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; + max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; + break; + default: + return msix_count; + } + + DEBUGFUNC("ixgbe_get_pcie_msix_count_generic"); + msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset); + if (IXGBE_REMOVED(hw->hw_addr)) + msix_count = 0; + msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; + + /* MSI-X count is zero-based in HW */ + msix_count++; + + if (msix_count > max_msix_count) + msix_count = max_msix_count; + + return msix_count; +} + +/** + * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address + * @hw: pointer to hardware structure + * @addr: Address to put into receive address register + * @vmdq: VMDq pool to assign + * + * Puts an ethernet address into a receive address register, or + * finds the rar that it is aleady in; adds to the pool list + **/ +s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) +{ + static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF; + u32 first_empty_rar = NO_EMPTY_RAR_FOUND; + u32 rar; + u32 rar_low, rar_high; + u32 addr_low, addr_high; + + DEBUGFUNC("ixgbe_insert_mac_addr_generic"); + + /* swap bytes for HW little endian */ + addr_low = addr[0] | (addr[1] << 8) + | (addr[2] << 16) + | (addr[3] << 24); + addr_high = addr[4] | (addr[5] << 8); + + /* + * Either find the mac_id in rar or find the first empty space. + * rar_highwater points to just after the highest currently used + * rar in order to shorten the search. It grows when we add a new + * rar to the top. 
+ */ + for (rar = 0; rar < hw->mac.rar_highwater; rar++) { + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); + + if (((IXGBE_RAH_AV & rar_high) == 0) + && first_empty_rar == NO_EMPTY_RAR_FOUND) { + first_empty_rar = rar; + } else if ((rar_high & 0xFFFF) == addr_high) { + rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar)); + if (rar_low == addr_low) + break; /* found it already in the rars */ + } + } + + if (rar < hw->mac.rar_highwater) { + /* already there so just add to the pool bits */ + ixgbe_set_vmdq(hw, rar, vmdq); + } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) { + /* stick it into first empty RAR slot we found */ + rar = first_empty_rar; + ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); + } else if (rar == hw->mac.rar_highwater) { + /* add it to the top of the list and inc the highwater mark */ + ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); + hw->mac.rar_highwater++; + } else if (rar >= hw->mac.num_rar_entries) { + return IXGBE_ERR_INVALID_MAC_ADDR; + } + + /* + * If we found rar[0], make sure the default pool bit (we use pool 0) + * remains cleared to be sure default pool packets will get delivered + */ + if (rar == 0) + ixgbe_clear_vmdq(hw, rar, 0); + + return rar; +} + +/** + * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to disassociate + * @vmdq: VMDq pool index to remove from the rar + **/ +s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 mpsar_lo, mpsar_hi; + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("ixgbe_clear_vmdq_generic"); + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); + mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); + + if (IXGBE_REMOVED(hw->hw_addr)) + goto done; + + if (!mpsar_lo && !mpsar_hi) + goto done; + + if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { + if (mpsar_lo) { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); + mpsar_lo = 0; + } + if (mpsar_hi) { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); + mpsar_hi = 0; + } + } else if (vmdq < 32) { + mpsar_lo &= ~(1 << vmdq); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); + } else { + mpsar_hi &= ~(1 << (vmdq - 32)); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); + } + + /* was that the last pool using this rar? 
*/ + if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) + hw->mac.ops.clear_rar(hw, rar); +done: + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq pool index + **/ +s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 mpsar; + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("ixgbe_set_vmdq_generic"); + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, + "RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + if (vmdq < 32) { + mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); + mpsar |= 1 << vmdq; + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); + } else { + mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); + mpsar |= 1 << (vmdq - 32); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); + } + return IXGBE_SUCCESS; +} + +/** + * This function should only be involved in the IOV mode. + * In IOV mode, Default pool is next pool after the number of + * VFs advertized and not 0. + * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index] + * + * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address + * @hw: pointer to hardware struct + * @vmdq: VMDq pool index + **/ +s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) +{ + u32 rar = hw->mac.san_mac_rar_index; + + DEBUGFUNC("ixgbe_set_vmdq_san_mac"); + + if (vmdq < 32) { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); + } else { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32)); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array + * @hw: pointer to hardware structure + **/ +s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) +{ + int i; + + DEBUGFUNC("ixgbe_init_uta_tables_generic"); + DEBUGOUT(" Clearing UTA\n"); + + for (i = 0; i < 128; i++) + IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * + * return the VLVF index where this VLAN id should be placed + * + **/ +s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan) +{ + u32 bits = 0; + u32 first_empty_slot = 0; + s32 regindex; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* + * Search for the vlan id in the VLVF entries. Save off the first empty + * slot found along the way + */ + for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { + bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); + if (!bits && !(first_empty_slot)) + first_empty_slot = regindex; + else if ((bits & 0x0FFF) == vlan) + break; + } + + /* + * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan + * in the VLVF. Else use the first empty VLVF register for this + * vlan id. 
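+ * Note that slot 0 is reserved for VLAN 0 by the shortcut above,
+ * which is why both the search and first_empty_slot start from 1;
+ * first_empty_slot == 0 therefore means no free slot was seen.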
+ */ + if (regindex >= IXGBE_VLVF_ENTRIES) { + if (first_empty_slot) + regindex = first_empty_slot; + else { + ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, + "No space in VLVF.\n"); + regindex = IXGBE_ERR_NO_SPACE; + } + } + + return regindex; +} + +/** + * ixgbe_set_vfta_generic - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFVFB + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on) +{ + s32 regindex; + u32 bitindex; + u32 vfta; + u32 targetbit; + s32 ret_val = IXGBE_SUCCESS; + bool vfta_changed = false; + + DEBUGFUNC("ixgbe_set_vfta_generic"); + + if (vlan > 4095) + return IXGBE_ERR_PARAM; + + /* + * this is a 2 part operation - first the VFTA, then the + * VLVF and VLVFB if VT Mode is set + * We don't write the VFTA until we know the VLVF part succeeded. + */ + + /* Part 1 + * The VFTA is a bitstring made up of 128 32-bit registers + * that enable the particular VLAN id, much like the MTA: + * bits[11-5]: which register + * bits[4-0]: which bit in the register + */ + regindex = (vlan >> 5) & 0x7F; + bitindex = vlan & 0x1F; + targetbit = (1 << bitindex); + vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); + + if (vlan_on) { + if (!(vfta & targetbit)) { + vfta |= targetbit; + vfta_changed = true; + } + } else { + if ((vfta & targetbit)) { + vfta &= ~targetbit; + vfta_changed = true; + } + } + + /* Part 2 + * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF + */ + ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, + &vfta_changed); + if (ret_val != IXGBE_SUCCESS) + return ret_val; + + if (vfta_changed) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_vlvf_generic - Set VLAN Pool Filter + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFVFB + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * @vfta_changed: pointer to boolean flag which indicates whether VFTA + * should be changed + * + * Turn on/off specified bit in VLVF table. 
+ **/ +s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool *vfta_changed) +{ + u32 vt; + + DEBUGFUNC("ixgbe_set_vlvf_generic"); + + if (vlan > 4095) + return IXGBE_ERR_PARAM; + + /* If VT Mode is set + * Either vlan_on + * make sure the vlan is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ + vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + if (vt & IXGBE_VT_CTL_VT_ENABLE) { + s32 vlvf_index; + u32 bits; + + vlvf_index = ixgbe_find_vlvf_slot(hw, vlan); + if (vlvf_index < 0) + return vlvf_index; + + if (vlan_on) { + /* set the pool bit */ + if (vind < 32) { + bits = IXGBE_READ_REG(hw, + IXGBE_VLVFB(vlvf_index * 2)); + bits |= (1 << vind); + IXGBE_WRITE_REG(hw, + IXGBE_VLVFB(vlvf_index * 2), + bits); + } else { + bits = IXGBE_READ_REG(hw, + IXGBE_VLVFB((vlvf_index * 2) + 1)); + bits |= (1 << (vind - 32)); + IXGBE_WRITE_REG(hw, + IXGBE_VLVFB((vlvf_index * 2) + 1), + bits); + } + } else { + /* clear the pool bit */ + if (vind < 32) { + bits = IXGBE_READ_REG(hw, + IXGBE_VLVFB(vlvf_index * 2)); + bits &= ~(1 << vind); + IXGBE_WRITE_REG(hw, + IXGBE_VLVFB(vlvf_index * 2), + bits); + bits |= IXGBE_READ_REG(hw, + IXGBE_VLVFB((vlvf_index * 2) + 1)); + } else { + bits = IXGBE_READ_REG(hw, + IXGBE_VLVFB((vlvf_index * 2) + 1)); + bits &= ~(1 << (vind - 32)); + IXGBE_WRITE_REG(hw, + IXGBE_VLVFB((vlvf_index * 2) + 1), + bits); + bits |= IXGBE_READ_REG(hw, + IXGBE_VLVFB(vlvf_index * 2)); + } + } + + /* + * If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. + */ + if (bits) { + IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), + (IXGBE_VLVF_VIEN | vlan)); + if ((!vlan_on) && (vfta_changed != NULL)) { + /* someone wants to clear the vfta entry + * but some pools/VFs are still using it. + * Ignore it. 
*/ + *vfta_changed = false; + } + } else + IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_clear_vfta_generic - Clear VLAN filter table + * @hw: pointer to hardware structure + * + * Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) +{ + u32 offset; + + DEBUGFUNC("ixgbe_clear_vfta_generic"); + + for (offset = 0; offset < hw->mac.vft_size; offset++) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); + + for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { + IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_check_mac_link_generic - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + u32 links_reg, links_orig; + u32 i; + + DEBUGFUNC("ixgbe_check_mac_link_generic"); + + /* clear the old state */ + links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); + + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + + if (links_orig != links_reg) { + DEBUGOUT2("LINKS changed from %08X to %08X\n", + links_orig, links_reg); + } + + if (link_up_wait_to_complete) { + for (i = 0; i < hw->mac.max_link_up_time; i++) { + if (links_reg & IXGBE_LINKS_UP) { + *link_up = true; + break; + } else { + *link_up = false; + } + msec_delay(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + } + } else { + if (links_reg & IXGBE_LINKS_UP) + *link_up = true; + else + *link_up = false; + } + + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + if (hw->mac.type >= ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + } + break; + case IXGBE_LINKS_SPEED_1G_82599: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case IXGBE_LINKS_SPEED_100_82599: + *speed = IXGBE_LINK_SPEED_100_FULL; + if (hw->mac.type >= ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_5GB_FULL; + } + break; + default: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from + * the EEPROM + * @hw: pointer to hardware structure + * @wwnn_prefix: the alternative WWNN prefix + * @wwpn_prefix: the alternative WWPN prefix + * + * This function will read the EEPROM from the alternative SAN MAC address + * block to check the support for the alternative WWNN/WWPN prefix support. 
+ **/ +s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix) +{ + u16 offset, caps; + u16 alt_san_mac_blk_offset; + + DEBUGFUNC("ixgbe_get_wwn_prefix_generic"); + + /* clear output first */ + *wwnn_prefix = 0xFFFF; + *wwpn_prefix = 0xFFFF; + + /* check if alternative SAN MAC is supported */ + offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR; + if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset)) + goto wwn_prefix_err; + + if ((alt_san_mac_blk_offset == 0) || + (alt_san_mac_blk_offset == 0xFFFF)) + goto wwn_prefix_out; + + /* check capability in alternative san mac address block */ + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; + if (hw->eeprom.ops.read(hw, offset, &caps)) + goto wwn_prefix_err; + if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) + goto wwn_prefix_out; + + /* get the corresponding prefix for WWNN/WWPN */ + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; + if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", offset); + } + + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; + if (hw->eeprom.ops.read(hw, offset, wwpn_prefix)) + goto wwn_prefix_err; + +wwn_prefix_out: + return IXGBE_SUCCESS; + +wwn_prefix_err: + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", offset); + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM + * @hw: pointer to hardware structure + * @bs: the fcoe boot status + * + * This function will read the FCOE boot status from the iSCSI FCOE block + **/ +s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs) +{ + u16 offset, caps, flags; + s32 status; + + DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic"); + + /* clear output first */ + *bs = ixgbe_fcoe_bootstatus_unavailable; + + /* check if FCOE IBA block is present */ + offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR; + status = hw->eeprom.ops.read(hw, offset, &caps); + if (status != IXGBE_SUCCESS) + goto out; + + if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE)) + goto out; + + /* check if iSCSI FCOE block is populated */ + status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset); + if (status != IXGBE_SUCCESS) + goto out; + + if ((offset == 0) || (offset == 0xFFFF)) + goto out; + + /* read fcoe flags in iSCSI FCOE block */ + offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET; + status = hw->eeprom.ops.read(hw, offset, &flags); + if (status != IXGBE_SUCCESS) + goto out; + + if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE) + *bs = ixgbe_fcoe_bootstatus_enabled; + else + *bs = ixgbe_fcoe_bootstatus_disabled; + +out: + return status; +} + +/** + * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for anti-spoofing + * @pf: Physical Function pool - do not enable anti-spoofing for the PF + * + **/ +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf) +{ + int j; + int pf_target_reg = pf >> 3; + int pf_target_shift = pf % 8; + u32 pfvfspoof = 0; + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + if (enable) + pfvfspoof = IXGBE_SPOOF_MACAS_MASK; + + /* + * PFVFSPOOF register array is size 8 with 8 bits assigned to + * MAC anti-spoof enables in each register array element. 
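+	 *
+	 * Illustrative example (not part of the original comment): with
+	 * enable == true and pf == 10, pf_target_reg is 1 and
+	 * pf_target_shift is 2, so register 0 enables MAC anti-spoofing for
+	 * pools 0-7, register 1 keeps only bits 0-1 (pools 8-9), and the
+	 * remaining registers are written as zero for the PF-owned pools.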
+ */ + for (j = 0; j < pf_target_reg; j++) + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); + + /* + * The PF should be allowed to spoof so that it can support + * emulation mode NICs. Do not set the bits assigned to the PF + */ + pfvfspoof &= (1 << pf_target_shift) - 1; + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); + + /* + * Remaining pools belong to the PF so they do not need to have + * anti-spoofing enabled. + */ + for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++) + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0); +} + +/** + * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for VLAN anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing + * + **/ +void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) +{ + int vf_target_reg = vf >> 3; + int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT; + u32 pfvfspoof; + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); + if (enable) + pfvfspoof |= (1 << vf_target_shift); + else + pfvfspoof &= ~(1 << vf_target_shift); + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); +} + +/** + * ixgbe_get_device_caps_generic - Get additional device capabilities + * @hw: pointer to hardware structure + * @device_caps: the EEPROM word with the extra device capabilities + * + * This function will read the EEPROM location for the device capabilities, + * and return the word through device_caps. + **/ +s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) +{ + DEBUGFUNC("ixgbe_get_device_caps_generic"); + + hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering + * @hw: pointer to hardware structure + * + **/ +void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw) +{ + u32 regval; + u32 i; + + DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2"); + + /* Enable relaxed ordering */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); + regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); + } + + for (i = 0; i < hw->mac.max_rx_queues; i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN | + IXGBE_DCA_RXCTRL_HEAD_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); + } + +} + +/** + * ixgbe_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. 
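+ *
+ * Illustrative example (not part of the original comment): for a
+ * two-byte buffer {0x10, 0x20} the byte sum is 0x30, so the function
+ * returns (u8)(0 - 0x30) == 0xD0; adding the returned checksum back to
+ * the byte sum yields zero modulo 256.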
+ **/ +u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + DEBUGFUNC("ixgbe_calculate_checksum"); + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8) (0 - sum); +} + +/** + * ixgbe_host_interface_command - Issue command to manageability block + * @hw: pointer to the HW structure + * @buffer: contains the command to write and where the return status will + * be placed + * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion + * @return_data: read and return data from the buffer (true) or not (false) + * Needed because FW structures are big endian and decoding of + * these fields can be 8 bit or 16 bit based on command. Decoding + * is not easily understood without making a table of commands. + * So we will leave this up to the caller to read back the data + * in these cases. + * + * Communicates with the manageability block. On success return IXGBE_SUCCESS + * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. + **/ +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data) +{ + u32 hicr, i, bi, fwsts; + u32 hdr_size = sizeof(struct ixgbe_hic_hdr); + u16 buf_len; + u16 dword_len; + + DEBUGFUNC("ixgbe_host_interface_command"); + + if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + DEBUGOUT1("Buffer length failure buffersize=%d.\n", length); + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } + /* Set bit 9 of FWSTS clearing FW reset indication */ + fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); + IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI); + + /* Check that the host interface is enabled. */ + hicr = IXGBE_READ_REG(hw, IXGBE_HICR); + if ((hicr & IXGBE_HICR_EN) == 0) { + DEBUGOUT("IXGBE_HOST_EN bit disabled.\n"); + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } + + /* Calculate length in DWORDs. We must be DWORD aligned */ + if ((length % (sizeof(u32))) != 0) { + DEBUGOUT("Buffer length failure, not aligned to dword"); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + dword_len = length >> 2; + + /* The device driver writes the relevant command block + * into the ram area. + */ + for (i = 0; i < dword_len; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, + i, IXGBE_CPU_TO_LE32(buffer[i])); + + /* Setting this bit tells the ARC that a new command is pending. 
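+	 *
+	 * (Illustrative note, not part of the original comment: at this
+	 * point the command block has already been copied into
+	 * IXGBE_FLEX_MNG in DWORD units, e.g. a 12-byte command is written
+	 * as three little-endian DWORDs before IXGBE_HICR_C is raised.)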
*/ + IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); + + for (i = 0; i < timeout; i++) { + hicr = IXGBE_READ_REG(hw, IXGBE_HICR); + if (!(hicr & IXGBE_HICR_C)) + break; + msec_delay(1); + } + + /* Check command completion */ + if ((timeout != 0 && i == timeout) || + !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) { + ERROR_REPORT1(IXGBE_ERROR_CAUTION, + "Command has failed with no status valid.\n"); + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } + + if (!return_data) + return 0; + + /* Calculate length in DWORDs */ + dword_len = hdr_size >> 2; + + /* first pull in the header so we know the buffer length */ + for (bi = 0; bi < dword_len; bi++) { + buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); + IXGBE_LE32_TO_CPUS(&buffer[bi]); + } + + /* If there is any thing in data position pull it in */ + buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len; + if (buf_len == 0) + return 0; + + if (length < buf_len + hdr_size) { + DEBUGOUT("Buffer not large enough for reply message.\n"); + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } + + /* Calculate length in DWORDs, add 3 for odd lengths */ + dword_len = (buf_len + 3) >> 2; + + /* Pull in the rest of the buffer (bi is where we left off) */ + for (; bi <= dword_len; bi++) { + buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); + IXGBE_LE32_TO_CPUS(&buffer[bi]); + } + + return 0; +} + +/** + * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware + * @hw: pointer to the HW structure + * @maj: driver version major number + * @min: driver version minor number + * @build: driver version build number + * @sub: driver version sub build number + * + * Sends driver version number to firmware through the manageability + * block. On success return IXGBE_SUCCESS + * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
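+ *
+ * Illustrative usage (not part of the original comment): a driver
+ * reporting version 1.0.4.0 would call
+ * ixgbe_set_fw_drv_ver_generic(hw, 1, 0, 4, 0).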
+ **/ +s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub) +{ + struct ixgbe_hic_drv_info fw_cmd; + int i; + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_set_fw_drv_ver_generic"); + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM) + != IXGBE_SUCCESS) { + ret_val = IXGBE_ERR_SWFW_SYNC; + goto out; + } + + fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; + fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; + fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + fw_cmd.port_num = (u8)hw->bus.func; + fw_cmd.ver_maj = maj; + fw_cmd.ver_min = min; + fw_cmd.ver_build = build; + fw_cmd.ver_sub = sub; + fw_cmd.hdr.checksum = 0; + fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); + fw_cmd.pad = 0; + fw_cmd.pad2 = 0; + + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(fw_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (ret_val != IXGBE_SUCCESS) + continue; + + if (fw_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + ret_val = IXGBE_SUCCESS; + else + ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); +out: + return ret_val; +} + +/** + * ixgbe_set_rxpba_generic - Initialize Rx packet buffer + * @hw: pointer to hardware structure + * @num_pb: number of packet buffers to allocate + * @headroom: reserve n KB of headroom + * @strategy: packet buffer allocation strategy + **/ +void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, + int strategy) +{ + u32 pbsize = hw->mac.rx_pb_size; + int i = 0; + u32 rxpktsize, txpktsize, txpbthresh; + + /* Reserve headroom */ + pbsize -= headroom; + + if (!num_pb) + num_pb = 1; + + /* Divide remaining packet buffer space amongst the number of packet + * buffers requested using supplied strategy. + */ + switch (strategy) { + case PBA_STRATEGY_WEIGHTED: + /* ixgbe_dcb_pba_80_48 strategy weight first half of packet + * buffer with 5/8 of the packet buffer space. + */ + rxpktsize = (pbsize * 5) / (num_pb * 4); + pbsize -= rxpktsize * (num_pb / 2); + rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; + for (; i < (num_pb / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + /* Fall through to configure remaining packet buffers */ + case PBA_STRATEGY_EQUAL: + rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; + for (; i < num_pb; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + break; + default: + break; + } + + /* Only support an equally distributed Tx packet buffer strategy. */ + txpktsize = IXGBE_TXPBSIZE_MAX / num_pb; + txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX; + for (i = 0; i < num_pb; i++) { + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); + IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); + } + + /* Clear unused TCs, if any, to zero buffer size*/ + for (; i < IXGBE_MAX_PB; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); + } +} + +/** + * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo + * @hw: pointer to the hardware structure + * + * The 82599 and x540 MACs can experience issues if TX work is still pending + * when a reset occurs. This function prevents this by flushing the PCIe + * buffers on the system. 
+ **/ +void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) +{ + u32 gcr_ext, hlreg0, i, poll; + u16 value; + + /* + * If double reset is not requested then all transactions should + * already be clear and as such there is no work to do + */ + if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED)) + return; + + /* + * Set loopback enable to prevent any transmits from being sent + * should the link come up. This assumes that the RXCTRL.RXEN bit + * has already been cleared. + */ + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK); + + /* Wait for a last completion before clearing buffers */ + IXGBE_WRITE_FLUSH(hw); + msec_delay(3); + + /* + * Before proceeding, make sure that the PCIe block does not have + * transactions pending. + */ + poll = ixgbe_pcie_timeout_poll(hw); + for (i = 0; i < poll; i++) { + usec_delay(100); + value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS); + if (IXGBE_REMOVED(hw->hw_addr)) + goto out; + if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) + goto out; + } + +out: + /* initiate cleaning flow for buffers in the PCIe transaction layer */ + gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, + gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR); + + /* Flush all writes and allow 20usec for all transactions to clear */ + IXGBE_WRITE_FLUSH(hw); + usec_delay(20); + + /* restore previous register values */ + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); +} + +STATIC const u8 ixgbe_emc_temp_data[4] = { + IXGBE_EMC_INTERNAL_DATA, + IXGBE_EMC_DIODE1_DATA, + IXGBE_EMC_DIODE2_DATA, + IXGBE_EMC_DIODE3_DATA +}; +STATIC const u8 ixgbe_emc_therm_limit[4] = { + IXGBE_EMC_INTERNAL_THERM_LIMIT, + IXGBE_EMC_DIODE1_THERM_LIMIT, + IXGBE_EMC_DIODE2_THERM_LIMIT, + IXGBE_EMC_DIODE3_THERM_LIMIT +}; + +/** + * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data + * @hw: pointer to hardware structure + * @data: pointer to the thermal sensor data structure + * + * Returns the thermal sensor data structure + **/ +s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 num_sensors; + u8 sensor_index; + u8 sensor_location; + u8 i; + struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + DEBUGFUNC("ixgbe_get_thermal_sensor_data_generic"); + + /* Only support thermal sensors attached to 82599 physical port 0 */ + if ((hw->mac.type != ixgbe_mac_82599EB) || + (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) { + status = IXGBE_NOT_IMPLEMENTED; + goto out; + } + + status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset); + if (status) + goto out; + + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) { + status = IXGBE_NOT_IMPLEMENTED; + goto out; + } + + status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg); + if (status) + goto out; + + if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT) + != IXGBE_ETS_TYPE_EMC) { + status = IXGBE_NOT_IMPLEMENTED; + goto out; + } + + num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); + if (num_sensors > IXGBE_MAX_SENSORS) + num_sensors = IXGBE_MAX_SENSORS; + + for (i = 0; i < num_sensors; i++) { + status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i), + &ets_sensor); + if (status) + goto out; + + sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> + IXGBE_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >> + IXGBE_ETS_DATA_LOC_SHIFT); + 
+ if (sensor_location != 0) { + status = hw->phy.ops.read_i2c_byte(hw, + ixgbe_emc_temp_data[sensor_index], + IXGBE_I2C_THERMAL_SENSOR_ADDR, + &data->sensor[i].temp); + if (status) + goto out; + } + } +out: + return status; +} + +/** + * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Inits the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u16 offset; + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 low_thresh_delta; + u8 num_sensors; + u8 sensor_index; + u8 sensor_location; + u8 therm_limit; + u8 i; + struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + DEBUGFUNC("ixgbe_init_thermal_sensor_thresh_generic"); + + memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data)); + + /* Only support thermal sensors attached to 82599 physical port 0 */ + if ((hw->mac.type != ixgbe_mac_82599EB) || + (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) + return IXGBE_NOT_IMPLEMENTED; + + offset = IXGBE_ETS_CFG; + if (hw->eeprom.ops.read(hw, offset, &ets_offset)) + goto eeprom_err; + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return IXGBE_NOT_IMPLEMENTED; + + offset = ets_offset; + if (hw->eeprom.ops.read(hw, offset, &ets_cfg)) + goto eeprom_err; + if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT) + != IXGBE_ETS_TYPE_EMC) + return IXGBE_NOT_IMPLEMENTED; + + low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >> + IXGBE_ETS_LTHRES_DELTA_SHIFT); + num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); + + for (i = 0; i < num_sensors; i++) { + offset = ets_offset + 1 + i; + if (hw->eeprom.ops.read(hw, offset, &ets_sensor)) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", + offset); + continue; + } + sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> + IXGBE_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >> + IXGBE_ETS_DATA_LOC_SHIFT); + therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK; + + hw->phy.ops.write_i2c_byte(hw, + ixgbe_emc_therm_limit[sensor_index], + IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit); + + if ((i < IXGBE_MAX_SENSORS) && (sensor_location != 0)) { + data->sensor[i].location = sensor_location; + data->sensor[i].caution_thresh = therm_limit; + data->sensor[i].max_op_thresh = therm_limit - + low_thresh_delta; + } + } + return status; + +eeprom_err: + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", offset); + return IXGBE_NOT_IMPLEMENTED; +} + + +/** + * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg + * @hw: pointer to hardware structure + * @map: pointer to u8 arr for returning map + * + * Read the rtrup2tc HW register and resolve its content into map + **/ +void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map) +{ + u32 reg, i; + + reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); + for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) + map[i] = IXGBE_RTRUP2TC_UP_MASK & + (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)); + return; +} + +void ixgbe_disable_rx_generic(struct ixgbe_hw *hw) +{ + u32 pfdtxgswc; + u32 rxctrl; + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + if (hw->mac.type != ixgbe_mac_82598EB) { + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { + pfdtxgswc &= 
~IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = true; + } else { + hw->mac.set_lben = false; + } + } + rxctrl &= ~IXGBE_RXCTRL_RXEN; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); + } +} + +void ixgbe_enable_rx_generic(struct ixgbe_hw *hw) +{ + u32 pfdtxgswc; + u32 rxctrl; + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN)); + + if (hw->mac.type != ixgbe_mac_82598EB) { + if (hw->mac.set_lben) { + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = false; + } + } +} + +/** + * ixgbe_mng_present - returns true when management capability is present + * @hw: pointer to hardware structure + */ +bool ixgbe_mng_present(struct ixgbe_hw *hw) +{ + u32 fwsm; + + if (hw->mac.type < ixgbe_mac_82599EB) + return false; + + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)); + fwsm &= IXGBE_FWSM_MODE_MASK; + return fwsm == IXGBE_FWSM_FW_MODE_PT; +} + +/** + * ixgbe_mng_enabled - Is the manageability engine enabled? + * @hw: pointer to hardware structure + * + * Returns true if the manageability engine is enabled. + **/ +bool ixgbe_mng_enabled(struct ixgbe_hw *hw) +{ + u32 fwsm, manc, factps; + + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)); + if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) + return false; + + manc = IXGBE_READ_REG(hw, IXGBE_MANC); + if (!(manc & IXGBE_MANC_RCV_TCO_EN)) + return false; + + if (hw->mac.type <= ixgbe_mac_X540) { + factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw)); + if (factps & IXGBE_FACTPS_MNGCG) + return false; + } + + return true; +} + +/** + * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the MAC and/or PHY register and restarts link. + **/ +s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; + ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; + s32 status = IXGBE_SUCCESS; + u32 speedcnt = 0; + u32 i = 0; + bool autoneg, link_up = false; + + DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber"); + + /* Mask off requested but non-supported speeds */ + status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg); + if (status != IXGBE_SUCCESS) + return status; + + speed &= link_speed; + + /* Try each speed one by one, highest priority first. We do this in + * software because 10Gb fiber doesn't support speed autonegotiation. 
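+	 *
+	 * Illustrative flow (not part of the original comment): a request
+	 * for 10G|1G first tries 10 Gb SFI, polling for link up to five
+	 * times at 100 ms intervals, then drops to 1 Gb, and finally
+	 * re-invokes this routine with only the highest requested speed if
+	 * neither attempt brought the link up.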
+ */ + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + speedcnt++; + highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = ixgbe_check_link(hw, &link_speed, &link_up, false); + if (status != IXGBE_SUCCESS) + return status; + + if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) + goto out; + + /* Set the module link speed */ + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: + ixgbe_set_rate_select_speed(hw, + IXGBE_LINK_SPEED_10GB_FULL); + break; + case ixgbe_media_type_fiber_qsfp: + /* QSFP module automatically detects MAC link speed */ + break; + default: + DEBUGOUT("Unexpected media type.\n"); + break; + } + + /* Allow module to change analog characteristics (1G->10G) */ + msec_delay(40); + + status = ixgbe_setup_mac_link(hw, + IXGBE_LINK_SPEED_10GB_FULL, + autoneg_wait_to_complete); + if (status != IXGBE_SUCCESS) + return status; + + /* Flap the Tx laser if it has not already been done */ + ixgbe_flap_tx_laser(hw); + + /* Wait for the controller to acquire link. Per IEEE 802.3ap, + * Section 73.10.2, we may have to wait up to 500ms if KR is + * attempted. 82599 uses the same timing for 10g SFI. + */ + for (i = 0; i < 5; i++) { + /* Wait for the link partner to also set speed */ + msec_delay(100); + + /* If we have link, just jump out */ + status = ixgbe_check_link(hw, &link_speed, + &link_up, false); + if (status != IXGBE_SUCCESS) + return status; + + if (link_up) + goto out; + } + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + speedcnt++; + if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) + highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = ixgbe_check_link(hw, &link_speed, &link_up, false); + if (status != IXGBE_SUCCESS) + return status; + + if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) + goto out; + + /* Set the module link speed */ + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: + ixgbe_set_rate_select_speed(hw, + IXGBE_LINK_SPEED_1GB_FULL); + break; + case ixgbe_media_type_fiber_qsfp: + /* QSFP module automatically detects link speed */ + break; + default: + DEBUGOUT("Unexpected media type.\n"); + break; + } + + /* Allow module to change analog characteristics (10G->1G) */ + msec_delay(40); + + status = ixgbe_setup_mac_link(hw, + IXGBE_LINK_SPEED_1GB_FULL, + autoneg_wait_to_complete); + if (status != IXGBE_SUCCESS) + return status; + + /* Flap the Tx laser if it has not already been done */ + ixgbe_flap_tx_laser(hw); + + /* Wait for the link partner to also set speed */ + msec_delay(100); + + /* If we have link, just jump out */ + status = ixgbe_check_link(hw, &link_speed, &link_up, false); + if (status != IXGBE_SUCCESS) + return status; + + if (link_up) + goto out; + } + + /* We didn't get link. Configure back to the highest speed we tried, + * (if there was more than one). We call ourselves back with just the + * single highest speed that the user requested. 
+ */ + if (speedcnt > 1) + status = ixgbe_setup_mac_link_multispeed_fiber(hw, + highest_link_speed, + autoneg_wait_to_complete); + +out: + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + return status; +} + +/** + * ixgbe_set_soft_rate_select_speed - Set module link speed + * @hw: pointer to hardware structure + * @speed: link speed to set + * + * Set module link speed via the soft rate select. + */ +void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed) +{ + s32 status; + u8 rs, eeprom_data; + + switch (speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + /* one bit mask same as setting on */ + rs = IXGBE_SFF_SOFT_RS_SELECT_10G; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + rs = IXGBE_SFF_SOFT_RS_SELECT_1G; + break; + default: + DEBUGOUT("Invalid fixed module speed\n"); + return; + } + + /* Set RS0 */ + status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + &eeprom_data); + if (status) { + DEBUGOUT("Failed to read Rx Rate Select RS0\n"); + goto out; + } + + eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; + + status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + eeprom_data); + if (status) { + DEBUGOUT("Failed to write Rx Rate Select RS0\n"); + goto out; + } + + /* Set RS1 */ + status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + &eeprom_data); + if (status) { + DEBUGOUT("Failed to read Rx Rate Select RS1\n"); + goto out; + } + + eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; + + status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + eeprom_data); + if (status) { + DEBUGOUT("Failed to write Rx Rate Select RS1\n"); + goto out; + } +out: + return; +} diff --git a/drivers/net/ixgbe/base/ixgbe_common.h b/drivers/net/ixgbe/base/ixgbe_common.h new file mode 100644 index 00000000..fd67a889 --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_common.h @@ -0,0 +1,188 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _IXGBE_COMMON_H_ +#define _IXGBE_COMMON_H_ + +#include "ixgbe_type.h" +#define IXGBE_WRITE_REG64(hw, reg, value) \ + do { \ + IXGBE_WRITE_REG(hw, reg, (u32) value); \ + IXGBE_WRITE_REG(hw, reg + 4, (u32) (value >> 32)); \ + } while (0) +#define IXGBE_REMOVED(a) (0) +struct ixgbe_pba { + u16 word[2]; + u16 *pba_block; +}; + +void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map); + +u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); +s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); +s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw); +s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); +s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num); +s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, + u32 pba_num_size); +s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, u16 max_pba_block_size, + struct ixgbe_pba *pba); +s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, struct ixgbe_pba *pba); +s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf, + u32 eeprom_buf_size, u16 *pba_block_size); +s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); +s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); +void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status); +void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw); +s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw); + +s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index); + +s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); +s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); +s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 *data); +s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); +s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, + u16 *checksum_val); +s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); +s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); + +s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr); +s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); +s32 
ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, + ixgbe_mc_addr_itr func, bool clear); +s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr func); +s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); +s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); +s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw); +s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw); + +s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); +bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); +void ixgbe_fc_autoneg(struct ixgbe_hw *hw); +s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw); + +s32 ixgbe_validate_mac_addr(u8 *mac_addr); +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask); +s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); + +s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val); +s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked); + +s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); + +s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); +s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); + +s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq); +s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); +s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw); +s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, + u32 vind, bool vlan_on); +s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool *vfta_changed); +s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); +s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan); + +s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete); + +s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix); + +s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs); +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf); +void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); +s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); +void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, + int strategy); +void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw); +s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 ver); +u8 ixgbe_calculate_checksum(u8 *buffer, u32 length); +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data); + +void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); + +extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw); +extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw); +bool ixgbe_mng_present(struct ixgbe_hw *hw); +bool ixgbe_mng_enabled(struct ixgbe_hw *hw); + +#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 +#define IXGBE_EMC_INTERNAL_DATA 0x00 +#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20 +#define IXGBE_EMC_DIODE1_DATA 0x01 +#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19 +#define IXGBE_EMC_DIODE2_DATA 0x23 +#define 
IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A +#define IXGBE_EMC_DIODE3_DATA 0x2A +#define IXGBE_EMC_DIODE3_THERM_LIMIT 0x30 + +s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw); +s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); +void ixgbe_disable_rx_generic(struct ixgbe_hw *hw); +void ixgbe_enable_rx_generic(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed); +#endif /* IXGBE_COMMON */ diff --git a/drivers/net/ixgbe/base/ixgbe_dcb.c b/drivers/net/ixgbe/base/ixgbe_dcb.c new file mode 100644 index 00000000..9a6a5085 --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_dcb.c @@ -0,0 +1,727 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + + +#include "ixgbe_type.h" +#include "ixgbe_dcb.h" +#include "ixgbe_dcb_82598.h" +#include "ixgbe_dcb_82599.h" + +/** + * ixgbe_dcb_calculate_tc_credits - This calculates the ieee traffic class + * credits from the configured bandwidth percentages. Credits + * are the smallest unit programmable into the underlying + * hardware. The IEEE 802.1Qaz specification do not use bandwidth + * groups so this is much simplified from the CEE case. 
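+ *
+ * Illustrative example (not part of the original comment, and assuming
+ * a 64-byte IXGBE_DCB_CREDIT_QUANTUM): for a 1518-byte max frame,
+ * min_credit works out to 12; with per-TC bandwidths of 50/30/20
+ * percent the multiplier is (12 / 20) + 1 == 1, so each TC is refilled
+ * with max(bw[i], 12) credits.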
+ */ +s32 ixgbe_dcb_calculate_tc_credits(u8 *bw, u16 *refill, u16 *max, + int max_frame_size) +{ + int min_percent = 100; + int min_credit, multiplier; + int i; + + min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) / + IXGBE_DCB_CREDIT_QUANTUM; + + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if (bw[i] < min_percent && bw[i]) + min_percent = bw[i]; + } + + multiplier = (min_credit / min_percent) + 1; + + /* Find out the hw credits for each TC */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + int val = min(bw[i] * multiplier, IXGBE_DCB_MAX_CREDIT_REFILL); + + if (val < min_credit) + val = min_credit; + refill[i] = (u16)val; + + max[i] = bw[i] ? (bw[i]*IXGBE_DCB_MAX_CREDIT)/100 : min_credit; + } + + return 0; +} + +/** + * ixgbe_dcb_calculate_tc_credits_cee - Calculates traffic class credits + * @ixgbe_dcb_config: Struct containing DCB settings. + * @direction: Configuring either Tx or Rx. + * + * This function calculates the credits allocated to each traffic class. + * It should be called only after the rules are checked by + * ixgbe_dcb_check_config_cee(). + */ +s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config, + u32 max_frame_size, u8 direction) +{ + struct ixgbe_dcb_tc_path *p; + u32 min_multiplier = 0; + u16 min_percent = 100; + s32 ret_val = IXGBE_SUCCESS; + /* Initialization values default for Tx settings */ + u32 min_credit = 0; + u32 credit_refill = 0; + u32 credit_max = 0; + u16 link_percentage = 0; + u8 bw_percent = 0; + u8 i; + + if (dcb_config == NULL) { + ret_val = IXGBE_ERR_CONFIG; + goto out; + } + + min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) / + IXGBE_DCB_CREDIT_QUANTUM; + + /* Find smallest link percentage */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + p = &dcb_config->tc_config[i].path[direction]; + bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; + link_percentage = p->bwg_percent; + + link_percentage = (link_percentage * bw_percent) / 100; + + if (link_percentage && link_percentage < min_percent) + min_percent = link_percentage; + } + + /* + * The ratio between traffic classes will control the bandwidth + * percentages seen on the wire. To calculate this ratio we use + * a multiplier. It is required that the refill credits must be + * larger than the max frame size so here we find the smallest + * multiplier that will allow all bandwidth percentages to be + * greater than the max frame size. 
+ */ + min_multiplier = (min_credit / min_percent) + 1; + + /* Find out the link percentage for each TC first */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + p = &dcb_config->tc_config[i].path[direction]; + bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; + + link_percentage = p->bwg_percent; + /* Must be careful of integer division for very small nums */ + link_percentage = (link_percentage * bw_percent) / 100; + if (p->bwg_percent > 0 && link_percentage == 0) + link_percentage = 1; + + /* Save link_percentage for reference */ + p->link_percent = (u8)link_percentage; + + /* Calculate credit refill ratio using multiplier */ + credit_refill = min(link_percentage * min_multiplier, + (u32)IXGBE_DCB_MAX_CREDIT_REFILL); + + /* Refill at least minimum credit */ + if (credit_refill < min_credit) + credit_refill = min_credit; + + p->data_credits_refill = (u16)credit_refill; + + /* Calculate maximum credit for the TC */ + credit_max = (link_percentage * IXGBE_DCB_MAX_CREDIT) / 100; + + /* + * Adjustment based on rule checking, if the percentage + * of a TC is too small, the maximum credit may not be + * enough to send out a jumbo frame in data plane arbitration. + */ + if (credit_max < min_credit) + credit_max = min_credit; + + if (direction == IXGBE_DCB_TX_CONFIG) { + /* + * Adjustment based on rule checking, if the + * percentage of a TC is too small, the maximum + * credit may not be enough to send out a TSO + * packet in descriptor plane arbitration. + */ + if (credit_max && (credit_max < + IXGBE_DCB_MIN_TSO_CREDIT) + && (hw->mac.type == ixgbe_mac_82598EB)) + credit_max = IXGBE_DCB_MIN_TSO_CREDIT; + + dcb_config->tc_config[i].desc_credits_max = + (u16)credit_max; + } + + p->data_credits_max = (u16)credit_max; + } + +out: + return ret_val; +} + +/** + * ixgbe_dcb_unpack_pfc_cee - Unpack dcb_config PFC info + * @cfg: dcb configuration to unpack into hardware consumable fields + * @map: user priority to traffic class map + * @pfc_up: u8 to store user priority PFC bitmask + * + * This unpacks the dcb configuration PFC info which is stored per + * traffic class into a 8bit user priority bitmask that can be + * consumed by hardware routines. The priority to tc map must be + * updated before calling this routine to use current up-to maps. + */ +void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *cfg, u8 *map, u8 *pfc_up) +{ + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int up; + + /* + * If the TC for this user priority has PFC enabled then set the + * matching bit in 'pfc_up' to reflect that PFC is enabled. 
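+	 *
+	 * Illustrative example (not part of the original comment): if user
+	 * priorities 0 and 3 map to traffic classes with PFC enabled and
+	 * the rest do not, the resulting bitmask is *pfc_up == 0x09.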
+ */ + for (*pfc_up = 0, up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) { + if (tc_config[map[up]].pfc != ixgbe_dcb_pfc_disabled) + *pfc_up |= 1 << up; + } +} + +void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *cfg, int direction, + u16 *refill) +{ + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + refill[tc] = tc_config[tc].path[direction].data_credits_refill; +} + +void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *cfg, u16 *max) +{ + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + max[tc] = tc_config[tc].desc_credits_max; +} + +void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *cfg, int direction, + u8 *bwgid) +{ + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + bwgid[tc] = tc_config[tc].path[direction].bwg_id; +} + +void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *cfg, int direction, + u8 *tsa) +{ + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + tsa[tc] = tc_config[tc].path[direction].tsa; +} + +u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up) +{ + struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + u8 prio_mask = 1 << up; + u8 tc = cfg->num_tcs.pg_tcs; + + /* If tc is 0 then DCB is likely not enabled or supported */ + if (!tc) + goto out; + + /* + * Test from maximum TC to 1 and report the first match we find. If + * we find no match we can assume that the TC is 0 since the TC must + * be set for all user priorities + */ + for (tc--; tc; tc--) { + if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap) + break; + } +out: + return tc; +} + +void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *cfg, int direction, + u8 *map) +{ + u8 up; + + for (up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) + map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up); +} + +/** + * ixgbe_dcb_config - Struct containing DCB settings. + * @dcb_config: Pointer to DCB config structure + * + * This function checks DCB rules for DCB settings. + * The following rules are checked: + * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%. + * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth + * Group must total 100. + * 3. A Traffic Class should not be set to both Link Strict Priority + * and Group Strict Priority. + * 4. Link strict Bandwidth Groups can only have link strict traffic classes + * with zero bandwidth. 
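+ *
+ * Illustrative example (not part of the original comment): a non-strict
+ * Bandwidth Group whose traffic class percentages sum to 90 instead of
+ * 100 violates rule 2 and makes this function return IXGBE_ERR_CONFIG.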
+ */ +s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *dcb_config) +{ + struct ixgbe_dcb_tc_path *p; + s32 ret_val = IXGBE_SUCCESS; + u8 i, j, bw = 0, bw_id; + u8 bw_sum[2][IXGBE_DCB_MAX_BW_GROUP]; + bool link_strict[2][IXGBE_DCB_MAX_BW_GROUP]; + + memset(bw_sum, 0, sizeof(bw_sum)); + memset(link_strict, 0, sizeof(link_strict)); + + /* First Tx, then Rx */ + for (i = 0; i < 2; i++) { + /* Check each traffic class for rule violation */ + for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { + p = &dcb_config->tc_config[j].path[i]; + + bw = p->bwg_percent; + bw_id = p->bwg_id; + + if (bw_id >= IXGBE_DCB_MAX_BW_GROUP) { + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + if (p->tsa == ixgbe_dcb_tsa_strict) { + link_strict[i][bw_id] = true; + /* Link strict should have zero bandwidth */ + if (bw) { + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + } else if (!bw) { + /* + * Traffic classes without link strict + * should have non-zero bandwidth. + */ + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + bw_sum[i][bw_id] += bw; + } + + bw = 0; + + /* Check each bandwidth group for rule violation */ + for (j = 0; j < IXGBE_DCB_MAX_BW_GROUP; j++) { + bw += dcb_config->bw_percentage[i][j]; + /* + * Sum of bandwidth percentages of all traffic classes + * within a Bandwidth Group must total 100 except for + * link strict group (zero bandwidth). + */ + if (link_strict[i][j]) { + if (bw_sum[i][j]) { + /* + * Link strict group should have zero + * bandwidth. + */ + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + } else if (bw_sum[i][j] != IXGBE_DCB_BW_PERCENT && + bw_sum[i][j] != 0) { + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + } + + if (bw != IXGBE_DCB_BW_PERCENT) { + ret_val = IXGBE_ERR_CONFIG; + goto err_config; + } + } + +err_config: + + return ret_val; +} + +/** + * ixgbe_dcb_get_tc_stats - Returns status of each traffic class + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of elements in bwg_array. + * + * This function returns the status data for each of the Traffic Classes in use. + */ +s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of elements in bwg_array. + * + * This function returns the CBFC status data for each of the Traffic Classes. 
+ */ +s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_config_rx_arbiter_cee - Config Rx arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Rx Data Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; + u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; + u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; + u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; + u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; + + ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max_cee(dcb_config, max); + ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); + ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); + ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwgid, + tsa, map); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_config_tx_desc_arbiter_cee - Config Tx Desc arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Tx Descriptor Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + + ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max_cee(dcb_config, max); + ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); + ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, + bwgid, tsa); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, + bwgid, tsa); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_config_tx_data_arbiter_cee - Config Tx data arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Tx Data Arbiter and credits for each traffic class. 
+ */ +s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; + u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + + ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max_cee(dcb_config, max); + ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); + ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); + ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, + bwgid, tsa); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, + bwgid, tsa, + map); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_config_pfc_cee - Config priority flow control + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Priority Flow Control for each traffic class. + */ +s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + u8 pfc_en; + u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; + + ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); + ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_config_tc_stats - Config traffic class statistics + * @hw: pointer to hardware structure + * + * Configure queue statistics registers, all queues belonging to same traffic + * class uses a single set of queue statistics counters. + */ +s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_config_tc_stats_82598(hw); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_tc_stats_82599(hw, NULL); + break; + default: + break; + } + return ret; +} + +/** + * ixgbe_dcb_hw_config_cee - Config and enable DCB + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure dcb settings and enable dcb mode. 
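+ *
+ * The sequence is: unpack the CEE containers, program the Rx/Tx arbiters for
+ * the detected MAC (the 82599-class path also runs ixgbe_dcb_config_82599()
+ * and the per-TC statistics mapping), and finally, if pfc_mode_enable is set
+ * and the arbiter setup succeeded, enable priority flow control.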
+ */ +s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + s32 ret = IXGBE_NOT_IMPLEMENTED; + u8 pfc_en; + u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; + u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + + /* Unpack CEE standard containers */ + ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max_cee(dcb_config, max); + ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); + ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); + ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->link_speed, + refill, max, bwgid, tsa); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ixgbe_dcb_config_82599(hw, dcb_config); + ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->link_speed, + refill, max, bwgid, + tsa, map); + + ixgbe_dcb_config_tc_stats_82599(hw, dcb_config); + break; + default: + break; + } + + if (!ret && dcb_config->pfc_mode_enable) { + ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en); + ret = ixgbe_dcb_config_pfc(hw, pfc_en, map); + } + + return ret; +} + +/* Helper routines to abstract HW specifics from DCB netlink ops */ +s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw, u8 pfc_en, u8 *map) +{ + int ret = IXGBE_ERR_PARAM; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map); + break; + default: + break; + } + return ret; +} + +s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, + u8 *bwg_id, u8 *tsa, u8 *map) +{ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa); + ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, + tsa); + ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, + tsa); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, + tsa, map); + ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, + tsa); + ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, + tsa, map); + break; + default: + break; + } + return 0; +} diff --git a/drivers/net/ixgbe/base/ixgbe_dcb.h b/drivers/net/ixgbe/base/ixgbe_dcb.h new file mode 100644 index 00000000..41208049 --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_dcb.h @@ -0,0 +1,174 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. 
Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _IXGBE_DCB_H_ +#define _IXGBE_DCB_H_ + +#include "ixgbe_type.h" + +/* DCB defines */ +/* DCB credit calculation defines */ +#define IXGBE_DCB_CREDIT_QUANTUM 64 +#define IXGBE_DCB_MAX_CREDIT_REFILL 200 /* 200 * 64B = 12800B */ +#define IXGBE_DCB_MAX_TSO_SIZE (32 * 1024) /* Max TSO pkt size in DCB*/ +#define IXGBE_DCB_MAX_CREDIT (2 * IXGBE_DCB_MAX_CREDIT_REFILL) + +/* 513 for 32KB TSO packet */ +#define IXGBE_DCB_MIN_TSO_CREDIT \ + ((IXGBE_DCB_MAX_TSO_SIZE / IXGBE_DCB_CREDIT_QUANTUM) + 1) + +/* DCB configuration defines */ +#define IXGBE_DCB_MAX_USER_PRIORITY 8 +#define IXGBE_DCB_MAX_BW_GROUP 8 +#define IXGBE_DCB_BW_PERCENT 100 + +#define IXGBE_DCB_TX_CONFIG 0 +#define IXGBE_DCB_RX_CONFIG 1 + +/* DCB capability defines */ +#define IXGBE_DCB_PG_SUPPORT 0x00000001 +#define IXGBE_DCB_PFC_SUPPORT 0x00000002 +#define IXGBE_DCB_BCN_SUPPORT 0x00000004 +#define IXGBE_DCB_UP2TC_SUPPORT 0x00000008 +#define IXGBE_DCB_GSP_SUPPORT 0x00000010 + +struct ixgbe_dcb_support { + u32 capabilities; /* DCB capabilities */ + + /* Each bit represents a number of TCs configurable in the hw. + * If 8 traffic classes can be configured, the value is 0x80. 
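+ * By the same convention a device limited to 4 traffic classes would
+ * presumably report 0x08 (bit 3 set).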
*/ + u8 traffic_classes; + u8 pfc_traffic_classes; +}; + +enum ixgbe_dcb_tsa { + ixgbe_dcb_tsa_ets = 0, + ixgbe_dcb_tsa_group_strict_cee, + ixgbe_dcb_tsa_strict +}; + +/* Traffic class bandwidth allocation per direction */ +struct ixgbe_dcb_tc_path { + u8 bwg_id; /* Bandwidth Group (BWG) ID */ + u8 bwg_percent; /* % of BWG's bandwidth */ + u8 link_percent; /* % of link bandwidth */ + u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */ + u16 data_credits_refill; /* Credit refill amount in 64B granularity */ + u16 data_credits_max; /* Max credits for a configured packet buffer + * in 64B granularity.*/ + enum ixgbe_dcb_tsa tsa; /* Link or Group Strict Priority */ +}; + +enum ixgbe_dcb_pfc { + ixgbe_dcb_pfc_disabled = 0, + ixgbe_dcb_pfc_enabled, + ixgbe_dcb_pfc_enabled_txonly, + ixgbe_dcb_pfc_enabled_rxonly +}; + +/* Traffic class configuration */ +struct ixgbe_dcb_tc_config { + struct ixgbe_dcb_tc_path path[2]; /* One each for Tx/Rx */ + enum ixgbe_dcb_pfc pfc; /* Class based flow control setting */ + + u16 desc_credits_max; /* For Tx Descriptor arbitration */ + u8 tc; /* Traffic class (TC) */ +}; + +enum ixgbe_dcb_pba { + /* PBA[0-7] each use 64KB FIFO */ + ixgbe_dcb_pba_equal = PBA_STRATEGY_EQUAL, + /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */ + ixgbe_dcb_pba_80_48 = PBA_STRATEGY_WEIGHTED +}; + +struct ixgbe_dcb_num_tcs { + u8 pg_tcs; + u8 pfc_tcs; +}; + +struct ixgbe_dcb_config { + struct ixgbe_dcb_tc_config tc_config[IXGBE_DCB_MAX_TRAFFIC_CLASS]; + struct ixgbe_dcb_support support; + struct ixgbe_dcb_num_tcs num_tcs; + u8 bw_percentage[2][IXGBE_DCB_MAX_BW_GROUP]; /* One each for Tx/Rx */ + bool pfc_mode_enable; + bool round_robin_enable; + + enum ixgbe_dcb_pba rx_pba_cfg; + + u32 dcb_cfg_version; /* Not used...OS-specific? */ + u32 link_speed; /* For bandwidth allocation validation purpose */ + bool vt_mode; +}; + +/* DCB driver APIs */ + +/* DCB rule checking */ +s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *); + +/* DCB credits calculation */ +s32 ixgbe_dcb_calculate_tc_credits(u8 *, u16 *, u16 *, int); +s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *, + struct ixgbe_dcb_config *, u32, u8); + +/* DCB PFC */ +s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, u8, u8 *); +s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *); + +/* DCB stats */ +s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *); +s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8); +s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8); + +/* DCB config arbiters */ +s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *, + struct ixgbe_dcb_config *); +s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *, + struct ixgbe_dcb_config *); +s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *, + struct ixgbe_dcb_config *); + +/* DCB unpack routines */ +void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *, u8 *, u8 *); +void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *, int, u16 *); +void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *, u16 *); +void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *, int, u8 *); +void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *, int, u8 *); +void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *, int, u8 *); +u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8); + +/* DCB initialization */ +s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, u16 *, u16 *, u8 *, u8 *, u8 *); +s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *); +#endif /* 
_IXGBE_DCB_H_ */ diff --git a/drivers/net/ixgbe/base/ixgbe_dcb_82598.c b/drivers/net/ixgbe/base/ixgbe_dcb_82598.c new file mode 100644 index 00000000..7ff7beb4 --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_dcb_82598.c @@ -0,0 +1,360 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + + +#include "ixgbe_type.h" +#include "ixgbe_dcb.h" +#include "ixgbe_dcb_82598.h" + +/** + * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of elements in bwg_array. + * + * This function returns the status data for each of the Traffic Classes in use. + */ +s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw, + struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + int tc; + + DEBUGFUNC("dcb_get_tc_stats"); + + if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) + return IXGBE_ERR_PARAM; + + /* Statistics pertaining to each traffic class */ + for (tc = 0; tc < tc_count; tc++) { + /* Transmitted Packets */ + stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc)); + /* Transmitted Bytes */ + stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc)); + /* Received Packets */ + stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc)); + /* Received Bytes */ + stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc)); + +#if 0 + /* Can we get rid of these?? Consequently, getting rid + * of the tc_stats structure. + */ + tc_stats_array[up]->in_overflow_discards = 0; + tc_stats_array[up]->out_overflow_discards = 0; +#endif + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of elements in bwg_array. + * + * This function returns the CBFC status data for each of the Traffic Classes. 
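+ *
+ * tc_count is validated against IXGBE_DCB_MAX_TRAFFIC_CLASS (IXGBE_ERR_PARAM
+ * otherwise); for each TC the PXOFFTXC/PXOFFRXC registers are read and
+ * accumulated into the caller-supplied stats structure.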
+ */ +s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw, + struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + int tc; + + DEBUGFUNC("dcb_get_pfc_stats"); + + if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) + return IXGBE_ERR_PARAM; + + for (tc = 0; tc < tc_count; tc++) { + /* Priority XOFF Transmitted */ + stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc)); + /* Priority XOFF Received */ + stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc)); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Rx Data Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *tsa) +{ + u32 reg = 0; + u32 credit_refill = 0; + u32 credit_max = 0; + u8 i = 0; + + reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA; + IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg); + + reg = IXGBE_READ_REG(hw, IXGBE_RMCS); + /* Enable Arbiter */ + reg &= ~IXGBE_RMCS_ARBDIS; + /* Enable Receive Recycle within the BWG */ + reg |= IXGBE_RMCS_RRM; + /* Enable Deficit Fixed Priority arbitration*/ + reg |= IXGBE_RMCS_DFP; + + IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + credit_refill = refill[i]; + credit_max = max[i]; + + reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT); + + if (tsa[i] == ixgbe_dcb_tsa_strict) + reg |= IXGBE_RT2CR_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg); + } + + reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + reg |= IXGBE_RDRXCTL_RDMTS_1_2; + reg |= IXGBE_RDRXCTL_MPBEN; + reg |= IXGBE_RDRXCTL_MCEN; + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); + + reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + /* Make sure there is enough descriptors before arbitration */ + reg &= ~IXGBE_RXCTRL_DMBYPS; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Tx Descriptor Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, u16 *max, u8 *bwg_id, + u8 *tsa) +{ + u32 reg, max_credits; + u8 i; + + reg = IXGBE_READ_REG(hw, IXGBE_DPMCS); + + /* Enable arbiter */ + reg &= ~IXGBE_DPMCS_ARBDIS; + reg |= IXGBE_DPMCS_TSOEF; + + /* Configure Max TSO packet size 34KB including payload and headers */ + reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT); + + IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + max_credits = max[i]; + reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT; + reg |= refill[i]; + reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT; + + if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) + reg |= IXGBE_TDTQ2TCCR_GSP; + + if (tsa[i] == ixgbe_dcb_tsa_strict) + reg |= IXGBE_TDTQ2TCCR_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Tx Data Arbiter and credits for each traffic class. 
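+ *
+ * PDPMCS is set up with the arbiter enabled (ARBDIS cleared) for Deficit
+ * Fixed Priority with Transmit Recycle Mode, each TDPT2TCCR register
+ * receives the TC's refill/max credits, BWG index and GSP/LSP flags, and
+ * Tx packet buffer division is enabled via DTXCTL.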
+ */ +s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, u16 *max, u8 *bwg_id, + u8 *tsa) +{ + u32 reg; + u8 i; + + reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS); + /* Enable Data Plane Arbiter */ + reg &= ~IXGBE_PDPMCS_ARBDIS; + /* Enable DFP and Transmit Recycle Mode */ + reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM); + + IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + reg = refill[i]; + reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT; + reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT; + + if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) + reg |= IXGBE_TDPT2TCCR_GSP; + + if (tsa[i] == ixgbe_dcb_tsa_strict) + reg |= IXGBE_TDPT2TCCR_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg); + } + + /* Enable Tx packet buffer division */ + reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL); + reg |= IXGBE_DTXCTL_ENDBUBD; + IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_pfc_82598 - Config priority flow control + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Priority Flow Control for each traffic class. + */ +s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) +{ + u32 fcrtl, reg; + u8 i; + + /* Enable Transmit Priority Flow Control */ + reg = IXGBE_READ_REG(hw, IXGBE_RMCS); + reg &= ~IXGBE_RMCS_TFCE_802_3X; + reg |= IXGBE_RMCS_TFCE_PRIORITY; + IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); + + /* Enable Receive Priority Flow Control */ + reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); + reg &= ~(IXGBE_FCTRL_RPFCE | IXGBE_FCTRL_RFCE); + + if (pfc_en) + reg |= IXGBE_FCTRL_RPFCE; + + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); + + /* Configure PFC Tx thresholds per TC */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if (!(pfc_en & (1 << i))) { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); + continue; + } + + fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; + reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg); + } + + /* Configure pause time */ + reg = hw->fc.pause_time | (hw->fc.pause_time << 16); + for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics + * @hw: pointer to hardware structure + * + * Configure queue statistics registers, all queues belonging to same traffic + * class uses a single set of queue statistics counters. 
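+ *
+ * On 82598 the Rx queues are mapped eight per TC (two RQSMR registers per
+ * TC) and the Tx queues four per TC (one TQSMR register per TC). Each byte
+ * of these registers selects the counter index for one queue, which is why
+ * the code below replicates the TC number with the 0x01010101 multiplier.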
+ */ +s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) +{ + u32 reg = 0; + u8 i = 0; + u8 j = 0; + + /* Receive Queues stats setting - 8 queues per statistics reg */ + for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) { + reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i)); + reg |= ((0x1010101) * j); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); + reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1)); + reg |= ((0x1010101) * j); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg); + } + /* Transmit Queues stats setting - 4 queues per statistics reg*/ + for (i = 0; i < 8; i++) { + reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i)); + reg |= ((0x1010101) * i); + IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_hw_config_82598 - Config and enable DCB + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure dcb settings and enable dcb mode. + */ +s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, int link_speed, + u16 *refill, u16 *max, u8 *bwg_id, + u8 *tsa) +{ + UNREFERENCED_1PARAMETER(link_speed); + + ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa); + ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, + tsa); + ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, + tsa); + ixgbe_dcb_config_tc_stats_82598(hw); + + + return IXGBE_SUCCESS; +} diff --git a/drivers/net/ixgbe/base/ixgbe_dcb_82598.h b/drivers/net/ixgbe/base/ixgbe_dcb_82598.h new file mode 100644 index 00000000..eb88b3d3 --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_dcb_82598.h @@ -0,0 +1,99 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#ifndef _IXGBE_DCB_82598_H_ +#define _IXGBE_DCB_82598_H_ + +/* DCB register definitions */ + +#define IXGBE_DPMCS_MTSOS_SHIFT 16 +#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin, + * 1 DFP - Deficit Fixed Priority */ +#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */ +#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */ +#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */ + +#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */ + +#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ +#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */ + +#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet + * buffers enable */ +#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores + * (RSS) enable */ + +#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12 +#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9 +#define IXGBE_TDTQ2TCCR_GSP 0x40000000 +#define IXGBE_TDTQ2TCCR_LSP 0x80000000 + +#define IXGBE_TDPT2TCCR_MCL_SHIFT 12 +#define IXGBE_TDPT2TCCR_BWG_SHIFT 9 +#define IXGBE_TDPT2TCCR_GSP 0x40000000 +#define IXGBE_TDPT2TCCR_LSP 0x80000000 + +#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin, + * 1 DFP - Deficit Fixed Priority */ +#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */ +#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */ + +#define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */ + +#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ +#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ +#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ +#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ + +/* DCB driver APIs */ + +/* DCB PFC */ +s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8); + +/* DCB stats */ +s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *); +s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *, + struct ixgbe_hw_stats *, u8); +s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *, + struct ixgbe_hw_stats *, u8); + +/* DCB config arbiters */ +s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, + u8 *, u8 *); +s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, + u8 *, u8 *); +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, u8 *); + +/* DCB initialization */ +s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, int, u16 *, u16 *, u8 *, u8 *); +#endif /* _IXGBE_DCB_82958_H_ */ diff --git a/drivers/net/ixgbe/base/ixgbe_dcb_82599.c b/drivers/net/ixgbe/base/ixgbe_dcb_82599.c new file mode 100644 index 00000000..a52f83a0 --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_dcb_82599.c @@ -0,0 +1,593 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. 
Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + + +#include "ixgbe_type.h" +#include "ixgbe_dcb.h" +#include "ixgbe_dcb_82599.h" + +/** + * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of elements in bwg_array. + * + * This function returns the status data for each of the Traffic Classes in use. + */ +s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw, + struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + int tc; + + DEBUGFUNC("dcb_get_tc_stats"); + + if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) + return IXGBE_ERR_PARAM; + + /* Statistics pertaining to each traffic class */ + for (tc = 0; tc < tc_count; tc++) { + /* Transmitted Packets */ + stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc)); + /* Transmitted Bytes (read low first to prevent missed carry) */ + stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc)); + stats->qbtc[tc] += + (((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32); + /* Received Packets */ + stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc)); + /* Received Bytes (read low first to prevent missed carry) */ + stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc)); + stats->qbrc[tc] += + (((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32); + + /* Received Dropped Packet */ + stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc)); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * @tc_count: Number of elements in bwg_array. + * + * This function returns the CBFC status data for each of the Traffic Classes. + */ +s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw, + struct ixgbe_hw_stats *stats, + u8 tc_count) +{ + int tc; + + DEBUGFUNC("dcb_get_pfc_stats"); + + if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) + return IXGBE_ERR_PARAM; + + for (tc = 0; tc < tc_count; tc++) { + /* Priority XOFF Transmitted */ + stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc)); + /* Priority XOFF Received */ + stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc)); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Rx Packet Arbiter and credits for each traffic class. 
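+ *
+ * The arbiter is first disabled through RTRPCS (with the RRM/RAC mode bits
+ * kept set), the user-priority to TC mapping is written to RTRUP2TC, each
+ * RTRPT4C register gets that TC's refill/max credits, BWG index and, for
+ * strict priority TCs, the LSP bit, and the arbiter is then re-enabled.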
+ */ +s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *tsa, + u8 *map) +{ + u32 reg = 0; + u32 credit_refill = 0; + u32 credit_max = 0; + u8 i = 0; + + /* + * Disable the arbiter before changing parameters + * (always enable recycle mode; WSP) + */ + reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); + + /* + * map all UPs to TCs. up_to_tc_bitmap for each TC has corresponding + * bits sets for the UPs that needs to be mappped to that TC. + * e.g if priorities 6 and 7 are to be mapped to a TC then the + * up_to_tc_bitmap value for that TC will be 11000000 in binary. + */ + reg = 0; + for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) + reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT)); + + IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + credit_refill = refill[i]; + credit_max = max[i]; + reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT); + + reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT; + + if (tsa[i] == ixgbe_dcb_tsa_strict) + reg |= IXGBE_RTRPT4C_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg); + } + + /* + * Configure Rx packet plane (recycle mode; WSP) and + * enable arbiter + */ + reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC; + IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Tx Descriptor Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *tsa) +{ + u32 reg, max_credits; + u8 i; + + /* Clear the per-Tx queue credits; we use per-TC instead */ + for (i = 0; i < 128; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); + IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0); + } + + /* Configure traffic class credits and priority */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + max_credits = max[i]; + reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT; + reg |= refill[i]; + reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT; + + if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) + reg |= IXGBE_RTTDT2C_GSP; + + if (tsa[i] == ixgbe_dcb_tsa_strict) + reg |= IXGBE_RTTDT2C_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg); + } + + /* + * Configure Tx descriptor plane (recycle mode; WSP) and + * enable arbiter + */ + reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Tx Packet Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *tsa, + u8 *map) +{ + u32 reg; + u8 i; + + /* + * Disable the arbiter before changing parameters + * (always enable recycle mode; SP; arb delay) + */ + reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM | + (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) | + IXGBE_RTTPCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); + + /* + * map all UPs to TCs. up_to_tc_bitmap for each TC has corresponding + * bits sets for the UPs that needs to be mappped to that TC. 
+ * e.g if priorities 6 and 7 are to be mapped to a TC then the + * up_to_tc_bitmap value for that TC will be 11000000 in binary. + */ + reg = 0; + for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) + reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT)); + + IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + reg = refill[i]; + reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT; + reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT; + + if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) + reg |= IXGBE_RTTPT2C_GSP; + + if (tsa[i] == ixgbe_dcb_tsa_strict) + reg |= IXGBE_RTTPT2C_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg); + } + + /* + * Configure Tx packet plane (recycle mode; SP; arb delay) and + * enable arbiter + */ + reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM | + (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_pfc_82599 - Configure priority flow control + * @hw: pointer to hardware structure + * @pfc_en: enabled pfc bitmask + * @map: priority to tc assignments indexed by priority + * + * Configure Priority Flow Control (PFC) for each traffic class. + */ +s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map) +{ + u32 i, j, fcrtl, reg; + u8 max_tc = 0; + + /* Enable Transmit Priority Flow Control */ + IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY); + + /* Enable Receive Priority Flow Control */ + reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); + reg |= IXGBE_MFLCN_DPF; + + /* + * X540 supports per TC Rx priority flow control. So + * clear all TCs and only enable those that should be + * enabled. + */ + reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); + + if (hw->mac.type >= ixgbe_mac_X540) + reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT; + + if (pfc_en) + reg |= IXGBE_MFLCN_RPFCE; + + IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); + + for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) { + if (map[i] > max_tc) + max_tc = map[i]; + } + + + /* Configure PFC Tx thresholds per TC */ + for (i = 0; i <= max_tc; i++) { + int enabled = 0; + + for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) { + if ((map[j] == i) && (pfc_en & (1 << j))) { + enabled = 1; + break; + } + } + + if (enabled) { + reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; + fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); + } else { + /* + * In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the Rx packet buffer size - 24KB. This allows + * the Tx switch to function even under heavy Rx + * workloads. 
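+ * Concretely, for such a TC the high water mark is programmed to
+ * RXPBSIZE(i) - 24576 bytes (packet buffer size minus 24KB) and the low
+ * water / XON threshold register is cleared.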
+ */ + reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); + } + + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); + } + + for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0); + } + + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time | (hw->fc.pause_time << 16); + for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics + * @hw: pointer to hardware structure + * + * Configure queue statistics registers, all queues belonging to same traffic + * class uses a single set of queue statistics counters. + */ +s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + u32 reg = 0; + u8 i = 0; + u8 tc_count = 8; + bool vt_mode = false; + + if (dcb_config != NULL) { + tc_count = dcb_config->num_tcs.pg_tcs; + vt_mode = dcb_config->vt_mode; + } + + if (!((tc_count == 8 && vt_mode == false) || tc_count == 4)) + return IXGBE_ERR_PARAM; + + if (tc_count == 8 && vt_mode == false) { + /* + * Receive Queues stats setting + * 32 RQSMR registers, each configuring 4 queues. + * + * Set all 16 queues of each TC to the same stat + * with TC 'n' going to stat 'n'. + */ + for (i = 0; i < 32; i++) { + reg = 0x01010101 * (i / 4); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); + } + /* + * Transmit Queues stats setting + * 32 TQSM registers, each controlling 4 queues. + * + * Set all queues of each TC to the same stat + * with TC 'n' going to stat 'n'. + * Tx queues are allocated non-uniformly to TCs: + * 32, 32, 16, 16, 8, 8, 8, 8. + */ + for (i = 0; i < 32; i++) { + if (i < 8) + reg = 0x00000000; + else if (i < 16) + reg = 0x01010101; + else if (i < 20) + reg = 0x02020202; + else if (i < 24) + reg = 0x03030303; + else if (i < 26) + reg = 0x04040404; + else if (i < 28) + reg = 0x05050505; + else if (i < 30) + reg = 0x06060606; + else + reg = 0x07070707; + IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); + } + } else if (tc_count == 4 && vt_mode == false) { + /* + * Receive Queues stats setting + * 32 RQSMR registers, each configuring 4 queues. + * + * Set all 16 queues of each TC to the same stat + * with TC 'n' going to stat 'n'. + */ + for (i = 0; i < 32; i++) { + if (i % 8 > 3) + /* In 4 TC mode, odd 16-queue ranges are + * not used. + */ + continue; + reg = 0x01010101 * (i / 8); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); + } + /* + * Transmit Queues stats setting + * 32 TQSM registers, each controlling 4 queues. + * + * Set all queues of each TC to the same stat + * with TC 'n' going to stat 'n'. + * Tx queues are allocated non-uniformly to TCs: + * 64, 32, 16, 16. + */ + for (i = 0; i < 32; i++) { + if (i < 16) + reg = 0x00000000; + else if (i < 24) + reg = 0x01010101; + else if (i < 28) + reg = 0x02020202; + else + reg = 0x03030303; + IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); + } + } else if (tc_count == 4 && vt_mode == true) { + /* + * Receive Queues stats setting + * 32 RQSMR registers, each configuring 4 queues. + * + * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each + * pool. Set all 32 queues of each TC across pools to the same + * stat with TC 'n' going to stat 'n'. 
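+ * With one byte per queue this amounts to writing 0x03020100 to every
+ * register, i.e. the four queues of each pool are directed to counters
+ * 0-3 for TC0-TC3.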
+ */ + for (i = 0; i < 32; i++) + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100); + /* + * Transmit Queues stats setting + * 32 TQSM registers, each controlling 4 queues. + * + * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each + * pool. Set all 32 queues of each TC across pools to the same + * stat with TC 'n' going to stat 'n'. + */ + for (i = 0; i < 32; i++) + IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_config_82599 - Configure general DCB parameters + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure general DCB parameters. + */ +s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + u32 reg; + u32 q; + + /* Disable the Tx desc arbiter so that MTQC can be changed */ + reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + reg |= IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + + reg = IXGBE_READ_REG(hw, IXGBE_MRQC); + if (dcb_config->num_tcs.pg_tcs == 8) { + /* Enable DCB for Rx with 8 TCs */ + switch (reg & IXGBE_MRQC_MRQE_MASK) { + case 0: + case IXGBE_MRQC_RT4TCEN: + /* RSS disabled cases */ + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_RT8TCEN; + break; + case IXGBE_MRQC_RSSEN: + case IXGBE_MRQC_RTRSS4TCEN: + /* RSS enabled cases */ + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_RTRSS8TCEN; + break; + default: + /* + * Unsupported value, assume stale data, + * overwrite no RSS + */ + ASSERT(0); + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_RT8TCEN; + } + } + if (dcb_config->num_tcs.pg_tcs == 4) { + /* We support both VT-on and VT-off with 4 TCs. */ + if (dcb_config->vt_mode) + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_VMDQRT4TCEN; + else + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_RTRSS4TCEN; + } + IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg); + + /* Enable DCB for Tx with 8 TCs */ + if (dcb_config->num_tcs.pg_tcs == 8) + reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; + else { + /* We support both VT-on and VT-off with 4 TCs. */ + reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; + if (dcb_config->vt_mode) + reg |= IXGBE_MTQC_VT_ENA; + } + IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); + + /* Disable drop for all queues */ + for (q = 0; q < 128; q++) + IXGBE_WRITE_REG(hw, IXGBE_QDE, + (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT))); + + /* Enable the Tx desc arbiter */ + reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + reg &= ~IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + + /* Enable Security TX Buffer IFG for DCB */ + reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); + reg |= IXGBE_SECTX_DCB; + IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dcb_hw_config_82599 - Configure and enable DCB + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure dcb settings and enable dcb mode. 
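+ *
+ * This simply chains the Rx, Tx descriptor and Tx data arbiter setup with
+ * the caller-supplied refill/max/bwg_id/tsa/map arrays; the link_speed
+ * argument is currently unused (UNREFERENCED_1PARAMETER).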
+ */ +s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed, + u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa, + u8 *map) +{ + UNREFERENCED_1PARAMETER(link_speed); + + ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa, + map); + ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, + tsa); + ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, + tsa, map); + + return IXGBE_SUCCESS; +} + diff --git a/drivers/net/ixgbe/base/ixgbe_dcb_82599.h b/drivers/net/ixgbe/base/ixgbe_dcb_82599.h new file mode 100644 index 00000000..dc0fb284 --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_dcb_82599.h @@ -0,0 +1,153 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _IXGBE_DCB_82599_H_ +#define _IXGBE_DCB_82599_H_ + +/* DCB register definitions */ +#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin, + * 1 WSP - Weighted Strict Priority + */ +#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin, + * 1 WRR - Weighted Round Robin + */ +#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */ +#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */ +#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must + * clear! 
+ */ +#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */ + +/* Receive UP2TC mapping */ +#define IXGBE_RTRUP2TC_UP_SHIFT 3 +#define IXGBE_RTRUP2TC_UP_MASK 7 +/* Transmit UP2TC mapping */ +#define IXGBE_RTTUP2TC_UP_SHIFT 3 + +#define IXGBE_RTRPT4C_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ +#define IXGBE_RTRPT4C_BWG_SHIFT 9 /* Offset to BWG index */ +#define IXGBE_RTRPT4C_GSP 0x40000000 /* GSP enable bit */ +#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */ + +#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet + * buffers enable + */ +#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores + * (RSS) enable + */ + +/* RTRPCS Bit Masks */ +#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */ +/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ +#define IXGBE_RTRPCS_RAC 0x00000004 +#define IXGBE_RTRPCS_ARBDIS 0x00000040 /* Arbitration disable bit */ + +/* RTTDT2C Bit Masks */ +#define IXGBE_RTTDT2C_MCL_SHIFT 12 +#define IXGBE_RTTDT2C_BWG_SHIFT 9 +#define IXGBE_RTTDT2C_GSP 0x40000000 +#define IXGBE_RTTDT2C_LSP 0x80000000 + +#define IXGBE_RTTPT2C_MCL_SHIFT 12 +#define IXGBE_RTTPT2C_BWG_SHIFT 9 +#define IXGBE_RTTPT2C_GSP 0x40000000 +#define IXGBE_RTTPT2C_LSP 0x80000000 + +/* RTTPCS Bit Masks */ +#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin, + * 1 SP - Strict Priority + */ +#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */ +#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */ +#define IXGBE_RTTPCS_ARBD_SHIFT 22 +#define IXGBE_RTTPCS_ARBD_DCB 0x4 /* Arbitration delay in DCB mode */ + +#define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */ + +/* SECTXMINIFG DCB */ +#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer SEC IFG */ + +/* BCN register definitions */ +#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14 +#define IXGBE_RTTBCNRC_RS_ENA 0x80000000 + +#define IXGBE_RTTBCNCR_MNG_CMTGI 0x00000001 +#define IXGBE_RTTBCNCR_MGN_BCNA_MODE 0x00000002 +#define IXGBE_RTTBCNCR_RSV7_11_SHIFT 5 +#define IXGBE_RTTBCNCR_G 0x00000400 +#define IXGBE_RTTBCNCR_I 0x00000800 +#define IXGBE_RTTBCNCR_H 0x00001000 +#define IXGBE_RTTBCNCR_VER_SHIFT 14 +#define IXGBE_RTTBCNCR_CMT_ETH_SHIFT 16 + +#define IXGBE_RTTBCNACL_SMAC_L_SHIFT 16 + +#define IXGBE_RTTBCNTG_BCNA_MODE 0x80000000 + +#define IXGBE_RTTBCNRTT_TS_SHIFT 3 +#define IXGBE_RTTBCNRTT_TXQ_IDX_SHIFT 16 + +#define IXGBE_RTTBCNRD_BCN_CLEAR_ALL 0x00000002 +#define IXGBE_RTTBCNRD_DRIFT_FAC_SHIFT 2 +#define IXGBE_RTTBCNRD_DRIFT_INT_SHIFT 16 +#define IXGBE_RTTBCNRD_DRIFT_ENA 0x80000000 + + +/* DCB driver APIs */ + +/* DCB PFC */ +s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *, u8, u8 *); + +/* DCB stats */ +s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *, + struct ixgbe_dcb_config *); +s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *, + struct ixgbe_hw_stats *, u8); +s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *, + struct ixgbe_hw_stats *, u8); + +/* DCB config arbiters */ +s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, + u8 *, u8 *); +s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, + u8 *, u8 *, u8 *); +s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, u8 *, + u8 *, u8 *); + +/* DCB initialization */ +s32 ixgbe_dcb_config_82599(struct ixgbe_hw *, + struct ixgbe_dcb_config *); + +s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *, int, u16 *, u16 *, u8 *, + u8 *, u8 *); +#endif /* _IXGBE_DCB_82959_H_ */ diff --git a/drivers/net/ixgbe/base/ixgbe_mbx.c 
b/drivers/net/ixgbe/base/ixgbe_mbx.c new file mode 100644 index 00000000..042e5cc1 --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_mbx.c @@ -0,0 +1,791 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#include "ixgbe_type.h" +#include "ixgbe_mbx.h" + +/** + * ixgbe_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfully read message from buffer + **/ +s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_read_mbx"); + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + if (mbx->ops.read) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * ixgbe_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_write_mbx"); + + if (size > mbx->size) { + ret_val = IXGBE_ERR_MBX; + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, + "Invalid mailbox message size %d", size); + } else if (mbx->ops.write) + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * ixgbe_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = 
IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_check_for_msg"); + + if (mbx->ops.check_for_msg) + ret_val = mbx->ops.check_for_msg(hw, mbx_id); + + return ret_val; +} + +/** + * ixgbe_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_check_for_ack"); + + if (mbx->ops.check_for_ack) + ret_val = mbx->ops.check_for_ack(hw, mbx_id); + + return ret_val; +} + +/** + * ixgbe_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_check_for_rst"); + + if (mbx->ops.check_for_rst) + ret_val = mbx->ops.check_for_rst(hw, mbx_id); + + return ret_val; +} + +/** + * ixgbe_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +STATIC s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + DEBUGFUNC("ixgbe_poll_for_msg"); + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + usec_delay(mbx->usec_delay); + } + + if (countdown == 0) + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Polling for VF%d mailbox message timedout", mbx_id); + +out: + return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX; +} + +/** + * ixgbe_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +STATIC s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + DEBUGFUNC("ixgbe_poll_for_ack"); + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + usec_delay(mbx->usec_delay); + } + + if (countdown == 0) + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Polling for VF%d mailbox ack timedout", mbx_id); + +out: + return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX; +} + +/** + * ixgbe_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. 
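+ * Note that the wait is a busy poll: up to mbx->timeout iterations of
+ * usec_delay(mbx->usec_delay) before the message is read through
+ * mbx->ops.read(); IXGBE_ERR_MBX is returned on timeout or when no read
+ * op is installed.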
+ **/ +s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_read_posted_mbx"); + + if (!mbx->ops.read) + goto out; + + ret_val = ixgbe_poll_for_msg(hw, mbx_id); + + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); +out: + return ret_val; +} + +/** + * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_write_posted_mbx"); + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg */ + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = ixgbe_poll_for_ack(hw, mbx_id); +out: + return ret_val; +} + +/** + * ixgbe_init_mbx_ops_generic - Initialize MB function pointers + * @hw: pointer to the HW structure + * + * Setups up the mailbox read and write message function pointers + **/ +void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + mbx->ops.read_posted = ixgbe_read_posted_mbx; + mbx->ops.write_posted = ixgbe_write_posted_mbx; +} + +/** + * ixgbe_read_v2p_mailbox - read v2p mailbox + * @hw: pointer to the HW structure + * + * This function is used to read the v2p mailbox without losing the read to + * clear status bits. + **/ +STATIC u32 ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw) +{ + u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX); + + v2p_mailbox |= hw->mbx.v2p_mailbox; + hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS; + + return v2p_mailbox; +} + +/** + * ixgbe_check_for_bit_vf - Determine if a status bit was set + * @hw: pointer to the HW structure + * @mask: bitmask for bits to be tested and cleared + * + * This function is used to check for the read to clear bits within + * the V2P mailbox. 
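+ *
+ * For example, ixgbe_check_for_msg_vf() below calls this with
+ * IXGBE_VFMAILBOX_PFSTS to test whether the PF has posted a message, and
+ * ixgbe_check_for_ack_vf() calls it with IXGBE_VFMAILBOX_PFACK.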
+ **/ +STATIC s32 ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask) +{ + u32 v2p_mailbox = ixgbe_read_v2p_mailbox(hw); + s32 ret_val = IXGBE_ERR_MBX; + + if (v2p_mailbox & mask) + ret_val = IXGBE_SUCCESS; + + hw->mbx.v2p_mailbox &= ~mask; + + return ret_val; +} + +/** + * ixgbe_check_for_msg_vf - checks to see if the PF has sent mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the Status bit or else ERR_MBX + **/ +STATIC s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id) +{ + s32 ret_val = IXGBE_ERR_MBX; + + UNREFERENCED_1PARAMETER(mbx_id); + DEBUGFUNC("ixgbe_check_for_msg_vf"); + + if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) { + ret_val = IXGBE_SUCCESS; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * ixgbe_check_for_ack_vf - checks to see if the PF has ACK'd + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX + **/ +STATIC s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id) +{ + s32 ret_val = IXGBE_ERR_MBX; + + UNREFERENCED_1PARAMETER(mbx_id); + DEBUGFUNC("ixgbe_check_for_ack_vf"); + + if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) { + ret_val = IXGBE_SUCCESS; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * ixgbe_check_for_rst_vf - checks to see if the PF has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns true if the PF has set the reset done bit or else false + **/ +STATIC s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id) +{ + s32 ret_val = IXGBE_ERR_MBX; + + UNREFERENCED_1PARAMETER(mbx_id); + DEBUGFUNC("ixgbe_check_for_rst_vf"); + + if (!ixgbe_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD | + IXGBE_VFMAILBOX_RSTI))) { + ret_val = IXGBE_SUCCESS; + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * ixgbe_obtain_mbx_lock_vf - obtain mailbox lock + * @hw: pointer to the HW structure + * + * return SUCCESS if we obtained the mailbox lock + **/ +STATIC s32 ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_obtain_mbx_lock_vf"); + + /* Take ownership of the buffer */ + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU); + + /* reserve mailbox for vf use */ + if (ixgbe_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU) + ret_val = IXGBE_SUCCESS; + + return ret_val; +} + +/** + * ixgbe_write_mbx_vf - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +STATIC s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + s32 ret_val; + u16 i; + + UNREFERENCED_1PARAMETER(mbx_id); + + DEBUGFUNC("ixgbe_write_mbx_vf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_vf(hw); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + ixgbe_check_for_msg_vf(hw, 0); + ixgbe_check_for_ack_vf(hw, 0); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]); + + /* + * Complete the remaining mailbox data registers with zero to reset + * the data sent in a previous exchange (in either side) with the PF, + * including exchanges performed by another Guest OS to which that VF 
+ * was previously assigned. + */ + while (i < hw->mbx.size) { + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, 0); + i++; + } + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + /* Drop VFU and interrupt the PF to tell it a message has been sent */ + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ); + +out_no_write: + return ret_val; +} + +/** + * ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfully read message from buffer + **/ +STATIC s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + s32 ret_val = IXGBE_SUCCESS; + u16 i; + + DEBUGFUNC("ixgbe_read_mbx_vf"); + UNREFERENCED_1PARAMETER(mbx_id); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_vf(hw); + if (ret_val) + goto out_no_read; + + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i); + + /* Acknowledge receipt and release mailbox, then we're done */ + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * ixgbe_init_mbx_params_vf - set initial values for vf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for vf mailbox + */ +void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + /* start mailbox as timed out and let the reset_hw call set the timeout + * value to begin communications */ + mbx->timeout = 0; + mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY; + + mbx->size = IXGBE_VFMAILBOX_SIZE; + + mbx->ops.read = ixgbe_read_mbx_vf; + mbx->ops.write = ixgbe_write_mbx_vf; + mbx->ops.read_posted = ixgbe_read_posted_mbx; + mbx->ops.write_posted = ixgbe_write_posted_mbx; + mbx->ops.check_for_msg = ixgbe_check_for_msg_vf; + mbx->ops.check_for_ack = ixgbe_check_for_ack_vf; + mbx->ops.check_for_rst = ixgbe_check_for_rst_vf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} + +STATIC s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) +{ + u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index)); + s32 ret_val = IXGBE_ERR_MBX; + + if (mbvficr & mask) { + ret_val = IXGBE_SUCCESS; + IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask); + } + + return ret_val; +} + +/** + * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +STATIC s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + s32 ret_val = IXGBE_ERR_MBX; + s32 index = IXGBE_MBVFICR_INDEX(vf_number); + u32 vf_bit = vf_number % 16; + + DEBUGFUNC("ixgbe_check_for_msg_pf"); + + if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit, + index)) { + ret_val = IXGBE_SUCCESS; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +STATIC s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + s32 ret_val = IXGBE_ERR_MBX; + s32 index = IXGBE_MBVFICR_INDEX(vf_number); + u32 
vf_bit = vf_number % 16; + + DEBUGFUNC("ixgbe_check_for_ack_pf"); + + if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit, + index)) { + ret_val = IXGBE_SUCCESS; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * ixgbe_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +STATIC s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + u32 reg_offset = (vf_number < 32) ? 0 : 1; + u32 vf_shift = vf_number % 32; + u32 vflre = 0; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_check_for_rst_pf"); + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); + break; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + case ixgbe_mac_X540: + vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); + break; + default: + break; + } + + if (vflre & (1 << vf_shift)) { + ret_val = IXGBE_SUCCESS; + IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift)); + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +STATIC s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + s32 ret_val = IXGBE_ERR_MBX; + u32 p2v_mailbox; + + DEBUGFUNC("ixgbe_obtain_mbx_lock_pf"); + + /* Take ownership of the buffer */ + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU); + + /* reserve mailbox for vf use */ + p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number)); + if (p2v_mailbox & IXGBE_PFMAILBOX_PFU) + ret_val = IXGBE_SUCCESS; + else + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Failed to obtain mailbox lock for VF%d", vf_number); + + + return ret_val; +} + +/** + * ixgbe_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +STATIC s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + DEBUGFUNC("ixgbe_write_mbx_pf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + ixgbe_check_for_msg_pf(hw, vf_number); + ixgbe_check_for_ack_pf(hw, vf_number); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]); + + /* + * Complete the remaining mailbox data registers with zero to reset + * the data sent in a previous exchange (in either side) with the VF, + * including exchanges performed by another Guest OS to which that VF + * was previously assigned. 
+ */ + while (i < hw->mbx.size) { + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, 0); + i++; + } + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out_no_write: + return ret_val; + +} + +/** + * ixgbe_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. + **/ +STATIC s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + DEBUGFUNC("ixgbe_read_mbx_pf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i); + + /* Acknowledge the message and release buffer */ + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * ixgbe_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + if (hw->mac.type != ixgbe_mac_82599EB && + hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a && + hw->mac.type != ixgbe_mac_X540) + return; + + mbx->timeout = 0; + mbx->usec_delay = 0; + + mbx->size = IXGBE_VFMAILBOX_SIZE; + + mbx->ops.read = ixgbe_read_mbx_pf; + mbx->ops.write = ixgbe_write_mbx_pf; + mbx->ops.read_posted = ixgbe_read_posted_mbx; + mbx->ops.write_posted = ixgbe_write_posted_mbx; + mbx->ops.check_for_msg = ixgbe_check_for_msg_pf; + mbx->ops.check_for_ack = ixgbe_check_for_ack_pf; + mbx->ops.check_for_rst = ixgbe_check_for_rst_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} diff --git a/drivers/net/ixgbe/base/ixgbe_mbx.h b/drivers/net/ixgbe/base/ixgbe_mbx.h new file mode 100644 index 00000000..4a120a3d --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_mbx.h @@ -0,0 +1,154 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _IXGBE_MBX_H_ +#define _IXGBE_MBX_H_ + +#include "ixgbe_type.h" + +#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ +#define IXGBE_ERR_MBX -100 + +#define IXGBE_VFMAILBOX 0x002FC +#define IXGBE_VFMBMEM 0x00200 + +/* Define mailbox register bits */ +#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ + +#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ +#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ +#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + + +/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is IXGBE_PF_*. 
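+ * For example, IXGBE_VF_RESET below is sent from the VF to the PF, while
+ * IXGBE_PF_CONTROL_MSG travels from the PF to the VF.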
+ * Message ACK's are the value or'd with 0xF0000000 + */ +#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with + * this are the ACK */ +#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with + * this are the NACK */ +#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still + * clear to send requests */ +#define IXGBE_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) + +/* definitions to support mailbox API version negotiation */ + +/* + * each element denotes a version of the API; existing numbers may not + * change; any additions must go at the end + */ +enum ixgbe_pfvf_api_rev { + ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ + ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ + ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ + ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ + /* This value should always be last */ + ixgbe_mbox_api_unknown, /* indicates that API version is not known */ +}; + +/* mailbox API, legacy requests */ +#define IXGBE_VF_RESET 0x01 /* VF requests reset */ +#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ + +/* mailbox API, version 1.0 VF requests */ +#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ +#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ + +/* mailbox API, version 1.1 VF requests */ +#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ + +/* mailbox API, version 1.2 VF requests */ +#define IXGBE_VF_UPDATE_XCAST_MODE 0x0C + +/* GET_QUEUES return data indices within the mailbox */ +#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ +#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ +#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ +#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ + +/* length of permanent address message returned from PF */ +#define IXGBE_VF_PERMADDR_MSG_LEN 4 +/* word in permanent address message with the current multicast type */ +#define IXGBE_VF_MC_TYPE_WORD 3 + +#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ + +/* mailbox API, version 2.0 VF requests */ +#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ +#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ +#define IXGBE_VF_ENABLE_MACADDR 0x0A /* enable MAC address */ +#define IXGBE_VF_DISABLE_MACADDR 0x0B /* disable MAC address */ +#define IXGBE_VF_GET_MACADDRS 0x0C /* get all configured MAC addrs */ +#define IXGBE_VF_SET_MCAST_PROMISC 0x0D /* enable multicast promiscuous */ +#define IXGBE_VF_GET_MTU 0x0E /* get bounds on MTU */ +#define IXGBE_VF_SET_MTU 0x0F /* set a specific MTU */ + +/* mailbox API, version 2.0 PF requests */ +#define IXGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent vlan */ + +#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16); +s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16); +s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16); +s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16); +s32 
ixgbe_check_for_msg(struct ixgbe_hw *, u16); +s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); +s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); +void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw); +void ixgbe_init_mbx_params_vf(struct ixgbe_hw *); +void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); + +#endif /* _IXGBE_MBX_H_ */ diff --git a/drivers/net/ixgbe/base/ixgbe_osdep.h b/drivers/net/ixgbe/base/ixgbe_osdep.h new file mode 100644 index 00000000..40b0b512 --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_osdep.h @@ -0,0 +1,155 @@ +/****************************************************************************** + + Copyright (c) 2001-2015, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _IXGBE_OS_H_ +#define _IXGBE_OS_H_ + +#include <string.h> +#include <stdint.h> +#include <stdio.h> +#include <stdarg.h> +#include <rte_common.h> +#include <rte_debug.h> +#include <rte_cycles.h> +#include <rte_log.h> +#include <rte_byteorder.h> + +#include "../ixgbe_logs.h" +#include "../ixgbe_bypass_defines.h" + +#define ASSERT(x) if(!(x)) rte_panic("IXGBE: x") + +#define DELAY(x) rte_delay_us(x) +#define usec_delay(x) DELAY(x) +#define msec_delay(x) DELAY(1000*(x)) + +#define DEBUGFUNC(F) DEBUGOUT(F "\n"); +#define DEBUGOUT(S, args...) PMD_DRV_LOG_RAW(DEBUG, S, ##args) +#define DEBUGOUT1(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT2(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT3(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT6(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT7(S, args...) DEBUGOUT(S, ##args) + +#define ERROR_REPORT1(e, S, args...) DEBUGOUT(S, ##args) +#define ERROR_REPORT2(e, S, args...) DEBUGOUT(S, ##args) +#define ERROR_REPORT3(e, S, args...) DEBUGOUT(S, ##args) + +#define FALSE 0 +#define TRUE 1 + +#define false 0 +#define true 1 +#define min(a,b) RTE_MIN(a,b) + +#define EWARN(hw, S, args...) 
DEBUGOUT1(S, ##args) + +/* Bunch of defines for shared code bogosity */ +#define UNREFERENCED_PARAMETER(_p) +#define UNREFERENCED_1PARAMETER(_p) +#define UNREFERENCED_2PARAMETER(_p, _q) +#define UNREFERENCED_3PARAMETER(_p, _q, _r) +#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) + +/* Shared code error reporting */ +enum { + IXGBE_ERROR_SOFTWARE, + IXGBE_ERROR_POLLING, + IXGBE_ERROR_INVALID_STATE, + IXGBE_ERROR_UNSUPPORTED, + IXGBE_ERROR_ARGUMENT, + IXGBE_ERROR_CAUTION, +}; + +#define STATIC static +#define IXGBE_NTOHL(_i) rte_be_to_cpu_32(_i) +#define IXGBE_NTOHS(_i) rte_be_to_cpu_16(_i) +#define IXGBE_CPU_TO_LE32(_i) rte_cpu_to_le_32(_i) +#define IXGBE_LE32_TO_CPUS(_i) rte_le_to_cpu_32(_i) +#define IXGBE_CPU_TO_BE16(_i) rte_cpu_to_be_16(_i) +#define IXGBE_CPU_TO_BE32(_i) rte_cpu_to_be_32(_i) + +typedef uint8_t u8; +typedef int8_t s8; +typedef uint16_t u16; +typedef int16_t s16; +typedef uint32_t u32; +typedef int32_t s32; +typedef uint64_t u64; +typedef int bool; + +#define mb() rte_mb() +#define wmb() rte_wmb() +#define rmb() rte_rmb() + +#define IOMEM + +#define prefetch(x) rte_prefetch0(x) + +#define IXGBE_PCI_REG(reg) (*((volatile uint32_t *)(reg))) + +static inline uint32_t ixgbe_read_addr(volatile void* addr) +{ + return rte_le_to_cpu_32(IXGBE_PCI_REG(addr)); +} + +#define IXGBE_PCI_REG_WRITE(reg, value) do { \ + IXGBE_PCI_REG((reg)) = (rte_cpu_to_le_32(value)); \ +} while(0) + +#define IXGBE_PCI_REG_ADDR(hw, reg) \ + ((volatile uint32_t *)((char *)(hw)->hw_addr + (reg))) + +#define IXGBE_PCI_REG_ARRAY_ADDR(hw, reg, index) \ + IXGBE_PCI_REG_ADDR((hw), (reg) + ((index) << 2)) + +/* Not implemented !! */ +#define IXGBE_READ_PCIE_WORD(hw, reg) 0 +#define IXGBE_WRITE_PCIE_WORD(hw, reg, value) do { } while(0) + +#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) + +#define IXGBE_READ_REG(hw, reg) \ + ixgbe_read_addr(IXGBE_PCI_REG_ADDR((hw), (reg))) + +#define IXGBE_WRITE_REG(hw, reg, value) \ + IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ADDR((hw), (reg)), (value)) + +#define IXGBE_READ_REG_ARRAY(hw, reg, index) \ + IXGBE_PCI_REG(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index))) + +#define IXGBE_WRITE_REG_ARRAY(hw, reg, index, value) \ + IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index)), (value)) + +#endif /* _IXGBE_OS_H_ */ diff --git a/drivers/net/ixgbe/base/ixgbe_phy.c b/drivers/net/ixgbe/base/ixgbe_phy.c new file mode 100644 index 00000000..6ed685e8 --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_phy.c @@ -0,0 +1,2687 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#include "ixgbe_api.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw); +STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw); +STATIC s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data); +STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data); +STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw); +STATIC s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data); +STATIC s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); +STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); +STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); +STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); +STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl); +STATIC s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data); + +/** + * ixgbe_out_i2c_byte_ack - Send I2C byte with ack + * @hw: pointer to the hardware structure + * @byte: byte to send + * + * Returns an error code on error. + */ +STATIC s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte) +{ + s32 status; + + status = ixgbe_clock_out_i2c_byte(hw, byte); + if (status) + return status; + return ixgbe_get_i2c_ack(hw); +} + +/** + * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack + * @hw: pointer to the hardware structure + * @byte: pointer to a u8 to receive the byte + * + * Returns an error code on error. + */ +STATIC s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte) +{ + s32 status; + + status = ixgbe_clock_in_i2c_byte(hw, byte); + if (status) + return status; + /* ACK */ + return ixgbe_clock_out_i2c_bit(hw, false); +} + +/** + * ixgbe_ones_comp_byte_add - Perform one's complement addition + * @add1 - addend 1 + * @add2 - addend 2 + * + * Returns one's complement 8-bit sum. + */ +STATIC u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) +{ + u16 sum = add1 + add2; + + sum = (sum & 0xFF) + (sum >> 8); + return sum & 0xFF; +} + +/** + * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * @lock: true if to take and release semaphore + * + * Returns an error code on error. 
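+ *
+ * The combined transaction carried out below is, in order: START, device
+ * address with the write bit, register bits 14:8 (tagged as a combined
+ * read), register bits 7:0, an inverted one's-complement checksum of the
+ * register bytes, repeated START, device address with the read bit,
+ * data 15:8, data 7:0, checksum, NACK, STOP.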
+ */ +s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, + u16 *val, bool lock) +{ + u32 swfw_mask = hw->phy.phy_semaphore_mask; + int max_retry = 10; + int retry = 0; + u8 csum_byte; + u8 high_bits; + u8 low_bits; + u8 reg_high; + u8 csum; + + if (hw->mac.type >= ixgbe_mac_X550) + max_retry = 3; + reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */ + csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); + csum = ~csum; + do { + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return IXGBE_ERR_SWFW_SYNC; + ixgbe_i2c_start(hw); + /* Device Address and write indication */ + if (ixgbe_out_i2c_byte_ack(hw, addr)) + goto fail; + /* Write bits 14:8 */ + if (ixgbe_out_i2c_byte_ack(hw, reg_high)) + goto fail; + /* Write bits 7:0 */ + if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF)) + goto fail; + /* Write csum */ + if (ixgbe_out_i2c_byte_ack(hw, csum)) + goto fail; + /* Re-start condition */ + ixgbe_i2c_start(hw); + /* Device Address and read indication */ + if (ixgbe_out_i2c_byte_ack(hw, addr | 1)) + goto fail; + /* Get upper bits */ + if (ixgbe_in_i2c_byte_ack(hw, &high_bits)) + goto fail; + /* Get low bits */ + if (ixgbe_in_i2c_byte_ack(hw, &low_bits)) + goto fail; + /* Get csum */ + if (ixgbe_clock_in_i2c_byte(hw, &csum_byte)) + goto fail; + /* NACK */ + if (ixgbe_clock_out_i2c_bit(hw, false)) + goto fail; + ixgbe_i2c_stop(hw); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + *val = (high_bits << 8) | low_bits; + return 0; + +fail: + ixgbe_i2c_bus_clear(hw); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte read combined error - Retrying.\n"); + else + DEBUGOUT("I2C byte read combined error.\n"); + } while (retry < max_retry); + + return IXGBE_ERR_I2C; +} + +/** + * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * @lock: true if to take and release semaphore + * + * Returns an error code on error. 
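+ *
+ * The transaction carried out below is: START, device address with the
+ * write bit, register bits 14:8, register bits 7:0, data 15:8, data 7:0,
+ * an inverted one's-complement checksum of the register and data bytes,
+ * STOP.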
+ */ +s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, + u16 val, bool lock) +{ + u32 swfw_mask = hw->phy.phy_semaphore_mask; + int max_retry = 1; + int retry = 0; + u8 reg_high; + u8 csum; + + reg_high = (reg >> 7) & 0xFE; /* Indicate write combined */ + csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); + csum = ixgbe_ones_comp_byte_add(csum, val >> 8); + csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF); + csum = ~csum; + do { + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return IXGBE_ERR_SWFW_SYNC; + ixgbe_i2c_start(hw); + /* Device Address and write indication */ + if (ixgbe_out_i2c_byte_ack(hw, addr)) + goto fail; + /* Write bits 14:8 */ + if (ixgbe_out_i2c_byte_ack(hw, reg_high)) + goto fail; + /* Write bits 7:0 */ + if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF)) + goto fail; + /* Write data 15:8 */ + if (ixgbe_out_i2c_byte_ack(hw, val >> 8)) + goto fail; + /* Write data 7:0 */ + if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF)) + goto fail; + /* Write csum */ + if (ixgbe_out_i2c_byte_ack(hw, csum)) + goto fail; + ixgbe_i2c_stop(hw); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + return 0; + +fail: + ixgbe_i2c_bus_clear(hw); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte write combined error - Retrying.\n"); + else + DEBUGOUT("I2C byte write combined error.\n"); + } while (retry < max_retry); + + return IXGBE_ERR_I2C; +} + +/** + * ixgbe_init_phy_ops_generic - Inits PHY function ptrs + * @hw: pointer to the hardware structure + * + * Initialize the function pointers. + **/ +s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_phy_info *phy = &hw->phy; + + DEBUGFUNC("ixgbe_init_phy_ops_generic"); + + /* PHY */ + phy->ops.identify = ixgbe_identify_phy_generic; + phy->ops.reset = ixgbe_reset_phy_generic; + phy->ops.read_reg = ixgbe_read_phy_reg_generic; + phy->ops.write_reg = ixgbe_write_phy_reg_generic; + phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi; + phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi; + phy->ops.setup_link = ixgbe_setup_phy_link_generic; + phy->ops.setup_link_speed = ixgbe_setup_phy_link_speed_generic; + phy->ops.check_link = NULL; + phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic; + phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_generic; + phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_generic; + phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_generic; + phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_generic; + phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_generic; + phy->ops.i2c_bus_clear = ixgbe_i2c_bus_clear; + phy->ops.identify_sfp = ixgbe_identify_module_generic; + phy->sfp_type = ixgbe_sfp_type_unknown; + phy->ops.read_i2c_byte_unlocked = ixgbe_read_i2c_byte_generic_unlocked; + phy->ops.write_i2c_byte_unlocked = + ixgbe_write_i2c_byte_generic_unlocked; + phy->ops.check_overtemp = ixgbe_tn_check_overtemp; + return IXGBE_SUCCESS; +} + +/** + * ixgbe_identify_phy_generic - Get physical layer module + * @hw: pointer to hardware structure + * + * Determines the physical layer module found on the current adapter. 
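+ * It walks MDIO addresses 0 through IXGBE_MAX_PHY_ADDR - 1, keeps the
+ * first address that responds with a valid PHY ID, and, when that ID is
+ * not recognized, uses the PMA/PMD extended ability bits to classify the
+ * device as an unknown copper PHY or a generic PHY.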
+ **/ +s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u32 phy_addr; + u16 ext_ability = 0; + + DEBUGFUNC("ixgbe_identify_phy_generic"); + + if (!hw->phy.phy_semaphore_mask) { + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + } + + if (hw->phy.type == ixgbe_phy_unknown) { + for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { + if (ixgbe_validate_phy_addr(hw, phy_addr)) { + hw->phy.addr = phy_addr; + ixgbe_get_phy_id(hw); + hw->phy.type = + ixgbe_get_phy_type_from_id(hw->phy.id); + + if (hw->phy.type == ixgbe_phy_unknown) { + hw->phy.ops.read_reg(hw, + IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &ext_ability); + if (ext_ability & + (IXGBE_MDIO_PHY_10GBASET_ABILITY | + IXGBE_MDIO_PHY_1000BASET_ABILITY)) + hw->phy.type = + ixgbe_phy_cu_unknown; + else + hw->phy.type = + ixgbe_phy_generic; + } + + status = IXGBE_SUCCESS; + break; + } + } + + /* Certain media types do not have a phy so an address will not + * be found and the code will take this path. Caller has to + * decide if it is an error or not. + */ + if (status != IXGBE_SUCCESS) { + hw->phy.addr = 0; + } + } else { + status = IXGBE_SUCCESS; + } + + return status; +} + +/** + * ixgbe_check_reset_blocked - check status of MNG FW veto bit + * @hw: pointer to the hardware structure + * + * This function checks the MMNGC.MNG_VETO bit to see if there are + * any constraints on link from manageability. For MAC's that don't + * have this bit just return faluse since the link can not be blocked + * via this method. + **/ +s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw) +{ + u32 mmngc; + + DEBUGFUNC("ixgbe_check_reset_blocked"); + + /* If we don't have this bit, it can't be blocking */ + if (hw->mac.type == ixgbe_mac_82598EB) + return false; + + mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC); + if (mmngc & IXGBE_MMNGC_MNG_VETO) { + ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, + "MNG_VETO bit detected.\n"); + return true; + } + + return false; +} + +/** + * ixgbe_validate_phy_addr - Determines phy address is valid + * @hw: pointer to hardware structure + * + **/ +bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr) +{ + u16 phy_id = 0; + bool valid = false; + + DEBUGFUNC("ixgbe_validate_phy_addr"); + + hw->phy.addr = phy_addr; + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id); + + if (phy_id != 0xFFFF && phy_id != 0x0) + valid = true; + + return valid; +} + +/** + * ixgbe_get_phy_id - Get the phy type + * @hw: pointer to hardware structure + * + **/ +s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) +{ + u32 status; + u16 phy_id_high = 0; + u16 phy_id_low = 0; + + DEBUGFUNC("ixgbe_get_phy_id"); + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &phy_id_high); + + if (status == IXGBE_SUCCESS) { + hw->phy.id = (u32)(phy_id_high << 16); + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &phy_id_low); + hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); + hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); + } + return status; +} + +/** + * ixgbe_get_phy_type_from_id - Get the phy type + * @phy_id: PHY ID information + * + **/ +enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) +{ + enum ixgbe_phy_type phy_type; + + DEBUGFUNC("ixgbe_get_phy_type_from_id"); + + switch (phy_id) { + case TN1010_PHY_ID: + phy_type = ixgbe_phy_tn; + break; + case 
X550_PHY_ID1: + case X550_PHY_ID2: + case X550_PHY_ID3: + case X540_PHY_ID: + phy_type = ixgbe_phy_aq; + break; + case QT2022_PHY_ID: + phy_type = ixgbe_phy_qt; + break; + case ATH_PHY_ID: + phy_type = ixgbe_phy_nl; + break; + case X557_PHY_ID: + phy_type = ixgbe_phy_x550em_ext_t; + break; + default: + phy_type = ixgbe_phy_unknown; + break; + } + return phy_type; +} + +/** + * ixgbe_reset_phy_generic - Performs a PHY reset + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) +{ + u32 i; + u16 ctrl = 0; + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_reset_phy_generic"); + + if (hw->phy.type == ixgbe_phy_unknown) + status = ixgbe_identify_phy_generic(hw); + + if (status != IXGBE_SUCCESS || hw->phy.type == ixgbe_phy_none) + goto out; + + /* Don't reset PHY if it's shut down due to overtemp. */ + if (!hw->phy.reset_if_overtemp && + (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) + goto out; + + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + goto out; + + /* + * Perform soft PHY reset to the PHY_XS. + * This will cause a soft reset to the PHY + */ + hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, + IXGBE_MDIO_PHY_XS_RESET); + + /* + * Poll for reset bit to self-clear indicating reset is complete. + * Some PHYs could take up to 3 seconds to complete and need about + * 1.7 usec delay after the reset is complete. + */ + for (i = 0; i < 30; i++) { + msec_delay(100); + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, &ctrl); + if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) { + usec_delay(2); + break; + } + } + + if (ctrl & IXGBE_MDIO_PHY_XS_RESET) { + status = IXGBE_ERR_RESET_FAILED; + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "PHY reset polling failed to complete.\n"); + } + +out: + return status; +} + +/** + * ixgbe_read_phy_mdi - Reads a value from a specified PHY register without + * the SWFW lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + **/ +s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data) +{ + u32 i, data, command; + + /* Setup and write the address cycle command */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address command did not complete.\n"); + return IXGBE_ERR_PHY; + } + + /* + * Address cycle complete, setup and write the read + * command + */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle + * completed. 
The MDI Command bit will clear when the + * operation is complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY read command didn't complete\n"); + return IXGBE_ERR_PHY; + } + + /* + * Read operation is complete. Get the data + * from MSRWD + */ + data = IXGBE_READ_REG(hw, IXGBE_MSRWD); + data >>= IXGBE_MSRWD_READ_DATA_SHIFT; + *phy_data = (u16)(data); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register + * using the SWFW lock - this function is needed in most cases + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + **/ +s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + s32 status; + u32 gssr = hw->phy.phy_semaphore_mask; + + DEBUGFUNC("ixgbe_read_phy_reg_generic"); + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) { + status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type, + phy_data); + hw->mac.ops.release_swfw_sync(hw, gssr); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register + * without SWFW lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + **/ +s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + u32 i, command; + + /* Put the data in the MDI single read and write data register*/ + IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); + + /* Setup and write the address cycle command */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address cmd didn't complete\n"); + return IXGBE_ERR_PHY; + } + + /* + * Address cycle complete, setup and write the write + * command + */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle + * completed. 
The MDI Command bit will clear when the + * operation is complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY write cmd didn't complete\n"); + return IXGBE_ERR_PHY; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register + * using SWFW lock- this function is needed in most cases + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + **/ +s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + s32 status; + u32 gssr = hw->phy.phy_semaphore_mask; + + DEBUGFUNC("ixgbe_write_phy_reg_generic"); + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) { + status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, + phy_data); + hw->mac.ops.release_swfw_sync(hw, gssr); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ixgbe_setup_phy_link_generic - Set and restart auto-neg + * @hw: pointer to hardware structure + * + * Restart auto-negotiation and PHY and waits for completion. + **/ +s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; + bool autoneg = false; + ixgbe_link_speed speed; + + DEBUGFUNC("ixgbe_setup_phy_link_generic"); + + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + /* Set or unset auto-negotiation 10G advertisement */ + hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + } + + if (hw->mac.type == ixgbe_mac_X550) { + if (speed & IXGBE_LINK_SPEED_5GB_FULL) { + /* Set or unset auto-negotiation 5G advertisement */ + hw->phy.ops.read_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE; + if (hw->phy.autoneg_advertised & + IXGBE_LINK_SPEED_5GB_FULL) + autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) { + /* Set or unset auto-negotiation 2.5G advertisement */ + hw->phy.ops.read_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE; + if (hw->phy.autoneg_advertised & + IXGBE_LINK_SPEED_2_5GB_FULL) + autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + } + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + /* Set or unset auto-negotiation 1G advertisement */ + hw->phy.ops.read_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; + if (hw->phy.autoneg_advertised & 
IXGBE_LINK_SPEED_1GB_FULL) + autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_100_FULL) { + /* Set or unset auto-negotiation 100M advertisement */ + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE | + IXGBE_MII_100BASE_T_ADVERTISE_HALF); + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) + autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + } + + /* Blocked by MNG FW so don't reset PHY */ + if (ixgbe_check_reset_blocked(hw)) + return status; + + /* Restart PHY auto-negotiation. */ + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); + + autoneg_reg |= IXGBE_MII_RESTART; + + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); + + return status; +} + +/** + * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities + * @hw: pointer to hardware structure + * @speed: new link speed + **/ +s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); + + DEBUGFUNC("ixgbe_setup_phy_link_speed_generic"); + + /* + * Clear autoneg_advertised and set new values based on input link + * speed. + */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (speed & IXGBE_LINK_SPEED_5GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL; + + if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + if (speed & IXGBE_LINK_SPEED_100_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; + + /* Setup link based on the new speed settings */ + ixgbe_setup_phy_link(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_copper_speeds_supported - Get copper link speeds from phy + * @hw: pointer to hardware structure + * + * Determines the supported link capabilities by reading the PHY auto + * negotiation register. 
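+ *
+ * The 10G, 1G and 100M ability bits map directly onto the matching
+ * IXGBE_LINK_SPEED_* flags; X550 parts additionally report 2.5G and 5G
+ * support, while X550EM_x removes the 100M capability.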
+ **/ +static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) +{ + s32 status; + u16 speed_ability; + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &speed_ability); + if (status) + return status; + + if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL; + if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL; + if (speed_ability & IXGBE_MDIO_PHY_SPEED_100M) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL; + + switch (hw->mac.type) { + case ixgbe_mac_X550: + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL; + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL; + break; + case ixgbe_mac_X550EM_x: + hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL; + break; + default: + break; + } + + return status; +} + +/** + * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + **/ +s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_get_copper_link_capabilities_generic"); + + *autoneg = true; + if (!hw->phy.speeds_supported) + status = ixgbe_get_copper_speeds_supported(hw); + + *speed = hw->phy.speeds_supported; + return status; +} + +/** + * ixgbe_check_phy_link_tnx - Determine link and speed status + * @hw: pointer to hardware structure + * + * Reads the VS1 register to determine if link is up and the current speed for + * the PHY. + **/ +s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up) +{ + s32 status = IXGBE_SUCCESS; + u32 time_out; + u32 max_time_out = 10; + u16 phy_link = 0; + u16 phy_speed = 0; + u16 phy_data = 0; + + DEBUGFUNC("ixgbe_check_phy_link_tnx"); + + /* Initialize speed and link to default case */ + *link_up = false; + *speed = IXGBE_LINK_SPEED_10GB_FULL; + + /* + * Check current speed and link status of the PHY register. + * This is a vendor specific register and may have to + * be changed for other copper PHYs. + */ + for (time_out = 0; time_out < max_time_out; time_out++) { + usec_delay(10); + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + &phy_data); + phy_link = phy_data & IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; + phy_speed = phy_data & + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; + if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { + *link_up = true; + if (phy_speed == + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS) + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + } + } + + return status; +} + +/** + * ixgbe_setup_phy_link_tnx - Set and restart auto-neg + * @hw: pointer to hardware structure + * + * Restart auto-negotiation and PHY and waits for completion. 
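+ * This follows the same advertise-then-restart sequence as
+ * ixgbe_setup_phy_link_generic() above, with the 1G advertisement
+ * programmed through the XNP transmit register instead of the vendor
+ * provisioning register.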
+ **/ +s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; + bool autoneg = false; + ixgbe_link_speed speed; + + DEBUGFUNC("ixgbe_setup_phy_link_tnx"); + + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + /* Set or unset auto-negotiation 10G advertisement */ + hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + /* Set or unset auto-negotiation 1G advertisement */ + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; + + hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_100_FULL) { + /* Set or unset auto-negotiation 100M advertisement */ + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_100BASE_T_ADVERTISE; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) + autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + } + + /* Blocked by MNG FW so don't reset PHY */ + if (ixgbe_check_reset_blocked(hw)) + return status; + + /* Restart PHY auto-negotiation. 
*/ + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); + + autoneg_reg |= IXGBE_MII_RESTART; + + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); + + return status; +} + +/** + * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version + * @hw: pointer to hardware structure + * @firmware_version: pointer to the PHY Firmware Version + **/ +s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, + u16 *firmware_version) +{ + s32 status; + + DEBUGFUNC("ixgbe_get_phy_firmware_version_tnx"); + + status = hw->phy.ops.read_reg(hw, TNX_FW_REV, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + firmware_version); + + return status; +} + +/** + * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version + * @hw: pointer to hardware structure + * @firmware_version: pointer to the PHY Firmware Version + **/ +s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, + u16 *firmware_version) +{ + s32 status; + + DEBUGFUNC("ixgbe_get_phy_firmware_version_generic"); + + status = hw->phy.ops.read_reg(hw, AQ_FW_REV, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + firmware_version); + + return status; +} + +/** + * ixgbe_reset_phy_nl - Performs a PHY reset + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) +{ + u16 phy_offset, control, eword, edata, block_crc; + bool end_data = false; + u16 list_offset, data_offset; + u16 phy_data = 0; + s32 ret_val = IXGBE_SUCCESS; + u32 i; + + DEBUGFUNC("ixgbe_reset_phy_nl"); + + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + goto out; + + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); + + /* reset the PHY and poll for completion */ + hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, + (phy_data | IXGBE_MDIO_PHY_XS_RESET)); + + for (i = 0; i < 100; i++) { + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); + if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0) + break; + msec_delay(10); + } + + if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) { + DEBUGOUT("PHY reset did not complete.\n"); + ret_val = IXGBE_ERR_PHY; + goto out; + } + + /* Get init offsets */ + ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, + &data_offset); + if (ret_val != IXGBE_SUCCESS) + goto out; + + ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc); + data_offset++; + while (!end_data) { + /* + * Read control word from PHY init contents offset + */ + ret_val = hw->eeprom.ops.read(hw, data_offset, &eword); + if (ret_val) + goto err_eeprom; + control = (eword & IXGBE_CONTROL_MASK_NL) >> + IXGBE_CONTROL_SHIFT_NL; + edata = eword & IXGBE_DATA_MASK_NL; + switch (control) { + case IXGBE_DELAY_NL: + data_offset++; + DEBUGOUT1("DELAY: %d MS\n", edata); + msec_delay(edata); + break; + case IXGBE_DATA_NL: + DEBUGOUT("DATA:\n"); + data_offset++; + ret_val = hw->eeprom.ops.read(hw, data_offset, + &phy_offset); + if (ret_val) + goto err_eeprom; + data_offset++; + for (i = 0; i < edata; i++) { + ret_val = hw->eeprom.ops.read(hw, data_offset, + &eword); + if (ret_val) + goto err_eeprom; + hw->phy.ops.write_reg(hw, phy_offset, + IXGBE_TWINAX_DEV, eword); + DEBUGOUT2("Wrote %4.4x to %4.4x\n", eword, + phy_offset); + data_offset++; + phy_offset++; + } + break; + case IXGBE_CONTROL_NL: + data_offset++; + DEBUGOUT("CONTROL:\n"); + if (edata == IXGBE_CONTROL_EOL_NL) { + DEBUGOUT("EOL\n"); + end_data = 
true; + } else if (edata == IXGBE_CONTROL_SOL_NL) { + DEBUGOUT("SOL\n"); + } else { + DEBUGOUT("Bad control value\n"); + ret_val = IXGBE_ERR_PHY; + goto out; + } + break; + default: + DEBUGOUT("Bad control type\n"); + ret_val = IXGBE_ERR_PHY; + goto out; + } + } + +out: + return ret_val; + +err_eeprom: + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", data_offset); + return IXGBE_ERR_PHY; +} + +/** + * ixgbe_identify_module_generic - Identifies module type + * @hw: pointer to hardware structure + * + * Determines HW type and calls appropriate function. + **/ +s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_SFP_NOT_PRESENT; + + DEBUGFUNC("ixgbe_identify_module_generic"); + + switch (hw->mac.ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + status = ixgbe_identify_sfp_module_generic(hw); + break; + + case ixgbe_media_type_fiber_qsfp: + status = ixgbe_identify_qsfp_module_generic(hw); + break; + + default: + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + status = IXGBE_ERR_SFP_NOT_PRESENT; + break; + } + + return status; +} + +/** + * ixgbe_identify_sfp_module_generic - Identifies SFP modules + * @hw: pointer to hardware structure + * + * Searches for and identifies the SFP module and assigns appropriate PHY type. + **/ +s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u32 vendor_oui = 0; + enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; + u8 identifier = 0; + u8 comp_codes_1g = 0; + u8 comp_codes_10g = 0; + u8 oui_bytes[3] = {0, 0, 0}; + u8 cable_tech = 0; + u8 cable_spec = 0; + u16 enforce_sfp = 0; + + DEBUGFUNC("ixgbe_identify_sfp_module_generic"); + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) { + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + status = IXGBE_ERR_SFP_NOT_PRESENT; + goto out; + } + + /* LAN ID is needed for I2C access */ + hw->mac.ops.set_lan_id(hw); + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_IDENTIFIER, + &identifier); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + if (identifier != IXGBE_SFF_IDENTIFIER_SFP) { + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + } else { + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_1GBE_COMP_CODES, + &comp_codes_1g); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_10GBE_COMP_CODES, + &comp_codes_10g); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_CABLE_TECHNOLOGY, + &cable_tech); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + /* ID Module + * ========= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CORE0 - 82599-specific + * 4 SFP_DA_CORE1 - 82599-specific + * 5 SFP_SR/LR_CORE0 - 82599-specific + * 6 SFP_SR/LR_CORE1 - 82599-specific + * 7 SFP_act_lmt_DA_CORE0 - 82599-specific + * 8 SFP_act_lmt_DA_CORE1 - 82599-specific + * 9 SFP_1g_cu_CORE0 - 82599-specific + * 10 SFP_1g_cu_CORE1 - 82599-specific + * 11 SFP_1g_sx_CORE0 - 82599-specific + * 12 SFP_1g_sx_CORE1 - 82599-specific + */ + if (hw->mac.type == ixgbe_mac_82598EB) { + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.sfp_type = ixgbe_sfp_type_da_cu; + else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) + hw->phy.sfp_type = ixgbe_sfp_type_sr; + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) + hw->phy.sfp_type = ixgbe_sfp_type_lr; + else + hw->phy.sfp_type = 
ixgbe_sfp_type_unknown; + } else { + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_da_cu_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_da_cu_core1; + } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) { + hw->phy.ops.read_i2c_eeprom( + hw, IXGBE_SFF_CABLE_SPEC_COMP, + &cable_spec); + if (cable_spec & + IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core1; + } else { + hw->phy.sfp_type = + ixgbe_sfp_type_unknown; + } + } else if (comp_codes_10g & + (IXGBE_SFF_10GBASESR_CAPABLE | + IXGBE_SFF_10GBASELR_CAPABLE)) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_srlr_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_srlr_core1; + } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_cu_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_1g_cu_core1; + } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_sx_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_1g_sx_core1; + } else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_lx_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_1g_lx_core1; + } else { + hw->phy.sfp_type = ixgbe_sfp_type_unknown; + } + } + + if (hw->phy.sfp_type != stored_sfp_type) + hw->phy.sfp_setup_needed = true; + + /* Determine if the SFP+ PHY is dual speed or not. */ + hw->phy.multispeed_fiber = false; + if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || + ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = true; + + /* Determine PHY vendor */ + if (hw->phy.type != ixgbe_phy_nl) { + hw->phy.id = identifier; + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE0, + &oui_bytes[0]); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE1, + &oui_bytes[1]); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE2, + &oui_bytes[2]); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + vendor_oui = + ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | + (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | + (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); + + switch (vendor_oui) { + case IXGBE_SFF_VENDOR_OUI_TYCO: + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_passive_tyco; + break; + case IXGBE_SFF_VENDOR_OUI_FTL: + if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) + hw->phy.type = ixgbe_phy_sfp_ftl_active; + else + hw->phy.type = ixgbe_phy_sfp_ftl; + break; + case IXGBE_SFF_VENDOR_OUI_AVAGO: + hw->phy.type = ixgbe_phy_sfp_avago; + break; + case IXGBE_SFF_VENDOR_OUI_INTEL: + hw->phy.type = ixgbe_phy_sfp_intel; + break; + default: + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_passive_unknown; + else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_active_unknown; + else + hw->phy.type = ixgbe_phy_sfp_unknown; + break; + } + } + + /* Allow any DA cable vendor */ + if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | + IXGBE_SFF_DA_ACTIVE_CABLE)) { + 
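+ /* Direct-attach cables are accepted regardless of vendor OUI;
+  * only optical modules are validated against the enforce-SFP
+  * device capability further below.
+  */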
status = IXGBE_SUCCESS; + goto out; + } + + /* Verify supported 1G SFP modules */ + if (comp_codes_10g == 0 && + !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + + /* Anything else 82598-based is supported */ + if (hw->mac.type == ixgbe_mac_82598EB) { + status = IXGBE_SUCCESS; + goto out; + } + + ixgbe_get_device_caps(hw, &enforce_sfp); + if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && + !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { + /* Make sure we're a supported PHY type */ + if (hw->phy.type == ixgbe_phy_sfp_intel) { + status = IXGBE_SUCCESS; + } else { + if (hw->allow_unsupported_sfp == true) { + EWARN(hw, "WARNING: Intel (R) Network " + "Connections are quality tested " + "using Intel (R) Ethernet Optics." + " Using untested modules is not " + "supported and may cause unstable" + " operation or damage to the " + "module or the adapter. Intel " + "Corporation is not responsible " + "for any harm caused by using " + "untested modules.\n", status); + status = IXGBE_SUCCESS; + } else { + DEBUGOUT("SFP+ module not supported\n"); + hw->phy.type = + ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + } + } + } else { + status = IXGBE_SUCCESS; + } + } + +out: + return status; + +err_read_i2c_eeprom: + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + if (hw->phy.type != ixgbe_phy_nl) { + hw->phy.id = 0; + hw->phy.type = ixgbe_phy_unknown; + } + return IXGBE_ERR_SFP_NOT_PRESENT; +} + +/** + * ixgbe_get_supported_phy_sfp_layer_generic - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current SFP. 
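+ * Passive DA assemblies map to SFP_PLUS_CU and active DA assemblies to
+ * SFP_ACTIVE_DA; optical modules are mapped to 10GBASE-SR/LR or
+ * 1000BASE-T/SX from the SFF 1G/10G compliance codes read over I2C.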
+ */ +s32 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw) +{ + u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u8 comp_codes_10g = 0; + u8 comp_codes_1g = 0; + + DEBUGFUNC("ixgbe_get_supported_phy_sfp_layer_generic"); + + hw->phy.ops.identify_sfp(hw); + if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) + return physical_layer; + + switch (hw->phy.type) { + case ixgbe_phy_sfp_passive_tyco: + case ixgbe_phy_sfp_passive_unknown: + case ixgbe_phy_qsfp_passive_unknown: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; + break; + case ixgbe_phy_sfp_ftl_active: + case ixgbe_phy_sfp_active_unknown: + case ixgbe_phy_qsfp_active_unknown: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA; + break; + case ixgbe_phy_sfp_avago: + case ixgbe_phy_sfp_ftl: + case ixgbe_phy_sfp_intel: + case ixgbe_phy_sfp_unknown: + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); + if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; + else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX; + break; + case ixgbe_phy_qsfp_intel: + case ixgbe_phy_qsfp_unknown: + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g); + if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + break; + default: + break; + } + + return physical_layer; +} + +/** + * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules + * @hw: pointer to hardware structure + * + * Searches for and identifies the QSFP module and assigns appropriate PHY type + **/ +s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u32 vendor_oui = 0; + enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; + u8 identifier = 0; + u8 comp_codes_1g = 0; + u8 comp_codes_10g = 0; + u8 oui_bytes[3] = {0, 0, 0}; + u16 enforce_sfp = 0; + u8 connector = 0; + u8 cable_length = 0; + u8 device_tech = 0; + bool active_cable = false; + + DEBUGFUNC("ixgbe_identify_qsfp_module_generic"); + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) { + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + status = IXGBE_ERR_SFP_NOT_PRESENT; + goto out; + } + + /* LAN ID is needed for I2C access */ + hw->mac.ops.set_lan_id(hw); + + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, + &identifier); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) { + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + + hw->phy.id = identifier; + + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP, + &comp_codes_10g); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP, + &comp_codes_1g); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) { + hw->phy.type = ixgbe_phy_qsfp_passive_unknown; + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = 
ixgbe_sfp_type_da_cu_core0; + else + hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1; + } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | + IXGBE_SFF_10GBASELR_CAPABLE)) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0; + else + hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1; + } else { + if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE) + active_cable = true; + + if (!active_cable) { + /* check for active DA cables that pre-date + * SFF-8436 v3.6 */ + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_CONNECTOR, + &connector); + + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_CABLE_LENGTH, + &cable_length); + + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_DEVICE_TECH, + &device_tech); + + if ((connector == + IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) && + (cable_length > 0) && + ((device_tech >> 4) == + IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL)) + active_cable = true; + } + + if (active_cable) { + hw->phy.type = ixgbe_phy_qsfp_active_unknown; + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core1; + } else { + /* unsupported module type */ + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + } + + if (hw->phy.sfp_type != stored_sfp_type) + hw->phy.sfp_setup_needed = true; + + /* Determine if the QSFP+ PHY is dual speed or not. */ + hw->phy.multispeed_fiber = false; + if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || + ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = true; + + /* Determine PHY vendor for optical modules */ + if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | + IXGBE_SFF_10GBASELR_CAPABLE)) { + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0, + &oui_bytes[0]); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1, + &oui_bytes[1]); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2, + &oui_bytes[2]); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + vendor_oui = + ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | + (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | + (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); + + if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL) + hw->phy.type = ixgbe_phy_qsfp_intel; + else + hw->phy.type = ixgbe_phy_qsfp_unknown; + + ixgbe_get_device_caps(hw, &enforce_sfp); + if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) { + /* Make sure we're a supported PHY type */ + if (hw->phy.type == ixgbe_phy_qsfp_intel) { + status = IXGBE_SUCCESS; + } else { + if (hw->allow_unsupported_sfp == true) { + EWARN(hw, "WARNING: Intel (R) Network " + "Connections are quality tested " + "using Intel (R) Ethernet Optics." + " Using untested modules is not " + "supported and may cause unstable" + " operation or damage to the " + "module or the adapter. 
Intel " + "Corporation is not responsible " + "for any harm caused by using " + "untested modules.\n", status); + status = IXGBE_SUCCESS; + } else { + DEBUGOUT("QSFP module not supported\n"); + hw->phy.type = + ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + } + } + } else { + status = IXGBE_SUCCESS; + } + } + +out: + return status; + +err_read_i2c_eeprom: + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + hw->phy.id = 0; + hw->phy.type = ixgbe_phy_unknown; + + return IXGBE_ERR_SFP_NOT_PRESENT; +} + + +/** + * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence + * @hw: pointer to hardware structure + * @list_offset: offset to the SFP ID list + * @data_offset: offset to the SFP data block + * + * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if + * so it returns the offsets to the phy init sequence block. + **/ +s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, + u16 *list_offset, + u16 *data_offset) +{ + u16 sfp_id; + u16 sfp_type = hw->phy.sfp_type; + + DEBUGFUNC("ixgbe_get_sfp_init_sequence_offsets"); + + if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) + return IXGBE_ERR_SFP_NOT_SUPPORTED; + + if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) + return IXGBE_ERR_SFP_NOT_PRESENT; + + if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) && + (hw->phy.sfp_type == ixgbe_sfp_type_da_cu)) + return IXGBE_ERR_SFP_NOT_SUPPORTED; + + /* + * Limiting active cables and 1G Phys must be initialized as + * SR modules + */ + if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 || + sfp_type == ixgbe_sfp_type_1g_lx_core0 || + sfp_type == ixgbe_sfp_type_1g_cu_core0 || + sfp_type == ixgbe_sfp_type_1g_sx_core0) + sfp_type = ixgbe_sfp_type_srlr_core0; + else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 || + sfp_type == ixgbe_sfp_type_1g_lx_core1 || + sfp_type == ixgbe_sfp_type_1g_cu_core1 || + sfp_type == ixgbe_sfp_type_1g_sx_core1) + sfp_type = ixgbe_sfp_type_srlr_core1; + + /* Read offset to PHY init contents */ + if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", + IXGBE_PHY_INIT_OFFSET_NL); + return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; + } + + if ((!*list_offset) || (*list_offset == 0xFFFF)) + return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; + + /* Shift offset to first ID word */ + (*list_offset)++; + + /* + * Find the matching SFP ID in the EEPROM + * and program the init sequence + */ + if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) + goto err_phy; + + while (sfp_id != IXGBE_PHY_INIT_END_NL) { + if (sfp_id == sfp_type) { + (*list_offset)++; + if (hw->eeprom.ops.read(hw, *list_offset, data_offset)) + goto err_phy; + if ((!*data_offset) || (*data_offset == 0xFFFF)) { + DEBUGOUT("SFP+ module not supported\n"); + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } else { + break; + } + } else { + (*list_offset) += 2; + if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) + goto err_phy; + } + } + + if (sfp_id == IXGBE_PHY_INIT_END_NL) { + DEBUGOUT("No matching SFP+ module found\n"); + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + + return IXGBE_SUCCESS; + +err_phy: + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "eeprom read at offset %d failed", *list_offset); + return IXGBE_ERR_PHY; +} + +/** + * ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's EEPROM over 
I2C interface. + **/ +s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data) +{ + DEBUGFUNC("ixgbe_read_i2c_eeprom_generic"); + + return hw->phy.ops.read_i2c_byte(hw, byte_offset, + IXGBE_I2C_EEPROM_DEV_ADDR, + eeprom_data); +} + +/** + * ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: byte offset at address 0xA2 + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's SFF-8472 data over I2C + **/ +STATIC s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data) +{ + return hw->phy.ops.read_i2c_byte(hw, byte_offset, + IXGBE_I2C_EEPROM_DEV_ADDR2, + sff8472_data); +} + +/** + * ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to write + * @eeprom_data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 eeprom_data) +{ + DEBUGFUNC("ixgbe_write_i2c_eeprom_generic"); + + return hw->phy.ops.write_i2c_byte(hw, byte_offset, + IXGBE_I2C_EEPROM_DEV_ADDR, + eeprom_data); +} + +/** + * ixgbe_is_sfp_probe - Returns true if SFP is being detected + * @hw: pointer to hardware structure + * @offset: eeprom offset to be read + * @addr: I2C address to be read + */ +STATIC bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr) +{ + if (addr == IXGBE_I2C_EEPROM_DEV_ADDR && + offset == IXGBE_SFF_IDENTIFIER && + hw->phy.sfp_type == ixgbe_sfp_type_not_present) + return true; + return false; +} + +/** + * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * @lock: true if to take and release semaphore + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. 
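+ *
+ * The transfer is bit-banged: START, device address with the write bit,
+ * the byte offset, a repeated START, device address with the read bit,
+ * then the data byte is clocked in, NACKed and the bus is STOPped.  The
+ * SW/FW semaphore is held for the duration when @lock is true, and a
+ * failed attempt clears the bus and retries up to max_retry times.
+ *
+ * For example, the identify path reads the SFF identifier through the
+ * phy.ops hooks, which ends up here as roughly:
+ *
+ *   ixgbe_read_i2c_byte_generic(hw, IXGBE_SFF_IDENTIFIER,
+ *                               IXGBE_I2C_EEPROM_DEV_ADDR, &identifier);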
+ **/ +STATIC s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data, bool lock) +{ + s32 status; + u32 max_retry = 10; + u32 retry = 0; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + bool nack = 1; + *data = 0; + + DEBUGFUNC("ixgbe_read_i2c_byte_generic"); + + if (hw->mac.type >= ixgbe_mac_X550) + max_retry = 3; + if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr)) + max_retry = IXGBE_SFP_DETECT_RETRIES; + + do { + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return IXGBE_ERR_SWFW_SYNC; + + ixgbe_i2c_start(hw); + + /* Device Address and write indication */ + status = ixgbe_clock_out_i2c_byte(hw, dev_addr); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_clock_out_i2c_byte(hw, byte_offset); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != IXGBE_SUCCESS) + goto fail; + + ixgbe_i2c_start(hw); + + /* Device Address and read indication */ + status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1)); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_clock_in_i2c_byte(hw, data); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_clock_out_i2c_bit(hw, nack); + if (status != IXGBE_SUCCESS) + goto fail; + + ixgbe_i2c_stop(hw); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + return IXGBE_SUCCESS; + +fail: + ixgbe_i2c_bus_clear(hw); + if (lock) { + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msec_delay(100); + } + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte read error - Retrying.\n"); + else + DEBUGOUT("I2C byte read error.\n"); + + } while (retry < max_retry); + + return status; +} + +/** + * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, true); +} + +/** + * ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, false); +} + +/** + * ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * @lock: true if to take and release semaphore + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. 
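+ *
+ * The write is a single attempt: START, device address with the write
+ * bit, the byte offset and the data byte, each ACKed by the module,
+ * followed by STOP; on failure the bus is cleared before the error is
+ * returned.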
+ **/ +STATIC s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data, bool lock) +{ + s32 status; + u32 max_retry = 1; + u32 retry = 0; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + + DEBUGFUNC("ixgbe_write_i2c_byte_generic"); + + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != + IXGBE_SUCCESS) + return IXGBE_ERR_SWFW_SYNC; + + do { + ixgbe_i2c_start(hw); + + status = ixgbe_clock_out_i2c_byte(hw, dev_addr); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_clock_out_i2c_byte(hw, byte_offset); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_clock_out_i2c_byte(hw, data); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != IXGBE_SUCCESS) + goto fail; + + ixgbe_i2c_stop(hw); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + return IXGBE_SUCCESS; + +fail: + ixgbe_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte write error - Retrying.\n"); + else + DEBUGOUT("I2C byte write error.\n"); + } while (retry < max_retry); + + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + return status; +} + +/** + * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, true); +} + +/** + * ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, false); +} + +/** + * ixgbe_i2c_start - Sets I2C start condition + * @hw: pointer to hardware structure + * + * Sets I2C start condition (High -> Low on SDA while SCL is High) + * Set bit-bang mode on X550 hardware. + **/ +STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw) +{ + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + + DEBUGFUNC("ixgbe_i2c_start"); + + i2cctl |= IXGBE_I2C_BB_EN_BY_MAC(hw); + + /* Start condition must begin with data and clock high */ + ixgbe_set_i2c_data(hw, &i2cctl, 1); + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for start condition (4.7us) */ + usec_delay(IXGBE_I2C_T_SU_STA); + + ixgbe_set_i2c_data(hw, &i2cctl, 0); + + /* Hold time for start condition (4us) */ + usec_delay(IXGBE_I2C_T_HD_STA); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(IXGBE_I2C_T_LOW); + +} + +/** + * ixgbe_i2c_stop - Sets I2C stop condition + * @hw: pointer to hardware structure + * + * Sets I2C stop condition (Low -> High on SDA while SCL is High) + * Disables bit-bang mode and negates data output enable on X550 + * hardware. 
+ **/ +STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw) +{ + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); + u32 bb_en_bit = IXGBE_I2C_BB_EN_BY_MAC(hw); + + DEBUGFUNC("ixgbe_i2c_stop"); + + /* Stop condition must begin with data low and clock high */ + ixgbe_set_i2c_data(hw, &i2cctl, 0); + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for stop condition (4us) */ + usec_delay(IXGBE_I2C_T_SU_STO); + + ixgbe_set_i2c_data(hw, &i2cctl, 1); + + /* bus free time between stop and start (4.7us)*/ + usec_delay(IXGBE_I2C_T_BUF); + + if (bb_en_bit || data_oe_bit || clk_oe_bit) { + i2cctl &= ~bb_en_bit; + i2cctl |= data_oe_bit | clk_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); + IXGBE_WRITE_FLUSH(hw); + } +} + +/** + * ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte to clock in + * + * Clocks in one byte data via I2C data/clock + **/ +STATIC s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) +{ + s32 i; + bool bit = 0; + + DEBUGFUNC("ixgbe_clock_in_i2c_byte"); + + *data = 0; + for (i = 7; i >= 0; i--) { + ixgbe_clock_in_i2c_bit(hw, &bit); + *data |= bit << i; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte clocked out + * + * Clocks out one byte data via I2C data/clock + **/ +STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) +{ + s32 status = IXGBE_SUCCESS; + s32 i; + u32 i2cctl; + bool bit; + + DEBUGFUNC("ixgbe_clock_out_i2c_byte"); + + for (i = 7; i >= 0; i--) { + bit = (data >> i) & 0x1; + status = ixgbe_clock_out_i2c_bit(hw, bit); + + if (status != IXGBE_SUCCESS) + break; + } + + /* Release SDA line (set high) */ + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); + i2cctl |= IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); + IXGBE_WRITE_FLUSH(hw); + + return status; +} + +/** + * ixgbe_get_i2c_ack - Polls for I2C ACK + * @hw: pointer to hardware structure + * + * Clocks in/out one bit via I2C data/clock + **/ +STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) +{ + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + s32 status = IXGBE_SUCCESS; + u32 i = 0; + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 timeout = 10; + bool ack = 1; + + DEBUGFUNC("ixgbe_get_i2c_ack"); + + if (data_oe_bit) { + i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); + i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); + IXGBE_WRITE_FLUSH(hw); + } + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(IXGBE_I2C_T_HIGH); + + /* Poll for ACK. 
Note that ACK in I2C spec is + * transition from 1 to 0 */ + for (i = 0; i < timeout; i++) { + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + ack = ixgbe_get_i2c_data(hw, &i2cctl); + + usec_delay(1); + if (!ack) + break; + } + + if (ack) { + DEBUGOUT("I2C ack was not received.\n"); + status = IXGBE_ERR_I2C; + } + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(IXGBE_I2C_T_LOW); + + return status; +} + +/** + * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: read data value + * + * Clocks in one bit via I2C data/clock + **/ +STATIC s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) +{ + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + + DEBUGFUNC("ixgbe_clock_in_i2c_bit"); + + if (data_oe_bit) { + i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); + i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); + IXGBE_WRITE_FLUSH(hw); + } + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(IXGBE_I2C_T_HIGH); + + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + *data = ixgbe_get_i2c_data(hw, &i2cctl); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(IXGBE_I2C_T_LOW); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: data value to write + * + * Clocks out one bit via I2C data/clock + **/ +STATIC s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) +{ + s32 status; + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + + DEBUGFUNC("ixgbe_clock_out_i2c_bit"); + + status = ixgbe_set_i2c_data(hw, &i2cctl, data); + if (status == IXGBE_SUCCESS) { + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(IXGBE_I2C_T_HIGH); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us. + * This also takes care of the data hold time. + */ + usec_delay(IXGBE_I2C_T_LOW); + } else { + status = IXGBE_ERR_I2C; + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "I2C data was not set to %X\n", data); + } + + return status; +} + +/** + * ixgbe_raise_i2c_clk - Raises the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Raises the I2C clock line '0'->'1' + * Negates the I2C clock output enable on X550 hardware. + **/ +STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) +{ + u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); + u32 i = 0; + u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT; + u32 i2cctl_r = 0; + + DEBUGFUNC("ixgbe_raise_i2c_clk"); + + if (clk_oe_bit) { + *i2cctl |= clk_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + } + + for (i = 0; i < timeout; i++) { + *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw); + + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + /* SCL rise time (1000ns) */ + usec_delay(IXGBE_I2C_T_RISE); + + i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw)) + break; + } +} + +/** + * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Lowers the I2C clock line '1'->'0' + * Asserts the I2C clock output enable on X550 hardware. 
+ **/ +STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) +{ + DEBUGFUNC("ixgbe_lower_i2c_clk"); + + *i2cctl &= ~(IXGBE_I2C_CLK_OUT_BY_MAC(hw)); + *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); + + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + + /* SCL fall time (300ns) */ + usec_delay(IXGBE_I2C_T_FALL); +} + +/** + * ixgbe_set_i2c_data - Sets the I2C data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * @data: I2C data value (0 or 1) to set + * + * Sets the I2C data bit + * Asserts the I2C data output enable on X550 hardware. + **/ +STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) +{ + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_set_i2c_data"); + + if (data) + *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); + else + *i2cctl &= ~(IXGBE_I2C_DATA_OUT_BY_MAC(hw)); + *i2cctl &= ~data_oe_bit; + + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + + /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ + usec_delay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); + + if (!data) /* Can't verify data in this case */ + return IXGBE_SUCCESS; + if (data_oe_bit) { + *i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + } + + /* Verify data was set correctly */ + *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + if (data != ixgbe_get_i2c_data(hw, i2cctl)) { + status = IXGBE_ERR_I2C; + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "Error - I2C data was not set to %X.\n", + data); + } + + return status; +} + +/** + * ixgbe_get_i2c_data - Reads the I2C SDA data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Returns the I2C data bit value + * Negates the I2C data output enable on X550 hardware. + **/ +STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl) +{ + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + bool data; + + DEBUGFUNC("ixgbe_get_i2c_data"); + + if (data_oe_bit) { + *i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + usec_delay(IXGBE_I2C_T_FALL); + } + + if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw)) + data = 1; + else + data = 0; + + return data; +} + +/** + * ixgbe_i2c_bus_clear - Clears the I2C bus + * @hw: pointer to hardware structure + * + * Clears the I2C bus by sending nine clock pulses. + * Used when data line is stuck low. + **/ +void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) +{ + u32 i2cctl; + u32 i; + + DEBUGFUNC("ixgbe_i2c_bus_clear"); + + ixgbe_i2c_start(hw); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + + ixgbe_set_i2c_data(hw, &i2cctl, 1); + + for (i = 0; i < 9; i++) { + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Min high period of clock is 4us */ + usec_delay(IXGBE_I2C_T_HIGH); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Min low period of clock is 4.7us*/ + usec_delay(IXGBE_I2C_T_LOW); + } + + ixgbe_i2c_start(hw); + + /* Put the i2c bus back to default state */ + ixgbe_i2c_stop(hw); +} + +/** + * ixgbe_tn_check_overtemp - Checks if an overtemp occurred. 
+ * @hw: pointer to hardware structure + * + * Checks if the LASI temp alarm status was triggered due to overtemp + **/ +s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u16 phy_data = 0; + + DEBUGFUNC("ixgbe_tn_check_overtemp"); + + if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM) + goto out; + + /* Check that the LASI temp alarm status was triggered */ + hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data); + + if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) + goto out; + + status = IXGBE_ERR_OVERTEMP; + ERROR_REPORT1(IXGBE_ERROR_CAUTION, "Device over temperature"); +out: + return status; +} + +/** + * ixgbe_set_copper_phy_power - Control power for copper phy + * @hw: pointer to hardware structure + * @on: true for on, false for off + */ +s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) +{ + u32 status; + u16 reg; + + if (!on && ixgbe_mng_present(hw)) + return 0; + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + if (status) + return status; + + if (on) { + reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; + } else { + if (ixgbe_check_reset_blocked(hw)) + return 0; + reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; + } + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); + return status; +} diff --git a/drivers/net/ixgbe/base/ixgbe_phy.h b/drivers/net/ixgbe/base/ixgbe_phy.h new file mode 100644 index 00000000..1a5affe5 --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_phy.h @@ -0,0 +1,214 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#ifndef _IXGBE_PHY_H_ +#define _IXGBE_PHY_H_ + +#include "ixgbe_type.h" +#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0 +#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 +#define IXGBE_I2C_EEPROM_BANK_LEN 0xFF + +/* EEPROM byte offsets */ +#define IXGBE_SFF_IDENTIFIER 0x0 +#define IXGBE_SFF_IDENTIFIER_SFP 0x3 +#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25 +#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26 +#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27 +#define IXGBE_SFF_1GBE_COMP_CODES 0x6 +#define IXGBE_SFF_10GBE_COMP_CODES 0x3 +#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8 +#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C +#define IXGBE_SFF_SFF_8472_SWAP 0x5C +#define IXGBE_SFF_SFF_8472_COMP 0x5E +#define IXGBE_SFF_SFF_8472_OSCB 0x6E +#define IXGBE_SFF_SFF_8472_ESCB 0x76 +#define IXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD +#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5 +#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6 +#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7 +#define IXGBE_SFF_QSFP_CONNECTOR 0x82 +#define IXGBE_SFF_QSFP_10GBE_COMP 0x83 +#define IXGBE_SFF_QSFP_1GBE_COMP 0x86 +#define IXGBE_SFF_QSFP_CABLE_LENGTH 0x92 +#define IXGBE_SFF_QSFP_DEVICE_TECH 0x93 + +/* Bitmasks */ +#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 +#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8 +#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 +#define IXGBE_SFF_1GBASESX_CAPABLE 0x1 +#define IXGBE_SFF_1GBASELX_CAPABLE 0x2 +#define IXGBE_SFF_1GBASET_CAPABLE 0x8 +#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 +#define IXGBE_SFF_10GBASELR_CAPABLE 0x20 +#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0 +#define IXGBE_SFF_ADDRESSING_MODE 0x4 +#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 +#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 +#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23 +#define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0 +#define IXGBE_I2C_EEPROM_READ_MASK 0x100 +#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 +#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 +#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 +#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 +#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 + +#define IXGBE_CS4227 0xBE /* CS4227 address */ +#define IXGBE_CS4227_GLOBAL_ID_LSB 0 +#define IXGBE_CS4227_SCRATCH 2 +#define IXGBE_CS4227_GLOBAL_ID_VALUE 0x03E5 +#define IXGBE_CS4227_RESET_PENDING 0x1357 +#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5 +#define IXGBE_CS4227_RETRIES 15 +#define IXGBE_CS4227_EFUSE_STATUS 0x0181 +#define IXGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to program speed */ +#define IXGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to program EDC */ +#define IXGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to program speed */ +#define IXGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */ +#define IXGBE_CS4227_EEPROM_STATUS 0x5001 +#define IXGBE_CS4227_EEPROM_LOAD_OK 0x0001 +#define IXGBE_CS4227_SPEED_1G 0x8000 +#define IXGBE_CS4227_SPEED_10G 0 +#define IXGBE_CS4227_EDC_MODE_CX1 0x0002 +#define IXGBE_CS4227_EDC_MODE_SR 0x0004 +#define IXGBE_CS4227_EDC_MODE_DIAG 0x0008 +#define IXGBE_CS4227_RESET_HOLD 500 /* microseconds */ +#define IXGBE_CS4227_RESET_DELAY 450 /* milliseconds */ +#define IXGBE_CS4227_CHECK_DELAY 30 /* milliseconds */ +#define IXGBE_PE 0xE0 /* Port expander address */ +#define IXGBE_PE_OUTPUT 1 /* Output register offset */ +#define IXGBE_PE_CONFIG 3 /* Config register offset */ +#define IXGBE_PE_BIT1 (1 << 1) + +/* Flow control defines */ +#define IXGBE_TAF_SYM_PAUSE 0x400 +#define IXGBE_TAF_ASM_PAUSE 0x800 + +/* Bit-shift macros 
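+ * used to pack the three OUI bytes read from the module EEPROM into the
+ * 32-bit 0x[byte0][byte1][byte2]00 layout of the vendor OUI constants
+ * below, i.e. (byte0 << 24) | (byte1 << 16) | (byte2 << 8)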
*/ +#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 +#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 +#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8 + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600 +#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500 +#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100 + +/* I2C SDA and SCL timing parameters for standard mode */ +#define IXGBE_I2C_T_HD_STA 4 +#define IXGBE_I2C_T_LOW 5 +#define IXGBE_I2C_T_HIGH 4 +#define IXGBE_I2C_T_SU_STA 5 +#define IXGBE_I2C_T_HD_DATA 5 +#define IXGBE_I2C_T_SU_DATA 1 +#define IXGBE_I2C_T_RISE 1 +#define IXGBE_I2C_T_FALL 1 +#define IXGBE_I2C_T_SU_STO 4 +#define IXGBE_I2C_T_BUF 5 + +#ifndef IXGBE_SFP_DETECT_RETRIES +#define IXGBE_SFP_DETECT_RETRIES 10 + +#endif /* IXGBE_SFP_DETECT_RETRIES */ +#define IXGBE_TN_LASI_STATUS_REG 0x9005 +#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 + +/* SFP+ SFF-8472 Compliance */ +#define IXGBE_SFF_SFF_8472_UNSUP 0x00 + +s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); +bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr); +enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); +s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); +s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); +s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); +s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data); +s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 phy_data); +s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); +s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); +s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); +s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg); +s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw); + +/* PHY specific */ +s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up); +s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); +s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, + u16 *firmware_version); +s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, + u16 *firmware_version); + +s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); +s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on); +s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); +s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); +s32 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw); +s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); +s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, + u16 *list_offset, + u16 *data_offset); +s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); +s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data); +s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 eeprom_data); +void 
ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); +s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, + u16 *val, bool lock); +s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, + u16 val, bool lock); +#endif /* _IXGBE_PHY_H_ */ diff --git a/drivers/net/ixgbe/base/ixgbe_type.h b/drivers/net/ixgbe/base/ixgbe_type.h new file mode 100644 index 00000000..4dce2ac1 --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_type.h @@ -0,0 +1,4121 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _IXGBE_TYPE_H_ +#define _IXGBE_TYPE_H_ + +/* + * The following is a brief description of the error categories used by the + * ERROR_REPORT* macros. + * + * - IXGBE_ERROR_INVALID_STATE + * This category is for errors which represent a serious failure state that is + * unexpected, and could be potentially harmful to device operation. It should + * not be used for errors relating to issues that can be worked around or + * ignored. + * + * - IXGBE_ERROR_POLLING + * This category is for errors related to polling/timeout issues and should be + * used in any case where the timeout occured, or a failure to obtain a lock, or + * failure to receive data within the time limit. + * + * - IXGBE_ERROR_CAUTION + * This category should be used for reporting issues that may be the cause of + * other errors, such as temperature warnings. It should indicate an event which + * could be serious, but hasn't necessarily caused problems yet. + * + * - IXGBE_ERROR_SOFTWARE + * This category is intended for errors due to software state preventing + * something. The category is not intended for errors due to bad arguments, or + * due to unsupported features. It should be used when a state occurs which + * prevents action but is not a serious issue. + * + * - IXGBE_ERROR_ARGUMENT + * This category is for when a bad or invalid argument is passed. 
It should be + * used whenever a function is called and error checking has detected the + * argument is wrong or incorrect. + * + * - IXGBE_ERROR_UNSUPPORTED + * This category is for errors which are due to unsupported circumstances or + * configuration issues. It should not be used when the issue is due to an + * invalid argument, but for when something has occurred that is unsupported + * (Ex: Flow control autonegotiation or an unsupported SFP+ module.) + */ + +#include "ixgbe_osdep.h" + +/* Override this by setting IOMEM in your ixgbe_osdep.h header */ + +/* Vendor ID */ +#define IXGBE_INTEL_VENDOR_ID 0x8086 + +/* Device IDs */ +#define IXGBE_DEV_ID_82598 0x10B6 +#define IXGBE_DEV_ID_82598_BX 0x1508 +#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 +#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 +#define IXGBE_DEV_ID_82598AT 0x10C8 +#define IXGBE_DEV_ID_82598AT2 0x150B +#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB +#define IXGBE_DEV_ID_82598EB_CX4 0x10DD +#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC +#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1 +#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 +#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 +#define IXGBE_DEV_ID_82599_KX4 0x10F7 +#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 +#define IXGBE_DEV_ID_82599_KR 0x1517 +#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 +#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C +#define IXGBE_DEV_ID_82599_CX4 0x10F9 +#define IXGBE_DEV_ID_82599_SFP 0x10FB +#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 +#define IXGBE_SUBDEV_ID_82599_SFP_WOL0 0x1071 +#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 +#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 +#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470 +#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B +#define IXGBE_SUBDEV_ID_82599_LOM_SFP 0x8976 +#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6 0x2159 +#define IXGBE_SUBDEV_ID_82599_SFP_1OCP 0x000D +#define IXGBE_SUBDEV_ID_82599_SFP_2OCP 0x0008 +#define IXGBE_SUBDEV_ID_82599_SFP_LOM 0x06EE +#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A +#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 +#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 +#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D +#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A +#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558 +#define IXGBE_DEV_ID_82599EN_SFP 0x1557 +#define IXGBE_SUBDEV_ID_82599EN_SFP_OCP1 0x0001 +#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC +#define IXGBE_DEV_ID_82599_T3_LOM 0x151C +#define IXGBE_DEV_ID_82599_VF 0x10ED +#define IXGBE_DEV_ID_82599_VF_HV 0x152E +#define IXGBE_DEV_ID_82599_LS 0x154F +#define IXGBE_DEV_ID_X540T 0x1528 +#define IXGBE_DEV_ID_X540_VF 0x1515 +#define IXGBE_DEV_ID_X540_VF_HV 0x1530 +#define IXGBE_DEV_ID_X540T1 0x1560 +#define IXGBE_DEV_ID_X550T 0x1563 +#define IXGBE_DEV_ID_X550T1 0x15D1 +/* Placeholder value, pending official value. 
*/ +#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2 +#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3 +#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 +#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15C6 +#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15C7 +#define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8 +#define IXGBE_DEV_ID_X550EM_A_QSFP 0x15CA +#define IXGBE_DEV_ID_X550EM_A_QSFP_N 0x15CC +#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE +#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA +#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB +#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC +#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD +#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE +#define IXGBE_DEV_ID_X550_VF_HV 0x1564 +#define IXGBE_DEV_ID_X550_VF 0x1565 +#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5 +#define IXGBE_DEV_ID_X550EM_A_VF_HV 0x15B4 +#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 +#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9 + +#define IXGBE_CAT(r, m) IXGBE_##r##m + +#define IXGBE_BY_MAC(_hw, r) ((_hw)->mvals[IXGBE_CAT(r, _IDX)]) + +/* General Registers */ +#define IXGBE_CTRL 0x00000 +#define IXGBE_STATUS 0x00008 +#define IXGBE_CTRL_EXT 0x00018 +#define IXGBE_ESDP 0x00020 +#define IXGBE_EODSDP 0x00028 +#define IXGBE_I2CCTL_82599 0x00028 +#define IXGBE_I2CCTL IXGBE_I2CCTL_82599 +#define IXGBE_I2CCTL_X540 IXGBE_I2CCTL_82599 +#define IXGBE_I2CCTL_X550 0x15F5C +#define IXGBE_I2CCTL_X550EM_x IXGBE_I2CCTL_X550 +#define IXGBE_I2CCTL_X550EM_a IXGBE_I2CCTL_X550 +#define IXGBE_I2CCTL_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2CCTL) +#define IXGBE_PHY_GPIO 0x00028 +#define IXGBE_MAC_GPIO 0x00030 +#define IXGBE_PHYINT_STATUS0 0x00100 +#define IXGBE_PHYINT_STATUS1 0x00104 +#define IXGBE_PHYINT_STATUS2 0x00108 +#define IXGBE_LEDCTL 0x00200 +#define IXGBE_FRTIMER 0x00048 +#define IXGBE_TCPTIMER 0x0004C +#define IXGBE_CORESPARE 0x00600 +#define IXGBE_EXVET 0x05078 + +/* NVM Registers */ +#define IXGBE_EEC 0x10010 +#define IXGBE_EEC_X540 IXGBE_EEC +#define IXGBE_EEC_X550 IXGBE_EEC +#define IXGBE_EEC_X550EM_x IXGBE_EEC +#define IXGBE_EEC_X550EM_a 0x15FF8 +#define IXGBE_EEC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EEC) + +#define IXGBE_EERD 0x10014 +#define IXGBE_EEWR 0x10018 + +#define IXGBE_FLA 0x1001C +#define IXGBE_FLA_X540 IXGBE_FLA +#define IXGBE_FLA_X550 IXGBE_FLA +#define IXGBE_FLA_X550EM_x IXGBE_FLA +#define IXGBE_FLA_X550EM_a 0x15F6C +#define IXGBE_FLA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FLA) + +#define IXGBE_EEMNGCTL 0x10110 +#define IXGBE_EEMNGDATA 0x10114 +#define IXGBE_FLMNGCTL 0x10118 +#define IXGBE_FLMNGDATA 0x1011C +#define IXGBE_FLMNGCNT 0x10120 +#define IXGBE_FLOP 0x1013C + +#define IXGBE_GRC 0x10200 +#define IXGBE_GRC_X540 IXGBE_GRC +#define IXGBE_GRC_X550 IXGBE_GRC +#define IXGBE_GRC_X550EM_x IXGBE_GRC +#define IXGBE_GRC_X550EM_a 0x15F64 +#define IXGBE_GRC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), GRC) + +#define IXGBE_SRAMREL 0x10210 +#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL +#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL +#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL +#define IXGBE_SRAMREL_X550EM_a 0x15F6C +#define IXGBE_SRAMREL_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SRAMREL) + +#define IXGBE_PHYDBG 0x10218 + +/* General Receive Control */ +#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ +#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */ + +#define IXGBE_VPDDIAG0 0x10204 +#define IXGBE_VPDDIAG1 0x10208 + +/* I2CCTL Bit Masks */ +#define IXGBE_I2C_CLK_IN 0x00000001 +#define IXGBE_I2C_CLK_IN_X540 IXGBE_I2C_CLK_IN +#define IXGBE_I2C_CLK_IN_X550 0x00004000 +#define IXGBE_I2C_CLK_IN_X550EM_x IXGBE_I2C_CLK_IN_X550 +#define IXGBE_I2C_CLK_IN_X550EM_a IXGBE_I2C_CLK_IN_X550 +#define 
IXGBE_I2C_CLK_IN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_IN) + +#define IXGBE_I2C_CLK_OUT 0x00000002 +#define IXGBE_I2C_CLK_OUT_X540 IXGBE_I2C_CLK_OUT +#define IXGBE_I2C_CLK_OUT_X550 0x00000200 +#define IXGBE_I2C_CLK_OUT_X550EM_x IXGBE_I2C_CLK_OUT_X550 +#define IXGBE_I2C_CLK_OUT_X550EM_a IXGBE_I2C_CLK_OUT_X550 +#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OUT) + +#define IXGBE_I2C_DATA_IN 0x00000004 +#define IXGBE_I2C_DATA_IN_X540 IXGBE_I2C_DATA_IN +#define IXGBE_I2C_DATA_IN_X550 0x00001000 +#define IXGBE_I2C_DATA_IN_X550EM_x IXGBE_I2C_DATA_IN_X550 +#define IXGBE_I2C_DATA_IN_X550EM_a IXGBE_I2C_DATA_IN_X550 +#define IXGBE_I2C_DATA_IN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_IN) + +#define IXGBE_I2C_DATA_OUT 0x00000008 +#define IXGBE_I2C_DATA_OUT_X540 IXGBE_I2C_DATA_OUT +#define IXGBE_I2C_DATA_OUT_X550 0x00000400 +#define IXGBE_I2C_DATA_OUT_X550EM_x IXGBE_I2C_DATA_OUT_X550 +#define IXGBE_I2C_DATA_OUT_X550EM_a IXGBE_I2C_DATA_OUT_X550 +#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OUT) + +#define IXGBE_I2C_DATA_OE_N_EN 0 +#define IXGBE_I2C_DATA_OE_N_EN_X540 IXGBE_I2C_DATA_OE_N_EN +#define IXGBE_I2C_DATA_OE_N_EN_X550 0x00000800 +#define IXGBE_I2C_DATA_OE_N_EN_X550EM_x IXGBE_I2C_DATA_OE_N_EN_X550 +#define IXGBE_I2C_DATA_OE_N_EN_X550EM_a IXGBE_I2C_DATA_OE_N_EN_X550 +#define IXGBE_I2C_DATA_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OE_N_EN) + +#define IXGBE_I2C_BB_EN 0 +#define IXGBE_I2C_BB_EN_X540 IXGBE_I2C_BB_EN +#define IXGBE_I2C_BB_EN_X550 0x00000100 +#define IXGBE_I2C_BB_EN_X550EM_x IXGBE_I2C_BB_EN_X550 +#define IXGBE_I2C_BB_EN_X550EM_a IXGBE_I2C_BB_EN_X550 + +#define IXGBE_I2C_BB_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_BB_EN) + +#define IXGBE_I2C_CLK_OE_N_EN 0 +#define IXGBE_I2C_CLK_OE_N_EN_X540 IXGBE_I2C_CLK_OE_N_EN +#define IXGBE_I2C_CLK_OE_N_EN_X550 0x00002000 +#define IXGBE_I2C_CLK_OE_N_EN_X550EM_x IXGBE_I2C_CLK_OE_N_EN_X550 +#define IXGBE_I2C_CLK_OE_N_EN_X550EM_a IXGBE_I2C_CLK_OE_N_EN_X550 +#define IXGBE_I2C_CLK_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OE_N_EN) +#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500 + +#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 +#define IXGBE_EMC_INTERNAL_DATA 0x00 +#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20 +#define IXGBE_EMC_DIODE1_DATA 0x01 +#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19 +#define IXGBE_EMC_DIODE2_DATA 0x23 +#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A + +#define IXGBE_MAX_SENSORS 3 + +struct ixgbe_thermal_diode_data { + u8 location; + u8 temp; + u8 caution_thresh; + u8 max_op_thresh; +}; + +struct ixgbe_thermal_sensor_data { + struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS]; +}; + +/* Interrupt Registers */ +#define IXGBE_EICR 0x00800 +#define IXGBE_EICS 0x00808 +#define IXGBE_EIMS 0x00880 +#define IXGBE_EIMC 0x00888 +#define IXGBE_EIAC 0x00810 +#define IXGBE_EIAM 0x00890 +#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4) +#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4) +#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4) +#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4) +/* 82599 EITR is only 12 bits, with the lower 3 always zero */ +/* + * 82598 EITR is 16 bits but set the limits based on the max + * supported by all ixgbe hardware + */ +#define IXGBE_MAX_INT_RATE 488281 +#define IXGBE_MIN_INT_RATE 956 +#define IXGBE_MAX_EITR 0x00000FF8 +#define IXGBE_MIN_EITR 8 +#define IXGBE_EITR(_i) (((_i) <= 23) ? 
(0x00820 + ((_i) * 4)) : \ + (0x012300 + (((_i) - 24) * 4))) +#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8 +#define IXGBE_EITR_LLI_MOD 0x00008000 +#define IXGBE_EITR_CNT_WDIS 0x80000000 +#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ +#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */ +#define IXGBE_EITRSEL 0x00894 +#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */ +#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */ +#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4))) +#define IXGBE_GPIE 0x00898 + +/* Flow Control Registers */ +#define IXGBE_FCADBUL 0x03210 +#define IXGBE_FCADBUH 0x03214 +#define IXGBE_FCAMACL 0x04328 +#define IXGBE_FCAMACH 0x0432C +#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_PFCTOP 0x03008 +#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */ +#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */ +#define IXGBE_FCRTV 0x032A0 +#define IXGBE_FCCFG 0x03D00 +#define IXGBE_TFCS 0x0CE00 + +/* Receive DMA Registers */ +#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \ + (0x0D000 + (((_i) - 64) * 0x40))) +#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \ + (0x0D004 + (((_i) - 64) * 0x40))) +#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \ + (0x0D008 + (((_i) - 64) * 0x40))) +#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \ + (0x0D010 + (((_i) - 64) * 0x40))) +#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \ + (0x0D018 + (((_i) - 64) * 0x40))) +#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \ + (0x0D028 + (((_i) - 64) * 0x40))) +#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \ + (0x0D02C + (((_i) - 64) * 0x40))) +#define IXGBE_RSCDBU 0x03028 +#define IXGBE_RDDCC 0x02F20 +#define IXGBE_RXMEMWRAP 0x03190 +#define IXGBE_STARCTRL 0x03024 +/* + * Split and Replication Receive Control Registers + * 00-15 : 0x02100 + n*4 + * 16-64 : 0x01014 + n*0x40 + * 64-127: 0x0D014 + (n-64)*0x40 + */ +#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ + (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ + (0x0D014 + (((_i) - 64) * 0x40)))) +/* + * Rx DCA Control Register: + * 00-15 : 0x02200 + n*4 + * 16-64 : 0x0100C + n*0x40 + * 64-127: 0x0D00C + (n-64)*0x40 + */ +#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ + (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \ + (0x0D00C + (((_i) - 64) * 0x40)))) +#define IXGBE_RDRXCTL 0x02F00 +/* 8 of these 0x03C00 - 0x03C1C */ +#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) +#define IXGBE_RXCTRL 0x03000 +#define IXGBE_DROPEN 0x03D04 +#define IXGBE_RXPBSIZE_SHIFT 10 +#define IXGBE_RXPBSIZE_MASK 0x000FFC00 + +/* Receive Registers */ +#define IXGBE_RXCSUM 0x05000 +#define IXGBE_RFCTL 0x05008 +#define IXGBE_DRECCCTL 0x02F08 +#define IXGBE_DRECCCTL_DISABLE 0 +#define IXGBE_DRECCCTL2 0x02F8C + +/* Multicast Table Array - 128 entries */ +#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) +#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x0A200 + ((_i) * 8))) +#define IXGBE_RAH(_i) (((_i) <= 15) ? 
(0x05404 + ((_i) * 8)) : \ + (0x0A204 + ((_i) * 8))) +#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8)) +#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8)) +/* Packet split receive type */ +#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \ + (0x0EA00 + ((_i) * 4))) +/* array of 4096 1-bit vlan filters */ +#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) +/*array of 4096 4-bit vlan vmdq indices */ +#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) +#define IXGBE_FCTRL 0x05080 +#define IXGBE_VLNCTRL 0x05088 +#define IXGBE_MCSTCTRL 0x05090 +#define IXGBE_MRQC 0x05818 +#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */ +#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */ +#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */ +#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */ +#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */ +#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */ +#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */ +#define IXGBE_RQTC 0x0EC70 +#define IXGBE_MTQC 0x08120 +#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ +#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ +#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */ +#define IXGBE_PFFLPL 0x050B0 +#define IXGBE_PFFLPH 0x050B4 +#define IXGBE_VT_CTL 0x051B0 +#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */ +/* 64 Mailboxes, 16 DW each */ +#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i))) +#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */ +#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */ +#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) +#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) +#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4)) +#define IXGBE_QDE 0x2F04 +#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 total */ +#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */ +#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4)) +#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4)) +#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) +#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4)) +#define IXGBE_LVMMC_RX 0x2FA8 +#define IXGBE_LVMMC_TX 0x8108 +#define IXGBE_LMVM_RX 0x2FA4 +#define IXGBE_LMVM_TX 0x8124 +#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */ +#define IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 4)) /* 4 total */ +#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ +#define IXGBE_RXFECCERR0 0x051B8 +#define IXGBE_LLITHRESH 0x0EC90 +#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_IMIRVP 0x05AC0 +#define IXGBE_VMD_CTL 0x0581C +#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ +#define IXGBE_ERETA(_i) (0x0EE80 + ((_i) * 4)) /* 96 of these (0-95) */ +#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ + +/* Registers for setting up RSS on X550 with SRIOV + * _p - pool number (0..63) + * _i - index (0..10 for PFVFRSSRK, 0..15 for PFVFRETA) + */ +#define IXGBE_PFVFMRQC(_p) (0x03400 + ((_p) * 4)) +#define IXGBE_PFVFRSSRK(_i, _p) (0x018000 + ((_i) * 4) + ((_p) * 0x40)) +#define IXGBE_PFVFRETA(_i, _p) (0x019000 + ((_i) * 4) + ((_p) * 0x40)) + +/* Flow Director registers */ +#define IXGBE_FDIRCTRL 0x0EE00 +#define IXGBE_FDIRHKEY 0x0EE68 +#define IXGBE_FDIRSKEY 0x0EE6C +#define 
IXGBE_FDIRDIP4M 0x0EE3C +#define IXGBE_FDIRSIP4M 0x0EE40 +#define IXGBE_FDIRTCPM 0x0EE44 +#define IXGBE_FDIRUDPM 0x0EE48 +#define IXGBE_FDIRSCTPM 0x0EE78 +#define IXGBE_FDIRIP6M 0x0EE74 +#define IXGBE_FDIRM 0x0EE70 + +/* Flow Director Stats registers */ +#define IXGBE_FDIRFREE 0x0EE38 +#define IXGBE_FDIRLEN 0x0EE4C +#define IXGBE_FDIRUSTAT 0x0EE50 +#define IXGBE_FDIRFSTAT 0x0EE54 +#define IXGBE_FDIRMATCH 0x0EE58 +#define IXGBE_FDIRMISS 0x0EE5C + +/* Flow Director Programming registers */ +#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */ +#define IXGBE_FDIRIPSA 0x0EE18 +#define IXGBE_FDIRIPDA 0x0EE1C +#define IXGBE_FDIRPORT 0x0EE20 +#define IXGBE_FDIRVLAN 0x0EE24 +#define IXGBE_FDIRHASH 0x0EE28 +#define IXGBE_FDIRCMD 0x0EE2C + +/* Transmit DMA registers */ +#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of them (0-31)*/ +#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) +#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) +#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40)) +#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40)) +#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40)) +#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) +#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) +#define IXGBE_DTXCTL 0x07E00 + +#define IXGBE_DMATXCTL 0x04A80 +#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */ +#define IXGBE_PFDTXGSWC 0x08220 +#define IXGBE_DTXMXSZRQ 0x08100 +#define IXGBE_DTXTCPFLGL 0x04A88 +#define IXGBE_DTXTCPFLGH 0x04A8C +#define IXGBE_LBDRPEN 0x0CA00 +#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */ + +#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */ +#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ +#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ +#define IXGBE_DMATXCTL_MDP_EN 0x20 /* Bit 5 */ +#define IXGBE_DMATXCTL_MBINTEN 0x40 /* Bit 6 */ +#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ + +#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ + +/* Anti-spoofing defines */ +#define IXGBE_SPOOF_MACAS_MASK 0xFF +#define IXGBE_SPOOF_VLANAS_MASK 0xFF00 +#define IXGBE_SPOOF_VLANAS_SHIFT 8 +#define IXGBE_SPOOF_ETHERTYPEAS 0xFF000000 +#define IXGBE_SPOOF_ETHERTYPEAS_SHIFT 16 +#define IXGBE_PFVFSPOOF_REG_COUNT 8 +/* 16 of these (0-15) */ +#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) +/* Tx DCA Control register : 128 of these (0-127) */ +#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40)) +#define IXGBE_TIPG 0x0CB00 +#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_MNGTXMAP 0x0CD10 +#define IXGBE_TIPG_FIBER_DEFAULT 3 +#define IXGBE_TXPBSIZE_SHIFT 10 + +/* Wake up registers */ +#define IXGBE_WUC 0x05800 +#define IXGBE_WUFC 0x05808 +#define IXGBE_WUS 0x05810 +#define IXGBE_IPAV 0x05838 +#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */ +#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */ + +#define IXGBE_WUPL 0x05900 +#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ +#define IXGBE_PROXYS 0x05F60 /* Proxying Status Register */ +#define IXGBE_PROXYFC 0x05F64 /* Proxying Filter Control Register */ +#define IXGBE_VXLANCTRL 0x0000507C /* Rx filter VXLAN UDPPORT Register */ + +#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */ +/* Ext Flexible Host Filter Table */ +#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) +#define IXGBE_FHFT_EXT_X550(_n) (0x09600 + ((_n) * 0x100)) + +/* Four Flexible Filters are supported */ +#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4 + +/* Six 
Flexible Filters are supported */ +#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_6 6 +/* Eight Flexible Filters are supported */ +#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_8 8 +#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128 +#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */ +#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */ + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */ +#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */ + +/* Wake Up Filter Control */ +#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */ + +#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */ +#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */ +#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */ +#define IXGBE_WUFC_FLX_FILTERS_6 0x003F0000 /* Mask for 6 flex filters */ +#define IXGBE_WUFC_FLX_FILTERS_8 0x00FF0000 /* Mask for 8 flex filters */ +#define IXGBE_WUFC_FW_RST_WK 0x80000000 /* Ena wake on FW reset assertion */ +/* Mask for Ext. 
flex filters */ +#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 +#define IXGBE_WUFC_ALL_FILTERS 0x000F00FF /* Mask all 4 flex filters */ +#define IXGBE_WUFC_ALL_FILTERS_6 0x003F00FF /* Mask all 6 flex filters */ +#define IXGBE_WUFC_ALL_FILTERS_8 0x00FF00FF /* Mask all 8 flex filters */ +#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ + +/* Wake Up Status */ +#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC +#define IXGBE_WUS_MAG IXGBE_WUFC_MAG +#define IXGBE_WUS_EX IXGBE_WUFC_EX +#define IXGBE_WUS_MC IXGBE_WUFC_MC +#define IXGBE_WUS_BC IXGBE_WUFC_BC +#define IXGBE_WUS_ARP IXGBE_WUFC_ARP +#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4 +#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6 +#define IXGBE_WUS_MNG IXGBE_WUFC_MNG +#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0 +#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1 +#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2 +#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3 +#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4 +#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5 +#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS +#define IXGBE_WUS_FW_RST_WK IXGBE_WUFC_FW_RST_WK +/* Proxy Status */ +#define IXGBE_PROXYS_EX 0x00000004 /* Exact packet received */ +#define IXGBE_PROXYS_ARP_DIR 0x00000020 /* ARP w/filter match received */ +#define IXGBE_PROXYS_NS 0x00000200 /* IPV6 NS received */ +#define IXGBE_PROXYS_NS_DIR 0x00000400 /* IPV6 NS w/DA match received */ +#define IXGBE_PROXYS_ARP 0x00000800 /* ARP request packet received */ +#define IXGBE_PROXYS_MLD 0x00001000 /* IPv6 MLD packet received */ + +/* Proxying Filter Control */ +#define IXGBE_PROXYFC_ENABLE 0x00000001 /* Port Proxying Enable */ +#define IXGBE_PROXYFC_EX 0x00000004 /* Directed Exact Proxy Enable */ +#define IXGBE_PROXYFC_ARP_DIR 0x00000020 /* Directed ARP Proxy Enable */ +#define IXGBE_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */ +#define IXGBE_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Enable */ +#define IXGBE_PROXYFC_MLD 0x00000800 /* IPv6 MLD Proxy Enable */ +#define IXGBE_PROXYFC_NO_TCO 0x00008000 /* Ignore TCO packets */ + +#define IXGBE_WUPL_LENGTH_MASK 0xFFFF + +/* DCB registers */ +#define IXGBE_DCB_MAX_TRAFFIC_CLASS 8 +#define IXGBE_RMCS 0x03D00 +#define IXGBE_DPMCS 0x07F40 +#define IXGBE_PDPMCS 0x0CD00 +#define IXGBE_RUPPBMR 0x050A0 +#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */ +#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */ +#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ + +/* Power Management */ +/* DMA Coalescing configuration */ +struct ixgbe_dmac_config { + u16 watchdog_timer; /* usec units */ + bool fcoe_en; + u32 link_speed; + u8 fcoe_tc; + u8 num_tcs; +}; + +/* + * DMA Coalescing threshold Rx PB TC[n] value in Kilobyte by link speed. 
+ * DMACRXT = 10Gbps = 10,000 bits / usec = 1250 bytes / usec 70 * 1250 == + * 87500 bytes [85KB] + */ +#define IXGBE_DMACRXT_10G 0x55 +#define IXGBE_DMACRXT_1G 0x09 +#define IXGBE_DMACRXT_100M 0x01 + +/* DMA Coalescing registers */ +#define IXGBE_DMCMNGTH 0x15F20 /* Management Threshold */ +#define IXGBE_DMACR 0x02400 /* Control register */ +#define IXGBE_DMCTH(_i) (0x03300 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_DMCTLX 0x02404 /* Time to Lx request */ +/* DMA Coalescing register fields */ +#define IXGBE_DMCMNGTH_DMCMNGTH_MASK 0x000FFFF0 /* Mng Threshold mask */ +#define IXGBE_DMCMNGTH_DMCMNGTH_SHIFT 4 /* Management Threshold shift */ +#define IXGBE_DMACR_DMACWT_MASK 0x0000FFFF /* Watchdog Timer mask */ +#define IXGBE_DMACR_HIGH_PRI_TC_MASK 0x00FF0000 +#define IXGBE_DMACR_HIGH_PRI_TC_SHIFT 16 +#define IXGBE_DMACR_EN_MNG_IND 0x10000000 /* Enable Mng Indications */ +#define IXGBE_DMACR_LX_COAL_IND 0x40000000 /* Lx Coalescing indicate */ +#define IXGBE_DMACR_DMAC_EN 0x80000000 /* DMA Coalescing Enable */ +#define IXGBE_DMCTH_DMACRXT_MASK 0x000001FF /* Receive Threshold mask */ +#define IXGBE_DMCTLX_TTLX_MASK 0x00000FFF /* Time to Lx request mask */ + +/* EEE registers */ +#define IXGBE_EEER 0x043A0 /* EEE register */ +#define IXGBE_EEE_STAT 0x04398 /* EEE Status */ +#define IXGBE_EEE_SU 0x04380 /* EEE Set up */ +#define IXGBE_EEE_SU_TEEE_DLY_SHIFT 26 +#define IXGBE_TLPIC 0x041F4 /* EEE Tx LPI count */ +#define IXGBE_RLPIC 0x041F8 /* EEE Rx LPI count */ + +/* EEE register fields */ +#define IXGBE_EEER_TX_LPI_EN 0x00010000 /* Enable EEE LPI TX path */ +#define IXGBE_EEER_RX_LPI_EN 0x00020000 /* Enable EEE LPI RX path */ +#define IXGBE_EEE_STAT_NEG 0x20000000 /* EEE support neg on link */ +#define IXGBE_EEE_RX_LPI_STATUS 0x40000000 /* RX Link in LPI status */ +#define IXGBE_EEE_TX_LPI_STATUS 0x80000000 /* TX Link in LPI status */ + + + +/* Security Control Registers */ +#define IXGBE_SECTXCTRL 0x08800 +#define IXGBE_SECTXSTAT 0x08804 +#define IXGBE_SECTXBUFFAF 0x08808 +#define IXGBE_SECTXMINIFG 0x08810 +#define IXGBE_SECRXCTRL 0x08D00 +#define IXGBE_SECRXSTAT 0x08D04 + +/* Security Bit Fields and Masks */ +#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001 +#define IXGBE_SECTXCTRL_TX_DIS 0x00000002 +#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004 + +#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001 +#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002 + +#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001 +#define IXGBE_SECRXCTRL_RX_DIS 0x00000002 + +#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001 +#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002 + +/* LinkSec (MacSec) Registers */ +#define IXGBE_LSECTXCAP 0x08A00 +#define IXGBE_LSECRXCAP 0x08F00 +#define IXGBE_LSECTXCTRL 0x08A04 +#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */ +#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */ +#define IXGBE_LSECTXSA 0x08A10 +#define IXGBE_LSECTXPN0 0x08A14 +#define IXGBE_LSECTXPN1 0x08A18 +#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */ +#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */ +#define IXGBE_LSECRXCTRL 0x08F04 +#define IXGBE_LSECRXSCL 0x08F08 +#define IXGBE_LSECRXSCH 0x08F0C +#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */ +#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */ +#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m)))) +#define IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */ +#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */ +#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */ +#define 
IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */ +#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */ +#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */ +#define IXGBE_LSECRXOCTD 0x08F44 /* InOctetsDecrypted */ +#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */ +#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */ +#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */ +#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */ +#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */ +#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */ +#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */ +#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */ +#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */ +#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */ +#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */ +#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */ + +/* LinkSec (MacSec) Bit Fields and Masks */ +#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000 +#define IXGBE_LSECTXCAP_SUM_SHIFT 16 +#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000 +#define IXGBE_LSECRXCAP_SUM_SHIFT 16 + +#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003 +#define IXGBE_LSECTXCTRL_DISABLE 0x0 +#define IXGBE_LSECTXCTRL_AUTH 0x1 +#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2 +#define IXGBE_LSECTXCTRL_AISCI 0x00000020 +#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 +#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8 + +#define IXGBE_LSECRXCTRL_EN_MASK 0x0000000C +#define IXGBE_LSECRXCTRL_EN_SHIFT 2 +#define IXGBE_LSECRXCTRL_DISABLE 0x0 +#define IXGBE_LSECRXCTRL_CHECK 0x1 +#define IXGBE_LSECRXCTRL_STRICT 0x2 +#define IXGBE_LSECRXCTRL_DROP 0x3 +#define IXGBE_LSECRXCTRL_PLSH 0x00000040 +#define IXGBE_LSECRXCTRL_RP 0x00000080 +#define IXGBE_LSECRXCTRL_RSV_MASK 0xFFFFFF33 + +/* IpSec Registers */ +#define IXGBE_IPSTXIDX 0x08900 +#define IXGBE_IPSTXSALT 0x08904 +#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXIDX 0x08E00 +#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXSPI 0x08E14 +#define IXGBE_IPSRXIPIDX 0x08E18 +#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXSALT 0x08E2C +#define IXGBE_IPSRXMOD 0x08E30 + +#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4 + +/* DCB registers */ +#define IXGBE_RTRPCS 0x02430 +#define IXGBE_RTTDCS 0x04900 +#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ +#define IXGBE_RTTPCS 0x0CD00 +#define IXGBE_RTRUP2TC 0x03020 +#define IXGBE_RTTUP2TC 0x0C800 +#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDQSEL 0x04904 +#define IXGBE_RTTDT1C 0x04908 +#define IXGBE_RTTDT1S 0x0490C +#define IXGBE_RTTDTECC 0x04990 +#define IXGBE_RTTDTECC_NO_BCN 0x00000100 + +#define IXGBE_RTTBCNRC 0x04984 +#define IXGBE_RTTBCNRC_RS_ENA 0x80000000 +#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14 +#define IXGBE_RTTBCNRC_RF_INT_MASK \ + (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT) +#define 
IXGBE_RTTBCNRM 0x04980 + +/* BCN (for DCB) Registers */ +#define IXGBE_RTTBCNRS 0x04988 +#define IXGBE_RTTBCNCR 0x08B00 +#define IXGBE_RTTBCNACH 0x08B04 +#define IXGBE_RTTBCNACL 0x08B08 +#define IXGBE_RTTBCNTG 0x04A90 +#define IXGBE_RTTBCNIDX 0x08B0C +#define IXGBE_RTTBCNCP 0x08B10 +#define IXGBE_RTFRTIMER 0x08B14 +#define IXGBE_RTTBCNRTT 0x05150 +#define IXGBE_RTTBCNRD 0x0498C + + +/* FCoE DMA Context Registers */ +/* FCoE Direct DMA Context */ +#define IXGBE_FCDDC(_i, _j) (0x20000 + ((_i) * 0x4) + ((_j) * 0x10)) +#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ +#define IXGBE_FCPTRH 0x02414 /* FC User Desc. PTR High */ +#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */ +#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */ +#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */ +#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */ +#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */ +#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */ +#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */ +#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3 +#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8 +#define IXGBE_FCBUFF_OFFSET_SHIFT 16 +#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */ +#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */ +#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */ +#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */ +#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16 +/* FCoE SOF/EOF */ +#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */ +#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */ +#define IXGBE_REOFF 0x05158 /* Rx FC EOF */ +#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */ +/* FCoE Filter Context Registers */ +#define IXGBE_FCD_ID 0x05114 /* FCoE D_ID */ +#define IXGBE_FCSMAC 0x0510C /* FCoE Source MAC */ +#define IXGBE_FCFLTRW_SMAC_HIGH_SHIFT 16 +/* FCoE Direct Filter Context */ +#define IXGBE_FCDFC(_i, _j) (0x28000 + ((_i) * 0x4) + ((_j) * 0x10)) +#define IXGBE_FCDFCD(_i) (0x30000 + ((_i) * 0x4)) +#define IXGBE_FCFLT 0x05108 /* FC FLT Context */ +#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */ +#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */ +#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */ +#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */ +#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */ +#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */ +#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */ +#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */ +#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */ +/* FCoE Receive Control */ +#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */ +#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */ +#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */ +#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */ +#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */ +#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */ +#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq.
Header */ +#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */ +#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */ +#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */ +#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8 +/* FCoE Redirection */ +#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */ +#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */ +#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */ +#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */ +#define IXGBE_FCRETASEL_ENA 0x2 /* FCoE FCRETASEL bit */ +#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */ +#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */ +#define IXGBE_FCRETA_SIZE_X550 32 /* Max entries in FCRETA */ +/* Higher 7 bits for the queue index */ +#define IXGBE_FCRETA_ENTRY_HIGH_MASK 0x007F0000 +#define IXGBE_FCRETA_ENTRY_HIGH_SHIFT 16 + +/* Stats registers */ +#define IXGBE_CRCERRS 0x04000 +#define IXGBE_ILLERRC 0x04004 +#define IXGBE_ERRBC 0x04008 +#define IXGBE_MSPDC 0x04010 +#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/ +#define IXGBE_MLFC 0x04034 +#define IXGBE_MRFC 0x04038 +#define IXGBE_RLEC 0x04040 +#define IXGBE_LXONTXC 0x03F60 +#define IXGBE_LXONRXC 0x0CF60 +#define IXGBE_LXOFFTXC 0x03F68 +#define IXGBE_LXOFFRXC 0x0CF68 +#define IXGBE_LXONRXCNT 0x041A4 +#define IXGBE_LXOFFRXCNT 0x041A8 +#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/ +#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/ +#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/ +#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/ +#define IXGBE_PRC64 0x0405C +#define IXGBE_PRC127 0x04060 +#define IXGBE_PRC255 0x04064 +#define IXGBE_PRC511 0x04068 +#define IXGBE_PRC1023 0x0406C +#define IXGBE_PRC1522 0x04070 +#define IXGBE_GPRC 0x04074 +#define IXGBE_BPRC 0x04078 +#define IXGBE_MPRC 0x0407C +#define IXGBE_GPTC 0x04080 +#define IXGBE_GORCL 0x04088 +#define IXGBE_GORCH 0x0408C +#define IXGBE_GOTCL 0x04090 +#define IXGBE_GOTCH 0x04094 +#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/ +#define IXGBE_RUC 0x040A4 +#define IXGBE_RFC 0x040A8 +#define IXGBE_ROC 0x040AC +#define IXGBE_RJC 0x040B0 +#define IXGBE_MNGPRC 0x040B4 +#define IXGBE_MNGPDC 0x040B8 +#define IXGBE_MNGPTC 0x0CF90 +#define IXGBE_TORL 0x040C0 +#define IXGBE_TORH 0x040C4 +#define IXGBE_TPR 0x040D0 +#define IXGBE_TPT 0x040D4 +#define IXGBE_PTC64 0x040D8 +#define IXGBE_PTC127 0x040DC +#define IXGBE_PTC255 0x040E0 +#define IXGBE_PTC511 0x040E4 +#define IXGBE_PTC1023 0x040E8 +#define IXGBE_PTC1522 0x040EC +#define IXGBE_MPTC 0x040F0 +#define IXGBE_BPTC 0x040F4 +#define IXGBE_XEC 0x04120 +#define IXGBE_SSVPC 0x08780 + +#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) +#define IXGBE_TQSMR(_i) (((_i) <= 7) ? 
(0x07300 + ((_i) * 4)) : \ + (0x08600 + ((_i) * 4))) +#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4)) + +#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */ +#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */ +#define IXGBE_FCCRC 0x05118 /* Num of Good Eth CRC w/ Bad FC CRC */ +#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */ +#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */ +#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */ +#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ +#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ +#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ +#define IXGBE_FCCRC_CNT_MASK 0x0000FFFF /* CRC_CNT: bit 0 - 15 */ +#define IXGBE_FCLAST_CNT_MASK 0x0000FFFF /* Last_CNT: bit 0 - 15 */ +#define IXGBE_O2BGPTC 0x041C4 +#define IXGBE_O2BSPC 0x087B0 +#define IXGBE_B2OSPC 0x041C0 +#define IXGBE_B2OGPRC 0x02F90 +#define IXGBE_BUPRC 0x04180 +#define IXGBE_BMPRC 0x04184 +#define IXGBE_BBPRC 0x04188 +#define IXGBE_BUPTC 0x0418C +#define IXGBE_BMPTC 0x04190 +#define IXGBE_BBPTC 0x04194 +#define IXGBE_BCRCERRS 0x04198 +#define IXGBE_BXONRXC 0x0419C +#define IXGBE_BXOFFRXC 0x041E0 +#define IXGBE_BXONTXC 0x041E4 +#define IXGBE_BXOFFTXC 0x041E8 + +/* Management */ +#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MANC 0x05820 +#define IXGBE_MFVAL 0x05824 +#define IXGBE_MANC2H 0x05860 +#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MIPAF 0x058B0 +#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */ +#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_LSWFW 0x15014 +#define IXGBE_BMCIP(_i) (0x05050 + ((_i) * 4)) /* 0x5050-0x505C */ +#define IXGBE_BMCIPVAL 0x05060 +#define IXGBE_BMCIP_IPADDR_TYPE 0x00000001 +#define IXGBE_BMCIP_IPADDR_VALID 0x00000002 + +/* Management Bit Fields and Masks */ +#define IXGBE_MANC_MPROXYE 0x40000000 /* Management Proxy Enable */ +#define IXGBE_MANC_RCV_TCO_EN 0x00020000 /* Rcv TCO packet enable */ +#define IXGBE_MANC_EN_BMC2OS 0x10000000 /* Ena BMC2OS and OS2BMC traffic */ +#define IXGBE_MANC_EN_BMC2OS_SHIFT 28 + +/* Firmware Semaphore Register */ +#define IXGBE_FWSM_MODE_MASK 0xE +#define IXGBE_FWSM_TS_ENABLED 0x1 +#define IXGBE_FWSM_FW_MODE_PT 0x4 + +/* ARC Subsystem registers */ +#define IXGBE_HICR 0x15F00 +#define IXGBE_FWSTS 0x15F0C +#define IXGBE_HSMC0R 0x15F04 +#define IXGBE_HSMC1R 0x15F08 +#define IXGBE_SWSR 0x15F10 +#define IXGBE_HFDR 0x15FE8 +#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */ + +#define IXGBE_HICR_EN 0x01 /* Enable bit - RO */ +/* Driver sets this bit when done to put command in RAM */ +#define IXGBE_HICR_C 0x02 +#define IXGBE_HICR_SV 0x04 /* 
Status Validity */ +#define IXGBE_HICR_FW_RESET_ENABLE 0x40 +#define IXGBE_HICR_FW_RESET 0x80 + +/* PCI-E registers */ +#define IXGBE_GCR 0x11000 +#define IXGBE_GTV 0x11004 +#define IXGBE_FUNCTAG 0x11008 +#define IXGBE_GLT 0x1100C +#define IXGBE_PCIEPIPEADR 0x11004 +#define IXGBE_PCIEPIPEDAT 0x11008 +#define IXGBE_GSCL_1 0x11010 +#define IXGBE_GSCL_2 0x11014 +#define IXGBE_GSCL_3 0x11018 +#define IXGBE_GSCL_4 0x1101C +#define IXGBE_GSCN_0 0x11020 +#define IXGBE_GSCN_1 0x11024 +#define IXGBE_GSCN_2 0x11028 +#define IXGBE_GSCN_3 0x1102C +#define IXGBE_FACTPS 0x10150 +#define IXGBE_FACTPS_X540 IXGBE_FACTPS +#define IXGBE_FACTPS_X550 IXGBE_FACTPS +#define IXGBE_FACTPS_X550EM_x IXGBE_FACTPS +#define IXGBE_FACTPS_X550EM_a 0x15FEC +#define IXGBE_FACTPS_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FACTPS) + +#define IXGBE_PCIEANACTL 0x11040 +#define IXGBE_SWSM 0x10140 +#define IXGBE_SWSM_X540 IXGBE_SWSM +#define IXGBE_SWSM_X550 IXGBE_SWSM +#define IXGBE_SWSM_X550EM_x IXGBE_SWSM +#define IXGBE_SWSM_X550EM_a 0x15F70 +#define IXGBE_SWSM_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SWSM) + +#define IXGBE_FWSM 0x10148 +#define IXGBE_FWSM_X540 IXGBE_FWSM +#define IXGBE_FWSM_X550 IXGBE_FWSM +#define IXGBE_FWSM_X550EM_x IXGBE_FWSM +#define IXGBE_FWSM_X550EM_a 0x15F74 +#define IXGBE_FWSM_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FWSM) + +#define IXGBE_SWFW_SYNC IXGBE_GSSR +#define IXGBE_SWFW_SYNC_X540 IXGBE_SWFW_SYNC +#define IXGBE_SWFW_SYNC_X550 IXGBE_SWFW_SYNC +#define IXGBE_SWFW_SYNC_X550EM_x IXGBE_SWFW_SYNC +#define IXGBE_SWFW_SYNC_X550EM_a 0x15F78 +#define IXGBE_SWFW_SYNC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SWFW_SYNC) + +#define IXGBE_GSSR 0x10160 +#define IXGBE_MREVID 0x11064 +#define IXGBE_DCA_ID 0x11070 +#define IXGBE_DCA_CTRL 0x11074 + +/* PCI-E registers 82599-Specific */ +#define IXGBE_GCR_EXT 0x11050 +#define IXGBE_GSCL_5_82599 0x11030 +#define IXGBE_GSCL_6_82599 0x11034 +#define IXGBE_GSCL_7_82599 0x11038 +#define IXGBE_GSCL_8_82599 0x1103C +#define IXGBE_PHYADR_82599 0x11040 +#define IXGBE_PHYDAT_82599 0x11044 +#define IXGBE_PHYCTL_82599 0x11048 +#define IXGBE_PBACLR_82599 0x11068 +#define IXGBE_CIAA 0x11088 +#define IXGBE_CIAD 0x1108C +#define IXGBE_CIAA_82599 IXGBE_CIAA +#define IXGBE_CIAD_82599 IXGBE_CIAD +#define IXGBE_CIAA_X540 IXGBE_CIAA +#define IXGBE_CIAD_X540 IXGBE_CIAD +#define IXGBE_CIAA_X550 0x11508 +#define IXGBE_CIAD_X550 0x11510 +#define IXGBE_CIAA_X550EM_x IXGBE_CIAA_X550 +#define IXGBE_CIAD_X550EM_x IXGBE_CIAD_X550 +#define IXGBE_CIAA_X550EM_a IXGBE_CIAA_X550 +#define IXGBE_CIAD_X550EM_a IXGBE_CIAD_X550 +#define IXGBE_CIAA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAA) +#define IXGBE_CIAD_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAD) +#define IXGBE_PICAUSE 0x110B0 +#define IXGBE_PIENA 0x110B8 +#define IXGBE_CDQ_MBR_82599 0x110B4 +#define IXGBE_PCIESPARE 0x110BC +#define IXGBE_MISC_REG_82599 0x110F0 +#define IXGBE_ECC_CTRL_0_82599 0x11100 +#define IXGBE_ECC_CTRL_1_82599 0x11104 +#define IXGBE_ECC_STATUS_82599 0x110E0 +#define IXGBE_BAR_CTRL_82599 0x110F4 + +/* PCI Express Control */ +#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000 +#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define IXGBE_GCR_CAP_VER2 0x00040000 + +#define IXGBE_GCR_EXT_MSIX_EN 0x80000000 +#define IXGBE_GCR_EXT_BUFFERS_CLEAR 0x40000000 +#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001 +#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002 +#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003 +#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \ + IXGBE_GCR_EXT_VT_MODE_64) +#define IXGBE_GCR_EXT_VT_MODE_MASK 0x00000003 +/* Time Sync Registers 
*/ +#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ +#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ +#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */ +#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */ +#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */ +#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */ +#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */ +#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */ +#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */ +#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */ +#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */ +#define IXGBE_SYSTIMR 0x08C58 /* System time register Residue - RO */ +#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */ +#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */ +#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */ +#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */ +#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */ +#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */ +#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */ +#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */ +#define IXGBE_CLKTIML 0x08C34 /* Clock Out Time Register Low - RW */ +#define IXGBE_CLKTIMH 0x08C38 /* Clock Out Time Register High - RW */ +#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */ +#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */ +#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */ +#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */ +#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */ +#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */ +#define IXGBE_TSIM 0x08C68 /* TimeSync Interrupt Mask Register - RW */ +#define IXGBE_TSICR 0x08C60 /* TimeSync Interrupt Cause Register - WO */ +#define IXGBE_TSSDP 0x0003C /* TimeSync SDP Configuration Register - RW */ + +/* Diagnostic Registers */ +#define IXGBE_RDSTATCTL 0x02C20 +#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ +#define IXGBE_RDHMPN 0x02F08 +#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4)) +#define IXGBE_RDPROBE 0x02F20 +#define IXGBE_RDMAM 0x02F30 +#define IXGBE_RDMAD 0x02F34 +#define IXGBE_TDHMPN 0x07F08 +#define IXGBE_TDHMPN2 0x082FC +#define IXGBE_TXDESCIC 0x082CC +#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4)) +#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4)) +#define IXGBE_TDPROBE 0x07F20 +#define IXGBE_TXBUFCTRL 0x0C600 +#define IXGBE_TXBUFDATA0 0x0C610 +#define IXGBE_TXBUFDATA1 0x0C614 +#define IXGBE_TXBUFDATA2 0x0C618 +#define IXGBE_TXBUFDATA3 0x0C61C +#define IXGBE_RXBUFCTRL 0x03600 +#define IXGBE_RXBUFDATA0 0x03610 +#define IXGBE_RXBUFDATA1 0x03614 +#define IXGBE_RXBUFDATA2 0x03618 +#define IXGBE_RXBUFDATA3 0x0361C +#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_RFVAL 0x050A4 +#define IXGBE_MDFTC1 0x042B8 +#define IXGBE_MDFTC2 0x042C0 +#define IXGBE_MDFTFIFO1 0x042C4 +#define IXGBE_MDFTFIFO2 0x042C8 +#define IXGBE_MDFTS 0x042CC +#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/ +#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/ +#define IXGBE_RXDATARDPTR(_i) 
(0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/ +#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/ +#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/ +#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/ +#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/ +#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/ +#define IXGBE_PCIEECCCTL 0x1106C +#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/ +#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/ +#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/ +#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/ +#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/ +#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/ +#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/ +#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/ +#define IXGBE_PCIEECCCTL0 0x11100 +#define IXGBE_PCIEECCCTL1 0x11104 +#define IXGBE_RXDBUECC 0x03F70 +#define IXGBE_TXDBUECC 0x0CF70 +#define IXGBE_RXDBUEST 0x03F74 +#define IXGBE_TXDBUEST 0x0CF74 +#define IXGBE_PBTXECC 0x0C300 +#define IXGBE_PBRXECC 0x03300 +#define IXGBE_GHECCR 0x110B0 + +/* MAC Registers */ +#define IXGBE_PCS1GCFIG 0x04200 +#define IXGBE_PCS1GLCTL 0x04208 +#define IXGBE_PCS1GLSTA 0x0420C +#define IXGBE_PCS1GDBG0 0x04210 +#define IXGBE_PCS1GDBG1 0x04214 +#define IXGBE_PCS1GANA 0x04218 +#define IXGBE_PCS1GANLP 0x0421C +#define IXGBE_PCS1GANNP 0x04220 +#define IXGBE_PCS1GANLPNP 0x04224 +#define IXGBE_HLREG0 0x04240 +#define IXGBE_HLREG1 0x04244 +#define IXGBE_PAP 0x04248 +#define IXGBE_MACA 0x0424C +#define IXGBE_APAE 0x04250 +#define IXGBE_ARD 0x04254 +#define IXGBE_AIS 0x04258 +#define IXGBE_MSCA 0x0425C +#define IXGBE_MSRWD 0x04260 +#define IXGBE_MLADD 0x04264 +#define IXGBE_MHADD 0x04268 +#define IXGBE_MAXFRS 0x04268 +#define IXGBE_TREG 0x0426C +#define IXGBE_PCSS1 0x04288 +#define IXGBE_PCSS2 0x0428C +#define IXGBE_XPCSS 0x04290 +#define IXGBE_MFLCN 0x04294 +#define IXGBE_SERDESC 0x04298 +#define IXGBE_MAC_SGMII_BUSY 0x04298 +#define IXGBE_MACS 0x0429C +#define IXGBE_AUTOC 0x042A0 +#define IXGBE_LINKS 0x042A4 +#define IXGBE_LINKS2 0x04324 +#define IXGBE_AUTOC2 0x042A8 +#define IXGBE_AUTOC3 0x042AC +#define IXGBE_ANLP1 0x042B0 +#define IXGBE_ANLP2 0x042B4 +#define IXGBE_MACC 0x04330 +#define IXGBE_ATLASCTL 0x04800 +#define IXGBE_MMNGC 0x042D0 +#define IXGBE_ANLPNP1 0x042D4 +#define IXGBE_ANLPNP2 0x042D8 +#define IXGBE_KRPCSFC 0x042E0 +#define IXGBE_KRPCSS 0x042E4 +#define IXGBE_FECS1 0x042E8 +#define IXGBE_FECS2 0x042EC +#define IXGBE_SMADARCTL 0x14F10 +#define IXGBE_MPVC 0x04318 +#define IXGBE_SGMIIC 0x04314 + +/* Statistics Registers */ +#define IXGBE_RXNFGPC 0x041B0 +#define IXGBE_RXNFGBCL 0x041B4 +#define IXGBE_RXNFGBCH 0x041B8 +#define IXGBE_RXDGPC 0x02F50 +#define IXGBE_RXDGBCL 0x02F54 +#define IXGBE_RXDGBCH 0x02F58 +#define IXGBE_RXDDGPC 0x02F5C +#define IXGBE_RXDDGBCL 0x02F60 +#define IXGBE_RXDDGBCH 0x02F64 +#define IXGBE_RXLPBKGPC 0x02F68 +#define IXGBE_RXLPBKGBCL 0x02F6C +#define IXGBE_RXLPBKGBCH 0x02F70 +#define IXGBE_RXDLPBKGPC 0x02F74 +#define IXGBE_RXDLPBKGBCL 0x02F78 +#define IXGBE_RXDLPBKGBCH 0x02F7C +#define IXGBE_TXDGPC 0x087A0 +#define IXGBE_TXDGBCL 0x087A4 +#define IXGBE_TXDGBCH 0x087A8 + +#define IXGBE_RXDSTATCTRL 0x02F40 + +/* Copper Pond 2 link timeout */ +#define 
IXGBE_VALIDATE_LINK_READY_TIMEOUT 50 + +/* Omer CORECTL */ +#define IXGBE_CORECTL 0x014F00 +/* BARCTRL */ +#define IXGBE_BARCTRL 0x110F4 +#define IXGBE_BARCTRL_FLSIZE 0x0700 +#define IXGBE_BARCTRL_FLSIZE_SHIFT 8 +#define IXGBE_BARCTRL_CSRSIZE 0x2000 + +/* RSCCTL Bit Masks */ +#define IXGBE_RSCCTL_RSCEN 0x01 +#define IXGBE_RSCCTL_MAXDESC_1 0x00 +#define IXGBE_RSCCTL_MAXDESC_4 0x04 +#define IXGBE_RSCCTL_MAXDESC_8 0x08 +#define IXGBE_RSCCTL_MAXDESC_16 0x0C +#define IXGBE_RSCCTL_TS_DIS 0x02 + +/* RSCDBU Bit Masks */ +#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F +#define IXGBE_RSCDBU_RSCACKDIS 0x00000080 + +/* RDRXCTL Bit Masks */ +#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min THLD Size */ +#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */ +#define IXGBE_RDRXCTL_PSP 0x00000004 /* Pad Small Packet */ +#define IXGBE_RDRXCTL_MVMEN 0x00000020 +#define IXGBE_RDRXCTL_RSC_PUSH_DIS 0x00000020 +#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ +#define IXGBE_RDRXCTL_RSC_PUSH 0x00000080 +#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */ +#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */ +#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI*/ +#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC ena */ +#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC ena */ +#define IXGBE_RDRXCTL_MBINTEN 0x10000000 +#define IXGBE_RDRXCTL_MDP_EN 0x20000000 + +/* RQTC Bit Masks and Shifts */ +#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4) +#define IXGBE_RQTC_TC0_MASK (0x7 << 0) +#define IXGBE_RQTC_TC1_MASK (0x7 << 4) +#define IXGBE_RQTC_TC2_MASK (0x7 << 8) +#define IXGBE_RQTC_TC3_MASK (0x7 << 12) +#define IXGBE_RQTC_TC4_MASK (0x7 << 16) +#define IXGBE_RQTC_TC5_MASK (0x7 << 20) +#define IXGBE_RQTC_TC6_MASK (0x7 << 24) +#define IXGBE_RQTC_TC7_MASK (0x7 << 28) + +/* PSRTYPE.RQPL Bit masks and shift */ +#define IXGBE_PSRTYPE_RQPL_MASK 0x7 +#define IXGBE_PSRTYPE_RQPL_SHIFT 29 + +/* CTRL Bit Masks */ +#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */ +#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. 
*/ +#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ +#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST) + +/* FACTPS */ +#define IXGBE_FACTPS_MNGCG 0x20000000 /* Manageability Clock Gated */ +#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */ + +/* MHADD Bit Masks */ +#define IXGBE_MHADD_MFS_MASK 0xFFFF0000 +#define IXGBE_MHADD_MFS_SHIFT 16 + +/* Extended Device Control */ +#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */ +#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */ +#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ + +/* Direct Cache Access (DCA) definitions */ +#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ +#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ + +#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ +#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */ +#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */ +#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */ +#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */ +#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */ +#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */ +#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */ +#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */ + +#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */ +#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */ +#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ +#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ +#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ + +/* MSCA Bit Masks */ +#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Addr (new prot) */ +#define IXGBE_MSCA_NP_ADDR_SHIFT 0 +#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Dev Type (new prot) */ +#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old prot) */ +#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */ +#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift */ +#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */ +#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */ +#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */ +#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (wr) */ +#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (rd) */ +#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (rd auto inc) */ +#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */ +#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */ +#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new prot) */ +#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old prot) */ +#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */ +#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress ena */ + +/* MSRWD bit masks */ +#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF +#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0 +#define
IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000 +#define IXGBE_MSRWD_READ_DATA_SHIFT 16 + +/* Atlas registers */ +#define IXGBE_ATLAS_PDN_LPBK 0x24 +#define IXGBE_ATLAS_PDN_10G 0xB +#define IXGBE_ATLAS_PDN_1G 0xC +#define IXGBE_ATLAS_PDN_AN 0xD + +/* Atlas bit masks */ +#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000 +#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10 +#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0 +#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0 +#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0 + +/* Omer bit masks */ +#define IXGBE_CORECTL_WRITE_CMD 0x00010000 + +/* Device Type definitions for new protocol MDIO commands */ +#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 +#define IXGBE_MDIO_PCS_DEV_TYPE 0x3 +#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 +#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ +#define IXGBE_TWINAX_DEV 1 + +#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ + +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Ctrl Reg */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0-10G, 1-1G */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010 + +#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ +#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM 0xCC00 /* AUTO_NEG Vendor TX Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */ +#define IXGBE_MDIO_AUTO_NEG_VEN_LSC 0x1 /* AUTO_NEG Vendor Tx LSC */ +#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ +#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */ +#define IXGBE_AUTO_NEG_10GBASE_EEE_ADVT 0x8 /* AUTO NEG EEE 10GBaseT Advt */ +#define IXGBE_AUTO_NEG_1000BASE_EEE_ADVT 0x4 /* AUTO NEG EEE 1000BaseT Advt */ +#define IXGBE_AUTO_NEG_100BASE_EEE_ADVT 0x2 /* AUTO NEG EEE 100BaseT Advt */ +#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */ +#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ +#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ +#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ +#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */ +#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */ +#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ +#define IXGBE_MDIO_PHY_SPEED_100M 0x0020 /* 100M capable */ +#define IXGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */ +#define IXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */ +#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */ +#define IXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */ +#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */ +#define IXGBE_AUTO_NEG_LP_STATUS 0xE820 /* AUTO NEG Rx LP Status Reg */ +#define IXGBE_AUTO_NEG_LP_1000BASE_CAP 0x8000 /* AUTO NEG Rx LP 1000BaseT Cap */ +#define IXGBE_AUTO_NEG_LP_10GBASE_CAP 0x0800 /* AUTO NEG Rx LP 10GBaseT Cap */ +#define IXGBE_AUTO_NEG_10GBASET_STAT 0x0021 /* AUTO NEG 10G BaseT Stat */ + +#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */ +#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */ +#define 
IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */ +#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */ +#define IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK 0xFF00 /* int std mask */ +#define IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG 0xFC00 /* chip std int flag */ +#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK 0xFF01 /* int chip-wide mask */ +#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG 0xFC01 /* int chip-wide mask */ +#define IXGBE_MDIO_GLOBAL_ALARM_1 0xCC00 /* Global alarm 1 */ +#define IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT 0x0010 /* device fault */ +#define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL 0x4000 /* high temp failure */ +#define IXGBE_MDIO_GLOBAL_FAULT_MSG 0xC850 /* Global Fault Message */ +#define IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP 0x8007 /* high temp failure */ +#define IXGBE_MDIO_GLOBAL_INT_MASK 0xD400 /* Global int mask */ +#define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000 /* autoneg vendor alarm int enable */ +#define IXGBE_MDIO_GLOBAL_ALARM_1_INT 0x4 /* int in Global alarm 1 */ +#define IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN 0x1 /* vendor alarm int enable */ +#define IXGBE_MDIO_GLOBAL_STD_ALM2_INT 0x200 /* vendor alarm2 int mask */ +#define IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN 0x4000 /* int high temp enable */ +#define IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN 0x0010 /* int dev fault enable */ +#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR 0x0000 /* PMA/PMD Control Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ +#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK 0xD401 /* PHY TX Vendor LASI */ +#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN 0x1 /* PHY TX Vendor LASI enable */ +#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Transmit Dis Reg */ +#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Transmit Dis */ + +#define IXGBE_PCRC8ECL 0x0E810 /* PCR CRC-8 Error Count Lo */ +#define IXGBE_PCRC8ECH 0x0E811 /* PCR CRC-8 Error Count Hi */ +#define IXGBE_PCRC8ECH_MASK 0x1F +#define IXGBE_LDPCECL 0x0E820 /* PCR Uncorrected Error Count Lo */ +#define IXGBE_LDPCECH 0x0E821 /* PCR Uncorrected Error Count Hi */ + +/* MII clause 22/28 definitions */ +#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800 + +#define IXGBE_MDIO_XENPAK_LASI_STATUS 0x9005 /* XENPAK LASI Status register*/ +#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM 0x1 /* Link Status Alarm change */ + +#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */ + +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */ +#define IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK 0x6 /* Speed Mask */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL 0x3 /* 100Mb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF 0x4 /* 1Gb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB 0x4 /* 1Gb/s */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB 0x6 /* 10Gb/s */ + +#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G 
Control Reg */ +#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ +#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ +#define IXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */ +#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/ +#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ +#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ +#define IXGBE_MII_2_5GBASE_T_ADVERTISE 0x0400 +#define IXGBE_MII_5GBASE_T_ADVERTISE 0x0800 +#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */ +#define IXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */ +#define IXGBE_MII_RESTART 0x200 +#define IXGBE_MII_AUTONEG_COMPLETE 0x20 +#define IXGBE_MII_AUTONEG_LINK_UP 0x04 +#define IXGBE_MII_AUTONEG_REG 0x0 + +#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 +#define IXGBE_MAX_PHY_ADDR 32 + +/* PHY IDs*/ +#define TN1010_PHY_ID 0x00A19410 +#define TNX_FW_REV 0xB +#define X540_PHY_ID 0x01540200 +#define X550_PHY_ID1 0x01540220 +#define X550_PHY_ID2 0x01540223 +#define X550_PHY_ID3 0x01540221 +#define X557_PHY_ID 0x01540240 +#define AQ_FW_REV 0x20 +#define QT2022_PHY_ID 0x0043A400 +#define ATH_PHY_ID 0x03429050 + +/* PHY Types */ +#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 + +/* Special PHY Init Routine */ +#define IXGBE_PHY_INIT_OFFSET_NL 0x002B +#define IXGBE_PHY_INIT_END_NL 0xFFFF +#define IXGBE_CONTROL_MASK_NL 0xF000 +#define IXGBE_DATA_MASK_NL 0x0FFF +#define IXGBE_CONTROL_SHIFT_NL 12 +#define IXGBE_DELAY_NL 0 +#define IXGBE_DATA_NL 1 +#define IXGBE_CONTROL_NL 0x000F +#define IXGBE_CONTROL_EOL_NL 0x0FFF +#define IXGBE_CONTROL_SOL_NL 0x0000 + +/* General purpose Interrupt Enable */ +#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */ +#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */ +#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */ +#define IXGBE_SDP0_GPIEN_X540 0x00000002 /* SDP0 on X540 and X550 */ +#define IXGBE_SDP1_GPIEN_X540 0x00000004 /* SDP1 on X540 and X550 */ +#define IXGBE_SDP2_GPIEN_X540 0x00000008 /* SDP2 on X540 and X550 */ +#define IXGBE_SDP0_GPIEN_X550 IXGBE_SDP0_GPIEN_X540 +#define IXGBE_SDP1_GPIEN_X550 IXGBE_SDP1_GPIEN_X540 +#define IXGBE_SDP2_GPIEN_X550 IXGBE_SDP2_GPIEN_X540 +#define IXGBE_SDP0_GPIEN_X550EM_x IXGBE_SDP0_GPIEN_X540 +#define IXGBE_SDP1_GPIEN_X550EM_x IXGBE_SDP1_GPIEN_X540 +#define IXGBE_SDP2_GPIEN_X550EM_x IXGBE_SDP2_GPIEN_X540 +#define IXGBE_SDP0_GPIEN_X550EM_a IXGBE_SDP0_GPIEN_X540 +#define IXGBE_SDP1_GPIEN_X550EM_a IXGBE_SDP1_GPIEN_X540 +#define IXGBE_SDP2_GPIEN_X550EM_a IXGBE_SDP2_GPIEN_X540 +#define IXGBE_SDP0_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP0_GPIEN) +#define IXGBE_SDP1_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP1_GPIEN) +#define IXGBE_SDP2_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP2_GPIEN) + +#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ +#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ +#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ +#define IXGBE_GPIE_EIAME 0x40000000 +#define IXGBE_GPIE_PBA_SUPPORT 0x80000000 +#define IXGBE_GPIE_RSC_DELAY_SHIFT 11 +#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */ +#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */ +#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */ +#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */ + +/* Packet Buffer Initialization */ +#define IXGBE_MAX_PACKET_BUFFERS 8 + +#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */ +#define IXGBE_TXPBSIZE_40KB 0x0000A000 
/* 40KB Packet Buffer */ +#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ +#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ +#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ +#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */ +#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer */ +#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer */ + +#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ +#define IXGBE_MAX_PB 8 + +/* Packet buffer allocation strategies */ +enum { + PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */ +#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL + PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */ +#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED +}; + +/* Transmit Flow Control status */ +#define IXGBE_TFCS_TXOFF 0x00000001 +#define IXGBE_TFCS_TXOFF0 0x00000100 +#define IXGBE_TFCS_TXOFF1 0x00000200 +#define IXGBE_TFCS_TXOFF2 0x00000400 +#define IXGBE_TFCS_TXOFF3 0x00000800 +#define IXGBE_TFCS_TXOFF4 0x00001000 +#define IXGBE_TFCS_TXOFF5 0x00002000 +#define IXGBE_TFCS_TXOFF6 0x00004000 +#define IXGBE_TFCS_TXOFF7 0x00008000 + +/* TCP Timer */ +#define IXGBE_TCPTIMER_KS 0x00000100 +#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200 +#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400 +#define IXGBE_TCPTIMER_LOOP 0x00000800 +#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF + +/* HLREG0 Bit Masks */ +#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */ +#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */ +#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */ +#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */ +#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */ +#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */ +#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */ +#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */ +#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */ +#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */ +#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */ +#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */ +#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */ +#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */ +#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */ + +/* VMD_CTL bitmasks */ +#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001 +#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002 + +/* VT_CTL bitmasks */ +#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */ +#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */ +#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */ +#define IXGBE_VT_CTL_POOL_SHIFT 7 +#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT) + +/* VMOLR bitmasks */ +#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */ +#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */ +#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */ +#define IXGBE_VMOLR_BAM 0x08000000 /* accept broadcast packets */ +#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */ + +/* VFRE bitmask */ +#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF + +#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ + +/* RDHMPN and TDHMPN bitmasks */ +#define IXGBE_RDHMPN_RDICADDR 0x007FF800 +#define IXGBE_RDHMPN_RDICRDREQ 0x00800000 +#define IXGBE_RDHMPN_RDICADDR_SHIFT 11 +#define IXGBE_TDHMPN_TDICADDR 0x003FF800 +#define IXGBE_TDHMPN_TDICRDREQ 0x00800000 +#define IXGBE_TDHMPN_TDICADDR_SHIFT 11 + +#define IXGBE_RDMAM_MEM_SEL_SHIFT 13 +#define 
IXGBE_RDMAM_DWORD_SHIFT 9 +#define IXGBE_RDMAM_DESC_COMP_FIFO 1 +#define IXGBE_RDMAM_DFC_CMD_FIFO 2 +#define IXGBE_RDMAM_RSC_HEADER_ADDR 3 +#define IXGBE_RDMAM_TCN_STATUS_RAM 4 +#define IXGBE_RDMAM_WB_COLL_FIFO 5 +#define IXGBE_RDMAM_QSC_CNT_RAM 6 +#define IXGBE_RDMAM_QSC_FCOE_RAM 7 +#define IXGBE_RDMAM_QSC_QUEUE_CNT 8 +#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA +#define IXGBE_RDMAM_QSC_RSC_RAM 0xB +#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135 +#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4 +#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48 +#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7 +#define IXGBE_RDMAM_RSC_HEADER_ADDR_RANGE 32 +#define IXGBE_RDMAM_RSC_HEADER_ADDR_COUNT 4 +#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256 +#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9 +#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8 +#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4 +#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64 +#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4 +#define IXGBE_RDMAM_QSC_FCOE_RAM_RANGE 512 +#define IXGBE_RDMAM_QSC_FCOE_RAM_COUNT 5 +#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32 +#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4 +#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128 +#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8 +#define IXGBE_RDMAM_QSC_RSC_RAM_RANGE 32 +#define IXGBE_RDMAM_QSC_RSC_RAM_COUNT 8 + +#define IXGBE_TXDESCIC_READY 0x80000000 + +/* Receive Checksum Control */ +#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* FCRTL Bit Masks */ +#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */ +#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */ + +/* PAP bit masks*/ +#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */ + +/* RMCS Bit Masks */ +#define IXGBE_RMCS_RRM 0x00000002 /* Rx Recycle Mode enable */ +/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ +#define IXGBE_RMCS_RAC 0x00000004 +/* Deficit Fixed Prio ena */ +#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC +#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority FC ena */ +#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */ +#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */ + +/* FCCFG Bit Masks */ +#define IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */ +#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */ + +/* Interrupt register bitmasks */ + +/* Extended Interrupt Cause Read */ +#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ +#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */ +#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */ +#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */ +#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */ +#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ +#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */ +#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ +#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */ +#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */ +#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ +#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */ +#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */ +#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */ +#define IXGBE_EICR_GPI_SDP0_X540 0x02000000 /* Gen Purpose Interrupt on SDP0 */ +#define IXGBE_EICR_GPI_SDP1_X540 0x04000000 /* Gen Purpose Interrupt on SDP1 */ +#define IXGBE_EICR_GPI_SDP2_X540 0x08000000 /* Gen Purpose 
Interrupt on SDP2 */ +#define IXGBE_EICR_GPI_SDP0_X550 IXGBE_EICR_GPI_SDP0_X540 +#define IXGBE_EICR_GPI_SDP1_X550 IXGBE_EICR_GPI_SDP1_X540 +#define IXGBE_EICR_GPI_SDP2_X550 IXGBE_EICR_GPI_SDP2_X540 +#define IXGBE_EICR_GPI_SDP0_X550EM_x IXGBE_EICR_GPI_SDP0_X540 +#define IXGBE_EICR_GPI_SDP1_X550EM_x IXGBE_EICR_GPI_SDP1_X540 +#define IXGBE_EICR_GPI_SDP2_X550EM_x IXGBE_EICR_GPI_SDP2_X540 +#define IXGBE_EICR_GPI_SDP0_X550EM_a IXGBE_EICR_GPI_SDP0_X540 +#define IXGBE_EICR_GPI_SDP1_X550EM_a IXGBE_EICR_GPI_SDP1_X540 +#define IXGBE_EICR_GPI_SDP2_X550EM_a IXGBE_EICR_GPI_SDP2_X540 +#define IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP0) +#define IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP1) +#define IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP2) + +#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ +#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ +#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ +#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ + +/* Extended Interrupt Cause Set */ +#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */ +#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EICS_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) +#define IXGBE_EICS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) +#define IXGBE_EICS_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) +#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ +#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +/* Extended Interrupt Mask Set */ +#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ +#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */ +#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EIMS_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) +#define IXGBE_EIMS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) +#define IXGBE_EIMS_GPI_SDP2_BY_MAC(_hw) 
IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) +#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ +#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +/* Extended Interrupt Mask Clear */ +#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ +#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EIMC_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) +#define IXGBE_EIMC_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) +#define IXGBE_EIMC_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) +#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */ +#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +#define IXGBE_EIMS_ENABLE_MASK ( \ + IXGBE_EIMS_RTX_QUEUE | \ + IXGBE_EIMS_LSC | \ + IXGBE_EIMS_TCP_TIMER | \ + IXGBE_EIMS_OTHER) + +/* Immediate Interrupt Rx (A.K.A. 
Low Latency Interrupt) */ +#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ +#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ +#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ +#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ +#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ +#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ +#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ +#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ +#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */ +#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */ +#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */ +#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */ +#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */ +#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */ +#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */ +#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */ +#define IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass chk of ctrl bits */ +#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */ +#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */ +#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */ +#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */ +#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */ + +#define IXGBE_MAX_FTQF_FILTERS 128 +#define IXGBE_FTQF_PROTOCOL_MASK 0x00000003 +#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000 +#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001 +#define IXGBE_FTQF_PROTOCOL_SCTP 2 +#define IXGBE_FTQF_PRIORITY_MASK 0x00000007 +#define IXGBE_FTQF_PRIORITY_SHIFT 2 +#define IXGBE_FTQF_POOL_MASK 0x0000003F +#define IXGBE_FTQF_POOL_SHIFT 8 +#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F +#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25 +#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E +#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D +#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B +#define IXGBE_FTQF_DEST_PORT_MASK 0x17 +#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F +#define IXGBE_FTQF_POOL_MASK_EN 0x40000000 +#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000 + +/* Interrupt clear mask */ +#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF + +/* Interrupt Vector Allocation Registers */ +#define IXGBE_IVAR_REG_NUM 25 +#define IXGBE_IVAR_REG_NUM_82599 64 +#define IXGBE_IVAR_TXRX_ENTRY 96 +#define IXGBE_IVAR_RX_ENTRY 64 +#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i)) +#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i)) +#define IXGBE_IVAR_TX_ENTRY 32 + +#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */ +#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */ + +#define IXGBE_MSIX_VECTOR(_i) (0 + (_i)) + +#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ + +/* ETYPE Queue Filter/Select Bit Masks */ +#define IXGBE_MAX_ETQF_FILTERS 8 +#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */ +#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */ +#define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */ +#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ +#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ +#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */ +#define IXGBE_ETQF_POOL_SHIFT 20 + +#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */ +#define 
IXGBE_ETQS_RX_QUEUE_SHIFT 16 +#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */ +#define IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */ + +/* + * ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters + * here!! + * + * Current filters: + * EAPOL 802.1x (0x888e): Filter 0 + * FCoE (0x8906): Filter 2 + * 1588 (0x88f7): Filter 3 + * FIP (0x8914): Filter 4 + * LLDP (0x88CC): Filter 5 + * LACP (0x8809): Filter 6 + * FC (0x8808): Filter 7 + */ +#define IXGBE_ETQF_FILTER_EAPOL 0 +#define IXGBE_ETQF_FILTER_FCOE 2 +#define IXGBE_ETQF_FILTER_1588 3 +#define IXGBE_ETQF_FILTER_FIP 4 +#define IXGBE_ETQF_FILTER_LLDP 5 +#define IXGBE_ETQF_FILTER_LACP 6 +#define IXGBE_ETQF_FILTER_FC 7 +/* VLAN Control Bit Masks */ +#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ +#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ +#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */ +#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ +#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ + +/* VLAN pool filtering masks */ +#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */ +#define IXGBE_VLVF_ENTRIES 64 +#define IXGBE_VLVF_VLANID_MASK 0x00000FFF +/* Per VF Port VLAN insertion rules */ +#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ + +#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ + +/* STATUS Bit Masks */ +#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */ +#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/ +#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Ena Status */ + +#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */ +#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */ + +/* ESDP Bit Masks */ +#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */ +#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */ +#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */ +#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */ +#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */ +#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ +#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */ +#define IXGBE_ESDP_SDP7 0x00000080 /* SDP7 Data Value */ +#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */ +#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */ +#define IXGBE_ESDP_SDP2_DIR 0x00000400 /* SDP1 IO direction */ +#define IXGBE_ESDP_SDP3_DIR 0x00000800 /* SDP3 IO direction */ +#define IXGBE_ESDP_SDP4_DIR 0x00001000 /* SDP4 IO direction */ +#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */ +#define IXGBE_ESDP_SDP6_DIR 0x00004000 /* SDP6 IO direction */ +#define IXGBE_ESDP_SDP7_DIR 0x00008000 /* SDP7 IO direction */ +#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 IO mode */ +#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */ + + +/* LEDCTL Bit Masks */ +#define IXGBE_LED_IVRT_BASE 0x00000040 +#define IXGBE_LED_BLINK_BASE 0x00000080 +#define IXGBE_LED_MODE_MASK_BASE 0x0000000F +#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i))) +#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i)) +#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) +#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) +#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) +#define IXGBE_X557_LED_MANUAL_SET_MASK (1 << 8) +#define IXGBE_X557_MAX_LED_INDEX 3 +#define IXGBE_X557_LED_PROVISIONING 0xC430 + +/* LED modes */ +#define IXGBE_LED_LINK_UP 0x0 
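/*
 * Illustrative sketch, not part of the header above: how the per-LED
 * LEDCTL fields are typically composed from the IXGBE_LED_* masks just
 * defined (each LED index owns an 8-bit field; the mode sits in the low
 * nibble, selected by IXGBE_LED_MODE_SHIFT()). The struct ixgbe_hw type,
 * the IXGBE_LEDCTL register offset and the IXGBE_READ_REG()/
 * IXGBE_WRITE_REG() accessors are assumed here; they are defined
 * elsewhere in the driver, not in the lines shown above.
 */
static inline void ixgbe_sketch_set_led_mode(struct ixgbe_hw *hw,
					     u32 index, u32 mode)
{
	u32 ledctl = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	/* Clear this LED's mode bits, then program the new mode,
	 * e.g. IXGBE_LED_LINK_UP or one of the modes defined below. */
	ledctl &= ~IXGBE_LED_MODE_MASK(index);
	ledctl |= mode << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl);
}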
+#define IXGBE_LED_LINK_10G 0x1 +#define IXGBE_LED_MAC 0x2 +#define IXGBE_LED_FILTER 0x3 +#define IXGBE_LED_LINK_ACTIVE 0x4 +#define IXGBE_LED_LINK_1G 0x5 +#define IXGBE_LED_ON 0xE +#define IXGBE_LED_OFF 0xF + +/* AUTOC Bit Masks */ +#define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000 +#define IXGBE_AUTOC_KX4_SUPP 0x80000000 +#define IXGBE_AUTOC_KX_SUPP 0x40000000 +#define IXGBE_AUTOC_PAUSE 0x30000000 +#define IXGBE_AUTOC_ASM_PAUSE 0x20000000 +#define IXGBE_AUTOC_SYM_PAUSE 0x10000000 +#define IXGBE_AUTOC_RF 0x08000000 +#define IXGBE_AUTOC_PD_TMR 0x06000000 +#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000 +#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000 +#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000 +#define IXGBE_AUTOC_FECA 0x00040000 +#define IXGBE_AUTOC_FECR 0x00020000 +#define IXGBE_AUTOC_KR_SUPP 0x00010000 +#define IXGBE_AUTOC_AN_RESTART 0x00001000 +#define IXGBE_AUTOC_FLU 0x00000001 +#define IXGBE_AUTOC_LMS_SHIFT 13 +#define IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) + +#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200 +#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 +#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180 +#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 +#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) + +#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000 +#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000 +#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16 +#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000 +#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000 + +#define IXGBE_MACC_FLU 0x00000001 +#define IXGBE_MACC_FSV_10G 0x00030000 +#define IXGBE_MACC_FS 0x00040000 +#define IXGBE_MAC_RX2TX_LPBK 0x00000002 + +/* Veto Bit definiton */ +#define IXGBE_MMNGC_MNG_VETO 0x00000001 + +/* LINKS Bit Masks */ +#define IXGBE_LINKS_KX_AN_COMP 0x80000000 +#define IXGBE_LINKS_UP 0x40000000 +#define IXGBE_LINKS_SPEED 0x20000000 +#define IXGBE_LINKS_MODE 0x18000000 +#define IXGBE_LINKS_RX_MODE 0x06000000 +#define IXGBE_LINKS_TX_MODE 0x01800000 +#define IXGBE_LINKS_XGXS_EN 0x00400000 +#define IXGBE_LINKS_SGMII_EN 0x02000000 +#define IXGBE_LINKS_PCS_1G_EN 0x00200000 +#define IXGBE_LINKS_1G_AN_EN 0x00100000 +#define IXGBE_LINKS_KX_AN_IDLE 
0x00080000 +#define IXGBE_LINKS_1G_SYNC 0x00040000 +#define IXGBE_LINKS_10G_ALIGN 0x00020000 +#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000 +#define IXGBE_LINKS_TL_FAULT 0x00001000 +#define IXGBE_LINKS_SIGNAL 0x00000F00 + +#define IXGBE_LINKS_SPEED_NON_STD 0x08000000 +#define IXGBE_LINKS_SPEED_82599 0x30000000 +#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 +#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 +#define IXGBE_LINKS_SPEED_100_82599 0x10000000 +#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ +#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ + +#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040 + +/* PCS1GLSTA Bit Masks */ +#define IXGBE_PCS1GLSTA_LINK_OK 1 +#define IXGBE_PCS1GLSTA_SYNK_OK 0x10 +#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000 +#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000 +#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000 +#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000 +#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000 + +#define IXGBE_PCS1GANA_SYM_PAUSE 0x80 +#define IXGBE_PCS1GANA_ASM_PAUSE 0x100 + +/* PCS1GLCTL Bit Masks */ +#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */ +#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1 +#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20 +#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40 +#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000 +#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000 + +/* ANLP1 Bit Masks */ +#define IXGBE_ANLP1_PAUSE 0x0C00 +#define IXGBE_ANLP1_SYM_PAUSE 0x0400 +#define IXGBE_ANLP1_ASM_PAUSE 0x0800 +#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000 + +/* SW Semaphore Register bitmasks */ +#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */ + +/* SW_FW_SYNC/GSSR definitions */ +#define IXGBE_GSSR_EEP_SM 0x0001 +#define IXGBE_GSSR_PHY0_SM 0x0002 +#define IXGBE_GSSR_PHY1_SM 0x0004 +#define IXGBE_GSSR_MAC_CSR_SM 0x0008 +#define IXGBE_GSSR_FLASH_SM 0x0010 +#define IXGBE_GSSR_NVM_UPDATE_SM 0x0200 +#define IXGBE_GSSR_SW_MNG_SM 0x0400 +#define IXGBE_GSSR_TOKEN_SM 0x40000000 /* SW bit for shared access */ +#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys and both I2Cs */ +#define IXGBE_GSSR_I2C_MASK 0x1800 +#define IXGBE_GSSR_NVM_PHY_MASK 0xF + +/* FW Status register bitmask */ +#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */ + +/* EEC Register */ +#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */ +#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */ +#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */ +#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */ +#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */ +#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define IXGBE_EEC_FWE_SHIFT 4 +#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */ +#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */ +#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ +#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ +#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */ +#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */ +#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */ +/* EEPROM Addressing bits based on type (0-small, 1-large) */ +#define IXGBE_EEC_ADDR_SIZE 0x00000400 +#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */ +#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD alows 14 bits for addr. 
*/ + +#define IXGBE_EEC_SIZE_SHIFT 11 +#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 +#define IXGBE_EEPROM_OPCODE_BITS 8 + +/* FLA Register */ +#define IXGBE_FLA_LOCKED 0x00000040 + +/* Part Number String Length */ +#define IXGBE_PBANUM_LENGTH 11 + +/* Checksum and EEPROM pointers */ +#define IXGBE_PBANUM_PTR_GUARD 0xFAFA +#define IXGBE_EEPROM_CHECKSUM 0x3F +#define IXGBE_EEPROM_SUM 0xBABA +#define IXGBE_PCIE_ANALOG_PTR 0x03 +#define IXGBE_ATLAS0_CONFIG_PTR 0x04 +#define IXGBE_PHY_PTR 0x04 +#define IXGBE_ATLAS1_CONFIG_PTR 0x05 +#define IXGBE_OPTION_ROM_PTR 0x05 +#define IXGBE_PCIE_GENERAL_PTR 0x06 +#define IXGBE_PCIE_CONFIG0_PTR 0x07 +#define IXGBE_PCIE_CONFIG1_PTR 0x08 +#define IXGBE_CORE0_PTR 0x09 +#define IXGBE_CORE1_PTR 0x0A +#define IXGBE_MAC0_PTR 0x0B +#define IXGBE_MAC1_PTR 0x0C +#define IXGBE_CSR0_CONFIG_PTR 0x0D +#define IXGBE_CSR1_CONFIG_PTR 0x0E +#define IXGBE_PCIE_ANALOG_PTR_X550 0x02 +#define IXGBE_SHADOW_RAM_SIZE_X550 0x4000 +#define IXGBE_IXGBE_PCIE_GENERAL_SIZE 0x24 +#define IXGBE_PCIE_CONFIG_SIZE 0x08 +#define IXGBE_EEPROM_LAST_WORD 0x41 +#define IXGBE_FW_PTR 0x0F +#define IXGBE_PBANUM0_PTR 0x15 +#define IXGBE_PBANUM1_PTR 0x16 +#define IXGBE_ALT_MAC_ADDR_PTR 0x37 +#define IXGBE_FREE_SPACE_PTR 0X3E + +/* External Thermal Sensor Config */ +#define IXGBE_ETS_CFG 0x26 +#define IXGBE_ETS_LTHRES_DELTA_MASK 0x07C0 +#define IXGBE_ETS_LTHRES_DELTA_SHIFT 6 +#define IXGBE_ETS_TYPE_MASK 0x0038 +#define IXGBE_ETS_TYPE_SHIFT 3 +#define IXGBE_ETS_TYPE_EMC 0x000 +#define IXGBE_ETS_NUM_SENSORS_MASK 0x0007 +#define IXGBE_ETS_DATA_LOC_MASK 0x3C00 +#define IXGBE_ETS_DATA_LOC_SHIFT 10 +#define IXGBE_ETS_DATA_INDEX_MASK 0x0300 +#define IXGBE_ETS_DATA_INDEX_SHIFT 8 +#define IXGBE_ETS_DATA_HTHRESH_MASK 0x00FF + +#define IXGBE_SAN_MAC_ADDR_PTR 0x28 +#define IXGBE_DEVICE_CAPS 0x2C +#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11 +#define IXGBE_PCIE_MSIX_82599_CAPS 0x72 +#define IXGBE_MAX_MSIX_VECTORS_82599 0x40 +#define IXGBE_PCIE_MSIX_82598_CAPS 0x62 +#define IXGBE_MAX_MSIX_VECTORS_82598 0x13 + +/* MSI-X capability fields masks */ +#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF + +/* Legacy EEPROM word offsets */ +#define IXGBE_ISCSI_BOOT_CAPS 0x0033 +#define IXGBE_ISCSI_SETUP_PORT_0 0x0030 +#define IXGBE_ISCSI_SETUP_PORT_1 0x0034 + +/* EEPROM Commands - SPI */ +#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ +#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01 +#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ +#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ +/* EEPROM reset Write Enable latch */ +#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 +#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ +#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ +#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Read Register */ +#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */ +#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */ +#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */ +#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for wr complete */ +#define IXGBE_NVM_POLL_READ 0 /* Flag for polling 
for rd complete */ + +#define NVM_INIT_CTRL_3 0x38 +#define NVM_INIT_CTRL_3_LPLU 0x8 +#define NVM_INIT_CTRL_3_D10GMP_PORT0 0x40 +#define NVM_INIT_CTRL_3_D10GMP_PORT1 0x100 + +#define IXGBE_ETH_LENGTH_OF_ADDRESS 6 + +#define IXGBE_EEPROM_PAGE_SIZE_MAX 128 +#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 256 /* words rd in burst */ +#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* words wr in burst */ +#define IXGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */ +#define IXGBE_EEPROM_CCD_BIT 2 + +#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS +#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM attempts to gain grant */ +#endif + +/* Number of 5 microseconds we wait for EERD read and + * EERW write to complete */ +#define IXGBE_EERD_EEWR_ATTEMPTS 100000 + +/* # attempts we wait for flush update to complete */ +#define IXGBE_FLUDONE_ATTEMPTS 20000 + +#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */ +#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */ +#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */ +#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */ + +#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 +#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 +#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 +#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 +#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2 +#define IXGBE_FW_LESM_STATE_1 0x1 +#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ +#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 +#define IXGBE_FW_PATCH_VERSION_4 0x7 +#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */ +#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */ +#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */ +#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */ +#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */ +#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. 
SAN MAC block */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt SAN MAC capability */ +#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt SAN MAC 0 offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt SAN MAC 1 offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt WWNN prefix offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt WWPN prefix offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt SAN MAC exists */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt WWN base exists */ + +/* FW header offset */ +#define IXGBE_X540_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 +#define IXGBE_X540_FW_MODULE_MASK 0x7FFF +/* 4KB multiplier */ +#define IXGBE_X540_FW_MODULE_LENGTH 0x1000 +/* version word 2 (month & day) */ +#define IXGBE_X540_FW_PATCH_VERSION_2 0x5 +/* version word 3 (silicon compatibility & year) */ +#define IXGBE_X540_FW_PATCH_VERSION_3 0x6 +/* version word 4 (major & minor numbers) */ +#define IXGBE_X540_FW_PATCH_VERSION_4 0x7 + +#define IXGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */ +#define IXGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */ +#define IXGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */ + +/* PCI Bus Info */ +#define IXGBE_PCI_DEVICE_STATUS 0xAA +#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 +#define IXGBE_PCI_LINK_STATUS 0xB2 +#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 +#define IXGBE_PCI_LINK_WIDTH 0x3F0 +#define IXGBE_PCI_LINK_WIDTH_1 0x10 +#define IXGBE_PCI_LINK_WIDTH_2 0x20 +#define IXGBE_PCI_LINK_WIDTH_4 0x40 +#define IXGBE_PCI_LINK_WIDTH_8 0x80 +#define IXGBE_PCI_LINK_SPEED 0xF +#define IXGBE_PCI_LINK_SPEED_2500 0x1 +#define IXGBE_PCI_LINK_SPEED_5000 0x2 +#define IXGBE_PCI_LINK_SPEED_8000 0x3 +#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E +#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 + +#define IXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf +#define IXGBE_PCIDEVCTRL2_16_32ms_def 0x0 +#define IXGBE_PCIDEVCTRL2_50_100us 0x1 +#define IXGBE_PCIDEVCTRL2_1_2ms 0x2 +#define IXGBE_PCIDEVCTRL2_16_32ms 0x5 +#define IXGBE_PCIDEVCTRL2_65_130ms 0x6 +#define IXGBE_PCIDEVCTRL2_260_520ms 0x9 +#define IXGBE_PCIDEVCTRL2_1_2s 0xa +#define IXGBE_PCIDEVCTRL2_4_8s 0xd +#define IXGBE_PCIDEVCTRL2_17_34s 0xe + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 + +/* Check whether address is multicast. This is little-endian specific check.*/ +#define IXGBE_IS_MULTICAST(Address) \ + (bool)(((u8 *)(Address))[0] & ((u8)0x01)) + +/* Check whether an address is broadcast. 
*/ +#define IXGBE_IS_BROADCAST(Address) \ + ((((u8 *)(Address))[0] == ((u8)0xff)) && \ + (((u8 *)(Address))[1] == ((u8)0xff))) + +/* RAH */ +#define IXGBE_RAH_VIND_MASK 0x003C0000 +#define IXGBE_RAH_VIND_SHIFT 18 +#define IXGBE_RAH_AV 0x80000000 +#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF + +/* Header split receive */ +#define IXGBE_RFCTL_ISCSI_DIS 0x00000001 +#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1 +#define IXGBE_RFCTL_RSC_DIS 0x00000020 +#define IXGBE_RFCTL_NFSW_DIS 0x00000040 +#define IXGBE_RFCTL_NFSR_DIS 0x00000080 +#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300 +#define IXGBE_RFCTL_NFS_VER_SHIFT 8 +#define IXGBE_RFCTL_NFS_VER_2 0 +#define IXGBE_RFCTL_NFS_VER_3 1 +#define IXGBE_RFCTL_NFS_VER_4 2 +#define IXGBE_RFCTL_IPV6_DIS 0x00000400 +#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000 +#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000 +#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Transmit Config masks */ +#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */ +#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */ +#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ +/* Enable short packet padding to 64 bytes */ +#define IXGBE_TX_PAD_ENABLE 0x00000400 +#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */ +/* This allows for 16K packets + 4k for vlan */ +#define IXGBE_MAX_FRAME_SZ 0x40040000 + +#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ +#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */ + +/* Receive Config masks */ +#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ +#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Desc Monitor Bypass */ +#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Ena specific Rx Queue */ +#define IXGBE_RXDCTL_SWFLSH 0x04000000 /* Rx Desc wr-bk flushing */ +#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* X540 supported only */ +#define IXGBE_RXDCTL_RLPML_EN 0x00008000 +#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ + +#define IXGBE_TSAUXC_EN_CLK 0x00000004 +#define IXGBE_TSAUXC_SYNCLK 0x00000008 +#define IXGBE_TSAUXC_SDP0_INT 0x00000040 +#define IXGBE_TSAUXC_EN_TT0 0x00000001 +#define IXGBE_TSAUXC_EN_TT1 0x00000002 +#define IXGBE_TSAUXC_ST0 0x00000010 +#define IXGBE_TSAUXC_DISABLE_SYSTIME 0x80000000 + +#define IXGBE_TSSDP_TS_SDP0_SEL_MASK 0x000000C0 +#define IXGBE_TSSDP_TS_SDP0_CLK0 0x00000080 +#define IXGBE_TSSDP_TS_SDP0_EN 0x00000100 + +#define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ +#define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */ + +#define IXGBE_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ +#define IXGBE_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define IXGBE_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define IXGBE_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define IXGBE_TSYNCRXCTL_TYPE_ALL 0x08 +#define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define IXGBE_TSYNCRXCTL_ENABLED 0x00000010 /* Rx Timestamping enabled */ +#define IXGBE_TSYNCRXCTL_TSIP_UT_EN 0x00800000 /* Rx Timestamp in Packet */ +#define IXGBE_TSYNCRXCTL_TSIP_UP_MASK 0xFF000000 /* Rx Timestamp UP Mask */ + +#define IXGBE_TSIM_SYS_WRAP 0x00000001 +#define IXGBE_TSIM_TXTS 0x00000002 +#define IXGBE_TSIM_TADJ 0x00000080 + +#define IXGBE_TSICR_SYS_WRAP IXGBE_TSIM_SYS_WRAP +#define IXGBE_TSICR_TXTS IXGBE_TSIM_TXTS +#define IXGBE_TSICR_TADJ IXGBE_TSIM_TADJ + +#define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF +#define IXGBE_RXMTRL_V1_SYNC_MSG 
0x00 +#define IXGBE_RXMTRL_V1_DELAY_REQ_MSG 0x01 +#define IXGBE_RXMTRL_V1_FOLLOWUP_MSG 0x02 +#define IXGBE_RXMTRL_V1_DELAY_RESP_MSG 0x03 +#define IXGBE_RXMTRL_V1_MGMT_MSG 0x04 + +#define IXGBE_RXMTRL_V2_MSGID_MASK 0x0000FF00 +#define IXGBE_RXMTRL_V2_SYNC_MSG 0x0000 +#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG 0x0100 +#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG 0x0200 +#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG 0x0300 +#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG 0x0800 +#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG 0x0900 +#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00 +#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG 0x0B00 +#define IXGBE_RXMTRL_V2_SIGNALLING_MSG 0x0C00 +#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00 + +#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ +#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ +#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */ +#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ +#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ +#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ +/* Receive Priority Flow Control Enable */ +#define IXGBE_FCTRL_RPFCE 0x00004000 +#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ +#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */ +#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */ +#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */ +#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */ +#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF4 /* Rx Priority FC bitmap mask */ +#define IXGBE_MFLCN_RPFCE_SHIFT 4 /* Rx Priority FC bitmap shift */ + +/* Multiple Receive Queue Control */ +#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */ +#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */ +#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */ +#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */ +#define IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */ +#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */ +#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */ +#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */ +#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */ +#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */ +#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */ +#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 +#define IXGBE_MRQC_MULTIPLE_RSS 0x00002000 +#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 + +/* Queue Drop Enable */ +#define IXGBE_QDE_ENABLE 0x00000001 +#define IXGBE_QDE_HIDE_VLAN 0x00000002 +#define IXGBE_QDE_IDX_MASK 0x00007F00 +#define IXGBE_QDE_IDX_SHIFT 8 +#define IXGBE_QDE_WRITE 0x00010000 +#define IXGBE_QDE_READ 0x00020000 + +#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ 
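/*
 * Illustrative sketch, not part of the header above: composing transmit
 * descriptor command flags from the IXGBE_TXD_CMD_* bits just defined.
 * These bits are OR'd into the descriptor's command dword alongside the
 * buffer length; when IXGBE_TXD_CMD_RS is set, completion is reported by
 * hardware setting IXGBE_TXD_STAT_DD (defined just below) in the
 * descriptor status on write-back. The descriptor structure itself is
 * defined elsewhere and is only assumed here.
 */
static inline u32 ixgbe_sketch_tx_flags(int last_seg, int report_status)
{
	u32 flags = IXGBE_TXD_CMD_IFCS;      /* hardware inserts the Ethernet CRC */

	if (last_seg)
		flags |= IXGBE_TXD_CMD_EOP;  /* final descriptor of the packet */
	if (report_status)
		flags |= IXGBE_TXD_CMD_RS;   /* request a Descriptor Done write-back */
	return flags;
}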
+#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ + +#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000 +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 +#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000 +#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 +/* Multiple Transmit Queue Command Register */ +#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */ +#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */ +#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */ +#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */ +#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */ +#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA and VT_ENA */ +#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */ + +/* Receive Descriptor bit definitions */ +#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ +#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ +#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 +#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ +#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ +#define IXGBE_RXD_STAT_OUTERIPCS 0x100 /* Cloud IP xsum calculated */ +#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ +#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */ +#define IXGBE_RXD_STAT_TSIP 0x08000 /* Time Stamp in packet buffer */ +#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ +#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ +#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ +#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ +#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ +#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ +#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ +#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ +#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ +#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ +#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */ +#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +#define IXGBE_RXDADV_ERR_OUTERIPER 0x04000000 /* CRC IP Header error */ +#define IXGBE_RXDADV_ERR_RXE 0x20000000 /* Any MAC Error */ +#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCEOFe/IPE */ +#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */ +#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */ +#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */ +#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */ +#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ +#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ +#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ +#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ +#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ +#define 
IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ +#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ +#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ +#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define IXGBE_RXD_PRI_SHIFT 13 +#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define IXGBE_RXD_CFI_SHIFT 12 + +#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ +#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ +#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ +#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ +#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */ +#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ +#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ +#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ +#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ +#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE1588 Time Stamp */ +#define IXGBE_RXDADV_STAT_TSIP 0x00008000 /* Time Stamp in packet buffer */ + +/* PSRTYPE bit definitions */ +#define IXGBE_PSRTYPE_TCPHDR 0x00000010 +#define IXGBE_PSRTYPE_UDPHDR 0x00000020 +#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 +#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 +#define IXGBE_PSRTYPE_L2HDR 0x00001000 + +/* SRRCTL bit definitions */ +#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* 64byte resolution (>> 6) + * + at bit 8 offset (<< 8) + * = (<< 2) + */ +#define IXGBE_SRRCTL_RDMTS_SHIFT 22 +#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 +#define IXGBE_SRRCTL_DROP_EN 0x10000000 +#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 +#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 + +#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F +#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 +#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 +#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 +#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 +#define IXGBE_RXDADV_RSCCNT_SHIFT 17 +#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 +#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 +#define IXGBE_RXDADV_SPH 0x8000 + +/* RSS Hash results */ +#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + +/* RSS Packet Types as indicated in the receive descriptor. 
*/ +#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */ +#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */ +#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ +#define IXGBE_RXDADV_PKTTYPE_VXLAN 0x00000800 /* VXLAN hdr present */ +#define IXGBE_RXDADV_PKTTYPE_TUNNEL 0x00010000 /* Tunnel type */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ +#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ +#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ +#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ +#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ + +/* Security Processing bit Indication */ +#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000 +#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 + +/* Masks to determine if packets should be dropped due to frame errors */ +#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ + IXGBE_RXD_ERR_CE | \ + IXGBE_RXD_ERR_LE | \ + IXGBE_RXD_ERR_PE | \ + IXGBE_RXD_ERR_OSE | \ + IXGBE_RXD_ERR_USE) + +#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ + IXGBE_RXDADV_ERR_CE | \ + IXGBE_RXDADV_ERR_LE | \ + IXGBE_RXDADV_ERR_PE | \ + IXGBE_RXDADV_ERR_OSE | \ + IXGBE_RXDADV_ERR_USE) + +#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK_82599 IXGBE_RXDADV_ERR_RXE + +/* Multicast bit mask */ +#define IXGBE_MCSTCTRL_MFE 0x4 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 + +/* Vlan-specific macros */ +#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */ +#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */ +#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ +#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT + +/* SR-IOV specific macros */ +#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4) +#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4)) +#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600)) +#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4)) +/* Translated register #defines */ +#define IXGBE_PVFCTRL(P) (0x00300 + (4 * (P))) +#define IXGBE_PVFSTATUS(P) (0x00008 + (0 * (P))) +#define IXGBE_PVFLINKS(P) (0x042A4 + (0 * (P))) +#define IXGBE_PVFRTIMER(P) (0x00048 + (0 * (P))) +#define IXGBE_PVFMAILBOX(P) (0x04C00 + (4 * (P))) +#define IXGBE_PVFRXMEMWRAP(P) (0x03190 + (0 * (P))) +#define IXGBE_PVTEICR(P) (0x00B00 + (4 * (P))) +#define IXGBE_PVTEICS(P) (0x00C00 + (4 * (P))) +#define IXGBE_PVTEIMS(P) (0x00D00 + (4 * (P))) +#define IXGBE_PVTEIMC(P) (0x00E00 + (4 * (P))) +#define IXGBE_PVTEIAC(P) (0x00F00 + (4 * (P))) +#define IXGBE_PVTEIAM(P) (0x04D00 + (4 * (P))) +#define IXGBE_PVTEITR(P) (((P) < 24) ? 
(0x00820 + ((P) * 4)) : \ + (0x012300 + (((P) - 24) * 4))) +#define IXGBE_PVTIVAR(P) (0x12500 + (4 * (P))) +#define IXGBE_PVTIVAR_MISC(P) (0x04E00 + (4 * (P))) +#define IXGBE_PVTRSCINT(P) (0x12000 + (4 * (P))) +#define IXGBE_VFPBACL(P) (0x110C8 + (4 * (P))) +#define IXGBE_PVFRDBAL(P) ((P < 64) ? (0x01000 + (0x40 * (P))) \ + : (0x0D000 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDBAH(P) ((P < 64) ? (0x01004 + (0x40 * (P))) \ + : (0x0D004 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDLEN(P) ((P < 64) ? (0x01008 + (0x40 * (P))) \ + : (0x0D008 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDH(P) ((P < 64) ? (0x01010 + (0x40 * (P))) \ + : (0x0D010 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDT(P) ((P < 64) ? (0x01018 + (0x40 * (P))) \ + : (0x0D018 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRXDCTL(P) ((P < 64) ? (0x01028 + (0x40 * (P))) \ + : (0x0D028 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFSRRCTL(P) ((P < 64) ? (0x01014 + (0x40 * (P))) \ + : (0x0D014 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFPSRTYPE(P) (0x0EA00 + (4 * (P))) +#define IXGBE_PVFTDBAL(P) (0x06000 + (0x40 * (P))) +#define IXGBE_PVFTDBAH(P) (0x06004 + (0x40 * (P))) +#define IXGBE_PVFTTDLEN(P) (0x06008 + (0x40 * (P))) +#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P))) +#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P))) +#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P))) +#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P))) +#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P))) +#define IXGBE_PVFDCA_RXCTRL(P) (((P) < 64) ? (0x0100C + (0x40 * (P))) \ + : (0x0D00C + (0x40 * ((P) - 64)))) +#define IXGBE_PVFDCA_TXCTRL(P) (0x0600C + (0x40 * (P))) +#define IXGBE_PVFGPRC(x) (0x0101C + (0x40 * (x))) +#define IXGBE_PVFGPTC(x) (0x08300 + (0x04 * (x))) +#define IXGBE_PVFGORC_LSB(x) (0x01020 + (0x40 * (x))) +#define IXGBE_PVFGORC_MSB(x) (0x0D020 + (0x40 * (x))) +#define IXGBE_PVFGOTC_LSB(x) (0x08400 + (0x08 * (x))) +#define IXGBE_PVFGOTC_MSB(x) (0x08404 + (0x08 * (x))) +#define IXGBE_PVFMPRC(x) (0x0D01C + (0x40 * (x))) + +#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index))) +#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index))) + +#define IXGBE_PVFTDHn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDH((q_per_pool)*(vf_number) + (vf_q_index))) +#define IXGBE_PVFTDTn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDT((q_per_pool)*(vf_number) + (vf_q_index))) + +/* Little Endian defines */ +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 + +#endif +#ifndef __be16 +/* Big Endian defines */ +#define __be16 u16 +#define __be32 u32 +#define __be64 u64 + +#endif +enum ixgbe_fdir_pballoc_type { + IXGBE_FDIR_PBALLOC_NONE = 0, + IXGBE_FDIR_PBALLOC_64K = 1, + IXGBE_FDIR_PBALLOC_128K = 2, + IXGBE_FDIR_PBALLOC_256K = 3, +}; + +/* Flow Director register values */ +#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001 +#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002 +#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003 +#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008 +#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010 +#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020 +#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080 +#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8 +#define IXGBE_FDIRCTRL_DROP_Q_MASK 0x00007F00 +#define IXGBE_FDIRCTRL_FLEX_SHIFT 16 +#define IXGBE_FDIRCTRL_DROP_NO_MATCH 0x00008000 +#define IXGBE_FDIRCTRL_FILTERMODE_SHIFT 21 +#define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN 
0x0001 /* bit 23:21, 001b */ +#define IXGBE_FDIRCTRL_FILTERMODE_CLOUD 0x0002 /* bit 23:21, 010b */ +#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000 +#define IXGBE_FDIRCTRL_FILTERMODE_MASK 0x00E00000 +#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24 +#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000 +#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28 + +#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16 +#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16 +#define IXGBE_FDIRIP6M_DIPM_SHIFT 16 +#define IXGBE_FDIRM_VLANID 0x00000001 +#define IXGBE_FDIRM_VLANP 0x00000002 +#define IXGBE_FDIRM_POOL 0x00000004 +#define IXGBE_FDIRM_L4P 0x00000008 +#define IXGBE_FDIRM_FLEX 0x00000010 +#define IXGBE_FDIRM_DIPv6 0x00000020 +#define IXGBE_FDIRM_L3P 0x00000040 + +#define IXGBE_FDIRIP6M_INNER_MAC 0x03F0 /* bit 9:4 */ +#define IXGBE_FDIRIP6M_TUNNEL_TYPE 0x0800 /* bit 11 */ +#define IXGBE_FDIRIP6M_TNI_VNI 0xF000 /* bit 15:12 */ +#define IXGBE_FDIRIP6M_TNI_VNI_24 0x1000 /* bit 12 */ +#define IXGBE_FDIRIP6M_ALWAYS_MASK 0x040F /* bit 10, 3:0 */ + +#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF +#define IXGBE_FDIRFREE_FREE_SHIFT 0 +#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000 +#define IXGBE_FDIRFREE_COLL_SHIFT 16 +#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F +#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0 +#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000 +#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16 +#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF +#define IXGBE_FDIRUSTAT_ADD_SHIFT 0 +#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000 +#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16 +#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF +#define IXGBE_FDIRFSTAT_FADD_SHIFT 0 +#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00 +#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8 +#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16 +#define IXGBE_FDIRVLAN_FLEX_SHIFT 16 +#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15 +#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16 + +#define IXGBE_FDIRCMD_CMD_MASK 0x00000003 +#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001 +#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002 +#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003 +#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004 +#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008 +#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010 +#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020 +#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040 +#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060 +#define IXGBE_FDIRCMD_IPV6 0x00000080 +#define IXGBE_FDIRCMD_CLEARHT 0x00000100 +#define IXGBE_FDIRCMD_DROP 0x00000200 +#define IXGBE_FDIRCMD_INT 0x00000400 +#define IXGBE_FDIRCMD_LAST 0x00000800 +#define IXGBE_FDIRCMD_COLLISION 0x00001000 +#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 +#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5 +#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 +#define IXGBE_FDIRCMD_TUNNEL_FILTER_SHIFT 23 +#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 +#define IXGBE_FDIR_INIT_DONE_POLL 10 +#define IXGBE_FDIRCMD_CMD_POLL 10 +#define IXGBE_FDIRCMD_TUNNEL_FILTER 0x00800000 +#define IXGBE_FDIR_DROP_QUEUE 127 + + +/* Manageablility Host Interface defines */ +#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ +#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ +#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */ +#define IXGBE_HI_FLASH_ERASE_TIMEOUT 1000 /* Process Erase command limit */ +#define IXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */ +#define IXGBE_HI_FLASH_APPLY_TIMEOUT 0 /* Process Apply command limit */ +#define IXGBE_HI_PHY_MGMT_REQ_TIMEOUT 2000 /* Wait up to 2 seconds */ + +/* CEM Support */ +#define FW_CEM_HDR_LEN 0x4 +#define 
FW_CEM_CMD_DRIVER_INFO 0xDD +#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5 +#define FW_CEM_CMD_RESERVED 0X0 +#define FW_CEM_UNUSED_VER 0x0 +#define FW_CEM_MAX_RETRIES 3 +#define FW_CEM_RESP_STATUS_SUCCESS 0x1 +#define FW_READ_SHADOW_RAM_CMD 0x31 +#define FW_READ_SHADOW_RAM_LEN 0x6 +#define FW_WRITE_SHADOW_RAM_CMD 0x33 +#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */ +#define FW_SHADOW_RAM_DUMP_CMD 0x36 +#define FW_SHADOW_RAM_DUMP_LEN 0 +#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */ +#define FW_NVM_DATA_OFFSET 3 +#define FW_MAX_READ_BUFFER_SIZE 1024 +#define FW_DISABLE_RXEN_CMD 0xDE +#define FW_DISABLE_RXEN_LEN 0x1 +#define FW_PHY_MGMT_REQ_CMD 0x20 +#define FW_PHY_TOKEN_REQ_CMD 0xA +#define FW_PHY_TOKEN_REQ_LEN 2 +#define FW_PHY_TOKEN_REQ 0 +#define FW_PHY_TOKEN_REL 1 +#define FW_PHY_TOKEN_OK 1 +#define FW_PHY_TOKEN_RETRY 0x80 +#define FW_PHY_TOKEN_DELAY 5 /* milliseconds */ +#define FW_PHY_TOKEN_WAIT 5 /* seconds */ +#define FW_PHY_TOKEN_RETRIES ((FW_PHY_TOKEN_WAIT * 1000) / FW_PHY_TOKEN_DELAY) +#define FW_INT_PHY_REQ_CMD 0xB +#define FW_INT_PHY_REQ_LEN 10 +#define FW_INT_PHY_REQ_READ 0 +#define FW_INT_PHY_REQ_WRITE 1 + +/* Host Interface Command Structures */ + +struct ixgbe_hic_hdr { + u8 cmd; + u8 buf_len; + union { + u8 cmd_resv; + u8 ret_status; + } cmd_or_resp; + u8 checksum; +}; + +struct ixgbe_hic_hdr2_req { + u8 cmd; + u8 buf_lenh; + u8 buf_lenl; + u8 checksum; +}; + +struct ixgbe_hic_hdr2_rsp { + u8 cmd; + u8 buf_lenl; + u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ + u8 checksum; +}; + +union ixgbe_hic_hdr2 { + struct ixgbe_hic_hdr2_req req; + struct ixgbe_hic_hdr2_rsp rsp; +}; + +struct ixgbe_hic_drv_info { + struct ixgbe_hic_hdr hdr; + u8 port_num; + u8 ver_sub; + u8 ver_build; + u8 ver_min; + u8 ver_maj; + u8 pad; /* end spacing to ensure length is mult. of dword */ + u16 pad2; /* end spacing to ensure length is mult. 
of dword2 */ +}; + +/* These need to be dword aligned */ +struct ixgbe_hic_read_shadow_ram { + union ixgbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ixgbe_hic_write_shadow_ram { + union ixgbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ixgbe_hic_disable_rxen { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 pad2; + u16 pad3; +}; + +struct ixgbe_hic_phy_token_req { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 command_type; + u16 pad; +}; + +struct ixgbe_hic_internal_phy_req { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 command_type; + u16 address; + u16 rsv1; + u32 write_data; + u16 pad; +}; + +struct ixgbe_hic_internal_phy_resp { + struct ixgbe_hic_hdr hdr; + u32 read_data; +}; + + +/* Transmit Descriptor - Legacy */ +struct ixgbe_legacy_tx_desc { + u64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 vlan; + } fields; + } upper; +}; + +/* Transmit Descriptor - Advanced */ +union ixgbe_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Receive Descriptor - Legacy */ +struct ixgbe_legacy_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 vlan; +}; + +/* Receive Descriptor - Advanced */ +union ixgbe_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen */ + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Context descriptors */ +struct ixgbe_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */ +#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */ +#define IXGBE_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 time stamp */ +#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */ +#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */ +#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ +#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Adv Context Desc */ +#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Adv Data Descriptor */ +#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ +#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ +#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ +#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* 
DDP hdr type or iSCSI */ +#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext 1=Adv */ +#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ +#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ +#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */ +#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ +#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ +#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ +#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ +/* 1st&Last TSO-full iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 +#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ +#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* RSV L4 Packet TYPE */ +#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* req Markers and CRC */ +#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ +#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ +#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */ +#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */ +#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ +#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */ +#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ +#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation End */ +#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation Start */ +#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ +#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ +#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ +#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ +#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +#define IXGBE_ADVTXD_OUTER_IPLEN 16 /* Adv ctxt OUTERIPLEN shift */ +#define IXGBE_ADVTXD_TUNNEL_LEN 24 /* Adv ctxt TUNNELLEN shift */ +#define IXGBE_ADVTXD_TUNNEL_TYPE_SHIFT 16 /* Adv Tx Desc Tunnel Type shift */ +#define IXGBE_ADVTXD_OUTERIPCS_SHIFT 17 /* Adv Tx Desc OUTERIPCS Shift */ +#define IXGBE_ADVTXD_TUNNEL_TYPE_NVGRE 1 /* Adv Tx Desc Tunnel Type NVGRE */ +/* Adv Tx Desc OUTERIPCS Shift for X550EM_a */ +#define IXGBE_ADVTXD_OUTERIPCS_SHIFT_X550EM_a 26 +/* Autonegotiation advertised speeds */ +typedef u32 ixgbe_autoneg_advertised; +/* Link speed */ +typedef u32 ixgbe_link_speed; +#define IXGBE_LINK_SPEED_UNKNOWN 0 +#define 
IXGBE_LINK_SPEED_100_FULL 0x0008 +#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 +#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400 +#define IXGBE_LINK_SPEED_5GB_FULL 0x0800 +#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 +#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ + IXGBE_LINK_SPEED_10GB_FULL) +#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ + IXGBE_LINK_SPEED_1GB_FULL | \ + IXGBE_LINK_SPEED_10GB_FULL) + +/* Physical layer type */ +typedef u32 ixgbe_physical_layer; +#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0 +#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001 +#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002 +#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x0004 +#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020 +#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080 +#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100 +#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200 +#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800 +#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000 +#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000 +#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x4000 + +/* Flow Control Data Sheet defined values + * Calculation and defines taken from 802.1bb Annex O + */ + +/* BitTimes (BT) conversion */ +#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024)) +#define IXGBE_B2BT(BT) (BT * 8) + +/* Calculate Delay to respond to PFC */ +#define IXGBE_PFC_D 672 + +/* Calculate Cable Delay */ +#define IXGBE_CABLE_DC 5556 /* Delay Copper */ +#define IXGBE_CABLE_DO 5000 /* Delay Optical */ + +/* Calculate Interface Delay X540 */ +#define IXGBE_PHY_DC 25600 /* Delay 10G BASET */ +#define IXGBE_MAC_DC 8192 /* Delay Copper XAUI interface */ +#define IXGBE_XAUI_DC (2 * 2048) /* Delay Copper Phy */ + +#define IXGBE_ID_X540 (IXGBE_MAC_DC + IXGBE_XAUI_DC + IXGBE_PHY_DC) + +/* Calculate Interface Delay 82598, 82599 */ +#define IXGBE_PHY_D 12800 +#define IXGBE_MAC_D 4096 +#define IXGBE_XAUI_D (2 * 1024) + +#define IXGBE_ID (IXGBE_MAC_D + IXGBE_XAUI_D + IXGBE_PHY_D) + +/* Calculate Delay incurred from higher layer */ +#define IXGBE_HD 6144 + +/* Calculate PCI Bus delay for low thresholds */ +#define IXGBE_PCI_DELAY 10000 + +/* Calculate X540 delay value in bit times */ +#define IXGBE_DV_X540(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (IXGBE_B2BT(_max_frame_link) + \ + IXGBE_PFC_D + \ + (2 * IXGBE_CABLE_DC) + \ + (2 * IXGBE_ID_X540) + \ + IXGBE_HD) / 25 + 1) + \ + 2 * IXGBE_B2BT(_max_frame_tc)) + +/* Calculate 82599, 82598 delay value in bit times */ +#define IXGBE_DV(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (IXGBE_B2BT(_max_frame_link) + \ + IXGBE_PFC_D + \ + (2 * IXGBE_CABLE_DC) + \ + (2 * IXGBE_ID) + \ + IXGBE_HD) / 25 + 1) + \ + 2 * IXGBE_B2BT(_max_frame_tc)) + +/* Calculate low threshold delay values */ +#define IXGBE_LOW_DV_X540(_max_frame_tc) \ + (2 * IXGBE_B2BT(_max_frame_tc) + \ + (36 * IXGBE_PCI_DELAY / 25) + 1) +#define IXGBE_LOW_DV(_max_frame_tc) \ + (2 * IXGBE_LOW_DV_X540(_max_frame_tc)) + +/* Software ATR hash keys */ +#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 +#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 + +/* Software ATR input stream values and masks */ +#define IXGBE_ATR_HASH_MASK 0x7fff +#define IXGBE_ATR_L4TYPE_MASK 0x3 +#define IXGBE_ATR_L4TYPE_UDP 0x1 +#define IXGBE_ATR_L4TYPE_TCP 0x2 +#define IXGBE_ATR_L4TYPE_SCTP 0x3 +#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 +#define 
IXGBE_ATR_L4TYPE_TUNNEL_MASK 0x10 +enum ixgbe_atr_flow_type { + IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, + IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, + IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, + IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, + IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, + IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, + IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, + IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, + IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10, + IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11, + IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12, + IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13, + IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14, + IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15, + IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16, + IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17, +}; + +/* Flow Director ATR input struct. */ +union ixgbe_atr_input { + /* + * Byte layout in order, all values with MSB first: + * + * vm_pool - 1 byte + * flow_type - 1 byte + * vlan_id - 2 bytes + * src_ip - 16 bytes + * inner_mac - 6 bytes + * cloud_mode - 2 bytes + * tni_vni - 4 bytes + * dst_ip - 16 bytes + * src_port - 2 bytes + * dst_port - 2 bytes + * flex_bytes - 2 bytes + * bkt_hash - 2 bytes + */ + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + __be32 dst_ip[4]; + __be32 src_ip[4]; + u8 inner_mac[6]; + __be16 tunnel_type; + __be32 tni_vni; + __be16 src_port; + __be16 dst_port; + __be16 flex_bytes; + __be16 bkt_hash; + } formatted; + __be32 dword_stream[14]; +}; + +/* Flow Director compressed ATR hash input struct */ +union ixgbe_atr_hash_dword { + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + } formatted; + __be32 ip; + struct { + __be16 src; + __be16 dst; + } port; + __be16 flex_bytes; + __be32 dword; +}; + + +#define IXGBE_MVALS_INIT(m) \ + IXGBE_CAT(EEC, m), \ + IXGBE_CAT(FLA, m), \ + IXGBE_CAT(GRC, m), \ + IXGBE_CAT(SRAMREL, m), \ + IXGBE_CAT(FACTPS, m), \ + IXGBE_CAT(SWSM, m), \ + IXGBE_CAT(SWFW_SYNC, m), \ + IXGBE_CAT(FWSM, m), \ + IXGBE_CAT(SDP0_GPIEN, m), \ + IXGBE_CAT(SDP1_GPIEN, m), \ + IXGBE_CAT(SDP2_GPIEN, m), \ + IXGBE_CAT(EICR_GPI_SDP0, m), \ + IXGBE_CAT(EICR_GPI_SDP1, m), \ + IXGBE_CAT(EICR_GPI_SDP2, m), \ + IXGBE_CAT(CIAA, m), \ + IXGBE_CAT(CIAD, m), \ + IXGBE_CAT(I2C_CLK_IN, m), \ + IXGBE_CAT(I2C_CLK_OUT, m), \ + IXGBE_CAT(I2C_DATA_IN, m), \ + IXGBE_CAT(I2C_DATA_OUT, m), \ + IXGBE_CAT(I2C_DATA_OE_N_EN, m), \ + IXGBE_CAT(I2C_BB_EN, m), \ + IXGBE_CAT(I2C_CLK_OE_N_EN, m), \ + IXGBE_CAT(I2CCTL, m) + +enum ixgbe_mvals { + IXGBE_MVALS_INIT(_IDX), + IXGBE_MVALS_IDX_LIMIT +}; + +/* + * Unavailable: The FCoE Boot Option ROM is not present in the flash. + * Disabled: Present; boot order is not set for any targets on the port. + * Enabled: Present; boot order is set for at least one target on the port. 
+ */ +enum ixgbe_fcoe_boot_status { + ixgbe_fcoe_bootstatus_disabled = 0, + ixgbe_fcoe_bootstatus_enabled = 1, + ixgbe_fcoe_bootstatus_unavailable = 0xFFFF +}; + +enum ixgbe_eeprom_type { + ixgbe_eeprom_uninitialized = 0, + ixgbe_eeprom_spi, + ixgbe_flash, + ixgbe_eeprom_none /* No NVM support */ +}; + +enum ixgbe_mac_type { + ixgbe_mac_unknown = 0, + ixgbe_mac_82598EB, + ixgbe_mac_82599EB, + ixgbe_mac_82599_vf, + ixgbe_mac_X540, + ixgbe_mac_X540_vf, + ixgbe_mac_X550, + ixgbe_mac_X550EM_x, + ixgbe_mac_X550EM_a, + ixgbe_mac_X550_vf, + ixgbe_mac_X550EM_x_vf, + ixgbe_mac_X550EM_a_vf, + ixgbe_num_macs +}; + +enum ixgbe_phy_type { + ixgbe_phy_unknown = 0, + ixgbe_phy_none, + ixgbe_phy_tn, + ixgbe_phy_aq, + ixgbe_phy_x550em_kr, + ixgbe_phy_x550em_kx4, + ixgbe_phy_x550em_ext_t, + ixgbe_phy_cu_unknown, + ixgbe_phy_qt, + ixgbe_phy_xaui, + ixgbe_phy_nl, + ixgbe_phy_sfp_passive_tyco, + ixgbe_phy_sfp_passive_unknown, + ixgbe_phy_sfp_active_unknown, + ixgbe_phy_sfp_avago, + ixgbe_phy_sfp_ftl, + ixgbe_phy_sfp_ftl_active, + ixgbe_phy_sfp_unknown, + ixgbe_phy_sfp_intel, + ixgbe_phy_qsfp_passive_unknown, + ixgbe_phy_qsfp_active_unknown, + ixgbe_phy_qsfp_intel, + ixgbe_phy_qsfp_unknown, + ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/ + ixgbe_phy_generic +}; + +/* + * SFP+ module type IDs: + * + * ID Module Type + * ============= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CU_CORE0 - 82599-specific + * 4 SFP_DA_CU_CORE1 - 82599-specific + * 5 SFP_SR/LR_CORE0 - 82599-specific + * 6 SFP_SR/LR_CORE1 - 82599-specific + */ +enum ixgbe_sfp_type { + ixgbe_sfp_type_da_cu = 0, + ixgbe_sfp_type_sr = 1, + ixgbe_sfp_type_lr = 2, + ixgbe_sfp_type_da_cu_core0 = 3, + ixgbe_sfp_type_da_cu_core1 = 4, + ixgbe_sfp_type_srlr_core0 = 5, + ixgbe_sfp_type_srlr_core1 = 6, + ixgbe_sfp_type_da_act_lmt_core0 = 7, + ixgbe_sfp_type_da_act_lmt_core1 = 8, + ixgbe_sfp_type_1g_cu_core0 = 9, + ixgbe_sfp_type_1g_cu_core1 = 10, + ixgbe_sfp_type_1g_sx_core0 = 11, + ixgbe_sfp_type_1g_sx_core1 = 12, + ixgbe_sfp_type_1g_lx_core0 = 13, + ixgbe_sfp_type_1g_lx_core1 = 14, + ixgbe_sfp_type_not_present = 0xFFFE, + ixgbe_sfp_type_unknown = 0xFFFF +}; + +enum ixgbe_media_type { + ixgbe_media_type_unknown = 0, + ixgbe_media_type_fiber, + ixgbe_media_type_fiber_qsfp, + ixgbe_media_type_fiber_lco, + ixgbe_media_type_copper, + ixgbe_media_type_backplane, + ixgbe_media_type_cx4, + ixgbe_media_type_virtual +}; + +/* Flow Control Settings */ +enum ixgbe_fc_mode { + ixgbe_fc_none = 0, + ixgbe_fc_rx_pause, + ixgbe_fc_tx_pause, + ixgbe_fc_full, + ixgbe_fc_default +}; + +/* Smart Speed Settings */ +#define IXGBE_SMARTSPEED_MAX_RETRIES 3 +enum ixgbe_smart_speed { + ixgbe_smart_speed_auto = 0, + ixgbe_smart_speed_on, + ixgbe_smart_speed_off +}; + +/* PCI bus types */ +enum ixgbe_bus_type { + ixgbe_bus_type_unknown = 0, + ixgbe_bus_type_pci, + ixgbe_bus_type_pcix, + ixgbe_bus_type_pci_express, + ixgbe_bus_type_internal, + ixgbe_bus_type_reserved +}; + +/* PCI bus speeds */ +enum ixgbe_bus_speed { + ixgbe_bus_speed_unknown = 0, + ixgbe_bus_speed_33 = 33, + ixgbe_bus_speed_66 = 66, + ixgbe_bus_speed_100 = 100, + ixgbe_bus_speed_120 = 120, + ixgbe_bus_speed_133 = 133, + ixgbe_bus_speed_2500 = 2500, + ixgbe_bus_speed_5000 = 5000, + ixgbe_bus_speed_8000 = 8000, + ixgbe_bus_speed_reserved +}; + +/* PCI bus widths */ +enum ixgbe_bus_width { + ixgbe_bus_width_unknown = 0, + ixgbe_bus_width_pcie_x1 = 1, + ixgbe_bus_width_pcie_x2 = 2, + ixgbe_bus_width_pcie_x4 = 4, + ixgbe_bus_width_pcie_x8 = 8, + ixgbe_bus_width_32 = 32, + ixgbe_bus_width_64 
= 64, + ixgbe_bus_width_reserved +}; + +struct ixgbe_addr_filter_info { + u32 num_mc_addrs; + u32 rar_used_count; + u32 mta_in_use; + u32 overflow_promisc; + bool user_set_promisc; +}; + +/* Bus parameters */ +struct ixgbe_bus_info { + enum ixgbe_bus_speed speed; + enum ixgbe_bus_width width; + enum ixgbe_bus_type type; + + u16 func; + u16 lan_id; +}; + +/* Flow control parameters */ +struct ixgbe_fc_info { + u32 high_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl High-water */ + u32 low_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl Low-water */ + u16 pause_time; /* Flow Control Pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + bool disable_fc_autoneg; /* Do not autonegotiate FC */ + bool fc_was_autonegged; /* Is current_mode the result of autonegging? */ + enum ixgbe_fc_mode current_mode; /* FC mode in effect */ + enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +/* Statistics counters collected by the MAC */ +struct ixgbe_hw_stats { + u64 crcerrs; + u64 illerrc; + u64 errbc; + u64 mspdc; + u64 mpctotal; + u64 mpc[8]; + u64 mlfc; + u64 mrfc; + u64 rlec; + u64 lxontxc; + u64 lxonrxc; + u64 lxofftxc; + u64 lxoffrxc; + u64 pxontxc[8]; + u64 pxonrxc[8]; + u64 pxofftxc[8]; + u64 pxoffrxc[8]; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc[8]; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mngprc; + u64 mngpdc; + u64 mngptc; + u64 tor; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 xec; + u64 qprc[16]; + u64 qptc[16]; + u64 qbrc[16]; + u64 qbtc[16]; + u64 qprdc[16]; + u64 pxon2offc[8]; + u64 fdirustat_add; + u64 fdirustat_remove; + u64 fdirfstat_fadd; + u64 fdirfstat_fremove; + u64 fdirmatch; + u64 fdirmiss; + u64 fccrc; + u64 fclast; + u64 fcoerpdc; + u64 fcoeprc; + u64 fcoeptc; + u64 fcoedwrc; + u64 fcoedwtc; + u64 fcoe_noddp; + u64 fcoe_noddp_ext_buff; + u64 ldpcec; + u64 pcrc8ec; + u64 b2ospc; + u64 b2ogprc; + u64 o2bgptc; + u64 o2bspc; +}; + +/* forward declaration */ +struct ixgbe_hw; + +/* iterator type for walking multicast address lists */ +typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, + u32 *vmdq); + +/* Function pointer table */ +struct ixgbe_eeprom_operations { + s32 (*init_params)(struct ixgbe_hw *); + s32 (*read)(struct ixgbe_hw *, u16, u16 *); + s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *); + s32 (*write)(struct ixgbe_hw *, u16, u16); + s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *); + s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); + s32 (*update_checksum)(struct ixgbe_hw *); + s32 (*calc_checksum)(struct ixgbe_hw *); +}; + +struct ixgbe_mac_operations { + s32 (*init_hw)(struct ixgbe_hw *); + s32 (*reset_hw)(struct ixgbe_hw *); + s32 (*start_hw)(struct ixgbe_hw *); + s32 (*clear_hw_cntrs)(struct ixgbe_hw *); + void (*enable_relaxed_ordering)(struct ixgbe_hw *); + enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); + u32 (*get_supported_physical_layer)(struct ixgbe_hw *); + s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*set_san_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*get_device_caps)(struct ixgbe_hw *, u16 *); + s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *); + s32 (*get_fcoe_boot_status)(struct ixgbe_hw *, u16 *); + s32 (*stop_adapter)(struct ixgbe_hw *); + 
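+	/* Bus information and low-level hardware access */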
s32 (*get_bus_info)(struct ixgbe_hw *); + void (*set_lan_id)(struct ixgbe_hw *); + s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*); + s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); + s32 (*setup_sfp)(struct ixgbe_hw *); + s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); + s32 (*disable_sec_rx_path)(struct ixgbe_hw *); + s32 (*enable_sec_rx_path)(struct ixgbe_hw *); + s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32); + void (*release_swfw_sync)(struct ixgbe_hw *, u32); + s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *); + s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool); + + /* Link */ + void (*disable_tx_laser)(struct ixgbe_hw *); + void (*enable_tx_laser)(struct ixgbe_hw *); + void (*flap_tx_laser)(struct ixgbe_hw *); + s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); + s32 (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); + s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); + s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, + bool *); + void (*set_rate_select_speed)(struct ixgbe_hw *, ixgbe_link_speed); + + /* Packet Buffer manipulation */ + void (*setup_rxpba)(struct ixgbe_hw *, int, u32, int); + + /* LED */ + s32 (*led_on)(struct ixgbe_hw *, u32); + s32 (*led_off)(struct ixgbe_hw *, u32); + s32 (*blink_led_start)(struct ixgbe_hw *, u32); + s32 (*blink_led_stop)(struct ixgbe_hw *, u32); + + /* RAR, Multicast, VLAN */ + s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32); + s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *); + s32 (*clear_rar)(struct ixgbe_hw *, u32); + s32 (*insert_mac_addr)(struct ixgbe_hw *, u8 *, u32); + s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); + s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32); + s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); + s32 (*init_rx_addrs)(struct ixgbe_hw *); + s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32, + ixgbe_mc_addr_itr); + s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, + ixgbe_mc_addr_itr, bool clear); + s32 (*enable_mc)(struct ixgbe_hw *); + s32 (*disable_mc)(struct ixgbe_hw *); + s32 (*clear_vfta)(struct ixgbe_hw *); + s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); + s32 (*set_vlvf)(struct ixgbe_hw *, u32, u32, bool, bool *); + s32 (*init_uta_tables)(struct ixgbe_hw *); + void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int); + void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int); + + /* Flow Control */ + s32 (*fc_enable)(struct ixgbe_hw *); + s32 (*setup_fc)(struct ixgbe_hw *); + + /* Manageability interface */ + s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); + s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); + s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); + void (*get_rtrup2tc)(struct ixgbe_hw *hw, u8 *map); + void (*disable_rx)(struct ixgbe_hw *hw); + void (*enable_rx)(struct ixgbe_hw *hw); + void (*set_source_address_pruning)(struct ixgbe_hw *, bool, + unsigned int); + void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int); + s32 (*dmac_update_tcs)(struct ixgbe_hw *hw); + s32 (*dmac_config_tcs)(struct ixgbe_hw *hw); + s32 (*dmac_config)(struct ixgbe_hw *hw); + s32 (*setup_eee)(struct ixgbe_hw *hw, bool enable_eee); + s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *); + s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32); + void (*disable_mdd)(struct ixgbe_hw *hw); + void (*enable_mdd)(struct ixgbe_hw *hw); + void (*mdd_event)(struct ixgbe_hw *hw, u32 *vf_bitmap); + void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf); +}; + +struct 
ixgbe_phy_operations { + s32 (*identify)(struct ixgbe_hw *); + s32 (*identify_sfp)(struct ixgbe_hw *); + s32 (*init)(struct ixgbe_hw *); + s32 (*reset)(struct ixgbe_hw *); + s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); + s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16); + s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *); + s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16); + s32 (*setup_link)(struct ixgbe_hw *); + s32 (*setup_internal_link)(struct ixgbe_hw *); + s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool); + s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); + s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); + s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); + s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *); + s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); + s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); + void (*i2c_bus_clear)(struct ixgbe_hw *); + s32 (*check_overtemp)(struct ixgbe_hw *); + s32 (*set_phy_power)(struct ixgbe_hw *, bool on); + s32 (*enter_lplu)(struct ixgbe_hw *); + s32 (*handle_lasi)(struct ixgbe_hw *hw); + s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, + u8 *value); + s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, + u8 value); +}; + +struct ixgbe_link_operations { + s32 (*read_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val); + s32 (*read_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, + u16 *val); + s32 (*write_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val); + s32 (*write_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, + u16 val); +}; + +struct ixgbe_link_info { + struct ixgbe_link_operations ops; + u8 addr; +}; + +struct ixgbe_eeprom_info { + struct ixgbe_eeprom_operations ops; + enum ixgbe_eeprom_type type; + u32 semaphore_delay; + u16 word_size; + u16 address_bits; + u16 word_page_size; + u16 ctrl_word_3; +}; + +#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 +struct ixgbe_mac_info { + struct ixgbe_mac_operations ops; + enum ixgbe_mac_type type; + u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + /* prefix for World Wide Node Name (WWNN) */ + u16 wwnn_prefix; + /* prefix for World Wide Port Name (WWPN) */ + u16 wwpn_prefix; +#define IXGBE_MAX_MTA 128 + u32 mta_shadow[IXGBE_MAX_MTA]; + s32 mc_filter_type; + u32 mcft_size; + u32 vft_size; + u32 num_rar_entries; + u32 rar_highwater; + u32 rx_pb_size; + u32 max_tx_queues; + u32 max_rx_queues; + u32 orig_autoc; + u8 san_mac_rar_index; + bool get_link_status; + u32 orig_autoc2; + u16 max_msix_vectors; + bool arc_subsystem_valid; + bool orig_link_settings_stored; + bool autotry_restart; + u8 flags; + struct ixgbe_thermal_sensor_data thermal_sensor_data; + bool thermal_sensor_enabled; + struct ixgbe_dmac_config dmac_config; + bool set_lben; + u32 max_link_up_time; +}; + +struct ixgbe_phy_info { + struct ixgbe_phy_operations ops; + enum ixgbe_phy_type type; + u32 addr; + u32 id; + enum ixgbe_sfp_type sfp_type; + bool sfp_setup_needed; + u32 revision; + enum ixgbe_media_type media_type; + u32 phy_semaphore_mask; + bool reset_disable; + ixgbe_autoneg_advertised autoneg_advertised; + ixgbe_link_speed speeds_supported; + enum ixgbe_smart_speed smart_speed; + bool smart_speed_active; + bool multispeed_fiber; + bool reset_if_overtemp; + bool qsfp_shared_i2c_bus; + u32 nw_mng_if_sel; +}; + +#include "ixgbe_mbx.h" + +struct ixgbe_mbx_operations { + void 
(*init_params)(struct ixgbe_hw *hw); + s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct ixgbe_hw *, u16); + s32 (*check_for_ack)(struct ixgbe_hw *, u16); + s32 (*check_for_rst)(struct ixgbe_hw *, u16); +}; + +struct ixgbe_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct ixgbe_mbx_info { + struct ixgbe_mbx_operations ops; + struct ixgbe_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u32 v2p_mailbox; + u16 size; +}; + +struct ixgbe_hw { + u8 IOMEM *hw_addr; + void *back; + struct ixgbe_mac_info mac; + struct ixgbe_addr_filter_info addr_ctrl; + struct ixgbe_fc_info fc; + struct ixgbe_phy_info phy; + struct ixgbe_link_info link; + struct ixgbe_eeprom_info eeprom; + struct ixgbe_bus_info bus; + struct ixgbe_mbx_info mbx; + const u32 *mvals; + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + bool adapter_stopped; + int api_version; + bool force_full_reset; + bool allow_unsupported_sfp; + bool wol_enabled; +}; + +#define ixgbe_call_func(hw, func, params, error) \ + (func != NULL) ? func params : error + + +/* Error Codes */ +#define IXGBE_SUCCESS 0 +#define IXGBE_ERR_EEPROM -1 +#define IXGBE_ERR_EEPROM_CHECKSUM -2 +#define IXGBE_ERR_PHY -3 +#define IXGBE_ERR_CONFIG -4 +#define IXGBE_ERR_PARAM -5 +#define IXGBE_ERR_MAC_TYPE -6 +#define IXGBE_ERR_UNKNOWN_PHY -7 +#define IXGBE_ERR_LINK_SETUP -8 +#define IXGBE_ERR_ADAPTER_STOPPED -9 +#define IXGBE_ERR_INVALID_MAC_ADDR -10 +#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11 +#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12 +#define IXGBE_ERR_INVALID_LINK_SETTINGS -13 +#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14 +#define IXGBE_ERR_RESET_FAILED -15 +#define IXGBE_ERR_SWFW_SYNC -16 +#define IXGBE_ERR_PHY_ADDR_INVALID -17 +#define IXGBE_ERR_I2C -18 +#define IXGBE_ERR_SFP_NOT_SUPPORTED -19 +#define IXGBE_ERR_SFP_NOT_PRESENT -20 +#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21 +#define IXGBE_ERR_NO_SAN_ADDR_PTR -22 +#define IXGBE_ERR_FDIR_REINIT_FAILED -23 +#define IXGBE_ERR_EEPROM_VERSION -24 +#define IXGBE_ERR_NO_SPACE -25 +#define IXGBE_ERR_OVERTEMP -26 +#define IXGBE_ERR_FC_NOT_NEGOTIATED -27 +#define IXGBE_ERR_FC_NOT_SUPPORTED -28 +#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 +#define IXGBE_ERR_PBA_SECTION -31 +#define IXGBE_ERR_INVALID_ARGUMENT -32 +#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 +#define IXGBE_ERR_OUT_OF_MEM -34 +#define IXGBE_ERR_FEATURE_NOT_SUPPORTED -36 +#define IXGBE_ERR_EEPROM_PROTECTED_REGION -37 +#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38 +#define IXGBE_ERR_FW_RESP_INVALID -39 +#define IXGBE_ERR_TOKEN_RETRY -40 + +#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF + + +#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4)) +#define IXGBE_FUSES0_300MHZ (1 << 5) +#define IXGBE_FUSES0_REV_MASK (3 << 6) + +#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) +#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) +#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C) +#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634) +#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00) +#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P) ? 0x8E00 : 0x4E00) +#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520) +#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 
0x9A00 : 0x5A00) + +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9) +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11) + +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31) + +#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE (1 << 28) +#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE (1 << 29) + +#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6) +#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15) +#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16) + +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2) + +#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16) + +#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3) +#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31) + +#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144 +#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148 + +#define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT 0 +#define IXGBE_SB_IOSF_CTRL_ADDR_MASK 0xFF +#define IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT 18 +#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK \ + (0x3 << IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT) +#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT 20 +#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK \ + (0xFF << IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT) +#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28 +#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7 +#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31 +#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT) +#define IXGBE_SB_IOSF_TARGET_KR_PHY 0 + +#define IXGBE_NW_MNG_IF_SEL 0x00011178 +#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE (1 << 24) + +#endif /* _IXGBE_TYPE_H_ */ diff --git a/drivers/net/ixgbe/base/ixgbe_vf.c b/drivers/net/ixgbe/base/ixgbe_vf.c new file mode 100644 index 00000000..40dc1c8c --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_vf.c @@ -0,0 +1,726 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + + +#include "ixgbe_api.h" +#include "ixgbe_type.h" +#include "ixgbe_vf.h" + +#ifndef IXGBE_VFWRITE_REG +#define IXGBE_VFWRITE_REG IXGBE_WRITE_REG +#endif +#ifndef IXGBE_VFREAD_REG +#define IXGBE_VFREAD_REG IXGBE_READ_REG +#endif + +/** + * ixgbe_init_ops_vf - Initialize the pointers for vf + * @hw: pointer to hardware structure + * + * This will assign function pointers, adapter-specific functions can + * override the assignment of generic function pointers by assigning + * their own adapter-specific function pointers. + * Does not touch the hardware. + **/ +s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw) +{ + /* MAC */ + hw->mac.ops.init_hw = ixgbe_init_hw_vf; + hw->mac.ops.reset_hw = ixgbe_reset_hw_vf; + hw->mac.ops.start_hw = ixgbe_start_hw_vf; + /* Cannot clear stats on VF */ + hw->mac.ops.clear_hw_cntrs = NULL; + hw->mac.ops.get_media_type = NULL; + hw->mac.ops.get_mac_addr = ixgbe_get_mac_addr_vf; + hw->mac.ops.stop_adapter = ixgbe_stop_adapter_vf; + hw->mac.ops.get_bus_info = NULL; + + /* Link */ + hw->mac.ops.setup_link = ixgbe_setup_mac_link_vf; + hw->mac.ops.check_link = ixgbe_check_mac_link_vf; + hw->mac.ops.get_link_capabilities = NULL; + + /* RAR, Multicast, VLAN */ + hw->mac.ops.set_rar = ixgbe_set_rar_vf; + hw->mac.ops.set_uc_addr = ixgbevf_set_uc_addr_vf; + hw->mac.ops.init_rx_addrs = NULL; + hw->mac.ops.update_mc_addr_list = ixgbe_update_mc_addr_list_vf; + hw->mac.ops.enable_mc = NULL; + hw->mac.ops.disable_mc = NULL; + hw->mac.ops.clear_vfta = NULL; + hw->mac.ops.set_vfta = ixgbe_set_vfta_vf; + + hw->mac.max_tx_queues = 1; + hw->mac.max_rx_queues = 1; + + hw->mbx.ops.init_params = ixgbe_init_mbx_params_vf; + + return IXGBE_SUCCESS; +} + +/* ixgbe_virt_clr_reg - Set register to default (power on) state. 
+ * @hw: pointer to hardware structure + */ +static void ixgbe_virt_clr_reg(struct ixgbe_hw *hw) +{ + int i; + u32 vfsrrctl; + u32 vfdca_rxctrl; + u32 vfdca_txctrl; + + /* VRSRRCTL default values (BSIZEPACKET = 2048, BSIZEHEADER = 256) */ + vfsrrctl = 0x100 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; + vfsrrctl |= 0x800 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + + /* DCA_RXCTRL default value */ + vfdca_rxctrl = IXGBE_DCA_RXCTRL_DESC_RRO_EN | + IXGBE_DCA_RXCTRL_DATA_WRO_EN | + IXGBE_DCA_RXCTRL_HEAD_WRO_EN; + + /* DCA_TXCTRL default value */ + vfdca_txctrl = IXGBE_DCA_TXCTRL_DESC_RRO_EN | + IXGBE_DCA_TXCTRL_DESC_WRO_EN | + IXGBE_DCA_TXCTRL_DATA_RRO_EN; + + IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); + + for (i = 0; i < 7; i++) { + IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), vfsrrctl); + IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(i), vfdca_rxctrl); + IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), vfdca_txctrl); + } + + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_start_hw_vf - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware by filling the bus info structure and media type, clears + * all on chip counters, initializes receive address registers, multicast + * table, VLAN filter table, calls routine to set up link and flow control + * settings, and leaves transmit and receive units disabled and uninitialized + **/ +s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw) +{ + /* Clear adapter stopped flag */ + hw->adapter_stopped = false; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_hw_vf - virtual function hardware initialization + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting the hardware and then starting + * the hardware + **/ +s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw) +{ + s32 status = hw->mac.ops.start_hw(hw); + + hw->mac.ops.get_mac_addr(hw, hw->mac.addr); + + return status; +} + +/** + * ixgbe_reset_hw_vf - Performs hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by reseting the transmit and receive units, masks and + * clears all interrupts. 
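+ * The VF cannot reset itself directly: the routine requests a function-level
+ * reset through VFCTRL, polls the mailbox until the RSTI/RSTD indication is
+ * no longer asserted, then posts an IXGBE_VF_RESET message and reads the
+ * permanent MAC address and multicast filter type back from the PF reply.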
+ **/ +s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 timeout = IXGBE_VF_INIT_TIMEOUT; + s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR; + u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN]; + u8 *addr = (u8 *)(&msgbuf[1]); + + DEBUGFUNC("ixgbevf_reset_hw_vf"); + + /* Call adapter stop to disable tx/rx and clear interrupts */ + hw->mac.ops.stop_adapter(hw); + + /* reset the api version */ + hw->api_version = ixgbe_mbox_api_10; + + DEBUGOUT("Issuing a function level reset to MAC\n"); + + IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST); + IXGBE_WRITE_FLUSH(hw); + + msec_delay(50); + + /* we cannot reset while the RSTI / RSTD bits are asserted */ + while (!mbx->ops.check_for_rst(hw, 0) && timeout) { + timeout--; + usec_delay(5); + } + + if (!timeout) + return IXGBE_ERR_RESET_FAILED; + + /* Reset VF registers to initial values */ + ixgbe_virt_clr_reg(hw); + + /* mailbox timeout can now become active */ + mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT; + + msgbuf[0] = IXGBE_VF_RESET; + mbx->ops.write_posted(hw, msgbuf, 1, 0); + + msec_delay(10); + + /* + * set our "perm_addr" based on info provided by PF + * also set up the mc_filter_type which is piggy backed + * on the mac address in word 3 + */ + ret_val = mbx->ops.read_posted(hw, msgbuf, + IXGBE_VF_PERMADDR_MSG_LEN, 0); + if (ret_val) + return ret_val; + + if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) && + msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK)) + return IXGBE_ERR_INVALID_MAC_ADDR; + + if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK)) + memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS); + + hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD]; + + return ret_val; +} + +/** + * ixgbe_stop_adapter_vf - Generic stop Tx/Rx units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, + * disables transmit and receive units. The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. + **/ +s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw) +{ + u32 reg_val; + u16 i; + + /* + * Set the adapter_stopped flag so other driver functions stop touching + * the hardware + */ + hw->adapter_stopped = true; + + /* Clear interrupt mask to stop from interrupts being generated */ + IXGBE_VFWRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); + + /* Clear any pending interrupts, flush previous writes */ + IXGBE_VFREAD_REG(hw, IXGBE_VTEICR); + + /* Disable the transmit unit. Each queue must be disabled. */ + for (i = 0; i < hw->mac.max_tx_queues; i++) + IXGBE_VFWRITE_REG(hw, IXGBE_VFTXDCTL(i), IXGBE_TXDCTL_SWFLSH); + + /* Disable the receive unit by stopping each queue */ + for (i = 0; i < hw->mac.max_rx_queues; i++) { + reg_val = IXGBE_VFREAD_REG(hw, IXGBE_VFRXDCTL(i)); + reg_val &= ~IXGBE_RXDCTL_ENABLE; + IXGBE_VFWRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val); + } + /* Clear packet split and pool config */ + IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); + + /* flush all queues disables */ + IXGBE_WRITE_FLUSH(hw); + msec_delay(2); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_mta_vector - Determines bit-vector in multicast table to set + * @hw: pointer to hardware structure + * @mc_addr: the multicast address + * + * Extracts the 12 bits, from a multicast address, to determine which + * bit-vector to set in the multicast table. 
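+ * For example, with mc_filter_type 0 the multicast address 01:00:5E:00:00:01
+ * hashes bytes 4 and 5 as (0x00 >> 4) | (0x01 << 4), giving bit-vector 0x010.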
The hardware uses 12 bits, from + * incoming rx multicast addresses, to determine the bit-vector to check in + * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set + * by the MO field of the MCSTCTRL. The MO field is set during initialization + * to mc_filter_type. + **/ +STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) +{ + u32 vector = 0; + + switch (hw->mac.mc_filter_type) { + case 0: /* use bits [47:36] of the address */ + vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: /* use bits [46:35] of the address */ + vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: /* use bits [45:34] of the address */ + vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: /* use bits [43:32] of the address */ + vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + default: /* Invalid mc_filter_type */ + DEBUGOUT("MC filter type param set incorrectly\n"); + ASSERT(0); + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +STATIC void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, + u32 *msg, u16 size) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 retmsg[IXGBE_VFMAILBOX_SIZE]; + s32 retval = mbx->ops.write_posted(hw, msg, size, 0); + + if (!retval) + mbx->ops.read_posted(hw, retmsg, size, 0); +} + +/** + * ixgbe_set_rar_vf - set device MAC address + * @hw: pointer to hardware structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * @vmdq: VMDq "set" or "pool" index + * @enable_addr: set flag that address is active + **/ +s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[3]; + u8 *msg_addr = (u8 *)(&msgbuf[1]); + s32 ret_val; + UNREFERENCED_3PARAMETER(vmdq, enable_addr, index); + + memset(msgbuf, 0, 12); + msgbuf[0] = IXGBE_VF_SET_MAC_ADDR; + memcpy(msg_addr, addr, 6); + ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0); + + if (!ret_val) + ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0); + + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + + /* if nacked the address was rejected, use "perm_addr" */ + if (!ret_val && + (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) + ixgbe_get_mac_addr_vf(hw, hw->mac.addr); + + return ret_val; +} + +/** + * ixgbe_update_mc_addr_list_vf - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * @next: caller supplied function to return next address in list + * + * Updates the Multicast Table Array. + **/ +s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr next, + bool clear) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; + u16 *vector_list = (u16 *)&msgbuf[1]; + u32 vector; + u32 cnt, i; + u32 vmdq; + + UNREFERENCED_1PARAMETER(clear); + + DEBUGFUNC("ixgbe_update_mc_addr_list_vf"); + + /* Each entry in the list uses 1 16 bit word. We have 30 + * 16 bit words available in our HW msg buffer (minus 1 for the + * msg type). That's 30 hash values if we pack 'em right. If + * there are more than 30 MC addresses to add then punt the + * extras for now and then add code to handle more than 30 later. 
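+ * The arithmetic: one dword of the message buffer carries the
+ * IXGBE_VF_SET_MULTICAST header and count, and each remaining dword packs
+ * two 16-bit hash values, which is where the 30-entry limit comes from.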
+ * It would be unusual for a server to request that many multi-cast + * addresses except for in large enterprise network environments. + */ + + DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count); + + cnt = (mc_addr_count > 30) ? 30 : mc_addr_count; + msgbuf[0] = IXGBE_VF_SET_MULTICAST; + msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT; + + for (i = 0; i < cnt; i++) { + vector = ixgbe_mta_vector(hw, next(hw, &mc_addr_list, &vmdq)); + DEBUGOUT1("Hash value = 0x%03X\n", vector); + vector_list[i] = (u16)vector; + } + + return mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE, 0); +} + +/** + * ixgbe_set_vfta_vf - Set/Unset vlan filter table address + * @hw: pointer to the HW structure + * @vlan: 12 bit VLAN ID + * @vind: unused by VF drivers + * @vlan_on: if true then set bit, else clear bit + **/ +s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[2]; + s32 ret_val; + UNREFERENCED_1PARAMETER(vind); + + msgbuf[0] = IXGBE_VF_SET_VLAN; + msgbuf[1] = vlan; + /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ + msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT; + + ret_val = mbx->ops.write_posted(hw, msgbuf, 2, 0); + if (!ret_val) + ret_val = mbx->ops.read_posted(hw, msgbuf, 1, 0); + + if (!ret_val && (msgbuf[0] & IXGBE_VT_MSGTYPE_ACK)) + return IXGBE_SUCCESS; + + return ret_val | (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK); +} + +/** + * ixgbe_get_num_of_tx_queues_vf - Get number of TX queues + * @hw: pointer to hardware structure + * + * Returns the number of transmit queues for the given adapter. + **/ +u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw) +{ + UNREFERENCED_1PARAMETER(hw); + return IXGBE_VF_MAX_TX_QUEUES; +} + +/** + * ixgbe_get_num_of_rx_queues_vf - Get number of RX queues + * @hw: pointer to hardware structure + * + * Returns the number of receive queues for the given adapter. + **/ +u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw) +{ + UNREFERENCED_1PARAMETER(hw); + return IXGBE_VF_MAX_RX_QUEUES; +} + +/** + * ixgbe_get_mac_addr_vf - Read device MAC address + * @hw: pointer to the HW structure + **/ +s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr) +{ + int i; + + for (i = 0; i < IXGBE_ETH_LENGTH_OF_ADDRESS; i++) + mac_addr[i] = hw->mac.perm_addr[i]; + + return IXGBE_SUCCESS; +} + +s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[3]; + u8 *msg_addr = (u8 *)(&msgbuf[1]); + s32 ret_val; + + memset(msgbuf, 0, sizeof(msgbuf)); + /* + * If index is one then this is the start of a new list and needs + * indication to the PF so it can do it's own list management. + * If it is zero then that tells the PF to just clear all of + * this VF's macvlans and there is no new list. 
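+ * If the PF NACKs the request it could not add the filter (typically no
+ * free MACVLAN slot is left), and this is surfaced to the caller as
+ * IXGBE_ERR_OUT_OF_MEM.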
+ */ + msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT; + msgbuf[0] |= IXGBE_VF_SET_MACVLAN; + if (addr) + memcpy(msg_addr, addr, 6); + ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0); + + if (!ret_val) + ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0); + + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + + if (!ret_val) + if (msgbuf[0] == (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK)) + ret_val = IXGBE_ERR_OUT_OF_MEM; + + return ret_val; +} + +/** + * ixgbe_setup_mac_link_vf - Setup MAC link settings + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: true if autonegotiation enabled + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. + **/ +s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + UNREFERENCED_3PARAMETER(hw, speed, autoneg_wait_to_complete); + return IXGBE_SUCCESS; +} + +/** + * ixgbe_check_mac_link_vf - Get link/speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true is link is up, false otherwise + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Reads the links register to determine if link is up and the current speed + **/ +s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool autoneg_wait_to_complete) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + struct ixgbe_mac_info *mac = &hw->mac; + s32 ret_val = IXGBE_SUCCESS; + u32 links_reg; + u32 in_msg = 0; + UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); + + /* If we were hit with a reset drop the link */ + if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout) + mac->get_link_status = true; + + if (!mac->get_link_status) + goto out; + + /* if link status is down no point in checking to see if pf is up */ + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + + /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs + * before the link status is correct + */ + if (mac->type == ixgbe_mac_82599_vf) { + int i; + + for (i = 0; i < 5; i++) { + usec_delay(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + } + } + + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + break; + case IXGBE_LINKS_SPEED_1G_82599: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case IXGBE_LINKS_SPEED_100_82599: + *speed = IXGBE_LINK_SPEED_100_FULL; + break; + } + + /* if the read failed it could just be a mailbox collision, best wait + * until we are called again and don't report an error + */ + if (mbx->ops.read(hw, &in_msg, 1, 0)) + goto out; + + if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) { + /* msg is not CTS and is NACK we must have lost CTS status */ + if (in_msg & IXGBE_VT_MSGTYPE_NACK) + ret_val = -1; + goto out; + } + + /* the pf is talking, if we timed out in the past we reinit */ + if (!mbx->timeout) { + ret_val = -1; + goto out; + } + + /* if we passed all the tests above then the link is up and we no + * longer need to check for link + */ + mac->get_link_status = false; + +out: + *link_up = !mac->get_link_status; + return ret_val; +} + +/** + * ixgbevf_rlpml_set_vf - Set the maximum receive packet length + * @hw: pointer to the HW structure + * @max_size: value to assign to max frame size + **/ +void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size) +{ + u32 msgbuf[2]; + + 
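	/* The maximum-frame-size request is a two-dword message: the opcode
	 * goes in msgbuf[0] and the requested size, in bytes, in msgbuf[1].
	 * The reply is read back only to acknowledge the PF; its payload is
	 * not inspected here.
	 */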
msgbuf[0] = IXGBE_VF_SET_LPE; + msgbuf[1] = max_size; + ixgbevf_write_msg_read_ack(hw, msgbuf, 2); +} + +/** + * ixgbevf_negotiate_api_version - Negotiate supported API version + * @hw: pointer to the HW structure + * @api: integer containing requested API version + **/ +int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api) +{ + int err; + u32 msg[3]; + + /* Negotiate the mailbox API version */ + msg[0] = IXGBE_VF_API_NEGOTIATE; + msg[1] = api; + msg[2] = 0; + err = hw->mbx.ops.write_posted(hw, msg, 3, 0); + + if (!err) + err = hw->mbx.ops.read_posted(hw, msg, 3, 0); + + if (!err) { + msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; + + /* Store value and return 0 on success */ + if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) { + hw->api_version = api; + return 0; + } + + err = IXGBE_ERR_INVALID_ARGUMENT; + } + + return err; +} + +int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, + unsigned int *default_tc) +{ + int err; + u32 msg[5]; + + /* do nothing if API doesn't support ixgbevf_get_queues */ + switch (hw->api_version) { + case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: + break; + default: + return 0; + } + + /* Fetch queue configuration from the PF */ + msg[0] = IXGBE_VF_GET_QUEUES; + msg[1] = msg[2] = msg[3] = msg[4] = 0; + err = hw->mbx.ops.write_posted(hw, msg, 5, 0); + + if (!err) + err = hw->mbx.ops.read_posted(hw, msg, 5, 0); + + if (!err) { + msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; + + /* + * if we we didn't get an ACK there must have been + * some sort of mailbox error so we should treat it + * as such + */ + if (msg[0] != (IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK)) + return IXGBE_ERR_MBX; + + /* record and validate values from message */ + hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES]; + if (hw->mac.max_tx_queues == 0 || + hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES) + hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES; + + hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES]; + if (hw->mac.max_rx_queues == 0 || + hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES) + hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES; + + *num_tcs = msg[IXGBE_VF_TRANS_VLAN]; + /* in case of unknown state assume we cannot tag frames */ + if (*num_tcs > hw->mac.max_rx_queues) + *num_tcs = 1; + + *default_tc = msg[IXGBE_VF_DEF_QUEUE]; + /* default to queue 0 on out-of-bounds queue number */ + if (*default_tc >= hw->mac.max_tx_queues) + *default_tc = 0; + } + + return err; +} diff --git a/drivers/net/ixgbe/base/ixgbe_vf.h b/drivers/net/ixgbe/base/ixgbe_vf.h new file mode 100644 index 00000000..411152a4 --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_vf.h @@ -0,0 +1,140 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
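The two negotiation helpers above share one reply convention: the PF echoes the request opcode with an ACK or NACK flag OR-ed into the upper bits, and the CTS bit is masked off before the comparison. A small standalone sketch of that check follows; the flag and opcode values are stand-ins, not the real IXGBE_VT_MSGTYPE_* definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in flag values for illustration only. */
#define MSGTYPE_ACK  0x80000000u
#define MSGTYPE_NACK 0x40000000u
#define MSGTYPE_CTS  0x20000000u

static bool reply_acked(uint32_t reply, uint32_t opcode)
{
	reply &= ~MSGTYPE_CTS;          /* CTS is informational, drop it */
	return reply == (opcode | MSGTYPE_ACK);
}

int main(void)
{
	uint32_t opcode = 0x08;         /* pretend API-negotiate opcode */

	printf("%d\n", reply_acked(opcode | MSGTYPE_ACK | MSGTYPE_CTS, opcode)); /* 1 */
	printf("%d\n", reply_acked(opcode | MSGTYPE_NACK, opcode));              /* 0 */
	return 0;
}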
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef __IXGBE_VF_H__ +#define __IXGBE_VF_H__ + +#define IXGBE_VF_IRQ_CLEAR_MASK 7 +#define IXGBE_VF_MAX_TX_QUEUES 8 +#define IXGBE_VF_MAX_RX_QUEUES 8 + +/* DCB define */ +#define IXGBE_VF_MAX_TRAFFIC_CLASS 8 + +#define IXGBE_VFCTRL 0x00000 +#define IXGBE_VFSTATUS 0x00008 +#define IXGBE_VFLINKS 0x00010 +#define IXGBE_VFFRTIMER 0x00048 +#define IXGBE_VFRXMEMWRAP 0x03190 +#define IXGBE_VTEICR 0x00100 +#define IXGBE_VTEICS 0x00104 +#define IXGBE_VTEIMS 0x00108 +#define IXGBE_VTEIMC 0x0010C +#define IXGBE_VTEIAC 0x00110 +#define IXGBE_VTEIAM 0x00114 +#define IXGBE_VTEITR(x) (0x00820 + (4 * (x))) +#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x))) +#define IXGBE_VTIVAR_MISC 0x00140 +#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x))) +/* define IXGBE_VFPBACL still says TBD in EAS */ +#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x))) +#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x))) +#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x))) +#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x))) +#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x))) +#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x))) +#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x))) +#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x))) +#define IXGBE_VFPSRTYPE 0x00300 +#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x))) +#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x))) +#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x))) +#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x))) +#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x))) +#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x))) +#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x))) +#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x))) +#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x))) +#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x))) +#define IXGBE_VFGPRC 0x0101C +#define IXGBE_VFGPTC 0x0201C +#define IXGBE_VFGORC_LSB 0x01020 +#define IXGBE_VFGORC_MSB 0x01024 +#define IXGBE_VFGOTC_LSB 0x02020 +#define IXGBE_VFGOTC_MSB 0x02024 +#define IXGBE_VFMPRC 0x01034 +#define IXGBE_VFMRQC 0x3000 +#define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4)) +#define IXGBE_VFRETA(x) (0x3200 + ((x) * 4)) + + +struct ixgbevf_hw_stats { + u64 base_vfgprc; + u64 base_vfgptc; + u64 base_vfgorc; + u64 base_vfgotc; + u64 base_vfmprc; + + u64 last_vfgprc; + u64 last_vfgptc; + u64 last_vfgorc; + u64 last_vfgotc; + u64 last_vfmprc; + + u64 vfgprc; + u64 vfgptc; + u64 vfgorc; + u64 vfgotc; + u64 vfmprc; + + u64 saved_reset_vfgprc; + u64 saved_reset_vfgptc; + u64 saved_reset_vfgorc; + u64 saved_reset_vfgotc; + u64 saved_reset_vfmprc; +}; + +s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw); +s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw); +s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw); 
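The per-queue register macros above all reduce to a base offset plus a fixed stride (0x40 bytes per queue for the ring registers, 4 bytes per word for the RSS key). A quick standalone check of that arithmetic, copying the macro bodies shown above:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the IXGBE_VFRDBAL()/IXGBE_VFTDBAL()/IXGBE_VFRSSRK()
 * macros above, repeated here so the sketch is self-contained. */
#define VFRDBAL(x) (0x01000u + (0x40u * (x)))
#define VFTDBAL(x) (0x02000u + (0x40u * (x)))
#define VFRSSRK(x) (0x3100u + ((x) * 4u))

int main(void)
{
	for (unsigned int q = 0; q < 8; q++)  /* IXGBE_VF_MAX_RX_QUEUES == 8 */
		printf("queue %u: RDBAL=0x%05x TDBAL=0x%05x\n",
		       q, VFRDBAL(q), VFTDBAL(q));
	printf("RSS key word 9: 0x%04x\n", VFRSSRK(9u));
	return 0;
}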
+u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw); +u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw); +s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr); +s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool autoneg_wait_to_complete); +s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr); +s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr); +s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr, + bool clear); +s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on); +void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); +int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api); +int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, + unsigned int *default_tc); + +#endif /* __IXGBE_VF_H__ */ diff --git a/drivers/net/ixgbe/base/ixgbe_x540.c b/drivers/net/ixgbe/base/ixgbe_x540.c new file mode 100644 index 00000000..9ade1b5e --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_x540.c @@ -0,0 +1,1012 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#include "ixgbe_x540.h" +#include "ixgbe_type.h" +#include "ixgbe_api.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +#define IXGBE_X540_MAX_TX_QUEUES 128 +#define IXGBE_X540_MAX_RX_QUEUES 128 +#define IXGBE_X540_RAR_ENTRIES 128 +#define IXGBE_X540_MC_TBL_SIZE 128 +#define IXGBE_X540_VFT_TBL_SIZE 128 +#define IXGBE_X540_RX_PB_SIZE 384 + +STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); +STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); +STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); + +/** + * ixgbe_init_ops_X540 - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for X540. + * Does not touch the hardware. + **/ +s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_ops_X540"); + + ret_val = ixgbe_init_phy_ops_generic(hw); + ret_val = ixgbe_init_ops_generic(hw); + + + /* EEPROM */ + eeprom->ops.init_params = ixgbe_init_eeprom_params_X540; + eeprom->ops.read = ixgbe_read_eerd_X540; + eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_X540; + eeprom->ops.write = ixgbe_write_eewr_X540; + eeprom->ops.write_buffer = ixgbe_write_eewr_buffer_X540; + eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X540; + eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X540; + eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X540; + + /* PHY */ + phy->ops.init = ixgbe_init_phy_ops_generic; + phy->ops.reset = NULL; + phy->ops.set_phy_power = ixgbe_set_copper_phy_power; + + /* MAC */ + mac->ops.reset_hw = ixgbe_reset_hw_X540; + mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2; + mac->ops.get_media_type = ixgbe_get_media_type_X540; + mac->ops.get_supported_physical_layer = + ixgbe_get_supported_physical_layer_X540; + mac->ops.read_analog_reg8 = NULL; + mac->ops.write_analog_reg8 = NULL; + mac->ops.start_hw = ixgbe_start_hw_X540; + mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic; + mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic; + mac->ops.get_device_caps = ixgbe_get_device_caps_generic; + mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic; + mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic; + mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540; + mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X540; + mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic; + mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic; + + /* RAR, Multicast, VLAN */ + mac->ops.set_vmdq = ixgbe_set_vmdq_generic; + mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic; + mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic; + mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic; + mac->rar_highwater = 1; + mac->ops.set_vfta = ixgbe_set_vfta_generic; + mac->ops.set_vlvf = ixgbe_set_vlvf_generic; + mac->ops.clear_vfta = ixgbe_clear_vfta_generic; + mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic; + mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing; + mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing; + + /* Link */ + mac->ops.get_link_capabilities = + ixgbe_get_copper_link_capabilities_generic; + mac->ops.setup_link = ixgbe_setup_mac_link_X540; + mac->ops.setup_rxpba = ixgbe_set_rxpba_generic; + 
mac->ops.check_link = ixgbe_check_mac_link_generic; + + + mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; + mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; + mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE; + mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + + /* + * FWSM register + * ARC supported; valid only if manageability features are + * enabled. + */ + mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)) + & IXGBE_FWSM_MODE_MASK); + + hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf; + + /* LEDs */ + mac->ops.blink_led_start = ixgbe_blink_led_start_X540; + mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540; + + /* Manageability interface */ + mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic; + + mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; + + return ret_val; +} + +/** + * ixgbe_get_link_capabilities_X540 - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + * + * Determines the link capabilities by reading the AUTOC register. + **/ +s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + ixgbe_get_copper_link_capabilities_generic(hw, speed, autoneg); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_media_type_X540 - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) +{ + UNREFERENCED_1PARAMETER(hw); + return ixgbe_media_type_copper; +} + +/** + * ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + **/ +s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + DEBUGFUNC("ixgbe_setup_mac_link_X540"); + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); +} + +/** + * ixgbe_reset_hw_X540 - Perform hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, and perform a reset. + **/ +s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) +{ + s32 status; + u32 ctrl, i; + + DEBUGFUNC("ixgbe_reset_hw_X540"); + + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status != IXGBE_SUCCESS) + goto reset_hw_out; + + /* flush pending Tx transactions */ + ixgbe_clear_tx_pending(hw); + +mac_reset_top: + ctrl = IXGBE_CTRL_RST; + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + + /* Poll for reset bit to self-clear indicating reset is complete */ + for (i = 0; i < 10; i++) { + usec_delay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST_MASK)) + break; + } + + if (ctrl & IXGBE_CTRL_RST_MASK) { + status = IXGBE_ERR_RESET_FAILED; + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "Reset polling failed to complete.\n"); + } + msec_delay(100); + + /* + * Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to allow time + * for any pending HW events to complete. 
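The reset path above is the usual pattern of setting a self-clearing bit and then polling it with a bounded retry count. A generic standalone sketch of that loop follows; the register read is faked and the mask value is a stand-in for IXGBE_CTRL_RST_MASK.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CTRL_RST_MASK 0x04000008u  /* stand-in for IXGBE_CTRL_RST_MASK */

/* Fake register that "self-clears" after a few reads, for illustration. */
static uint32_t fake_read_ctrl(void)
{
	static int reads;
	return (++reads < 3) ? CTRL_RST_MASK : 0;
}

static bool reset_completed(void)
{
	uint32_t ctrl = CTRL_RST_MASK;

	for (int i = 0; i < 10 && (ctrl & CTRL_RST_MASK); i++) {
		/* usec_delay(1) would go here in the real driver */
		ctrl = fake_read_ctrl();
	}
	return !(ctrl & CTRL_RST_MASK);
}

int main(void)
{
	printf("reset %s\n", reset_completed() ? "completed" : "timed out");
	return 0;
}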
+ */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + /* Set the Rx packet buffer size. */ + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT); + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = 128; + hw->mac.ops.init_rx_addrs(hw); + + /* Store the permanent SAN mac address */ + hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); + + /* Add the SAN MAC address to the RAR only if it's a valid address */ + if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { + hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, + hw->mac.san_addr, 0, IXGBE_RAH_AV); + + /* Save the SAN MAC RAR index */ + hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; + + /* Reserve the last RAR for the SAN MAC address */ + hw->mac.num_rar_entries--; + } + + /* Store the alternative WWNN/WWPN prefix */ + hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, + &hw->mac.wwpn_prefix); + +reset_hw_out: + return status; +} + +/** + * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware using the generic start_hw function + * and the generation start_hw function. + * Then performs revision-specific operations, if any. + **/ +s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_start_hw_X540"); + + ret_val = ixgbe_start_hw_generic(hw); + if (ret_val != IXGBE_SUCCESS) + goto out; + + ret_val = ixgbe_start_hw_gen2(hw); + +out: + return ret_val; +} + +/** + * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. + **/ +u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw) +{ + u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u16 ext_ability = 0; + + DEBUGFUNC("ixgbe_get_supported_physical_layer_X540"); + + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); + if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + + return physical_layer; +} + +/** + * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. 
+ **/ +s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + + DEBUGFUNC("ixgbe_init_eeprom_params_X540"); + + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->semaphore_delay = 10; + eeprom->type = ixgbe_flash; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + + DEBUGOUT2("Eeprom params: type = %d, size = %d\n", + eeprom->type, eeprom->word_size); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_read_eerd_X540- Read EEPROM word using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_read_eerd_X540"); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + status = ixgbe_read_eerd_generic(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ixgbe_read_eerd_buffer_X540- Read EEPROM word(s) using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the EERD register. + **/ +s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_read_eerd_buffer_X540"); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + status = ixgbe_read_eerd_buffer_generic(hw, offset, + words, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the EEWR register. + **/ +s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_write_eewr_X540"); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + status = ixgbe_write_eewr_generic(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) write to the EEPROM + * + * Write a 16 bit word(s) to the EEPROM using the EEWR register. 
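The EEPROM sizing in ixgbe_init_eeprom_params_X540() above is a pure bit-field decode: the size code read from EEC selects a power-of-two word count. A standalone illustration follows; the mask, shift, and base values are stand-ins for the real IXGBE_EEC_* and IXGBE_EEPROM_* definitions.

#include <stdint.h>
#include <stdio.h>

/* Stand-in field definitions, for illustration only. */
#define EEC_SIZE_MASK   0x00007800u
#define EEC_SIZE_SHIFT  11
#define WORD_SIZE_SHIFT 6           /* size code 0 => 64 words */

static uint16_t eeprom_word_size(uint32_t eec)
{
	uint16_t size_code = (uint16_t)((eec & EEC_SIZE_MASK) >> EEC_SIZE_SHIFT);

	return (uint16_t)(1u << (size_code + WORD_SIZE_SHIFT));
}

int main(void)
{
	/* A size code of 6 would report a 4096-word (8 KB) EEPROM. */
	uint32_t eec = 6u << EEC_SIZE_SHIFT;

	printf("word_size = %u\n", eeprom_word_size(eec));
	return 0;
}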
+ **/ +s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_write_eewr_buffer_X540"); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + status = ixgbe_write_eewr_buffer_generic(hw, offset, + words, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum + * + * This function does not use synchronization for EERD and EEWR. It can + * be used internally by function which utilize ixgbe_acquire_swfw_sync_X540. + * + * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) +{ + u16 i, j; + u16 checksum = 0; + u16 length = 0; + u16 pointer = 0; + u16 word = 0; + u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM; + u16 ptr_start = IXGBE_PCIE_ANALOG_PTR; + + /* Do not use hw->eeprom.ops.read because we do not want to take + * the synchronization semaphores here. Instead use + * ixgbe_read_eerd_generic + */ + + DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540"); + + /* Include 0x0-0x3F in the checksum */ + for (i = 0; i <= checksum_last_word; i++) { + if (ixgbe_read_eerd_generic(hw, i, &word)) { + DEBUGOUT("EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + if (i != IXGBE_EEPROM_CHECKSUM) + checksum += word; + } + + /* Include all data from pointers 0x3, 0x6-0xE. This excludes the + * FW, PHY module, and PCIe Expansion/Option ROM pointers. + */ + for (i = ptr_start; i < IXGBE_FW_PTR; i++) { + if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) + continue; + + if (ixgbe_read_eerd_generic(hw, i, &pointer)) { + DEBUGOUT("EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + + /* Skip pointer section if the pointer is invalid. */ + if (pointer == 0xFFFF || pointer == 0 || + pointer >= hw->eeprom.word_size) + continue; + + if (ixgbe_read_eerd_generic(hw, pointer, &length)) { + DEBUGOUT("EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + + /* Skip pointer section if length is invalid. */ + if (length == 0xFFFF || length == 0 || + (pointer + length) >= hw->eeprom.word_size) + continue; + + for (j = pointer + 1; j <= pointer + length; j++) { + if (ixgbe_read_eerd_generic(hw, j, &word)) { + DEBUGOUT("EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + checksum += word; + } + } + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + + return (s32)checksum; +} + +/** + * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, + u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + DEBUGFUNC("ixgbe_validate_eeprom_checksum_X540"); + + /* Read the first word from the EEPROM. 
If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) + return IXGBE_ERR_SWFW_SYNC; + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + goto out; + + checksum = (u16)(status & 0xffff); + + /* Do not use hw->eeprom.ops.read because we do not want to take + * the synchronization semaphores twice here. + */ + status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM, + &read_checksum); + if (status) + goto out; + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, + "Invalid EEPROM checksum"); + status = IXGBE_ERR_EEPROM_CHECKSUM; + } + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + +out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + + return status; +} + +/** + * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + * + * After writing EEPROM to shadow RAM using EEWR register, software calculates + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. + **/ +s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) +{ + s32 status; + u16 checksum; + + DEBUGFUNC("ixgbe_update_eeprom_checksum_X540"); + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) + return IXGBE_ERR_SWFW_SYNC; + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + goto out; + + checksum = (u16)(status & 0xffff); + + /* Do not use hw->eeprom.ops.write because we do not want to + * take the synchronization semaphores twice here. + */ + status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum); + if (status) + goto out; + + status = ixgbe_update_flash_X540(hw); + +out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + + return status; +} + +/** + * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device + * @hw: pointer to hardware structure + * + * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy + * EEPROM from shadow RAM to the flash device. 
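The checksum helpers above boil down to summing 16-bit words and storing the difference from a fixed target, so that a later sum of everything lands back on that target. Below is a simplified standalone sketch covering only the fixed 0x00-0x3E region (the driver additionally walks the pointer sections); the target constant is an assumed value for IXGBE_EEPROM_SUM.

#include <stdint.h>
#include <stdio.h>

#define CHECKSUM_WORD 0x3F
#define EEPROM_SUM    0xBABAu   /* assumed value of IXGBE_EEPROM_SUM */

/* Simplified: sums only words 0x00-0x3E; the real calc_checksum also
 * follows the section pointers before taking the same final step. */
static uint16_t calc_checksum(const uint16_t *image)
{
	uint16_t sum = 0;

	for (int i = 0; i < CHECKSUM_WORD; i++)
		sum = (uint16_t)(sum + image[i]);

	return (uint16_t)(EEPROM_SUM - sum);
}

int main(void)
{
	uint16_t image[64] = { 0x1234, 0x5678, 0x9abc };

	image[CHECKSUM_WORD] = calc_checksum(image);
	printf("stored checksum 0x%04x, %s\n", image[CHECKSUM_WORD],
	       calc_checksum(image) == image[CHECKSUM_WORD] ? "valid" : "invalid");
	return 0;
}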
+ **/ +s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) +{ + u32 flup; + s32 status; + + DEBUGFUNC("ixgbe_update_flash_X540"); + + status = ixgbe_poll_flash_update_done_X540(hw); + if (status == IXGBE_ERR_EEPROM) { + DEBUGOUT("Flash update time out\n"); + goto out; + } + + flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)) | IXGBE_EEC_FLUP; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup); + + status = ixgbe_poll_flash_update_done_X540(hw); + if (status == IXGBE_SUCCESS) + DEBUGOUT("Flash update complete\n"); + else + DEBUGOUT("Flash update time out\n"); + + if (hw->mac.type == ixgbe_mac_X540 && hw->revision_id == 0) { + flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + if (flup & IXGBE_EEC_SEC1VAL) { + flup |= IXGBE_EEC_FLUP; + IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup); + } + + status = ixgbe_poll_flash_update_done_X540(hw); + if (status == IXGBE_SUCCESS) + DEBUGOUT("Flash update complete\n"); + else + DEBUGOUT("Flash update time out\n"); + } +out: + return status; +} + +/** + * ixgbe_poll_flash_update_done_X540 - Poll flash update status + * @hw: pointer to hardware structure + * + * Polls the FLUDONE (bit 26) of the EEC Register to determine when the + * flash update is done. + **/ +STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) +{ + u32 i; + u32 reg; + s32 status = IXGBE_ERR_EEPROM; + + DEBUGFUNC("ixgbe_poll_flash_update_done_X540"); + + for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) { + reg = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + if (reg & IXGBE_EEC_FLUDONE) { + status = IXGBE_SUCCESS; + break; + } + msec_delay(5); + } + + if (i == IXGBE_FLUDONE_ATTEMPTS) + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "Flash update status polling timed out"); + + return status; +} + +/** + * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore thought the SW_FW_SYNC register for + * the specified function (CSR, PHY0, PHY1, NVM, Flash) + **/ +s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) +{ + u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK; + u32 fwmask = swmask << 5; + u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK; + u32 timeout = 200; + u32 hwmask = 0; + u32 swfw_sync; + u32 i; + + DEBUGFUNC("ixgbe_acquire_swfw_sync_X540"); + + if (swmask & IXGBE_GSSR_EEP_SM) + hwmask |= IXGBE_GSSR_FLASH_SM; + + /* SW only mask doesn't have FW bit pair */ + if (mask & IXGBE_GSSR_SW_MNG_SM) + swmask |= IXGBE_GSSR_SW_MNG_SM; + + swmask |= swi2c_mask; + fwmask |= swi2c_mask << 2; + for (i = 0; i < timeout; i++) { + /* SW NVM semaphore bit is used for access to all + * SW_FW_SYNC bits (not just NVM) + */ + if (ixgbe_get_swfw_sync_semaphore(hw)) + return IXGBE_ERR_SWFW_SYNC; + + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); + if (!(swfw_sync & (fwmask | swmask | hwmask))) { + swfw_sync |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), + swfw_sync); + ixgbe_release_swfw_sync_semaphore(hw); + msec_delay(5); + return IXGBE_SUCCESS; + } + /* Firmware currently using resource (fwmask), hardware + * currently using resource (hwmask), or other software + * thread currently using resource (swmask) + */ + ixgbe_release_swfw_sync_semaphore(hw); + msec_delay(5); + } + + /* Failed to get SW only semaphore */ + if (swmask == IXGBE_GSSR_SW_MNG_SM) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "Failed to get SW only semaphore"); + return IXGBE_ERR_SWFW_SYNC; + } + + /* If the resource is not released by the FW/HW the SW can assume that + * the FW/HW 
malfunctions. In that case the SW should set the SW bit(s) + * of the requested resource(s) while ignoring the corresponding FW/HW + * bits in the SW_FW_SYNC register. + */ + if (ixgbe_get_swfw_sync_semaphore(hw)) + return IXGBE_ERR_SWFW_SYNC; + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); + if (swfw_sync & (fwmask | hwmask)) { + swfw_sync |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync); + ixgbe_release_swfw_sync_semaphore(hw); + msec_delay(5); + return IXGBE_SUCCESS; + } + /* If the resource is not released by other SW the SW can assume that + * the other SW malfunctions. In that case the SW should clear all SW + * flags that it does not own and then repeat the whole process once + * again. + */ + if (swfw_sync & swmask) { + u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM | + IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM; + + if (swi2c_mask) + rmask |= IXGBE_GSSR_I2C_MASK; + ixgbe_release_swfw_sync_X540(hw, rmask); + ixgbe_release_swfw_sync_semaphore(hw); + return IXGBE_ERR_SWFW_SYNC; + } + ixgbe_release_swfw_sync_semaphore(hw); + + return IXGBE_ERR_SWFW_SYNC; +} + +/** + * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore through the SW_FW_SYNC register + * for the specified function (CSR, PHY0, PHY1, EVM, Flash) + **/ +void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) +{ + u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM); + u32 swfw_sync; + + DEBUGFUNC("ixgbe_release_swfw_sync_X540"); + + if (mask & IXGBE_GSSR_I2C_MASK) + swmask |= mask & IXGBE_GSSR_I2C_MASK; + ixgbe_get_swfw_sync_semaphore(hw); + + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); + swfw_sync &= ~swmask; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync); + + ixgbe_release_swfw_sync_semaphore(hw); + msec_delay(5); +} + +/** + * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore + * @hw: pointer to hardware structure + * + * Sets the hardware semaphores so SW/FW can gain control of shared resources + **/ +STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_EEPROM; + u32 timeout = 2000; + u32 i; + u32 swsm; + + DEBUGFUNC("ixgbe_get_swfw_sync_semaphore"); + + /* Get SMBI software semaphore between device drivers first */ + for (i = 0; i < timeout; i++) { + /* + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + if (!(swsm & IXGBE_SWSM_SMBI)) { + status = IXGBE_SUCCESS; + break; + } + usec_delay(50); + } + + /* Now get the semaphore between SW/FW through the REGSMP bit */ + if (status == IXGBE_SUCCESS) { + for (i = 0; i < timeout; i++) { + swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); + if (!(swsm & IXGBE_SWFW_REGSMP)) + break; + + usec_delay(50); + } + + /* + * Release semaphores and return error if SW NVM semaphore + * was not granted because we don't have access to the EEPROM + */ + if (i >= timeout) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "REGSMP Software NVM semaphore not granted.\n"); + ixgbe_release_swfw_sync_semaphore(hw); + status = IXGBE_ERR_EEPROM; + } + } else { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "Software semaphore SMBI between device drivers " + "not granted.\n"); + } + + return status; +} + +/** + * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function clears 
hardware semaphore bits. + **/ +STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) +{ + u32 swsm; + + DEBUGFUNC("ixgbe_release_swfw_sync_semaphore"); + + /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */ + + swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); + swsm &= ~IXGBE_SWFW_REGSMP; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swsm); + + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + swsm &= ~IXGBE_SWSM_SMBI; + IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm); + + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_blink_led_start_X540 - Blink LED based on index. + * @hw: pointer to hardware structure + * @index: led number to blink + * + * Devices that implement the version 2 interface: + * X540 + **/ +s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) +{ + u32 macc_reg; + u32 ledctl_reg; + ixgbe_link_speed speed; + bool link_up; + + DEBUGFUNC("ixgbe_blink_led_start_X540"); + + /* + * Link should be up in order for the blink bit in the LED control + * register to work. Force link and speed in the MAC if link is down. + * This will be reversed when we stop the blinking. + */ + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (link_up == false) { + macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); + macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS; + IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); + } + /* Set the LED to LINK_UP + BLINK. */ + ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); + ledctl_reg |= IXGBE_LED_BLINK(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); + IXGBE_WRITE_FLUSH(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index. + * @hw: pointer to hardware structure + * @index: led number to stop blinking + * + * Devices that implement the version 2 interface: + * X540 + **/ +s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) +{ + u32 macc_reg; + u32 ledctl_reg; + + DEBUGFUNC("ixgbe_blink_led_stop_X540"); + + /* Restore the LED to its default value. */ + ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); + ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); + ledctl_reg &= ~IXGBE_LED_BLINK(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); + + /* Unforce link and speed in the MAC. */ + macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); + macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS); + IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); + IXGBE_WRITE_FLUSH(hw); + + return IXGBE_SUCCESS; +} diff --git a/drivers/net/ixgbe/base/ixgbe_x540.h b/drivers/net/ixgbe/base/ixgbe_x540.h new file mode 100644 index 00000000..42c08a82 --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_x540.h @@ -0,0 +1,66 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. 
Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _IXGBE_X540_H_ +#define _IXGBE_X540_H_ + +#include "ixgbe_type.h" + +s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *autoneg); +enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool link_up_wait_to_complete); +s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw); +u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw); + +s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw); +s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data); +s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words, + u16 *data); +s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words, + u16 *data); +s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw); +s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val); +s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw); +s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); + +s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); + +s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index); +#endif /* _IXGBE_X540_H_ */ + diff --git a/drivers/net/ixgbe/base/ixgbe_x550.c b/drivers/net/ixgbe/base/ixgbe_x550.c new file mode 100644 index 00000000..0bbaa55b --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_x550.c @@ -0,0 +1,3466 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#include "ixgbe_x550.h" +#include "ixgbe_x540.h" +#include "ixgbe_type.h" +#include "ixgbe_api.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed); +static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask); +static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask); + +/** + * ixgbe_init_ops_X550 - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for X550. + * Does not touch the hardware. + **/ +s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_ops_X550"); + + ret_val = ixgbe_init_ops_X540(hw); + mac->ops.dmac_config = ixgbe_dmac_config_X550; + mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550; + mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550; + mac->ops.setup_eee = ixgbe_setup_eee_X550; + mac->ops.set_source_address_pruning = + ixgbe_set_source_address_pruning_X550; + mac->ops.set_ethertype_anti_spoofing = + ixgbe_set_ethertype_anti_spoofing_X550; + + mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; + eeprom->ops.init_params = ixgbe_init_eeprom_params_X550; + eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550; + eeprom->ops.read = ixgbe_read_ee_hostif_X550; + eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550; + eeprom->ops.write = ixgbe_write_ee_hostif_X550; + eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550; + eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550; + eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550; + + mac->ops.disable_mdd = ixgbe_disable_mdd_X550; + mac->ops.enable_mdd = ixgbe_enable_mdd_X550; + mac->ops.mdd_event = ixgbe_mdd_event_X550; + mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550; + mac->ops.disable_rx = ixgbe_disable_rx_x550; + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_10G_T: + hw->mac.ops.led_on = ixgbe_led_on_t_X550em; + hw->mac.ops.led_off = ixgbe_led_off_t_X550em; + break; + default: + break; + } + return ret_val; +} + +/** + * ixgbe_read_cs4227 - Read CS4227 register + * @hw: pointer to hardware structure + * @reg: register number to write + * @value: pointer to receive value read + * + * Returns status code + **/ +STATIC s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) +{ + return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value); +} + +/** + * ixgbe_write_cs4227 - Write CS4227 register + * @hw: pointer to hardware structure + * @reg: register 
number to write + * @value: value to write to register + * + * Returns status code + **/ +STATIC s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) +{ + return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value); +} + +/** + * ixgbe_read_pe - Read register from port expander + * @hw: pointer to hardware structure + * @reg: register number to read + * @value: pointer to receive read value + * + * Returns status code + **/ +STATIC s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value) +{ + s32 status; + + status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value); + if (status != IXGBE_SUCCESS) + ERROR_REPORT2(IXGBE_ERROR_CAUTION, + "port expander access failed with %d\n", status); + return status; +} + +/** + * ixgbe_write_pe - Write register to port expander + * @hw: pointer to hardware structure + * @reg: register number to write + * @value: value to write + * + * Returns status code + **/ +STATIC s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value) +{ + s32 status; + + status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value); + if (status != IXGBE_SUCCESS) + ERROR_REPORT2(IXGBE_ERROR_CAUTION, + "port expander access failed with %d\n", status); + return status; +} + +/** + * ixgbe_reset_cs4227 - Reset CS4227 using port expander + * @hw: pointer to hardware structure + * + * This function assumes that the caller has acquired the proper semaphore. + * Returns error code + **/ +STATIC s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw) +{ + s32 status; + u32 retry; + u16 value; + u8 reg; + + /* Trigger hard reset. */ + status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®); + if (status != IXGBE_SUCCESS) + return status; + reg |= IXGBE_PE_BIT1; + status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); + if (status != IXGBE_SUCCESS) + return status; + + status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, ®); + if (status != IXGBE_SUCCESS) + return status; + reg &= ~IXGBE_PE_BIT1; + status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg); + if (status != IXGBE_SUCCESS) + return status; + + status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®); + if (status != IXGBE_SUCCESS) + return status; + reg &= ~IXGBE_PE_BIT1; + status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); + if (status != IXGBE_SUCCESS) + return status; + + usec_delay(IXGBE_CS4227_RESET_HOLD); + + status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®); + if (status != IXGBE_SUCCESS) + return status; + reg |= IXGBE_PE_BIT1; + status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); + if (status != IXGBE_SUCCESS) + return status; + + /* Wait for the reset to complete. 
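	 * The device reloads its EEPROM after the reset pulse applied through
	 * the port expander above; the loop below keeps re-reading the EFUSE
	 * status until it reports a successful load or the retry budget runs
	 * out.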
*/ + msec_delay(IXGBE_CS4227_RESET_DELAY); + for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { + status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS, + &value); + if (status == IXGBE_SUCCESS && + value == IXGBE_CS4227_EEPROM_LOAD_OK) + break; + msec_delay(IXGBE_CS4227_CHECK_DELAY); + } + if (retry == IXGBE_CS4227_RETRIES) { + ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, + "CS4227 reset did not complete."); + return IXGBE_ERR_PHY; + } + + status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value); + if (status != IXGBE_SUCCESS || + !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) { + ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, + "CS4227 EEPROM did not load successfully."); + return IXGBE_ERR_PHY; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_check_cs4227 - Check CS4227 and reset as needed + * @hw: pointer to hardware structure + **/ +STATIC void ixgbe_check_cs4227(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + u16 value = 0; + u8 retry; + + for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status != IXGBE_SUCCESS) { + ERROR_REPORT2(IXGBE_ERROR_CAUTION, + "semaphore failed with %d", status); + msec_delay(IXGBE_CS4227_CHECK_DELAY); + continue; + } + + /* Get status of reset flow. */ + status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value); + + if (status == IXGBE_SUCCESS && + value == IXGBE_CS4227_RESET_COMPLETE) + goto out; + + if (status != IXGBE_SUCCESS || + value != IXGBE_CS4227_RESET_PENDING) + break; + + /* Reset is pending. Wait and check again. */ + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msec_delay(IXGBE_CS4227_CHECK_DELAY); + } + + /* If still pending, assume other instance failed. */ + if (retry == IXGBE_CS4227_RETRIES) { + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status != IXGBE_SUCCESS) { + ERROR_REPORT2(IXGBE_ERROR_CAUTION, + "semaphore failed with %d", status); + return; + } + } + + /* Reset the CS4227. */ + status = ixgbe_reset_cs4227(hw); + if (status != IXGBE_SUCCESS) { + ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, + "CS4227 reset failed: %d", status); + goto out; + } + + /* Reset takes so long, temporarily release semaphore in case the + * other driver instance is waiting for the reset indication. + */ + ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, + IXGBE_CS4227_RESET_PENDING); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msec_delay(10); + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status != IXGBE_SUCCESS) { + ERROR_REPORT2(IXGBE_ERROR_CAUTION, + "semaphore failed with %d", status); + return; + } + + /* Record completion for next time. 
*/ + status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, + IXGBE_CS4227_RESET_COMPLETE); + +out: + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msec_delay(hw->eeprom.semaphore_delay); +} + +/** + * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control + * @hw: pointer to hardware structure + **/ +STATIC void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw) +{ + u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + + if (hw->bus.lan_id) { + esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1); + esdp |= IXGBE_ESDP_SDP1_DIR; + } + esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR); + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_identify_phy_x550em - Get PHY type based on device id + * @hw: pointer to hardware structure + * + * Returns error code + */ +STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) +{ + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_SFP: + hw->phy.phy_semaphore_mask = IXGBE_GSSR_TOKEN_SM; + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; + return ixgbe_identify_module_generic(hw); + case IXGBE_DEV_ID_X550EM_X_SFP: + /* set up for CS4227 usage */ + hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; + ixgbe_setup_mux_ctl(hw); + ixgbe_check_cs4227(hw); + /* Fallthrough */ + + case IXGBE_DEV_ID_X550EM_A_SFP_N: + return ixgbe_identify_module_generic(hw); + break; + case IXGBE_DEV_ID_X550EM_X_KX4: + hw->phy.type = ixgbe_phy_x550em_kx4; + break; + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + hw->phy.type = ixgbe_phy_x550em_kr; + break; + case IXGBE_DEV_ID_X550EM_X_1G_T: + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + case IXGBE_DEV_ID_X550EM_A_10G_T: + return ixgbe_identify_phy_generic(hw); + default: + break; + } + return IXGBE_SUCCESS; +} + +STATIC s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data); + return IXGBE_NOT_IMPLEMENTED; +} + +STATIC s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data); + return IXGBE_NOT_IMPLEMENTED; +} + +/** + * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. + **/ +static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val) +{ + return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true); +} + +/** + * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. 
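 * The only difference from the locked variant above is the final flag
 * passed to ixgbe_read_i2c_combined_generic_int() (true for the locked
 * wrapper, false here); callers that already hold the relevant semaphore
 * use this unlocked form.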
+ **/ +static s32 +ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val) +{ + return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false); +} + +/** + * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * + * Returns an error code on error. + **/ +static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, + u8 addr, u16 reg, u16 val) +{ + return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true); +} + +/** + * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * + * Returns an error code on error. + **/ +static s32 +ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, + u8 addr, u16 reg, u16 val) +{ + return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); +} + +/** +* ixgbe_init_ops_X550EM - Inits func ptrs and MAC type +* @hw: pointer to hardware structure +* +* Initialize the function pointers and for MAC type X550EM. +* Does not touch the hardware. +**/ +s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + struct ixgbe_phy_info *phy = &hw->phy; + struct ixgbe_link_info *link = &hw->link; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_ops_X550EM"); + + /* Similar to X550 so start there. */ + ret_val = ixgbe_init_ops_X550(hw); + + /* Since this function eventually calls + * ixgbe_init_ops_540 by design, we are setting + * the pointers to NULL explicitly here to overwrite + * the values being set in the x540 function. + */ + /* Thermal sensor not supported in x550EM */ + mac->ops.get_thermal_sensor_data = NULL; + mac->ops.init_thermal_sensor_thresh = NULL; + mac->thermal_sensor_enabled = false; + + /* FCOE not supported in x550EM */ + mac->ops.get_san_mac_addr = NULL; + mac->ops.set_san_mac_addr = NULL; + mac->ops.get_wwn_prefix = NULL; + mac->ops.get_fcoe_boot_status = NULL; + + /* IPsec not supported in x550EM */ + mac->ops.disable_sec_rx_path = NULL; + mac->ops.enable_sec_rx_path = NULL; + + /* AUTOC register is not present in x550EM. 
*/ + mac->ops.prot_autoc_read = NULL; + mac->ops.prot_autoc_write = NULL; + + /* X550EM bus type is internal*/ + hw->bus.type = ixgbe_bus_type_internal; + mac->ops.get_bus_info = ixgbe_get_bus_info_X550em; + + if (hw->mac.type == ixgbe_mac_X550EM_x) { + mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550; + mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550; + mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em; + mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em; + link->ops.read_link = ixgbe_read_i2c_combined_generic; + link->ops.read_link_unlocked = + ixgbe_read_i2c_combined_generic_unlocked; + link->ops.write_link = ixgbe_write_i2c_combined_generic; + link->ops.write_link_unlocked = + ixgbe_write_i2c_combined_generic_unlocked; + link->addr = IXGBE_CS4227; + } + if (hw->mac.type == ixgbe_mac_X550EM_a) { + mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a; + mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a; + mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a; + mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a; + } + + mac->ops.get_media_type = ixgbe_get_media_type_X550em; + mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em; + mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em; + mac->ops.reset_hw = ixgbe_reset_hw_X550em; + mac->ops.get_supported_physical_layer = + ixgbe_get_supported_physical_layer_X550em; + + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) + mac->ops.setup_fc = ixgbe_setup_fc_generic; + else + mac->ops.setup_fc = ixgbe_setup_fc_X550em; + + + if (hw->device_id != IXGBE_DEV_ID_X550EM_X_KR) + mac->ops.setup_eee = NULL; + + /* PHY */ + phy->ops.init = ixgbe_init_phy_ops_X550em; + phy->ops.identify = ixgbe_identify_phy_x550em; + if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) + phy->ops.set_phy_power = NULL; + + + /* EEPROM */ + eeprom->ops.init_params = ixgbe_init_eeprom_params_X540; + eeprom->ops.read = ixgbe_read_ee_hostif_X550; + eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550; + eeprom->ops.write = ixgbe_write_ee_hostif_X550; + eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550; + eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550; + eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550; + eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550; + + return ret_val; +} + +/** + * ixgbe_dmac_config_X550 + * @hw: pointer to hardware structure + * + * Configure DMA coalescing. If enabling dmac, dmac is activated. + * When disabling dmac, dmac enable dmac bit is cleared. 
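[Editor's sketch] ixgbe_init_ops_X550EM() above starts from the inherited X550/X540 function-pointer tables and then sets the entries for unsupported features (thermal sensor, SAN MAC, AUTOC, ...) to NULL, so callers must test a pointer before dispatching through it. A minimal model of that ops-table idiom, using invented names rather than the driver's structures:

#include <stdio.h>

struct mac_ops {
        int (*get_thermal_data)(void);  /* NULL when not supported */
        int (*reset_hw)(void);
};

static int generic_get_thermal_data(void) { return 42; }
static int x550em_reset_hw(void)          { return 0; }

static void init_ops_generic(struct mac_ops *ops)
{
        ops->get_thermal_data = generic_get_thermal_data;
        ops->reset_hw = x550em_reset_hw;
}

static void init_ops_x550em(struct mac_ops *ops)
{
        init_ops_generic(ops);          /* start from the base table */
        ops->get_thermal_data = NULL;   /* feature absent on this MAC */
}

int main(void)
{
        struct mac_ops ops;

        init_ops_x550em(&ops);
        if (ops.get_thermal_data)       /* callers must test before dispatch */
                printf("thermal: %d\n", ops.get_thermal_data());
        else
                printf("thermal sensor not supported\n");
        return 0;
}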
+ **/ +s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw) +{ + u32 reg, high_pri_tc; + + DEBUGFUNC("ixgbe_dmac_config_X550"); + + /* Disable DMA coalescing before configuring */ + reg = IXGBE_READ_REG(hw, IXGBE_DMACR); + reg &= ~IXGBE_DMACR_DMAC_EN; + IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); + + /* Disable DMA Coalescing if the watchdog timer is 0 */ + if (!hw->mac.dmac_config.watchdog_timer) + goto out; + + ixgbe_dmac_config_tcs_X550(hw); + + /* Configure DMA Coalescing Control Register */ + reg = IXGBE_READ_REG(hw, IXGBE_DMACR); + + /* Set the watchdog timer in units of 40.96 usec */ + reg &= ~IXGBE_DMACR_DMACWT_MASK; + reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096; + + reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK; + /* If fcoe is enabled, set high priority traffic class */ + if (hw->mac.dmac_config.fcoe_en) { + high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc; + reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) & + IXGBE_DMACR_HIGH_PRI_TC_MASK); + } + reg |= IXGBE_DMACR_EN_MNG_IND; + + /* Enable DMA coalescing after configuration */ + reg |= IXGBE_DMACR_DMAC_EN; + IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); + +out: + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dmac_config_tcs_X550 + * @hw: pointer to hardware structure + * + * Configure DMA coalescing threshold per TC. The dmac enable bit must + * be cleared before configuring. + **/ +s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw) +{ + u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb; + + DEBUGFUNC("ixgbe_dmac_config_tcs_X550"); + + /* Configure DMA coalescing enabled */ + switch (hw->mac.dmac_config.link_speed) { + case IXGBE_LINK_SPEED_100_FULL: + pb_headroom = IXGBE_DMACRXT_100M; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + pb_headroom = IXGBE_DMACRXT_1G; + break; + default: + pb_headroom = IXGBE_DMACRXT_10G; + break; + } + + maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >> + IXGBE_MHADD_MFS_SHIFT) / 1024); + + /* Set the per Rx packet buffer receive threshold */ + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) { + reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc)); + reg &= ~IXGBE_DMCTH_DMACRXT_MASK; + + if (tc < hw->mac.dmac_config.num_tcs) { + /* Get Rx PB size */ + rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc)); + rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >> + IXGBE_RXPBSIZE_SHIFT; + + /* Calculate receive buffer threshold in kilobytes */ + if (rx_pb_size > pb_headroom) + rx_pb_size = rx_pb_size - pb_headroom; + else + rx_pb_size = 0; + + /* Minimum of MFS shall be set for DMCTH */ + reg |= (rx_pb_size > maxframe_size_kb) ? + rx_pb_size : maxframe_size_kb; + } + IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg); + } + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dmac_update_tcs_X550 + * @hw: pointer to hardware structure + * + * Disables dmac, updates per TC settings, and then enables dmac. + **/ +s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw) +{ + u32 reg; + + DEBUGFUNC("ixgbe_dmac_update_tcs_X550"); + + /* Disable DMA coalescing before configuring */ + reg = IXGBE_READ_REG(hw, IXGBE_DMACR); + reg &= ~IXGBE_DMACR_DMAC_EN; + IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); + + ixgbe_dmac_config_tcs_X550(hw); + + /* Enable DMA coalescing after configuration */ + reg = IXGBE_READ_REG(hw, IXGBE_DMACR); + reg |= IXGBE_DMACR_DMAC_EN; + IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. 
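[Editor's sketch] ixgbe_dmac_config_X550() above programs the DMA-coalescing watchdog in hardware units of 40.96 us with the integer expression (watchdog_timer * 100) / 4096, which avoids floating point because 40.96 us is exactly 4096/100 us. A small check of that arithmetic, assuming the configured watchdog value is expressed in microseconds:

#include <stdint.h>
#include <stdio.h>

/* Convert a watchdog value in microseconds to 40.96 us hardware units,
 * mirroring the driver's integer math: usec / 40.96 == (usec * 100) / 4096. */
static uint32_t dmac_wdt_units(uint32_t usec)
{
        return (usec * 100) / 4096;
}

int main(void)
{
        /* 4096 us is exactly 100 units of 40.96 us. */
        printf("4096 us -> %u units\n", (unsigned)dmac_wdt_units(4096));
        /* 1000 us truncates to 24 units (24 * 40.96 us ~= 983 us). */
        printf("1000 us -> %u units\n", (unsigned)dmac_wdt_units(1000));
        return 0;
}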
+ **/ +s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + + DEBUGFUNC("ixgbe_init_eeprom_params_X550"); + + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->semaphore_delay = 10; + eeprom->type = ixgbe_flash; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + + DEBUGOUT2("Eeprom params: type = %d, size = %d\n", + eeprom->type, eeprom->word_size); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_setup_eee_X550 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @enable_eee: boolean flag to enable EEE + * + * Enable/disable EEE based on enable_eee flag. + * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C + * are modified. + * + **/ +s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee) +{ + u32 eeer; + u16 autoneg_eee_reg; + u32 link_reg; + s32 status; + + DEBUGFUNC("ixgbe_setup_eee_X550"); + + eeer = IXGBE_READ_REG(hw, IXGBE_EEER); + /* Enable or disable EEE per flag */ + if (enable_eee) { + eeer |= (IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN); + + if (hw->mac.type == ixgbe_mac_X550) { + /* Advertise EEE capability */ + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg); + + autoneg_eee_reg |= (IXGBE_AUTO_NEG_10GBASE_EEE_ADVT | + IXGBE_AUTO_NEG_1000BASE_EEE_ADVT | + IXGBE_AUTO_NEG_100BASE_EEE_ADVT); + + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg); + } else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) { + /* Not supported on first revision of X550EM_x. */ + if ((hw->mac.type == ixgbe_mac_X550EM_x) && + !(IXGBE_FUSES0_REV_MASK & + IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))) + return IXGBE_SUCCESS; + + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg); + if (status != IXGBE_SUCCESS) + return status; + + link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR | + IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX; + + /* Don't advertise FEC capability when EEE enabled. */ + link_reg &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC; + + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg); + if (status != IXGBE_SUCCESS) + return status; + } + } else { + eeer &= ~(IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN); + + if (hw->mac.type == ixgbe_mac_X550) { + /* Disable advertised EEE capability */ + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg); + + autoneg_eee_reg &= ~(IXGBE_AUTO_NEG_10GBASE_EEE_ADVT | + IXGBE_AUTO_NEG_1000BASE_EEE_ADVT | + IXGBE_AUTO_NEG_100BASE_EEE_ADVT); + + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg); + } else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) { + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg); + if (status != IXGBE_SUCCESS) + return status; + + link_reg &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR | + IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX); + + /* Advertise FEC capability when EEE is disabled. 
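[Editor's sketch] ixgbe_init_eeprom_params_X550() above derives the EEPROM word count from the size field of the EEC register as word_size = 1 << (size_field + IXGBE_EEPROM_WORD_SIZE_SHIFT). The toy decode below illustrates that computation; the mask, shift and field width here are assumptions for illustration only (the real values live in the shared ixgbe headers):

#include <stdint.h>
#include <stdio.h>

/* Assumed layout for illustration: a 4-bit size field at bits 14:11 and a
 * base shift of 6, so a size field of 0 would mean 64 words. */
#define EEC_SIZE_MASK   0x00007800u
#define EEC_SIZE_SHIFT  11
#define WORD_SIZE_SHIFT 6

static uint32_t eeprom_word_size(uint32_t eec)
{
        uint32_t size_field = (eec & EEC_SIZE_MASK) >> EEC_SIZE_SHIFT;

        return 1u << (size_field + WORD_SIZE_SHIFT);
}

int main(void)
{
        uint32_t eec = 5u << EEC_SIZE_SHIFT;    /* size field = 5 */

        /* 1 << (5 + 6) = 2048 words. */
        printf("word size = %u\n", (unsigned)eeprom_word_size(eec));
        return 0;
}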
*/ + link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC; + + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg); + if (status != IXGBE_SUCCESS) + return status; + } + } + IXGBE_WRITE_REG(hw, IXGBE_EEER, eeer); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_source_address_pruning_X550 - Enable/Disbale source address pruning + * @hw: pointer to hardware structure + * @enable: enable or disable source address pruning + * @pool: Rx pool to set source address pruning for + **/ +void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable, + unsigned int pool) +{ + u64 pfflp; + + /* max rx pool is 63 */ + if (pool > 63) + return; + + pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL); + pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32; + + if (enable) + pfflp |= (1ULL << pool); + else + pfflp &= ~(1ULL << pool); + + IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp); + IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32)); +} + +/** + * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for Ethertype anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing + * + **/ +void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, + bool enable, int vf) +{ + int vf_target_reg = vf >> 3; + int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT; + u32 pfvfspoof; + + DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550"); + + pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); + if (enable) + pfvfspoof |= (1 << vf_target_shift); + else + pfvfspoof &= ~(1 << vf_target_shift); + + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); +} + +/** + * ixgbe_iosf_wait - Wait for IOSF command completion + * @hw: pointer to hardware structure + * @ctrl: pointer to location to receive final IOSF control value + * + * Returns failing status on timeout + * + * Note: ctrl can be NULL if the IOSF control register value is not needed + **/ +STATIC s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl) +{ + u32 i, command = 0; + + /* Check every 10 usec to see if the address cycle completed. 
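[Editor's sketch] ixgbe_set_source_address_pruning_X550() above treats PFFLPL/PFFLPH as one 64-bit pool bitmap: read both 32-bit halves, set or clear a single pool bit, then write both halves back. A standalone sketch of that read-modify-write on a split 64-bit bitmap, with plain variables standing in for the two registers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the low/high 32-bit halves of the pool bitmap. */
static uint32_t bitmap_lo, bitmap_hi;

static void set_pool_bit(unsigned int pool, bool enable)
{
        uint64_t map;

        if (pool > 63)                  /* 64 pools maximum */
                return;

        map = (uint64_t)bitmap_lo | ((uint64_t)bitmap_hi << 32);

        if (enable)
                map |= 1ULL << pool;
        else
                map &= ~(1ULL << pool);

        bitmap_lo = (uint32_t)map;
        bitmap_hi = (uint32_t)(map >> 32);
}

int main(void)
{
        set_pool_bit(3, true);
        set_pool_bit(40, true);
        /* expected: lo=0x00000008 hi=0x00000100 */
        printf("lo=0x%08x hi=0x%08x\n", (unsigned)bitmap_lo,
               (unsigned)bitmap_hi);
        return 0;
}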
+ * The SB IOSF BUSY bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); + if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0) + break; + usec_delay(10); + } + if (ctrl) + *ctrl = command; + if (i == IXGBE_MDIO_COMMAND_TIMEOUT) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n"); + return IXGBE_ERR_PHY; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the IOSF + * device + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Data to write to the register + **/ +s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 data) +{ + u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; + u32 command, error; + s32 ret; + + ret = ixgbe_acquire_swfw_semaphore(hw, gssr); + if (ret != IXGBE_SUCCESS) + return ret; + + ret = ixgbe_iosf_wait(hw, NULL); + if (ret != IXGBE_SUCCESS) + goto out; + + command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | + (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); + + /* Write IOSF control register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); + + /* Write IOSF data register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data); + + ret = ixgbe_iosf_wait(hw, &command); + + if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { + error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> + IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Failed to write, error %x\n", error); + ret = IXGBE_ERR_PHY; + } + +out: + ixgbe_release_swfw_semaphore(hw, gssr); + return ret; +} + +/** + * ixgbe_read_iosf_sb_reg_x550 - Writes a value to specified register of the IOSF + * device + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @phy_data: Pointer to read data from the register + **/ +s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *data) +{ + u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; + u32 command, error; + s32 ret; + + ret = ixgbe_acquire_swfw_semaphore(hw, gssr); + if (ret != IXGBE_SUCCESS) + return ret; + + ret = ixgbe_iosf_wait(hw, NULL); + if (ret != IXGBE_SUCCESS) + goto out; + + command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | + (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); + + /* Write IOSF control register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); + + ret = ixgbe_iosf_wait(hw, &command); + + if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { + error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> + IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Failed to read, error %x\n", error); + ret = IXGBE_ERR_PHY; + } + + if (ret == IXGBE_SUCCESS) + *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA); + +out: + ixgbe_release_swfw_semaphore(hw, gssr); + return ret; +} + +/** + * ixgbe_get_phy_token - Get the token for shared phy access + * @hw: Pointer to hardware structure + */ + +s32 ixgbe_get_phy_token(struct ixgbe_hw *hw) +{ + struct ixgbe_hic_phy_token_req token_cmd; + s32 status; + + token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; + token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; + token_cmd.hdr.cmd_or_resp.cmd_resv = 0; + token_cmd.port_number = hw->bus.lan_id; + token_cmd.command_type = FW_PHY_TOKEN_REQ; + token_cmd.pad = 0; + status 
= ixgbe_host_interface_command(hw, (u32 *)&token_cmd, + sizeof(token_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (status) + return status; + if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) + return IXGBE_SUCCESS; + if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) + return IXGBE_ERR_FW_RESP_INVALID; + + return IXGBE_ERR_TOKEN_RETRY; +} + +/** + * ixgbe_put_phy_token - Put the token for shared phy access + * @hw: Pointer to hardware structure + */ + +s32 ixgbe_put_phy_token(struct ixgbe_hw *hw) +{ + struct ixgbe_hic_phy_token_req token_cmd; + s32 status; + + token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; + token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; + token_cmd.hdr.cmd_or_resp.cmd_resv = 0; + token_cmd.port_number = hw->bus.lan_id; + token_cmd.command_type = FW_PHY_TOKEN_REL; + token_cmd.pad = 0; + status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd, + sizeof(token_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (status) + return status; + if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) + return IXGBE_SUCCESS; + return IXGBE_ERR_FW_RESP_INVALID; +} + +/** + * ixgbe_write_iosf_sb_reg_x550a - Writes a value to specified register + * of the IOSF device + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Data to write to the register + **/ +s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 data) +{ + struct ixgbe_hic_internal_phy_req write_cmd; + s32 status; + UNREFERENCED_1PARAMETER(device_type); + + write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; + write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; + write_cmd.port_number = hw->bus.lan_id; + write_cmd.command_type = FW_INT_PHY_REQ_WRITE; + write_cmd.address = (u16)reg_addr; + write_cmd.rsv1 = 0; + write_cmd.write_data = data; + write_cmd.pad = 0; + + status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd, + sizeof(write_cmd), IXGBE_HI_COMMAND_TIMEOUT, false); + + return status; +} + +/** + * ixgbe_read_iosf_sb_reg_x550a - Writes a value to specified register + * of the IOSF device. + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Pointer to read data from the register + **/ +s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *data) +{ + struct ixgbe_hic_internal_phy_req read_cmd; + s32 status; + UNREFERENCED_1PARAMETER(device_type); + + read_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; + read_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; + read_cmd.port_number = hw->bus.lan_id; + read_cmd.command_type = FW_INT_PHY_REQ_READ; + read_cmd.address = (u16)reg_addr; + read_cmd.rsv1 = 0; + read_cmd.write_data = 0; + read_cmd.pad = 0; + + status = ixgbe_host_interface_command(hw, (u32 *)&read_cmd, + sizeof(read_cmd), IXGBE_HI_COMMAND_TIMEOUT, true); + + /* Extract the register value from the response. 
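[Editor's sketch] ixgbe_iosf_wait(), shown a little earlier in this block, polls the indirect-access control register until the BUSY bit clears, gives up after a fixed number of 10 us iterations, and optionally hands back the last value read. A generic model of that poll-with-timeout idiom; read_ctrl() and delay_us() are placeholders, not driver functions:

#include <stdint.h>
#include <stdio.h>

#define CTRL_BUSY       (1u << 31)
#define POLL_LIMIT      100

static uint32_t fake_ctrl = CTRL_BUSY;

static uint32_t read_ctrl(void)
{
        /* Placeholder: pretend the busy bit clears after a few reads. */
        static int reads;

        if (++reads >= 3)
                fake_ctrl &= ~CTRL_BUSY;
        return fake_ctrl;
}

static void delay_us(unsigned int us) { (void)us; }

/* Returns 0 on completion, -1 on timeout; *ctrl (if non-NULL) receives the
 * final register value either way. */
static int wait_not_busy(uint32_t *ctrl)
{
        uint32_t val = CTRL_BUSY;
        int i;

        for (i = 0; i < POLL_LIMIT; i++) {
                val = read_ctrl();
                if (!(val & CTRL_BUSY))
                        break;
                delay_us(10);
        }
        if (ctrl)
                *ctrl = val;
        return (i == POLL_LIMIT) ? -1 : 0;
}

int main(void)
{
        uint32_t ctrl;

        printf("wait: %d, ctrl=0x%08x\n", wait_not_busy(&ctrl),
               (unsigned)ctrl);
        return 0;
}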
*/ + *data = ((struct ixgbe_hic_internal_phy_resp *)&read_cmd)->read_data; + + return status; +} + +/** + * ixgbe_disable_mdd_X550 + * @hw: pointer to hardware structure + * + * Disable malicious driver detection + **/ +void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw) +{ + u32 reg; + + DEBUGFUNC("ixgbe_disable_mdd_X550"); + + /* Disable MDD for TX DMA and interrupt */ + reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN); + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); + + /* Disable MDD for RX and interrupt */ + reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN); + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); +} + +/** + * ixgbe_enable_mdd_X550 + * @hw: pointer to hardware structure + * + * Enable malicious driver detection + **/ +void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw) +{ + u32 reg; + + DEBUGFUNC("ixgbe_enable_mdd_X550"); + + /* Enable MDD for TX DMA and interrupt */ + reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN); + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); + + /* Enable MDD for RX and interrupt */ + reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN); + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); +} + +/** + * ixgbe_restore_mdd_vf_X550 + * @hw: pointer to hardware structure + * @vf: vf index + * + * Restore VF that was disabled during malicious driver detection event + **/ +void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf) +{ + u32 idx, reg, num_qs, start_q, bitmask; + + DEBUGFUNC("ixgbe_restore_mdd_vf_X550"); + + /* Map VF to queues */ + reg = IXGBE_READ_REG(hw, IXGBE_MRQC); + switch (reg & IXGBE_MRQC_MRQE_MASK) { + case IXGBE_MRQC_VMDQRT8TCEN: + num_qs = 8; /* 16 VFs / pools */ + bitmask = 0x000000FF; + break; + case IXGBE_MRQC_VMDQRSS32EN: + case IXGBE_MRQC_VMDQRT4TCEN: + num_qs = 4; /* 32 VFs / pools */ + bitmask = 0x0000000F; + break; + default: /* 64 VFs / pools */ + num_qs = 2; + bitmask = 0x00000003; + break; + } + start_q = vf * num_qs; + + /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */ + idx = start_q / 32; + reg = 0; + reg |= (bitmask << (start_q % 32)); + IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg); + IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg); +} + +/** + * ixgbe_mdd_event_X550 + * @hw: pointer to hardware structure + * @vf_bitmap: vf bitmap of malicious vfs + * + * Handle malicious driver detection event. 
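[Editor's sketch] ixgbe_restore_mdd_vf_X550() above derives the number of queues per VF from the MRQC virtualization mode (8, 4 or 2), then builds a mask of that VF's queues, offset by vf * num_qs within the 32-queue WQBR registers. A small sketch of that mapping with the mode reduced to a plain enum:

#include <stdint.h>
#include <stdio.h>

enum vmdq_mode { POOLS_16, POOLS_32, POOLS_64 };

static void vf_queue_mask(enum vmdq_mode mode, unsigned int vf,
                          unsigned int *reg_idx, uint32_t *mask)
{
        unsigned int num_qs, start_q;
        uint32_t bitmask;

        switch (mode) {
        case POOLS_16:                  /* 16 VFs -> 8 queues each */
                num_qs = 8;
                bitmask = 0x000000FF;
                break;
        case POOLS_32:                  /* 32 VFs -> 4 queues each */
                num_qs = 4;
                bitmask = 0x0000000F;
                break;
        default:                        /* 64 VFs -> 2 queues each */
                num_qs = 2;
                bitmask = 0x00000003;
                break;
        }

        start_q = vf * num_qs;
        *reg_idx = start_q / 32;                /* which 32-queue register */
        *mask = bitmask << (start_q % 32);      /* this VF's queue bits */
}

int main(void)
{
        unsigned int idx;
        uint32_t mask;

        /* VF 10 with 4 queues each: queues 40..43 -> register 1, mask 0xF00 */
        vf_queue_mask(POOLS_32, 10, &idx, &mask);
        printf("reg %u mask 0x%08x\n", idx, (unsigned)mask);
        return 0;
}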
+ **/ +void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap) +{ + u32 wqbr; + u32 i, j, reg, q, shift, vf, idx; + + DEBUGFUNC("ixgbe_mdd_event_X550"); + + /* figure out pool size for mapping to vf's */ + reg = IXGBE_READ_REG(hw, IXGBE_MRQC); + switch (reg & IXGBE_MRQC_MRQE_MASK) { + case IXGBE_MRQC_VMDQRT8TCEN: + shift = 3; /* 16 VFs / pools */ + break; + case IXGBE_MRQC_VMDQRSS32EN: + case IXGBE_MRQC_VMDQRT4TCEN: + shift = 2; /* 32 VFs / pools */ + break; + default: + shift = 1; /* 64 VFs / pools */ + break; + } + + /* Read WQBR_TX and WQBR_RX and check for malicious queues */ + for (i = 0; i < 4; i++) { + wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i)); + wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i)); + + if (!wqbr) + continue; + + /* Get malicious queue */ + for (j = 0; j < 32 && wqbr; j++) { + + if (!(wqbr & (1 << j))) + continue; + + /* Get queue from bitmask */ + q = j + (i * 32); + + /* Map queue to vf */ + vf = (q >> shift); + + /* Set vf bit in vf_bitmap */ + idx = vf / 32; + vf_bitmap[idx] |= (1 << (vf % 32)); + wqbr &= ~(1 << j); + } + } +} + +/** + * ixgbe_get_media_type_X550em - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + */ +enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) +{ + enum ixgbe_media_type media_type; + + DEBUGFUNC("ixgbe_get_media_type_X550em"); + + /* Detect if there is a copper PHY attached. */ + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_KX4: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + media_type = ixgbe_media_type_backplane; + break; + case IXGBE_DEV_ID_X550EM_X_SFP: + case IXGBE_DEV_ID_X550EM_A_SFP: + case IXGBE_DEV_ID_X550EM_A_SFP_N: + case IXGBE_DEV_ID_X550EM_A_QSFP: + case IXGBE_DEV_ID_X550EM_A_QSFP_N: + media_type = ixgbe_media_type_fiber; + break; + case IXGBE_DEV_ID_X550EM_X_1G_T: + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + case IXGBE_DEV_ID_X550EM_A_10G_T: + media_type = ixgbe_media_type_copper; + break; + default: + media_type = ixgbe_media_type_unknown; + break; + } + return media_type; +} + +/** + * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported + * @hw: pointer to hardware structure + * @linear: true if SFP module is linear + */ +STATIC s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear) +{ + DEBUGFUNC("ixgbe_supported_sfp_modules_X550em"); + + switch (hw->phy.sfp_type) { + case ixgbe_sfp_type_not_present: + return IXGBE_ERR_SFP_NOT_PRESENT; + case ixgbe_sfp_type_da_cu_core0: + case ixgbe_sfp_type_da_cu_core1: + *linear = true; + break; + case ixgbe_sfp_type_srlr_core0: + case ixgbe_sfp_type_srlr_core1: + case ixgbe_sfp_type_da_act_lmt_core0: + case ixgbe_sfp_type_da_act_lmt_core1: + case ixgbe_sfp_type_1g_sx_core0: + case ixgbe_sfp_type_1g_sx_core1: + case ixgbe_sfp_type_1g_lx_core0: + case ixgbe_sfp_type_1g_lx_core1: + *linear = false; + break; + case ixgbe_sfp_type_unknown: + case ixgbe_sfp_type_1g_cu_core0: + case ixgbe_sfp_type_1g_cu_core1: + default: + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_identify_sfp_module_X550em - Identifies SFP modules + * @hw: pointer to hardware structure + * + * Searches for and identifies the SFP module and assigns appropriate PHY type. 
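[Editor's sketch] ixgbe_mdd_event_X550() above performs the inverse mapping: it scans the WQBR queue bitmaps for flagged queues and converts each queue number back to a VF index with vf = q >> shift, setting the matching bit in the caller's vf_bitmap. A compact, self-contained version of that scan over a 128-bit queue bitmap:

#include <stdint.h>
#include <stdio.h>

/* Convert a set of flagged queues into a set of flagged VFs.
 * 'shift' is log2(queues per VF): 3 for 16 pools, 2 for 32, 1 for 64. */
static void queues_to_vfs(const uint32_t wqbr[4], unsigned int shift,
                          uint32_t vf_bitmap[2])
{
        unsigned int i, j, q, vf;

        for (i = 0; i < 4; i++) {
                for (j = 0; j < 32; j++) {
                        if (!(wqbr[i] & (1u << j)))
                                continue;
                        q = i * 32 + j;
                        vf = q >> shift;
                        vf_bitmap[vf / 32] |= 1u << (vf % 32);
                }
        }
}

int main(void)
{
        uint32_t wqbr[4] = { 0x0000F000u, 0, 0, 0 };    /* queues 12..15 */
        uint32_t vfs[2] = { 0, 0 };

        queues_to_vfs(wqbr, 2, vfs);    /* 4 queues per VF -> only VF 3 set */
        printf("vf bitmap: 0x%08x 0x%08x\n", (unsigned)vfs[0],
               (unsigned)vfs[1]);
        return 0;
}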
+ **/ +s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw) +{ + s32 status; + bool linear; + + DEBUGFUNC("ixgbe_identify_sfp_module_X550em"); + + status = ixgbe_identify_module_generic(hw); + + if (status != IXGBE_SUCCESS) + return status; + + /* Check if SFP module is supported */ + status = ixgbe_supported_sfp_modules_X550em(hw, &linear); + + return status; +} + +/** + * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops + * @hw: pointer to hardware structure + */ +s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) +{ + s32 status; + bool linear; + + DEBUGFUNC("ixgbe_setup_sfp_modules_X550em"); + + /* Check if SFP module is supported */ + status = ixgbe_supported_sfp_modules_X550em(hw, &linear); + + if (status != IXGBE_SUCCESS) + return status; + + ixgbe_init_mac_link_ops_X550em(hw); + hw->phy.ops.reset = NULL; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_mac_link_ops_X550em - init mac link function pointers + * @hw: pointer to hardware structure + */ +void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + DEBUGFUNC("ixgbe_init_mac_link_ops_X550em"); + + switch (hw->mac.ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + /* CS4227 does not support autoneg, so disable the laser control + * functions for SFP+ fiber + */ + mac->ops.disable_tx_laser = NULL; + mac->ops.enable_tx_laser = NULL; + mac->ops.flap_tx_laser = NULL; + mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; + mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em; + mac->ops.set_rate_select_speed = + ixgbe_set_soft_rate_select_speed; + break; + case ixgbe_media_type_copper: + mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; + mac->ops.check_link = ixgbe_check_link_t_X550em; + break; + default: + break; + } +} + +/** + * ixgbe_get_link_capabilities_x550em - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + */ +s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + DEBUGFUNC("ixgbe_get_link_capabilities_X550em"); + + /* SFP */ + if (hw->phy.media_type == ixgbe_media_type_fiber) { + + /* CS4227 SFP must not enable auto-negotiation */ + *autoneg = false; + + /* Check if 1G SFP module. */ + if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 + || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) { + *speed = IXGBE_LINK_SPEED_1GB_FULL; + return IXGBE_SUCCESS; + } + + /* Link capabilities are based on SFP */ + if (hw->phy.multispeed_fiber) + *speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + else + *speed = IXGBE_LINK_SPEED_10GB_FULL; + } else { + *speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_lasi_ext_t_x550em - Determime external Base T PHY interrupt cause + * @hw: pointer to hardware structure + * @lsc: pointer to boolean flag which indicates whether external Base T + * PHY interrupt is lsc + * + * Determime if external Base T PHY interrupt cause is high temperature + * failure alarm or link status change. + * + * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature + * failure alarm, else return PHY access status. 
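[Editor's sketch] ixgbe_get_link_capabilities_X550em() above disables autoneg for SFP media and derives the advertised speeds from the module: 1G only for 1G SX/LX modules, 10G (plus 1G for multispeed modules) otherwise, and 10G|1G with autoneg for non-fiber media. A condensed model of that decision using simplified speed flags and enums, not the driver's types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPEED_1G        0x1u
#define SPEED_10G       0x2u

enum media { MEDIA_FIBER, MEDIA_OTHER };
enum sfp   { SFP_1G_SX, SFP_1G_LX, SFP_10G, SFP_NONE };

static void link_caps(enum media media, enum sfp sfp, bool multispeed,
                      uint32_t *speed, bool *autoneg)
{
        if (media == MEDIA_FIBER) {
                *autoneg = false;               /* SFP path: no autoneg */
                if (sfp == SFP_1G_SX || sfp == SFP_1G_LX)
                        *speed = SPEED_1G;
                else if (multispeed)
                        *speed = SPEED_10G | SPEED_1G;
                else
                        *speed = SPEED_10G;
        } else {
                *speed = SPEED_10G | SPEED_1G;
                *autoneg = true;
        }
}

int main(void)
{
        uint32_t speed;
        bool an;

        link_caps(MEDIA_FIBER, SFP_10G, true, &speed, &an);
        printf("speed mask 0x%x, autoneg %d\n", (unsigned)speed, (int)an);
        return 0;
}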
+ */ +STATIC s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) +{ + u32 status; + u16 reg; + + *lsc = false; + + /* Vendor alarm triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + + if (status != IXGBE_SUCCESS || + !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN)) + return status; + + /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + + if (status != IXGBE_SUCCESS || + !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | + IXGBE_MDIO_GLOBAL_ALARM_1_INT))) + return status; + + /* Global alarm triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + + if (status != IXGBE_SUCCESS) + return status; + + /* If high temperature failure, then return over temp error and exit */ + if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) { + /* power down the PHY in case the PHY FW didn't already */ + ixgbe_set_copper_phy_power(hw, false); + return IXGBE_ERR_OVERTEMP; + } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) { + /* device fault alarm triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + + if (status != IXGBE_SUCCESS) + return status; + + /* if device fault was due to high temp alarm handle and exit */ + if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) { + /* power down the PHY in case the PHY FW didn't */ + ixgbe_set_copper_phy_power(hw, false); + return IXGBE_ERR_OVERTEMP; + } + } + + /* Vendor alarm 2 triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); + + if (status != IXGBE_SUCCESS || + !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT)) + return status; + + /* link connect/disconnect event occurred */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); + + if (status != IXGBE_SUCCESS) + return status; + + /* Indicate LSC */ + if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC) + *lsc = true; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts + * @hw: pointer to hardware structure + * + * Enable link status change and temperature failure alarm for the external + * Base T PHY + * + * Returns PHY access status + */ +STATIC s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) +{ + u32 status; + u16 reg; + bool lsc; + + /* Clear interrupt flags */ + status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); + + /* Enable link status change alarm */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); + + if (status != IXGBE_SUCCESS) + return status; + + reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg); + + if (status != IXGBE_SUCCESS) + return status; + + /* Enable high temperature failure and global fault alarms */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + + if (status != IXGBE_SUCCESS) + return status; + + reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN | + IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN); + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); + + if (status != IXGBE_SUCCESS) + return status; + + /* 
Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + + if (status != IXGBE_SUCCESS) + return status; + + reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | + IXGBE_MDIO_GLOBAL_ALARM_1_INT); + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); + + if (status != IXGBE_SUCCESS) + return status; + + /* Enable chip-wide vendor alarm */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + + if (status != IXGBE_SUCCESS) + return status; + + reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN; + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); + + return status; +} + +/** + * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed. + * @hw: pointer to hardware structure + * @speed: link speed + * + * Configures the integrated KR PHY. + **/ +STATIC s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed) +{ + s32 status; + u32 reg_val; + + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status) + return status; + + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR | + IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX); + + /* Advertise 10G support. */ + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR; + + /* Advertise 1G support. */ + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX; + + /* Restart auto-negotiation. */ + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + return status; +} + +/** + * ixgbe_init_phy_ops_X550em - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. + */ +s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) +{ + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_phy_ops_X550em"); + + hw->mac.ops.set_lan_id(hw); + + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) { + phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; + ixgbe_setup_mux_ctl(hw); + + /* Save NW management interface connected on board. This is used + * to determine internal PHY mode. 
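[Editor's sketch] ixgbe_setup_kr_speed_x550em() above enables clause-73 autonegotiation, clears both backplane capability bits, re-advertises KR and/or KX according to the requested speed mask, and sets the restart bit before writing the register back. The sketch below mirrors that bit manipulation on a plain variable; the bit positions are assumptions chosen for illustration, not the real KRM_LINK_CTRL_1 layout:

#include <stdint.h>
#include <stdio.h>

#define SPEED_1G        0x1u
#define SPEED_10G       0x2u

/* Assumed bit positions, for illustration only. */
#define AN_ENABLE       (1u << 29)
#define AN_CAP_KR       (1u << 26)      /* 10GBASE-KR */
#define AN_CAP_KX       (1u << 24)      /* 1000BASE-KX */
#define AN_RESTART      (1u << 31)

static uint32_t kr_advertise(uint32_t reg, uint32_t speed_mask)
{
        reg |= AN_ENABLE;
        reg &= ~(AN_CAP_KR | AN_CAP_KX);        /* start from no advertisement */

        if (speed_mask & SPEED_10G)
                reg |= AN_CAP_KR;
        if (speed_mask & SPEED_1G)
                reg |= AN_CAP_KX;

        return reg | AN_RESTART;                /* kick off autonegotiation */
}

int main(void)
{
        uint32_t reg = kr_advertise(0, SPEED_10G | SPEED_1G);

        printf("link ctrl = 0x%08x\n", (unsigned)reg);
        return 0;
}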
+ */ + phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); + phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em; + } + + /* Identify the PHY or SFP module */ + ret_val = phy->ops.identify(hw); + if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED) + return ret_val; + + /* Setup function pointers based on detected hardware */ + ixgbe_init_mac_link_ops_X550em(hw); + if (phy->sfp_type != ixgbe_sfp_type_unknown) + phy->ops.reset = NULL; + + /* Set functions pointers based on phy type */ + switch (hw->phy.type) { + case ixgbe_phy_x550em_kx4: + phy->ops.setup_link = NULL; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; + case ixgbe_phy_x550em_kr: + phy->ops.setup_link = ixgbe_setup_kr_x550em; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; + case ixgbe_phy_x550em_ext_t: + /* Save NW management interface connected on board. This is used + * to determine internal PHY mode + */ + phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); + + /* If internal link mode is XFI, then setup iXFI internal link, + * else setup KR now. + */ + phy->ops.setup_internal_link = + ixgbe_setup_internal_phy_t_x550em; + + /* setup SW LPLU only for first revision of X550EM_x */ + if ((hw->mac.type == ixgbe_mac_X550EM_x) && + !(IXGBE_FUSES0_REV_MASK & + IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))) + phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em; + + phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; + phy->ops.reset = ixgbe_reset_phy_t_X550em; + break; + default: + break; + } + return ret_val; +} + +/** + * ixgbe_set_mdio_speed - Set MDIO clock speed + * @hw: pointer to hardware structure + */ +static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) +{ + u32 hlreg0; + + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + case IXGBE_DEV_ID_X550EM_A_10G_T: + case IXGBE_DEV_ID_X550EM_A_SFP: + case IXGBE_DEV_ID_X550EM_A_QSFP: + /* Config MDIO clock speed before the first MDIO PHY access */ + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + hlreg0 &= ~IXGBE_HLREG0_MDCSPD; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + break; + default: + break; + } +} + +/** + * ixgbe_reset_hw_X550em - Perform hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, perform a PHY reset, and perform a link (MAC) + * reset. + */ +s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) +{ + ixgbe_link_speed link_speed; + s32 status; + u32 ctrl = 0; + u32 i; + bool link_up = false; + + DEBUGFUNC("ixgbe_reset_hw_X550em"); + + /* Call adapter stop to disable Tx/Rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status != IXGBE_SUCCESS) + return status; + + /* flush pending Tx transactions */ + ixgbe_clear_tx_pending(hw); + + ixgbe_set_mdio_speed(hw); + + /* PHY ops must be identified and initialized prior to reset */ + status = hw->phy.ops.init(hw); + + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) + return status; + + /* start the external PHY */ + if (hw->phy.type == ixgbe_phy_x550em_ext_t) { + status = ixgbe_init_ext_t_x550em(hw); + if (status) + return status; + } + + /* Setup SFP module if there is one present. 
*/ + if (hw->phy.sfp_setup_needed) { + status = hw->mac.ops.setup_sfp(hw); + hw->phy.sfp_setup_needed = false; + } + + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) + return status; + + /* Reset PHY */ + if (!hw->phy.reset_disable && hw->phy.ops.reset) + hw->phy.ops.reset(hw); + +mac_reset_top: + /* Issue global reset to the MAC. Needs to be SW reset if link is up. + * If link reset is used when link is up, it might reset the PHY when + * mng is using it. If link is down or the flag to force full link + * reset is set, then perform link reset. + */ + ctrl = IXGBE_CTRL_LNK_RST; + if (!hw->force_full_reset) { + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + if (link_up) + ctrl = IXGBE_CTRL_RST; + } + + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + + /* Poll for reset bit to self-clear meaning reset is complete */ + for (i = 0; i < 10; i++) { + usec_delay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST_MASK)) + break; + } + + if (ctrl & IXGBE_CTRL_RST_MASK) { + status = IXGBE_ERR_RESET_FAILED; + DEBUGOUT("Reset polling failed to complete.\n"); + } + + msec_delay(50); + + /* Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to + * allow time for any pending HW events to complete. + */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = 128; + hw->mac.ops.init_rx_addrs(hw); + + ixgbe_set_mdio_speed(hw); + + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) + ixgbe_setup_mux_ctl(hw); + + return status; +} + +/** + * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY. + * @hw: pointer to hardware structure + */ +s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) +{ + u32 status; + u16 reg; + + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_TX_VENDOR_ALARMS_3, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + ®); + + if (status != IXGBE_SUCCESS) + return status; + + /* If PHY FW reset completed bit is set then this is the first + * SW instance after a power on so the PHY FW must be un-stalled. + */ + if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + + if (status != IXGBE_SUCCESS) + return status; + + reg &= ~IXGBE_MDIO_POWER_UP_STALL; + + status = hw->phy.ops.write_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); + + if (status != IXGBE_SUCCESS) + return status; + } + + return status; +} + +/** + * ixgbe_setup_kr_x550em - Configure the KR PHY. + * @hw: pointer to hardware structure + * + * Configures the integrated KR PHY. + **/ +s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) +{ + return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); +} + +/** + * ixgbe_setup_mac_link_sfp_x550em - Setup internal/external the PHY for SFP + * @hw: pointer to hardware structure + * + * Configure the external PHY and the integrated KR PHY for SFP support. 
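[Editor's sketch] ixgbe_reset_hw_X550em() above requests either a link reset or a full SW reset depending on link state, polls CTRL for the self-clearing reset bits, and jumps back to mac_reset_top once more when the DOUBLE_RESET_REQUIRED flag is set. A condensed model of that retry structure with the register access stubbed out:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CTRL_RST_MASK   0x3u

static uint32_t ctrl_reg;

static void write_ctrl(uint32_t v) { ctrl_reg = v; }
/* Stub: the hardware clears the reset bits once the reset completes. */
static uint32_t read_ctrl(void)    { ctrl_reg &= ~CTRL_RST_MASK; return ctrl_reg; }

static int do_mac_reset(bool *double_reset)
{
        uint32_t ctrl = CTRL_RST_MASK;
        int i;

again:
        write_ctrl(CTRL_RST_MASK);              /* request the reset */

        /* Poll for the self-clearing reset bits. */
        for (i = 0; i < 10; i++) {
                ctrl = read_ctrl();
                if (!(ctrl & CTRL_RST_MASK))
                        break;
        }
        if (ctrl & CTRL_RST_MASK)
                return -1;                      /* reset never completed */

        /* Some error states need the whole sequence applied twice. */
        if (*double_reset) {
                *double_reset = false;
                goto again;
        }
        return 0;
}

int main(void)
{
        bool twice = true;

        printf("reset: %d\n", do_mac_reset(&twice));
        return 0;
}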
+ **/ +s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 ret_val; + u16 reg_slice, reg_val; + bool setup_linear = false; + UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); + + /* Check if SFP module is supported and linear */ + ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + + /* If no SFP module present, then return success. Return success since + * there is no reason to configure CS4227 and SFP not present error is + * not excepted in the setup MAC link flow. + */ + if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) + return IXGBE_SUCCESS; + + if (ret_val != IXGBE_SUCCESS) + return ret_val; + + if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { + /* Configure CS4227 LINE side to 10G SR. */ + reg_slice = IXGBE_CS4227_LINE_SPARE22_MSB + + (hw->bus.lan_id << 12); + reg_val = IXGBE_CS4227_SPEED_10G; + ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice, + reg_val); + + reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + + (hw->bus.lan_id << 12); + reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; + ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice, + reg_val); + + /* Configure CS4227 for HOST connection rate then type. */ + reg_slice = IXGBE_CS4227_HOST_SPARE22_MSB + + (hw->bus.lan_id << 12); + reg_val = (speed & IXGBE_LINK_SPEED_10GB_FULL) ? + IXGBE_CS4227_SPEED_10G : IXGBE_CS4227_SPEED_1G; + ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice, + reg_val); + + reg_slice = IXGBE_CS4227_HOST_SPARE24_LSB + + (hw->bus.lan_id << 12); + if (setup_linear) + reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; + else + reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; + ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice, + reg_val); + + /* Setup XFI internal link. */ + ret_val = ixgbe_setup_ixfi_x550em(hw, &speed); + } else { + /* Configure internal PHY for KR/KX. */ + ixgbe_setup_kr_speed_x550em(hw, speed); + + /* Configure CS4227 LINE side to proper mode. */ + reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + + (hw->bus.lan_id << 12); + if (setup_linear) + reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; + else + reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; + ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice, + reg_val); + } + return ret_val; +} + +/** + * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration + * @hw: pointer to hardware structure + * + * iXfI configuration needed for ixgbe_mac_X550EM_x devices. + **/ +STATIC s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw) +{ + s32 status; + u32 reg_val; + + /* Disable training protocol FSM. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Disable Flex from training TXFFE. 
*/ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Enable override for coefficients. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + return status; +} + +/** + * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. + * @hw: pointer to hardware structure + * @speed: the link speed to force + * + * Configures the integrated KR PHY to use iXFI mode. Used to connect an + * internal and external PHY at a specific speed, without autonegotiation. + **/ +STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) +{ + s32 status; + u32 reg_val; + + /* Disable AN and force speed to 10G Serial. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + + /* Select forced link speed for internal PHY. */ + switch (*speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + break; + default: + /* Other link speeds are not supported by internal KR PHY. */ + return IXGBE_ERR_LINK_SETUP; + } + + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Additional configuration needed for x550em_x */ + if (hw->mac.type == ixgbe_mac_X550EM_x) { + status = ixgbe_setup_ixfi_x550em_x(hw); + if (status != IXGBE_SUCCESS) + return status; + } + + /* Toggle port SW reset by AN reset. 
*/ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + return status; +} + +/** + * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status + * @hw: address of hardware structure + * @link_up: address of boolean to indicate link status + * + * Returns error code if unable to get link status. + */ +STATIC s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up) +{ + u32 ret; + u16 autoneg_status; + + *link_up = false; + + /* read this twice back to back to indicate current status */ + ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (ret != IXGBE_SUCCESS) + return ret; + + ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (ret != IXGBE_SUCCESS) + return ret; + + *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link + * @hw: point to hardware structure + * + * Configures the link between the integrated KR PHY and the external X557 PHY + * The driver will call this function when it gets a link status change + * interrupt from the X557 PHY. This function configures the link speed + * between the PHYs to match the link speed of the BASE-T link. + * + * A return of a non-zero value indicates an error, and the base driver should + * not report link up. + */ +s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) +{ + ixgbe_link_speed force_speed; + bool link_up; + u32 status; + u16 speed; + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) + return IXGBE_ERR_CONFIG; + + if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { + /* If link is down, there is no setup necessary so return */ + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status != IXGBE_SUCCESS) + return status; + + if (!link_up) + return IXGBE_SUCCESS; + + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &speed); + if (status != IXGBE_SUCCESS) + return status; + + /* If link is still down - no setup is required so return */ + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status != IXGBE_SUCCESS) + return status; + if (!link_up) + return IXGBE_SUCCESS; + + /* clear everything but the speed and duplex bits */ + speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK; + + switch (speed) { + case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL: + force_speed = IXGBE_LINK_SPEED_10GB_FULL; + break; + case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL: + force_speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + default: + /* Internal PHY does not support anything else */ + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + return ixgbe_setup_ixfi_x550em(hw, &force_speed); + } else { + speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + return ixgbe_setup_kr_speed_x550em(hw, speed); + } +} + +/** + * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback. + * @hw: pointer to hardware structure + * + * Configures the integrated KR PHY to use internal loopback mode. 
+ **/ +s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw) +{ + s32 status; + u32 reg_val; + + /* Disable AN and force speed to 10G Serial. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Set near-end loopback clocks. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B; + reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Set loopback enable. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Training bypass. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + return status; +} + +/** + * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command + * assuming that the semaphore is already obtained. + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. + **/ +s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, + u16 *data) +{ + s32 status; + struct ixgbe_hic_read_shadow_ram buffer; + + DEBUGFUNC("ixgbe_read_ee_hostif_data_X550"); + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = IXGBE_CPU_TO_BE32(offset * 2); + /* one word */ + buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16)); + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, false); + + if (status) + return status; + + *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, + FW_NVM_DATA_OFFSET); + + return 0; +} + +/** + * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. 
+ **/ +s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, + u16 *data) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_read_ee_hostif_X550"); + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + status = ixgbe_read_ee_hostif_data_X550(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the hostif. + **/ +s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + struct ixgbe_hic_read_shadow_ram buffer; + u32 current_word = 0; + u16 words_to_read; + s32 status; + u32 i; + + DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550"); + + /* Take semaphore for the entire operation. */ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + if (status) { + DEBUGOUT("EEPROM read buffer - semaphore failed\n"); + return status; + } + while (words) { + if (words > FW_MAX_READ_BUFFER_SIZE / 2) + words_to_read = FW_MAX_READ_BUFFER_SIZE / 2; + else + words_to_read = words; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2); + buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2); + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, + false); + + if (status) { + DEBUGOUT("Host interface command failed\n"); + goto out; + } + + for (i = 0; i < words_to_read; i++) { + u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) + + 2 * i; + u32 value = IXGBE_READ_REG(hw, reg); + + data[current_word] = (u16)(value & 0xffff); + current_word++; + i++; + if (i < words_to_read) { + value >>= 16; + data[current_word] = (u16)(value & 0xffff); + current_word++; + } + } + words -= words_to_read; + } + +out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** + * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. + **/ +s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, + u16 data) +{ + s32 status; + struct ixgbe_hic_write_shadow_ram buffer; + + DEBUGFUNC("ixgbe_write_ee_hostif_data_X550"); + + buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* one word */ + buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16)); + buffer.data = data; + buffer.address = IXGBE_CPU_TO_BE32(offset * 2); + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, false); + + return status; +} + +/** + * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. 
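[Editor's sketch] ixgbe_read_ee_hostif_buffer_X550() above takes the EEPROM semaphore once, then issues host-interface read commands in chunks of at most FW_MAX_READ_BUFFER_SIZE / 2 words, advancing the word offset each time. The loop below models only that chunking; the 16-word limit and the fake backing store are stand-ins for the firmware interface:

#include <stdint.h>
#include <stdio.h>

#define MAX_WORDS_PER_CMD 16    /* stand-in for FW_MAX_READ_BUFFER_SIZE / 2 */

static uint16_t shadow[256];    /* fake EEPROM contents */

/* Placeholder for one host-interface read command. */
static void read_cmd(uint16_t offset, uint16_t words, uint16_t *out)
{
        uint16_t i;

        for (i = 0; i < words; i++)
                out[i] = shadow[offset + i];
}

static void read_buffer(uint16_t offset, uint16_t words, uint16_t *data)
{
        uint16_t done = 0;
        uint16_t chunk;

        while (words) {
                chunk = (words > MAX_WORDS_PER_CMD) ?
                        MAX_WORDS_PER_CMD : words;
                read_cmd(offset + done, chunk, &data[done]);
                done += chunk;
                words -= chunk;
        }
}

int main(void)
{
        uint16_t out[40];
        int i;

        for (i = 0; i < 256; i++)
                shadow[i] = (uint16_t)i;
        read_buffer(4, 40, out);        /* spans three 16-word commands */
        printf("out[0]=%u out[39]=%u\n", (unsigned)out[0], (unsigned)out[39]);
        return 0;
}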
+ **/ +s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, + u16 data) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_write_ee_hostif_X550"); + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + status = ixgbe_write_ee_hostif_data_X550(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + DEBUGOUT("write ee hostif failed to get semaphore"); + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) write to the EEPROM + * + * Write a 16 bit word(s) to the EEPROM using the hostif. + **/ +s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + s32 status = IXGBE_SUCCESS; + u32 i = 0; + + DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550"); + + /* Take semaphore for the entire operation. */ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + if (status != IXGBE_SUCCESS) { + DEBUGOUT("EEPROM write buffer - semaphore failed\n"); + goto out; + } + + for (i = 0; i < words; i++) { + status = ixgbe_write_ee_hostif_data_X550(hw, offset + i, + data[i]); + + if (status != IXGBE_SUCCESS) { + DEBUGOUT("Eeprom buffered write failed\n"); + break; + } + } + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); +out: + + return status; +} + +/** + * ixgbe_checksum_ptr_x550 - Checksum one pointer region + * @hw: pointer to hardware structure + * @ptr: pointer offset in eeprom + * @size: size of section pointed by ptr, if 0 first word will be used as size + * @csum: address of checksum to update + * + * Returns error status for any failure + */ +STATIC s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr, + u16 size, u16 *csum, u16 *buffer, + u32 buffer_size) +{ + u16 buf[256]; + s32 status; + u16 length, bufsz, i, start; + u16 *local_buffer; + + bufsz = sizeof(buf) / sizeof(buf[0]); + + /* Read a chunk at the pointer location */ + if (!buffer) { + status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf); + if (status) { + DEBUGOUT("Failed to read EEPROM image\n"); + return status; + } + local_buffer = buf; + } else { + if (buffer_size < ptr) + return IXGBE_ERR_PARAM; + local_buffer = &buffer[ptr]; + } + + if (size) { + start = 0; + length = size; + } else { + start = 1; + length = local_buffer[0]; + + /* Skip pointer section if length is invalid. 
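+	 * An erased or unprogrammed section reads back as 0xFFFF, and a
+	 * zero or out-of-range length would walk past the shadow RAM, so
+	 * such sections are simply left out of the checksum.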
*/ + if (length == 0xFFFF || length == 0 || + (ptr + length) >= hw->eeprom.word_size) + return IXGBE_SUCCESS; + } + + if (buffer && ((u32)start + (u32)length > buffer_size)) + return IXGBE_ERR_PARAM; + + for (i = start; length; i++, length--) { + if (i == bufsz && !buffer) { + ptr += bufsz; + i = 0; + if (length < bufsz) + bufsz = length; + + /* Read a chunk at the pointer location */ + status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, + bufsz, buf); + if (status) { + DEBUGOUT("Failed to read EEPROM image\n"); + return status; + } + } + *csum += local_buffer[i]; + } + return IXGBE_SUCCESS; +} + +/** + * ixgbe_calc_checksum_X550 - Calculates and returns the checksum + * @hw: pointer to hardware structure + * @buffer: pointer to buffer containing calculated checksum + * @buffer_size: size of buffer + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size) +{ + u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1]; + u16 *local_buffer; + s32 status; + u16 checksum = 0; + u16 pointer, i, size; + + DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550"); + + hw->eeprom.ops.init_params(hw); + + if (!buffer) { + /* Read pointer area */ + status = ixgbe_read_ee_hostif_buffer_X550(hw, 0, + IXGBE_EEPROM_LAST_WORD + 1, + eeprom_ptrs); + if (status) { + DEBUGOUT("Failed to read EEPROM image\n"); + return status; + } + local_buffer = eeprom_ptrs; + } else { + if (buffer_size < IXGBE_EEPROM_LAST_WORD) + return IXGBE_ERR_PARAM; + local_buffer = buffer; + } + + /* + * For X550 hardware include 0x0-0x41 in the checksum, skip the + * checksum word itself + */ + for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++) + if (i != IXGBE_EEPROM_CHECKSUM) + checksum += local_buffer[i]; + + /* + * Include all data from pointers 0x3, 0x6-0xE. This excludes the + * FW, PHY module, and PCIe Expansion/Option ROM pointers. + */ + for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) { + if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) + continue; + + pointer = local_buffer[i]; + + /* Skip pointer section if the pointer is invalid. */ + if (pointer == 0xFFFF || pointer == 0 || + pointer >= hw->eeprom.word_size) + continue; + + switch (i) { + case IXGBE_PCIE_GENERAL_PTR: + size = IXGBE_IXGBE_PCIE_GENERAL_SIZE; + break; + case IXGBE_PCIE_CONFIG0_PTR: + case IXGBE_PCIE_CONFIG1_PTR: + size = IXGBE_PCIE_CONFIG_SIZE; + break; + default: + size = 0; + break; + } + + status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum, + buffer, buffer_size); + if (status) + return status; + } + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + + return (s32)checksum; +} + +/** + * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum + * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) +{ + return ixgbe_calc_checksum_X550(hw, NULL, 0); +} + +/** + * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550"); + + /* Read the first word from the EEPROM. 
If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, + &read_checksum); + if (status) + return status; + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + status = IXGBE_ERR_EEPROM_CHECKSUM; + ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, + "Invalid EEPROM checksum"); + } + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + + return status; +} + +/** + * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + * + * After writing EEPROM to shadow RAM using EEWR register, software calculates + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. + **/ +s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw) +{ + s32 status; + u16 checksum = 0; + + DEBUGFUNC("ixgbe_update_eeprom_checksum_X550"); + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + status = ixgbe_calc_eeprom_checksum_X550(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, + checksum); + if (status) + return status; + + status = ixgbe_update_flash_X550(hw); + + return status; +} + +/** + * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device + * @hw: pointer to hardware structure + * + * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash. + **/ +s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + union ixgbe_hic_hdr2 buffer; + + DEBUGFUNC("ixgbe_update_flash_X550"); + + buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD; + buffer.req.buf_lenh = 0; + buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN; + buffer.req.checksum = FW_DEFAULT_CHECKSUM; + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, false); + + return status; +} + +/** + * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. 
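+ *
+ * The result is a bitmask of IXGBE_PHYSICAL_LAYER_* flags; for example a
+ * KR backplane PHY reports IXGBE_PHYSICAL_LAYER_10GBASE_KR |
+ * IXGBE_PHYSICAL_LAYER_1000BASE_KX, while an external X557 copper PHY
+ * adds the 10GBASE-T/1000BASE-T bits advertised in its extended-ability
+ * register.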
+ **/ +u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw) +{ + u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u16 ext_ability = 0; + + DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em"); + + hw->phy.ops.identify(hw); + + switch (hw->phy.type) { + case ixgbe_phy_x550em_kr: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR | + IXGBE_PHYSICAL_LAYER_1000BASE_KX; + break; + case ixgbe_phy_x550em_kx4: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 | + IXGBE_PHYSICAL_LAYER_1000BASE_KX; + break; + case ixgbe_phy_x550em_ext_t: + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &ext_ability); + if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + break; + default: + break; + } + + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) + physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw); + + return physical_layer; +} + +/** + * ixgbe_get_bus_info_x550em - Set PCI bus info + * @hw: pointer to hardware structure + * + * Sets bus link width and speed to unknown because X550em is + * not a PCI device. + **/ +s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw) +{ + + DEBUGFUNC("ixgbe_get_bus_info_x550em"); + + hw->bus.width = ixgbe_bus_width_unknown; + hw->bus.speed = ixgbe_bus_speed_unknown; + + hw->mac.ops.set_lan_id(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_disable_rx_x550 - Disable RX unit + * + * Enables the Rx DMA unit for x550 + **/ +void ixgbe_disable_rx_x550(struct ixgbe_hw *hw) +{ + u32 rxctrl, pfdtxgswc; + s32 status; + struct ixgbe_hic_disable_rxen fw_cmd; + + DEBUGFUNC("ixgbe_enable_rx_dma_x550"); + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { + pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = true; + } else { + hw->mac.set_lben = false; + } + + fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD; + fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN; + fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + fw_cmd.port_number = (u8)hw->bus.lan_id; + + status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(struct ixgbe_hic_disable_rxen), + IXGBE_HI_COMMAND_TIMEOUT, true); + + /* If we fail - disable RX using register write */ + if (status) { + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + rxctrl &= ~IXGBE_RXCTRL_RXEN; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); + } + } + } +} + +/** + * ixgbe_enter_lplu_x550em - Transition to low power states + * @hw: pointer to hardware structure + * + * Configures Low Power Link Up on transition to low power states + * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the + * X557 PHY immediately prior to entering LPLU. + **/ +s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) +{ + u16 an_10g_cntl_reg, autoneg_reg, speed; + s32 status; + ixgbe_link_speed lcd_speed; + u32 save_autoneg; + bool link_up; + + /* SW LPLU not required on later HW revisions. 
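+	 * The revision fuses read just below identify such parts, so the
+	 * routine simply returns early for them.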
*/ + if ((hw->mac.type == ixgbe_mac_X550EM_x) && + (IXGBE_FUSES0_REV_MASK & + IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))) + return IXGBE_SUCCESS; + + /* If blocked by MNG FW, then don't restart AN */ + if (ixgbe_check_reset_blocked(hw)) + return IXGBE_SUCCESS; + + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status != IXGBE_SUCCESS) + return status; + + status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3); + + if (status != IXGBE_SUCCESS) + return status; + + /* If link is down, LPLU disabled in NVM, WoL disabled, or manageability + * disabled, then force link down by entering low power mode. + */ + if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) || + !(hw->wol_enabled || ixgbe_mng_present(hw))) + return ixgbe_set_copper_phy_power(hw, FALSE); + + /* Determine LCD */ + status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed); + + if (status != IXGBE_SUCCESS) + return status; + + /* If no valid LCD link speed, then force link down and exit. */ + if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN) + return ixgbe_set_copper_phy_power(hw, FALSE); + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &speed); + + if (status != IXGBE_SUCCESS) + return status; + + /* If no link now, speed is invalid so take link down */ + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status != IXGBE_SUCCESS) + return ixgbe_set_copper_phy_power(hw, false); + + /* clear everything but the speed bits */ + speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK; + + /* If current speed is already LCD, then exit. */ + if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) && + (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) || + ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) && + (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL))) + return status; + + /* Clear AN completed indication */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + if (status != IXGBE_SUCCESS) + return status; + + status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &an_10g_cntl_reg); + + if (status != IXGBE_SUCCESS) + return status; + + status = hw->phy.ops.read_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + if (status != IXGBE_SUCCESS) + return status; + + save_autoneg = hw->phy.autoneg_advertised; + + /* Setup link at least common link speed */ + status = hw->mac.ops.setup_link(hw, lcd_speed, false); + + /* restore autoneg from before setting lplu speed */ + hw->phy.autoneg_advertised = save_autoneg; + + return status; +} + +/** + * ixgbe_get_lcd_x550em - Determine lowest common denominator + * @hw: pointer to hardware structure + * @lcd_speed: pointer to lowest common link speed + * + * Determine lowest common link speed with link partner. 
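+ *
+ * For example: if the link partner advertises 1G capability the LCD is
+ * IXGBE_LINK_SPEED_1GB_FULL; if 10G LPLU is disabled for this port via
+ * the NVM D10GMP bits, no valid LCD is reported
+ * (IXGBE_LINK_SPEED_UNKNOWN); otherwise IXGBE_LINK_SPEED_10GB_FULL is
+ * returned.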
+ **/ +s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed) +{ + u16 an_lp_status; + s32 status; + u16 word = hw->eeprom.ctrl_word_3; + + *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN; + + status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &an_lp_status); + + if (status != IXGBE_SUCCESS) + return status; + + /* If link partner advertised 1G, return 1G */ + if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) { + *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL; + return status; + } + + /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */ + if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) || + (word & NVM_INIT_CTRL_3_D10GMP_PORT0)) + return status; + + /* Link partner not capable of lower speeds, return 10G */ + *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL; + return status; +} + +/** + * ixgbe_setup_fc_X550em - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + **/ +s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + u32 pause, asm_dir, reg_val; + + DEBUGFUNC("ixgbe_setup_fc_X550em"); + + /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* 10gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. + */ + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + /* Determine PAUSE and ASM_DIR bits. */ + switch (hw->fc.requested_mode) { + case ixgbe_fc_none: + pause = 0; + asm_dir = 0; + break; + case ixgbe_fc_tx_pause: + pause = 0; + asm_dir = 1; + break; + case ixgbe_fc_rx_pause: + /* Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE, as such we fall + * through to the fc_full statement. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + case ixgbe_fc_full: + pause = 1; + asm_dir = 1; + break; + default: + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + } + + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) { + ret_val = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (ret_val != IXGBE_SUCCESS) + goto out; + reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); + if (pause) + reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; + if (asm_dir) + reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; + ret_val = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + /* This device does not fully support AN. 
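+	 * Flow control autonegotiation is therefore forced off below; the
+	 * SYM_PAUSE/ASM_PAUSE bits written above are taken as the final
+	 * setting.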
*/ + hw->fc.disable_fc_autoneg = true; + } + +out: + return ret_val; +} + +/** + * ixgbe_set_mux - Set mux for port 1 access with CS4227 + * @hw: pointer to hardware structure + * @state: set mux if 1, clear if 0 + */ +STATIC void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state) +{ + u32 esdp; + + if (!hw->bus.lan_id) + return; + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (state) + esdp |= IXGBE_ESDP_SDP1; + else + esdp &= ~IXGBE_ESDP_SDP1; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore and sets the I2C MUX + **/ +s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) +{ + s32 status; + + DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em"); + + status = ixgbe_acquire_swfw_sync_X540(hw, mask); + if (status) + return status; + + if (mask & IXGBE_GSSR_I2C_MASK) + ixgbe_set_mux(hw, 1); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore and sets the I2C MUX + **/ +void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) +{ + DEBUGFUNC("ixgbe_release_swfw_sync_X550em"); + + if (mask & IXGBE_GSSR_I2C_MASK) + ixgbe_set_mux(hw, 0); + + ixgbe_release_swfw_sync_X540(hw, mask); +} + +/** + * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore and get the shared phy token as needed + */ +static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask) +{ + u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; + int retries = FW_PHY_TOKEN_RETRIES; + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a"); + + while (--retries) { + if (hmask) + status = ixgbe_acquire_swfw_sync_X540(hw, hmask); + if (status) + break; + if (!(mask & IXGBE_GSSR_TOKEN_SM)) + break; + status = ixgbe_get_phy_token(hw); + if (status != IXGBE_ERR_TOKEN_RETRY) + break; + if (hmask) + ixgbe_release_swfw_sync_X540(hw, hmask); + msec_delay(FW_PHY_TOKEN_DELAY); + } + + return status; +} + +/** + * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore and puts the shared phy token as needed + */ +static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask) +{ + u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; + + DEBUGFUNC("ixgbe_release_swfw_sync_X550a"); + + if (mask & IXGBE_GSSR_TOKEN_SM) + ixgbe_put_phy_token(hw); + + if (hmask) + ixgbe_release_swfw_sync_X540(hw, hmask); +} + +/** + * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt + * @hw: pointer to hardware structure + * + * Handle external Base T PHY interrupt. If high temperature + * failure alarm then return error, else if link status change + * then setup internal/external PHY link + * + * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature + * failure alarm, else return PHY access status. 
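+ *
+ * A minimal service sketch (illustrative; the shutdown step is left to
+ * the caller and is not part of this driver):
+ *
+ *	status = ixgbe_handle_lasi_ext_t_x550em(hw);
+ *	if (status == IXGBE_ERR_OVERTEMP)
+ *		... stop traffic and report the thermal event ...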
+ */ +s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw) +{ + bool lsc; + u32 status; + + status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); + + if (status != IXGBE_SUCCESS) + return status; + + if (lsc) + return ixgbe_setup_internal_phy(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Setup internal/external PHY link speed based on link speed, then set + * external PHY auto advertised link speed. + * + * Returns error status for any failure + **/ +s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 status; + ixgbe_link_speed force_speed; + + DEBUGFUNC("ixgbe_setup_mac_link_t_X550em"); + + /* Setup internal/external PHY link speed to iXFI (10G), unless + * only 1G is auto advertised then setup KX link. + */ + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + force_speed = IXGBE_LINK_SPEED_10GB_FULL; + else + force_speed = IXGBE_LINK_SPEED_1GB_FULL; + + /* If internal link mode is XFI, then setup XFI internal link. */ + if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { + status = ixgbe_setup_ixfi_x550em(hw, &force_speed); + + if (status != IXGBE_SUCCESS) + return status; + } + + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); +} + +/** + * ixgbe_check_link_t_X550em - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Check that both the MAC and X557 external PHY have link. + **/ +s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + u32 status; + u16 autoneg_status; + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) + return IXGBE_ERR_CONFIG; + + status = ixgbe_check_mac_link_generic(hw, speed, link_up, + link_up_wait_to_complete); + + /* If check link fails or MAC link is not up, then return */ + if (status != IXGBE_SUCCESS || !(*link_up)) + return status; + + /* MAC link is up, so check external PHY link. + * Read this twice back to back to indicate current status. + */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + + if (status != IXGBE_SUCCESS) + return status; + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + + if (status != IXGBE_SUCCESS) + return status; + + /* If external PHY link is not up, then indicate link not up */ + if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) + *link_up = false; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw) +{ + s32 status; + + status = ixgbe_reset_phy_generic(hw); + + if (status != IXGBE_SUCCESS) + return status; + + /* Configure Link Status Alarm and Temperature Threshold interrupts */ + return ixgbe_enable_lasi_ext_t_x550em(hw); +} + +/** + * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs. 
+ * @hw: pointer to hardware structure + * @led_idx: led number to turn on + **/ +s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx) +{ + u16 phy_data; + + DEBUGFUNC("ixgbe_led_on_t_X550em"); + + if (led_idx >= IXGBE_X557_MAX_LED_INDEX) + return IXGBE_ERR_PARAM; + + /* To turn on the LED, set mode to ON. */ + ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); + phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK; + ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs. + * @hw: pointer to hardware structure + * @led_idx: led number to turn off + **/ +s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx) +{ + u16 phy_data; + + DEBUGFUNC("ixgbe_led_off_t_X550em"); + + if (led_idx >= IXGBE_X557_MAX_LED_INDEX) + return IXGBE_ERR_PARAM; + + /* To turn on the LED, set mode to ON. */ + ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); + phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK; + ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); + + return IXGBE_SUCCESS; +} + diff --git a/drivers/net/ixgbe/base/ixgbe_x550.h b/drivers/net/ixgbe/base/ixgbe_x550.h new file mode 100644 index 00000000..a8c0a678 --- /dev/null +++ b/drivers/net/ixgbe/base/ixgbe_x550.h @@ -0,0 +1,113 @@ +/******************************************************************************* + +Copyright (c) 2001-2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +***************************************************************************/ + +#ifndef _IXGBE_X550_H_ +#define _IXGBE_X550_H_ + +#include "ixgbe_type.h" + +s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw); +s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw); +s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw); + +s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw); +s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw); +s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw); +s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw); +s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size); +s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val); +s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw); +s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data); +s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, + u16 data); +s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data); +s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, +u16 *data); +s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, + u16 *data); +s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, + u16 data); +s32 ixgbe_set_eee_X550(struct ixgbe_hw *hw, bool enable_eee); +s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee); +void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable, + unsigned int pool); +void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, + bool enable, int vf); +s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 data); +s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *data); +s32 ixgbe_get_phy_token(struct ixgbe_hw *); +s32 ixgbe_put_phy_token(struct ixgbe_hw *); +s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 data); +s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *data); +void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw); +void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw); +void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap); +void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf); +enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw); +s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *autoneg); +void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw); +s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw); +s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw); +s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw); +u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw); +void ixgbe_disable_rx_x550(struct ixgbe_hw *hw); +s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed); +s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw); +s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask); +s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw); +s32 
ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete); +s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw); +s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw); +s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx); +s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx); +#endif /* _IXGBE_X550_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_82599_bypass.c b/drivers/net/ixgbe/ixgbe_82599_bypass.c new file mode 100644 index 00000000..21c42eac --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_82599_bypass.c @@ -0,0 +1,314 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "base/ixgbe_type.h" +#include "base/ixgbe_82599.h" +#include "base/ixgbe_api.h" +#include "base/ixgbe_common.h" +#include "base/ixgbe_phy.h" +#include "ixgbe_bypass_defines.h" +#include "ixgbe_bypass.h" + +/** + * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber + * @hw: pointer to hardware structure + * @speed: link speed to set + * + * We set the module speed differently for fixed fiber. For other + * multi-speed devices we don't have an error value so here if we + * detect an error we just log it and exit. 
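+ *
+ * The speed is selected through the module's SFF-8472 soft rate-select
+ * bits (RS0 in the OSCB byte, RS1 in the ESCB byte), written over the
+ * second SFP+ I2C address (IXGBE_I2C_EEPROM_DEV_ADDR2) as done below.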
+ */ +static void +ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) +{ + s32 status; + u8 rs, eeprom_data; + + switch (speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + /* one bit mask same as setting on */ + rs = IXGBE_SFF_SOFT_RS_SELECT_10G; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + rs = IXGBE_SFF_SOFT_RS_SELECT_1G; + break; + default: + PMD_DRV_LOG(ERR, "Invalid fixed module speed"); + return; + } + + /* Set RS0 */ + status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + &eeprom_data); + if (status) { + PMD_DRV_LOG(ERR, "Failed to read Rx Rate Select RS0"); + goto out; + } + + eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs; + + status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + eeprom_data); + if (status) { + PMD_DRV_LOG(ERR, "Failed to write Rx Rate Select RS0"); + goto out; + } + + /* Set RS1 */ + status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + &eeprom_data); + if (status) { + PMD_DRV_LOG(ERR, "Failed to read Rx Rate Select RS1"); + goto out; + } + + eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs; + + status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + eeprom_data); + if (status) { + PMD_DRV_LOG(ERR, "Failed to write Rx Rate Select RS1"); + goto out; + } +out: + return; +} + +/** + * ixgbe_setup_mac_link_multispeed_fixed_fiber - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. + **/ +static s32 +ixgbe_setup_mac_link_multispeed_fixed_fiber(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 status = IXGBE_SUCCESS; + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; + ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; + u32 speedcnt = 0; + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + u32 i = 0; + bool link_up = false; + bool negotiation; + + PMD_INIT_FUNC_TRACE(); + + /* Mask off requested but non-supported speeds */ + status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation); + if (status != IXGBE_SUCCESS) + return status; + + speed &= link_speed; + + /* + * Try each speed one by one, highest priority first. We do this in + * software because 10gb fiber doesn't support speed autonegotiation. + */ + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + speedcnt++; + highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = ixgbe_check_link(hw, &link_speed, &link_up, false); + if (status != IXGBE_SUCCESS) + return status; + + if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) + goto out; + /* Set the module link speed */ + ixgbe_set_fiber_fixed_speed(hw, IXGBE_LINK_SPEED_10GB_FULL); + + /* Set the module link speed */ + esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + + /* Allow module to change analog characteristics (1G->10G) */ + msec_delay(40); + + status = ixgbe_setup_mac_link_82599(hw, + IXGBE_LINK_SPEED_10GB_FULL, + autoneg_wait_to_complete); + if (status != IXGBE_SUCCESS) + return status; + + /* Flap the tx laser if it has not already been done */ + ixgbe_flap_tx_laser(hw); + + /* + * Wait for the controller to acquire link. 
Per IEEE 802.3ap, + * Section 73.10.2, we may have to wait up to 500ms if KR is + * attempted. 82599 uses the same timing for 10g SFI. + */ + for (i = 0; i < 5; i++) { + /* Wait for the link partner to also set speed */ + msec_delay(100); + + /* If we have link, just jump out */ + status = ixgbe_check_link(hw, &link_speed, + &link_up, false); + if (status != IXGBE_SUCCESS) + return status; + + if (link_up) + goto out; + } + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + speedcnt++; + if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) + highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = ixgbe_check_link(hw, &link_speed, &link_up, false); + if (status != IXGBE_SUCCESS) + return status; + + if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) + goto out; + + /* Set the module link speed */ + ixgbe_set_fiber_fixed_speed(hw, IXGBE_LINK_SPEED_1GB_FULL); + + /* Allow module to change analog characteristics (10G->1G) */ + msec_delay(40); + + status = ixgbe_setup_mac_link_82599(hw, + IXGBE_LINK_SPEED_1GB_FULL, + autoneg_wait_to_complete); + if (status != IXGBE_SUCCESS) + return status; + + /* Flap the tx laser if it has not already been done */ + ixgbe_flap_tx_laser(hw); + + /* Wait for the link partner to also set speed */ + msec_delay(100); + + /* If we have link, just jump out */ + status = ixgbe_check_link(hw, &link_speed, &link_up, false); + if (status != IXGBE_SUCCESS) + return status; + + if (link_up) + goto out; + } + + /* + * We didn't get link. Configure back to the highest speed we tried, + * (if there was more than one). We call ourselves back with just the + * single highest speed that the user requested. + */ + if (speedcnt > 1) + status = ixgbe_setup_mac_link_multispeed_fixed_fiber(hw, + highest_link_speed, autoneg_wait_to_complete); + +out: + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + return status; +} + +static enum ixgbe_media_type +ixgbe_bypass_get_media_type(struct ixgbe_hw *hw) +{ + enum ixgbe_media_type media_type; + + PMD_INIT_FUNC_TRACE(); + + if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { + media_type = ixgbe_media_type_fiber; + } else { + media_type = ixgbe_get_media_type_82599(hw); + } + return media_type; +} + +/* + * Wrapper around shared code (base driver) to support BYPASS nic. 
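+ *
+ * A hedged bring-up sketch: the bypass variants wrap the generic init
+ * calls and only change behaviour when the device is the 82599 bypass
+ * NIC:
+ *
+ *	s32 ret = ixgbe_bypass_init_shared_code(hw);
+ *
+ *	if (ret == IXGBE_SUCCESS)
+ *		ret = ixgbe_bypass_init_hw(hw);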
+ */ +s32 +ixgbe_bypass_init_shared_code(struct ixgbe_hw *hw) +{ + s32 ret_val; + + if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { + hw->mac.type = ixgbe_mac_82599EB; + } + + ret_val = ixgbe_init_shared_code(hw); + if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { + hw->mac.ops.get_media_type = &ixgbe_bypass_get_media_type; + ixgbe_init_mac_link_ops_82599(hw); + } + + return ret_val; +} + +s32 +ixgbe_bypass_init_hw(struct ixgbe_hw *hw) +{ + int rc; + + if ((rc = ixgbe_init_hw(hw)) == 0 && + hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { + + hw->mac.ops.setup_link = + &ixgbe_setup_mac_link_multispeed_fixed_fiber; + + hw->mac.ops.get_media_type = &ixgbe_bypass_get_media_type; + + hw->mac.ops.disable_tx_laser = NULL; + hw->mac.ops.enable_tx_laser = NULL; + hw->mac.ops.flap_tx_laser = NULL; + } + + return rc; +} diff --git a/drivers/net/ixgbe/ixgbe_bypass.c b/drivers/net/ixgbe/ixgbe_bypass.c new file mode 100644 index 00000000..73f608b9 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_bypass.c @@ -0,0 +1,414 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <time.h> +#include <rte_atomic.h> +#include <rte_ethdev.h> +#include "ixgbe_ethdev.h" +#include "ixgbe_bypass_api.h" + +#define BYPASS_STATUS_OFF_MASK 3 + +/* Macros to check for invlaid function pointers. */ +#define FUNC_PTR_OR_ERR_RET(func, retval) do { \ + if ((func) == NULL) { \ + PMD_DRV_LOG(ERR, "%s:%d function not supported", \ + __func__, __LINE__); \ + return retval; \ + } \ +} while (0) + +#define FUNC_PTR_OR_RET(func) do { \ + if ((func) == NULL) { \ + PMD_DRV_LOG(ERR, "%s:%d function not supported", \ + __func__, __LINE__); \ + return; \ + } \ +} while (0) + + +/** + * ixgbe_bypass_set_time - Set bypass FW time epoc. + * + * @hw: pointer to hardware structure + * + * This function with sync the FW date stamp with that of the + * system clock. 
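+ *
+ * Only a zero timestamp plus the valid and timer-reset bits are sent to
+ * the firmware here; the host-side wall-clock time of the reset is kept
+ * in adapter->bps.reset_tm for later reference.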
+ **/ +static void +ixgbe_bypass_set_time(struct ixgbe_adapter *adapter) +{ + u32 mask, value; + u32 sec; + struct ixgbe_hw *hw = &adapter->hw; + + sec = 0; + + /* + * Send the FW our current time and turn on time_valid and + * timer_reset bits. + */ + mask = BYPASS_CTL1_TIME_M | + BYPASS_CTL1_VALID_M | + BYPASS_CTL1_OFFTRST_M; + value = (sec & BYPASS_CTL1_TIME_M) | + BYPASS_CTL1_VALID | + BYPASS_CTL1_OFFTRST; + + FUNC_PTR_OR_RET(adapter->bps.ops.bypass_set); + + /* Store FW reset time (in seconds from epoch). */ + adapter->bps.reset_tm = time(NULL); + + /* reset FW timer. */ + adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL1, mask, value); +} + +/** + * ixgbe_bypass_init - Make some environment changes for bypass + * + * @adapter: pointer to ixgbe_adapter structure for access to state bits + * + * This function collects all the modifications needed by the bypass + * driver. + **/ +void +ixgbe_bypass_init(struct rte_eth_dev *dev) +{ + struct ixgbe_adapter *adapter; + struct ixgbe_hw *hw; + + adapter = IXGBE_DEV_TO_ADPATER(dev); + hw = &adapter->hw; + + /* Only allow BYPASS ops on the first port */ + if (hw->device_id != IXGBE_DEV_ID_82599_BYPASS || + hw->bus.func != 0) { + PMD_DRV_LOG(ERR, "bypass function is not supported on that device"); + return; + } + + /* set bypass ops. */ + adapter->bps.ops.bypass_rw = &ixgbe_bypass_rw_generic; + adapter->bps.ops.bypass_valid_rd = &ixgbe_bypass_valid_rd_generic; + adapter->bps.ops.bypass_set = &ixgbe_bypass_set_generic; + adapter->bps.ops.bypass_rd_eep = &ixgbe_bypass_rd_eep_generic; + + /* set the time for logging. */ + ixgbe_bypass_set_time(adapter); + + /* Don't have the SDP to the laser */ + hw->mac.ops.disable_tx_laser = NULL; + hw->mac.ops.enable_tx_laser = NULL; + hw->mac.ops.flap_tx_laser = NULL; +} + +s32 +ixgbe_bypass_state_show(struct rte_eth_dev *dev, u32 *state) +{ + struct ixgbe_hw *hw; + s32 ret_val; + u32 cmd; + u32 by_ctl = 0; + struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev); + + hw = &adapter->hw; + FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP); + + cmd = BYPASS_PAGE_CTL0; + ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl); + + /* Assume bypass_rw didn't error out, if it did state will + * be ignored anyway. + */ + *state = (by_ctl >> BYPASS_STATUS_OFF_SHIFT) & BYPASS_STATUS_OFF_MASK; + + return ret_val; +} + + +s32 +ixgbe_bypass_state_store(struct rte_eth_dev *dev, u32 *new_state) +{ + struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev); + struct ixgbe_hw *hw; + s32 ret_val; + + hw = &adapter->hw; + FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP); + + /* Set the new state */ + ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0, + BYPASS_MODE_OFF_M, *new_state); + if (ret_val) + goto exit; + + /* Set AUTO back on so FW can receive events */ + ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0, + BYPASS_MODE_OFF_M, BYPASS_AUTO); + +exit: + return ret_val; + +} + +s32 +ixgbe_bypass_event_show(struct rte_eth_dev *dev, u32 event, + u32 *state) +{ + struct ixgbe_hw *hw; + s32 ret_val; + u32 shift; + u32 cmd; + u32 by_ctl = 0; + struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev); + + hw = &adapter->hw; + FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP); + + cmd = BYPASS_PAGE_CTL0; + ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl); + + /* Assume bypass_rw didn't error out, if it did event will + * be ignored anyway. 
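+	 * Each event status occupies a two-bit field in the CTL0 word; the
+	 * switch below picks the field's shift and the low two bits are
+	 * returned as the state.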
+ */ + switch (event) { + case BYPASS_EVENT_WDT_TO: + shift = BYPASS_WDTIMEOUT_SHIFT; + break; + case BYPASS_EVENT_MAIN_ON: + shift = BYPASS_MAIN_ON_SHIFT; + break; + case BYPASS_EVENT_MAIN_OFF: + shift = BYPASS_MAIN_OFF_SHIFT; + break; + case BYPASS_EVENT_AUX_ON: + shift = BYPASS_AUX_ON_SHIFT; + break; + case BYPASS_EVENT_AUX_OFF: + shift = BYPASS_AUX_OFF_SHIFT; + break; + default: + return EINVAL; + } + + *state = (by_ctl >> shift) & 0x3; + + return ret_val; +} + +s32 +ixgbe_bypass_event_store(struct rte_eth_dev *dev, u32 event, + u32 state) +{ + struct ixgbe_hw *hw; + u32 status; + u32 off; + s32 ret_val; + struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev); + + hw = &adapter->hw; + FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP); + + switch (event) { + case BYPASS_EVENT_WDT_TO: + off = BYPASS_WDTIMEOUT_M; + status = state << BYPASS_WDTIMEOUT_SHIFT; + break; + case BYPASS_EVENT_MAIN_ON: + off = BYPASS_MAIN_ON_M; + status = state << BYPASS_MAIN_ON_SHIFT; + break; + case BYPASS_EVENT_MAIN_OFF: + off = BYPASS_MAIN_OFF_M; + status = state << BYPASS_MAIN_OFF_SHIFT; + break; + case BYPASS_EVENT_AUX_ON: + off = BYPASS_AUX_ON_M; + status = state << BYPASS_AUX_ON_SHIFT; + break; + case BYPASS_EVENT_AUX_OFF: + off = BYPASS_AUX_OFF_M; + status = state << BYPASS_AUX_OFF_SHIFT; + break; + default: + return EINVAL; + } + + ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0, + off, status); + + return ret_val; +} + +s32 +ixgbe_bypass_wd_timeout_store(struct rte_eth_dev *dev, u32 timeout) +{ + struct ixgbe_hw *hw; + u32 status; + u32 mask; + s32 ret_val; + struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev); + + hw = &adapter->hw; + FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP); + + /* disable the timer with timeout of zero */ + if (timeout == RTE_BYPASS_TMT_OFF) { + status = 0x0; /* WDG enable off */ + mask = BYPASS_WDT_ENABLE_M; + } else { + /* set time out value */ + mask = BYPASS_WDT_VALUE_M; + + /* enable the timer */ + status = timeout << BYPASS_WDT_TIME_SHIFT; + status |= 0x1 << BYPASS_WDT_ENABLE_SHIFT; + mask |= BYPASS_WDT_ENABLE_M; + } + + ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0, + mask, status); + + return ret_val; +} + +s32 +ixgbe_bypass_ver_show(struct rte_eth_dev *dev, u32 *ver) +{ + struct ixgbe_hw *hw; + u32 cmd; + u32 status; + s32 ret_val; + struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev); + + hw = &adapter->hw; + FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP); + + cmd = BYPASS_PAGE_CTL2 | BYPASS_WE; + cmd |= (BYPASS_EEPROM_VER_ADD << BYPASS_CTL2_OFFSET_SHIFT) & + BYPASS_CTL2_OFFSET_M; + ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status); + if (ret_val) + goto exit; + + /* wait for the write to stick */ + msleep(100); + + /* Now read the results */ + cmd &= ~BYPASS_WE; + ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status); + if (ret_val) + goto exit; + + *ver = status & BYPASS_CTL2_DATA_M; /* only one byte of date */ + +exit: + return ret_val; +} + +s32 +ixgbe_bypass_wd_timeout_show(struct rte_eth_dev *dev, u32 *wd_timeout) +{ + struct ixgbe_hw *hw; + u32 by_ctl = 0; + u32 cmd; + u32 wdg; + s32 ret_val; + struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev); + + hw = &adapter->hw; + FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP); + + cmd = BYPASS_PAGE_CTL0; + ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl); + + wdg = by_ctl & BYPASS_WDT_ENABLE_M; + if (!wdg) + *wd_timeout = RTE_BYPASS_TMT_OFF; + else + *wd_timeout = (by_ctl >> BYPASS_WDT_TIME_SHIFT) & + BYPASS_WDT_MASK; + 
+ return ret_val; +} + +s32 +ixgbe_bypass_wd_reset(struct rte_eth_dev *dev) +{ + u32 cmd; + u32 status; + u32 sec; + u32 count = 0; + s32 ret_val; + struct ixgbe_hw *hw; + struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev); + + hw = &adapter->hw; + + FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP); + FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_valid_rd, -ENOTSUP); + + /* Use the lower level bit-bang functions since we don't need + * to read the register first to get it's current state as we + * are setting every thing in this write. + */ + /* Set up WD pet */ + cmd = BYPASS_PAGE_CTL1 | BYPASS_WE | BYPASS_CTL1_WDT_PET; + + /* Resync the FW time while writing to CTL1 anyway */ + adapter->bps.reset_tm = time(NULL); + sec = 0; + + cmd |= (sec & BYPASS_CTL1_TIME_M) | BYPASS_CTL1_VALID; + + /* reset FW timer offset since we are resetting the clock */ + cmd |= BYPASS_CTL1_OFFTRST; + + ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status); + + /* Read until it matches what we wrote, or we time out */ + do { + if (count++ > 10) { + ret_val = IXGBE_BYPASS_FW_WRITE_FAILURE; + break; + } + + if (adapter->bps.ops.bypass_rw(hw, BYPASS_PAGE_CTL1, &status)) { + ret_val = IXGBE_ERR_INVALID_ARGUMENT; + break; + } + } while (!adapter->bps.ops.bypass_valid_rd(cmd, status)); + + return ret_val; +} diff --git a/drivers/net/ixgbe/ixgbe_bypass.h b/drivers/net/ixgbe/ixgbe_bypass.h new file mode 100644 index 00000000..fcd97743 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_bypass.h @@ -0,0 +1,68 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _IXGBE_BYPASS_H_ +#define _IXGBE_BYPASS_H_ + +#ifdef RTE_NIC_BYPASS + +struct ixgbe_bypass_mac_ops { + s32 (*bypass_rw) (struct ixgbe_hw *hw, u32 cmd, u32 *status); + bool (*bypass_valid_rd) (u32 in_reg, u32 out_reg); + s32 (*bypass_set) (struct ixgbe_hw *hw, u32 cmd, u32 event, u32 action); + s32 (*bypass_rd_eep) (struct ixgbe_hw *hw, u32 addr, u8 *value); +}; + +struct ixgbe_bypass_info { + uint64_t reset_tm; + struct ixgbe_bypass_mac_ops ops; +}; + +struct rte_eth_dev; + +void ixgbe_bypass_init(struct rte_eth_dev *dev); +s32 ixgbe_bypass_state_show(struct rte_eth_dev *dev, u32 *state); +s32 ixgbe_bypass_state_store(struct rte_eth_dev *dev, u32 *new_state); +s32 ixgbe_bypass_event_show(struct rte_eth_dev *dev, u32 event, u32 *state); +s32 ixgbe_bypass_event_store(struct rte_eth_dev *dev, u32 event, u32 state); +s32 ixgbe_bypass_wd_timeout_store(struct rte_eth_dev *dev, u32 timeout); +s32 ixgbe_bypass_ver_show(struct rte_eth_dev *dev, u32 *ver); +s32 ixgbe_bypass_wd_timeout_show(struct rte_eth_dev *dev, u32 *wd_timeout); +s32 ixgbe_bypass_wd_reset(struct rte_eth_dev *dev); + +s32 ixgbe_bypass_init_shared_code(struct ixgbe_hw *hw); +s32 ixgbe_bypass_init_hw(struct ixgbe_hw *hw); + +#endif /* RTE_NIC_BYPASS */ + +#endif /* _IXGBE_BYPASS_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_bypass_api.h b/drivers/net/ixgbe/ixgbe_bypass_api.h new file mode 100644 index 00000000..aec8f1ec --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_bypass_api.h @@ -0,0 +1,300 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _IXGBE_BYPASS_API_H_ +#define _IXGBE_BYPASS_API_H_ + +#ifdef RTE_NIC_BYPASS + +#include "ixgbe_bypass_defines.h" +/** + * ixgbe_bypass_rw_generic - Bit bang data into by_pass FW + * + * @hw: pointer to hardware structure + * @cmd: Command we send to the FW + * @status: The reply from the FW + * + * Bit-bangs the cmd to the by_pass FW status points to what is returned. 
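+ *
+ * A minimal caller sketch (mirrors ixgbe_bypass_state_show(), error
+ * handling elided):
+ *
+ *	u32 by_ctl = 0, state;
+ *
+ *	if (ixgbe_bypass_rw_generic(hw, BYPASS_PAGE_CTL0, &by_ctl) == 0)
+ *		state = (by_ctl >> BYPASS_STATUS_OFF_SHIFT) &
+ *			BYPASS_STATUS_OFF_MASK;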
+ **/ +#define IXGBE_BYPASS_BB_WAIT 1 +static s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status) +{ + int i; + u32 sck, sdi, sdo, dir_sck, dir_sdi, dir_sdo; + u32 esdp; + + if (!status) + return IXGBE_ERR_PARAM; + + *status = 0; + + /* SDP vary by MAC type */ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + sck = IXGBE_ESDP_SDP7; + sdi = IXGBE_ESDP_SDP0; + sdo = IXGBE_ESDP_SDP6; + dir_sck = IXGBE_ESDP_SDP7_DIR; + dir_sdi = IXGBE_ESDP_SDP0_DIR; + dir_sdo = IXGBE_ESDP_SDP6_DIR; + break; + case ixgbe_mac_X540: + sck = IXGBE_ESDP_SDP2; + sdi = IXGBE_ESDP_SDP0; + sdo = IXGBE_ESDP_SDP1; + dir_sck = IXGBE_ESDP_SDP2_DIR; + dir_sdi = IXGBE_ESDP_SDP0_DIR; + dir_sdo = IXGBE_ESDP_SDP1_DIR; + break; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + sck = IXGBE_ESDP_SDP2; + sdi = IXGBE_ESDP_SDP0; + sdo = IXGBE_ESDP_SDP1; + dir_sck = IXGBE_ESDP_SDP2_DIR; + dir_sdi = IXGBE_ESDP_SDP0_DIR; + dir_sdo = IXGBE_ESDP_SDP1_DIR; + break; + default: + return IXGBE_ERR_DEVICE_NOT_SUPPORTED; + } + + /* Set SDP pins direction */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp |= dir_sck; /* SCK as output */ + esdp |= dir_sdi; /* SDI as output */ + esdp &= ~dir_sdo; /* SDO as input */ + esdp |= sck; + esdp |= sdi; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + // TODO: + msleep(IXGBE_BYPASS_BB_WAIT); + + /* Generate start condition */ + esdp &= ~sdi; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + msleep(IXGBE_BYPASS_BB_WAIT); + + esdp &= ~sck; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + msleep(IXGBE_BYPASS_BB_WAIT); + + /* Clock out the new control word and clock in the status */ + for (i = 0; i < 32; i++) { + if ((cmd >> (31 - i)) & 0x01) { + esdp |= sdi; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + } else { + esdp &= ~sdi; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + } + IXGBE_WRITE_FLUSH(hw); + msleep(IXGBE_BYPASS_BB_WAIT); + + esdp |= sck; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + msleep(IXGBE_BYPASS_BB_WAIT); + + esdp &= ~sck; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + msleep(IXGBE_BYPASS_BB_WAIT); + + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (esdp & sdo) + *status = (*status << 1) | 0x01; + else + *status = (*status << 1) | 0x00; + msleep(IXGBE_BYPASS_BB_WAIT); + } + + /* stop condition */ + esdp |= sck; + esdp &= ~sdi; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + msleep(IXGBE_BYPASS_BB_WAIT); + + esdp |= sdi; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + + /* set the page bits to match the cmd that the status it belongs to */ + *status = (*status & 0x3fffffff) | (cmd & 0xc0000000); + + return 0; +} + +/** + * ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang. + * + * If we send a write we can't be sure it took until we can read back + * that same register. It can be a problem as some of the feilds may + * for valid reasons change between the time wrote the register and + * we read it again to verify. So this function check everything we + * can check and then assumes it worked. + * + * @u32 in_reg - The register cmd for the bit-bang read. + * @u32 out_reg - The register returned from a bit-bang read. 
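Editor's note: ixgbe_bypass_rw_generic() above clocks a single 32-bit command word out MSB first and shifts the 32-bit status back in; the top two bits select the control page, BYPASS_WE marks a write, and after the transfer the page bits of the status are forced to match the command. The standalone program below only illustrates that word layout; the constants are copied from ixgbe_bypass_defines.h later in this patch and the status value is made up for the example:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define BYPASS_PAGE_CTL1    0x40000000u
#define BYPASS_PAGE_M       0xc0000000u
#define BYPASS_WE           0x20000000u
#define BYPASS_CTL1_TIME_M  0x01ffffffu
#define BYPASS_CTL1_VALID   0x02000000u
#define BYPASS_CTL1_WDT_PET 0x08000000u

int main(void)
{
	uint32_t sec = 0;	/* seconds value being written to the FW clock */
	uint32_t cmd, status;

	/* Same construction as ixgbe_bypass_wd_reset(): pet the WDT, resync time. */
	cmd = BYPASS_PAGE_CTL1 | BYPASS_WE | BYPASS_CTL1_WDT_PET;
	cmd |= (sec & BYPASS_CTL1_TIME_M) | BYPASS_CTL1_VALID;
	printf("CTL1 command word: 0x%08" PRIx32 "\n", cmd);

	/* After the bit-bang, the reply is tagged with the command's page bits. */
	status = 0x12345678u;	/* arbitrary example value shifted in from SDO */
	status = (status & ~BYPASS_PAGE_M) | (cmd & BYPASS_PAGE_M);
	printf("status tagged with page: 0x%08" PRIx32 "\n", status);
	return 0;
}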
+ **/ +static bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg) +{ + u32 mask; + + /* Page must match for all control pages */ + if ((in_reg & BYPASS_PAGE_M) != (out_reg & BYPASS_PAGE_M)) + return false; + + switch (in_reg & BYPASS_PAGE_M) { + case BYPASS_PAGE_CTL0: + /* All the following can't change since the last write + * - All the event actions + * - The timeout value + */ + mask = BYPASS_AUX_ON_M | BYPASS_MAIN_ON_M | + BYPASS_MAIN_OFF_M | BYPASS_AUX_OFF_M | + BYPASS_WDTIMEOUT_M | + BYPASS_WDT_VALUE_M; + if ((out_reg & mask) != (in_reg & mask)) + return false; + + /* 0x0 is never a valid value for bypass status */ + if (!(out_reg & BYPASS_STATUS_OFF_M)) + return false; + break; + case BYPASS_PAGE_CTL1: + /* All the following can't change since the last write + * - time valid bit + * - time we last sent + */ + mask = BYPASS_CTL1_VALID_M | BYPASS_CTL1_TIME_M; + if ((out_reg & mask) != (in_reg & mask)) + return false; + break; + case BYPASS_PAGE_CTL2: + /* All we can check in this page is control number + * which is already done above. + */ + break; + } + + /* We are as sure as we can be return true */ + return true; +} + +/** + * ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Regiter. + * + * @hw: pointer to hardware structure + * @cmd: The control word we are setting. + * @event: The event we are setting in the FW. This also happens to + * be the mask for the event we are setting (handy) + * @action: The action we set the event to in the FW. This is in a + * bit field that happens to be what we want to put in + * the event spot (also handy) + **/ +static s32 ixgbe_bypass_set_generic(struct ixgbe_hw *hw, u32 ctrl, u32 event, + u32 action) +{ + u32 by_ctl = 0; + u32 cmd, verify; + u32 count = 0; + + /* Get current values */ + cmd = ctrl; /* just reading only need control number */ + if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl)) + return IXGBE_ERR_INVALID_ARGUMENT; + + /* Set to new action */ + cmd = (by_ctl & ~event) | BYPASS_WE | action; + if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl)) + return IXGBE_ERR_INVALID_ARGUMENT; + + /* Page 0 force a FW eeprom write which is slow so verify */ + if ((cmd & BYPASS_PAGE_M) == BYPASS_PAGE_CTL0) { + verify = BYPASS_PAGE_CTL0; + do { + if (count++ > 5) + return IXGBE_BYPASS_FW_WRITE_FAILURE; + + if (ixgbe_bypass_rw_generic(hw, verify, &by_ctl)) + return IXGBE_ERR_INVALID_ARGUMENT; + } while (!ixgbe_bypass_valid_rd_generic(cmd, by_ctl)); + } else { + /* We have give the FW time for the write to stick */ + msleep(100); + } + + return 0; +} + +/** + * ixgbe_bypass_rd_eep_generic - Read the bypass FW eeprom address. + * + * @hw: pointer to hardware structure + * @addr: The bypass eeprom address to read. + * @value: The 8b of data at the address above. 
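Editor's note: the read-back check in ixgbe_bypass_valid_rd_generic() above is per page: CTL0 requires the event actions and watchdog fields to echo back, CTL1 requires the valid bit and time field to match, CTL2 has nothing beyond the page bits to verify. A standalone restatement of just the CTL1 branch, for illustration only (constants copied from ixgbe_bypass_defines.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BYPASS_PAGE_M       0xc0000000u
#define BYPASS_PAGE_CTL1    0x40000000u
#define BYPASS_CTL1_TIME_M  0x01ffffffu
#define BYPASS_CTL1_VALID_M 0x02000000u

/* CTL1 subset of the rule: page and (valid, time) fields must echo back. */
static bool ctl1_readback_ok(uint32_t in_reg, uint32_t out_reg)
{
	if ((in_reg & BYPASS_PAGE_M) != (out_reg & BYPASS_PAGE_M))
		return false;
	return (in_reg & (BYPASS_CTL1_VALID_M | BYPASS_CTL1_TIME_M)) ==
	       (out_reg & (BYPASS_CTL1_VALID_M | BYPASS_CTL1_TIME_M));
}

int main(void)
{
	uint32_t wrote = BYPASS_PAGE_CTL1 | BYPASS_CTL1_VALID_M | 0x100u;

	printf("exact echo accepted:  %d\n", ctl1_readback_ok(wrote, wrote));
	printf("time drift rejected:  %d\n", ctl1_readback_ok(wrote, wrote ^ 0x1u));
	return 0;
}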
+ **/ +static s32 ixgbe_bypass_rd_eep_generic(struct ixgbe_hw *hw, u32 addr, u8 *value) +{ + u32 cmd; + u32 status; + + + /* send the request */ + cmd = BYPASS_PAGE_CTL2 | BYPASS_WE; + cmd |= (addr << BYPASS_CTL2_OFFSET_SHIFT) & BYPASS_CTL2_OFFSET_M; + if (ixgbe_bypass_rw_generic(hw, cmd, &status)) + return IXGBE_ERR_INVALID_ARGUMENT; + + /* We have give the FW time for the write to stick */ + msleep(100); + + /* now read the results */ + cmd &= ~BYPASS_WE; + if (ixgbe_bypass_rw_generic(hw, cmd, &status)) + return IXGBE_ERR_INVALID_ARGUMENT; + + *value = status & BYPASS_CTL2_DATA_M; + + return 0; +} + +#endif /* RTE_NIC_BYPASS */ + +#endif /* _IXGBE_BYPASS_API_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_bypass_defines.h b/drivers/net/ixgbe/ixgbe_bypass_defines.h new file mode 100644 index 00000000..22570acf --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_bypass_defines.h @@ -0,0 +1,160 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
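Editor's note: ixgbe_bypass_rd_eep_generic() above reads one byte of the bypass FW EEPROM per transaction: the offset goes into the CTL2 offset field with BYPASS_WE set, and a second transfer with BYPASS_WE cleared returns the data byte. A hypothetical sketch of reading the version byte through the ops table (read_bypass_fw_version is not part of the driver; BYPASS_EEPROM_VER_ADD comes from ixgbe_bypass_defines.h below, and this is presumably what ixgbe_bypass_ver_show() is built on):

#include <errno.h>

/* Hypothetical: fetch the bypass FW version byte via the ops vector. */
static s32
read_bypass_fw_version(struct ixgbe_adapter *adapter, u8 *ver)
{
	if (adapter->bps.ops.bypass_rd_eep == NULL)
		return -ENOTSUP;
	/* Offset 0x02 (BYPASS_EEPROM_VER_ADD) holds the version byte. */
	return adapter->bps.ops.bypass_rd_eep(&adapter->hw,
					      BYPASS_EEPROM_VER_ADD, ver);
}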
+ */ + +#ifndef _IXGBE_BYPASS_DEFINES_H_ +#define _IXGBE_BYPASS_DEFINES_H_ + +#ifdef RTE_NIC_BYPASS + +#define msleep(x) rte_delay_us(x*1000) +#define usleep_range(min, max) rte_delay_us(min) + +#define BYPASS_PAGE_CTL0 0x00000000 +#define BYPASS_PAGE_CTL1 0x40000000 +#define BYPASS_PAGE_CTL2 0x80000000 +#define BYPASS_PAGE_M 0xc0000000 +#define BYPASS_WE 0x20000000 + +#define BYPASS_AUTO 0x0 +#define BYPASS_NOP 0x0 +#define BYPASS_NORM 0x1 +#define BYPASS_BYPASS 0x2 +#define BYPASS_ISOLATE 0x3 + +#define BYPASS_EVENT_MAIN_ON 0x1 +#define BYPASS_EVENT_AUX_ON 0x2 +#define BYPASS_EVENT_MAIN_OFF 0x3 +#define BYPASS_EVENT_AUX_OFF 0x4 +#define BYPASS_EVENT_WDT_TO 0x5 +#define BYPASS_EVENT_USR 0x6 + +#define BYPASS_MODE_OFF_M 0x00000003 +#define BYPASS_STATUS_OFF_M 0x0000000c +#define BYPASS_AUX_ON_M 0x00000030 +#define BYPASS_MAIN_ON_M 0x000000c0 +#define BYPASS_MAIN_OFF_M 0x00000300 +#define BYPASS_AUX_OFF_M 0x00000c00 +#define BYPASS_WDTIMEOUT_M 0x00003000 +#define BYPASS_WDT_ENABLE_M 0x00004000 +#define BYPASS_WDT_VALUE_M 0x00070000 + +#define BYPASS_MODE_OFF_SHIFT 0 +#define BYPASS_STATUS_OFF_SHIFT 2 +#define BYPASS_AUX_ON_SHIFT 4 +#define BYPASS_MAIN_ON_SHIFT 6 +#define BYPASS_MAIN_OFF_SHIFT 8 +#define BYPASS_AUX_OFF_SHIFT 10 +#define BYPASS_WDTIMEOUT_SHIFT 12 +#define BYPASS_WDT_ENABLE_SHIFT 14 +#define BYPASS_WDT_TIME_SHIFT 16 + +#define BYPASS_WDT_1 0x0 +#define BYPASS_WDT_1_5 0x1 +#define BYPASS_WDT_2 0x2 +#define BYPASS_WDT_3 0x3 +#define BYPASS_WDT_4 0x4 +#define BYPASS_WDT_8 0x5 +#define BYPASS_WDT_16 0x6 +#define BYPASS_WDT_32 0x7 +#define BYPASS_WDT_OFF 0xffff + +#define BYPASS_WDT_MASK 0x7 + +#define BYPASS_CTL1_TIME_M 0x01ffffff +#define BYPASS_CTL1_VALID_M 0x02000000 +#define BYPASS_CTL1_OFFTRST_M 0x04000000 +#define BYPASS_CTL1_WDT_PET_M 0x08000000 + +#define BYPASS_CTL1_VALID 0x02000000 +#define BYPASS_CTL1_OFFTRST 0x04000000 +#define BYPASS_CTL1_WDT_PET 0x08000000 + +#define BYPASS_CTL2_DATA_M 0x000000ff +#define BYPASS_CTL2_OFFSET_M 0x0000ff00 +#define BYPASS_CTL2_RW_M 0x00010000 +#define BYPASS_CTL2_HEAD_M 0x0ff00000 + +#define BYPASS_CTL2_OFFSET_SHIFT 8 +#define BYPASS_CTL2_HEAD_SHIFT 20 + +#define BYPASS_CTL2_RW 0x00010000 + +enum ixgbe_state_t { + __IXGBE_TESTING, + __IXGBE_RESETTING, + __IXGBE_DOWN, + __IXGBE_SERVICE_SCHED, + __IXGBE_IN_SFP_INIT, + __IXGBE_IN_BYPASS_LOW, + __IXGBE_IN_BYPASS_HIGH, + __IXGBE_IN_BYPASS_LOG, +}; + +#define BYPASS_MAX_LOGS 43 +#define BYPASS_LOG_SIZE 5 +#define BYPASS_LOG_LINE_SIZE 37 + +#define BYPASS_EEPROM_VER_ADD 0x02 + +#define BYPASS_LOG_TIME_M 0x01ffffff +#define BYPASS_LOG_TIME_VALID_M 0x02000000 +#define BYPASS_LOG_HEAD_M 0x04000000 +#define BYPASS_LOG_CLEAR_M 0x08000000 +#define BYPASS_LOG_EVENT_M 0xf0000000 +#define BYPASS_LOG_ACTION_M 0x03 + +#define BYPASS_LOG_EVENT_SHIFT 28 +#define BYPASS_LOG_CLEAR_SHIFT 24 /* bit offset */ +#define IXGBE_DEV_TO_ADPATER(dev) \ + ((struct ixgbe_adapter*)(dev->data->dev_private)) + +/* extractions from ixgbe_phy.h */ +#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 + +#define IXGBE_SFF_SFF_8472_SWAP 0x5C +#define IXGBE_SFF_SFF_8472_COMP 0x5E +#define IXGBE_SFF_SFF_8472_OSCB 0x6E +#define IXGBE_SFF_SFF_8472_ESCB 0x76 + +#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0 + +/* extractions from ixgbe_type.h */ +#define IXGBE_DEV_ID_82599_BYPASS 0x155D + +#define IXGBE_BYPASS_FW_WRITE_FAILURE -35 + +#endif /* RTE_NIC_BYPASS */ + +#endif /* _IXGBE_BYPASS_DEFINES_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c 
b/drivers/net/ixgbe/ixgbe_ethdev.c new file mode 100644 index 00000000..3f1ebc15 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_ethdev.c @@ -0,0 +1,7149 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/queue.h> +#include <stdio.h> +#include <errno.h> +#include <stdint.h> +#include <string.h> +#include <unistd.h> +#include <stdarg.h> +#include <inttypes.h> +#include <netinet/in.h> +#include <rte_byteorder.h> +#include <rte_common.h> +#include <rte_cycles.h> + +#include <rte_interrupts.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_pci.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_memory.h> +#include <rte_memzone.h> +#include <rte_eal.h> +#include <rte_alarm.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_atomic.h> +#include <rte_malloc.h> +#include <rte_random.h> +#include <rte_dev.h> + +#include "ixgbe_logs.h" +#include "base/ixgbe_api.h" +#include "base/ixgbe_vf.h" +#include "base/ixgbe_common.h" +#include "ixgbe_ethdev.h" +#include "ixgbe_bypass.h" +#include "ixgbe_rxtx.h" +#include "base/ixgbe_type.h" +#include "base/ixgbe_phy.h" +#include "ixgbe_regs.h" + +/* + * High threshold controlling when to start sending XOFF frames. Must be at + * least 8 bytes less than receive packet buffer size. This value is in units + * of 1024 bytes. + */ +#define IXGBE_FC_HI 0x80 + +/* + * Low threshold controlling when to start sending XON frames. This value is + * in units of 1024 bytes. + */ +#define IXGBE_FC_LO 0x40 + +/* Default minimum inter-interrupt interval for EITR configuration */ +#define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT 0x79E + +/* Timer value included in XOFF frames. */ +#define IXGBE_FC_PAUSE 0x680 + +#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */ +#define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */ +#define IXGBE_VMDQ_NUM_UC_MAC 4096 /* Maximum nb. of UC MAC addr. 
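Editor's note: the flow-control constants just defined are expressed in units of 1024 bytes of Rx packet buffer, and eth_ixgbe_dev_init() later copies them into hw->fc. A trivial standalone check of what they work out to (the pause-time interpretation is the value placed in the XOFF frame timer field, as the comments above state):

#include <stdio.h>

#define IXGBE_FC_HI    0x80	/* XOFF threshold, units of 1024 bytes */
#define IXGBE_FC_LO    0x40	/* XON threshold, units of 1024 bytes */
#define IXGBE_FC_PAUSE 0x680	/* timer value carried in XOFF frames */

int main(void)
{
	printf("start sending XOFF above %d KB of buffer use\n", IXGBE_FC_HI);
	printf("start sending XON  below %d KB\n", IXGBE_FC_LO);
	printf("XOFF pause timer field: 0x%x\n", IXGBE_FC_PAUSE);
	return 0;
}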
*/ + +#define IXGBE_MMW_SIZE_DEFAULT 0x4 +#define IXGBE_MMW_SIZE_JUMBO_FRAME 0x14 +#define IXGBE_MAX_RING_DESC 4096 /* replicate define from rxtx */ + +/* + * Default values for RX/TX configuration + */ +#define IXGBE_DEFAULT_RX_FREE_THRESH 32 +#define IXGBE_DEFAULT_RX_PTHRESH 8 +#define IXGBE_DEFAULT_RX_HTHRESH 8 +#define IXGBE_DEFAULT_RX_WTHRESH 0 + +#define IXGBE_DEFAULT_TX_FREE_THRESH 32 +#define IXGBE_DEFAULT_TX_PTHRESH 32 +#define IXGBE_DEFAULT_TX_HTHRESH 0 +#define IXGBE_DEFAULT_TX_WTHRESH 0 +#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32 + +/* Bit shift and mask */ +#define IXGBE_4_BIT_WIDTH (CHAR_BIT / 2) +#define IXGBE_4_BIT_MASK RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t) +#define IXGBE_8_BIT_WIDTH CHAR_BIT +#define IXGBE_8_BIT_MASK UINT8_MAX + +#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */ + +#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0])) + +#define IXGBE_HKEY_MAX_INDEX 10 + +/* Additional timesync values. */ +#define NSEC_PER_SEC 1000000000L +#define IXGBE_INCVAL_10GB 0x66666666 +#define IXGBE_INCVAL_1GB 0x40000000 +#define IXGBE_INCVAL_100 0x50000000 +#define IXGBE_INCVAL_SHIFT_10GB 28 +#define IXGBE_INCVAL_SHIFT_1GB 24 +#define IXGBE_INCVAL_SHIFT_100 21 +#define IXGBE_INCVAL_SHIFT_82599 7 +#define IXGBE_INCPER_SHIFT_82599 24 + +#define IXGBE_CYCLECOUNTER_MASK 0xffffffffffffffffULL + +#define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000 +#define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000 +#define DEFAULT_ETAG_ETYPE 0x893f +#define IXGBE_ETAG_ETYPE 0x00005084 +#define IXGBE_ETAG_ETYPE_MASK 0x0000ffff +#define IXGBE_ETAG_ETYPE_VALID 0x80000000 +#define IXGBE_RAH_ADTYPE 0x40000000 +#define IXGBE_RAL_ETAG_FILTER_MASK 0x00003fff +#define IXGBE_VMVIR_TAGA_MASK 0x18000000 +#define IXGBE_VMVIR_TAGA_ETAG_INSERT 0x08000000 +#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */ +#define IXGBE_QDE_STRIP_TAG 0x00000004 + +enum ixgbevf_xcast_modes { + IXGBEVF_XCAST_MODE_NONE = 0, + IXGBEVF_XCAST_MODE_MULTI, + IXGBEVF_XCAST_MODE_ALLMULTI, +}; + +static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev); +static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev); +static int ixgbe_dev_configure(struct rte_eth_dev *dev); +static int ixgbe_dev_start(struct rte_eth_dev *dev); +static void ixgbe_dev_stop(struct rte_eth_dev *dev); +static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev); +static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev); +static void ixgbe_dev_close(struct rte_eth_dev *dev); +static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev); +static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev); +static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev); +static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev); +static int ixgbe_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete); +static void ixgbe_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats); +static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstats *xstats, unsigned n); +static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstats *xstats, unsigned n); +static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev); +static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev); +static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, + uint16_t queue_id, + uint8_t stat_idx, + uint8_t is_rx); +static void ixgbe_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static const uint32_t 
*ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev); +static void ixgbevf_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); + +static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, int on); +static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, + uint16_t tpid_id); +static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, + uint16_t queue, bool on); +static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, + int on); +static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask); +static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue); +static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue); +static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev); +static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev); + +static int ixgbe_dev_led_on(struct rte_eth_dev *dev); +static int ixgbe_dev_led_off(struct rte_eth_dev *dev); +static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_pfc_conf *pfc_conf); +static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev); +static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev); +static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev); +static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev); +static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev); +static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle, + void *param); +static void ixgbe_dev_interrupt_delayed_handler(void *param); +static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + uint32_t index, uint32_t pool); +static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index); +static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, + struct ether_addr *mac_addr); +static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config); + +/* For Virtual Function support */ +static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev); +static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev); +static int ixgbevf_dev_configure(struct rte_eth_dev *dev); +static int ixgbevf_dev_start(struct rte_eth_dev *dev); +static void ixgbevf_dev_stop(struct rte_eth_dev *dev); +static void ixgbevf_dev_close(struct rte_eth_dev *dev); +static void ixgbevf_intr_disable(struct ixgbe_hw *hw); +static void ixgbevf_intr_enable(struct ixgbe_hw *hw); +static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats); +static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev); +static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, int on); +static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, + uint16_t queue, int on); +static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask); +static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on); +static int ixgbevf_dev_rx_queue_intr_enable(struct 
rte_eth_dev *dev, + uint16_t queue_id); +static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, + uint16_t queue_id); +static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, + uint8_t queue, uint8_t msix_vector); +static void ixgbevf_configure_msix(struct rte_eth_dev *dev); +static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev); +static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev); + +/* For Eth VMDQ APIs support */ +static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct + ether_addr* mac_addr,uint8_t on); +static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on); +static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool, + uint16_t rx_mask, uint8_t on); +static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on); +static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on); +static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan, + uint64_t pool_mask,uint8_t vlan_on); +static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev, + struct rte_eth_mirror_conf *mirror_conf, + uint8_t rule_id, uint8_t on); +static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, + uint8_t rule_id); +static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, + uint16_t queue_id); +static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, + uint16_t queue_id); +static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, + uint8_t queue, uint8_t msix_vector); +static void ixgbe_configure_msix(struct rte_eth_dev *dev); + +static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t tx_rate); +static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, + uint16_t tx_rate, uint64_t q_msk); + +static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev, + struct ether_addr *mac_addr, + uint32_t index, uint32_t pool); +static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index); +static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, + struct ether_addr *mac_addr); +static int ixgbe_syn_filter_set(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter, + bool add); +static int ixgbe_syn_filter_get(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter); +static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, + struct ixgbe_5tuple_filter *filter); +static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, + struct ixgbe_5tuple_filter *filter); +static int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *filter, + bool add); +static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *filter); +static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter, + bool add); +static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter); +static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg); +static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu); + +static 
int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, + struct ether_addr *mc_addr_set, + uint32_t nb_mc_addr); +static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, + struct rte_eth_dcb_info *dcb_info); + +static int ixgbe_get_reg_length(struct rte_eth_dev *dev); +static int ixgbe_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs); +static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev); +static int ixgbe_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom); +static int ixgbe_set_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom); + +static int ixgbevf_get_reg_length(struct rte_eth_dev *dev); +static int ixgbevf_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs); + +static int ixgbe_timesync_enable(struct rte_eth_dev *dev); +static int ixgbe_timesync_disable(struct rte_eth_dev *dev); +static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp, + uint32_t flags); +static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp); +static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta); +static int ixgbe_timesync_read_time(struct rte_eth_dev *dev, + struct timespec *timestamp); +static int ixgbe_timesync_write_time(struct rte_eth_dev *dev, + const struct timespec *timestamp); + +static int ixgbe_dev_l2_tunnel_eth_type_conf + (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel); +static int ixgbe_dev_l2_tunnel_offload_set + (struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel, + uint32_t mask, + uint8_t en); +static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); + +static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel); +static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel); + +/* + * Define VF Stats MACRO for Non "cleared on read" register + */ +#define UPDATE_VF_STAT(reg, last, cur) \ +{ \ + uint32_t latest = IXGBE_READ_REG(hw, reg); \ + cur += (latest - last) & UINT_MAX; \ + last = latest; \ +} + +#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur) \ +{ \ + u64 new_lsb = IXGBE_READ_REG(hw, lsb); \ + u64 new_msb = IXGBE_READ_REG(hw, msb); \ + u64 latest = ((new_msb << 32) | new_lsb); \ + cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \ + last = latest; \ +} + +#define IXGBE_SET_HWSTRIP(h, q) do{\ + uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \ + uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \ + (h)->bitmap[idx] |= 1 << bit;\ + } while (0) + +#define IXGBE_CLEAR_HWSTRIP(h, q) do{\ + uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \ + uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \ + (h)->bitmap[idx] &= ~(1 << bit);\ + } while (0) + +#define IXGBE_GET_HWSTRIP(h, q, r) do{\ + uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \ + uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \ + (r) = (h)->bitmap[idx] >> bit & 1;\ + } while (0) + +/* + * The set of PCI devices this driver supports + */ +static const struct rte_pci_id pci_id_ixgbe_map[] = { + +#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, +#include "rte_pci_dev_ids.h" + +{ .vendor_id = 0, /* sentinel */ }, +}; + + +/* + * The set of PCI devices this driver supports (for 82599 VF) + */ +static const struct rte_pci_id pci_id_ixgbevf_map[] = { + +#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) 
{RTE_PCI_DEVICE(vend, dev)}, +#include "rte_pci_dev_ids.h" +{ .vendor_id = 0, /* sentinel */ }, + +}; + +static const struct rte_eth_desc_lim rx_desc_lim = { + .nb_max = IXGBE_MAX_RING_DESC, + .nb_min = IXGBE_MIN_RING_DESC, + .nb_align = IXGBE_RXD_ALIGN, +}; + +static const struct rte_eth_desc_lim tx_desc_lim = { + .nb_max = IXGBE_MAX_RING_DESC, + .nb_min = IXGBE_MIN_RING_DESC, + .nb_align = IXGBE_TXD_ALIGN, +}; + +static const struct eth_dev_ops ixgbe_eth_dev_ops = { + .dev_configure = ixgbe_dev_configure, + .dev_start = ixgbe_dev_start, + .dev_stop = ixgbe_dev_stop, + .dev_set_link_up = ixgbe_dev_set_link_up, + .dev_set_link_down = ixgbe_dev_set_link_down, + .dev_close = ixgbe_dev_close, + .promiscuous_enable = ixgbe_dev_promiscuous_enable, + .promiscuous_disable = ixgbe_dev_promiscuous_disable, + .allmulticast_enable = ixgbe_dev_allmulticast_enable, + .allmulticast_disable = ixgbe_dev_allmulticast_disable, + .link_update = ixgbe_dev_link_update, + .stats_get = ixgbe_dev_stats_get, + .xstats_get = ixgbe_dev_xstats_get, + .stats_reset = ixgbe_dev_stats_reset, + .xstats_reset = ixgbe_dev_xstats_reset, + .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set, + .dev_infos_get = ixgbe_dev_info_get, + .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get, + .mtu_set = ixgbe_dev_mtu_set, + .vlan_filter_set = ixgbe_vlan_filter_set, + .vlan_tpid_set = ixgbe_vlan_tpid_set, + .vlan_offload_set = ixgbe_vlan_offload_set, + .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set, + .rx_queue_start = ixgbe_dev_rx_queue_start, + .rx_queue_stop = ixgbe_dev_rx_queue_stop, + .tx_queue_start = ixgbe_dev_tx_queue_start, + .tx_queue_stop = ixgbe_dev_tx_queue_stop, + .rx_queue_setup = ixgbe_dev_rx_queue_setup, + .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable, + .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable, + .rx_queue_release = ixgbe_dev_rx_queue_release, + .rx_queue_count = ixgbe_dev_rx_queue_count, + .rx_descriptor_done = ixgbe_dev_rx_descriptor_done, + .tx_queue_setup = ixgbe_dev_tx_queue_setup, + .tx_queue_release = ixgbe_dev_tx_queue_release, + .dev_led_on = ixgbe_dev_led_on, + .dev_led_off = ixgbe_dev_led_off, + .flow_ctrl_get = ixgbe_flow_ctrl_get, + .flow_ctrl_set = ixgbe_flow_ctrl_set, + .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set, + .mac_addr_add = ixgbe_add_rar, + .mac_addr_remove = ixgbe_remove_rar, + .mac_addr_set = ixgbe_set_default_mac_addr, + .uc_hash_table_set = ixgbe_uc_hash_table_set, + .uc_all_hash_table_set = ixgbe_uc_all_hash_table_set, + .mirror_rule_set = ixgbe_mirror_rule_set, + .mirror_rule_reset = ixgbe_mirror_rule_reset, + .set_vf_rx_mode = ixgbe_set_pool_rx_mode, + .set_vf_rx = ixgbe_set_pool_rx, + .set_vf_tx = ixgbe_set_pool_tx, + .set_vf_vlan_filter = ixgbe_set_pool_vlan_filter, + .set_queue_rate_limit = ixgbe_set_queue_rate_limit, + .set_vf_rate_limit = ixgbe_set_vf_rate_limit, + .reta_update = ixgbe_dev_rss_reta_update, + .reta_query = ixgbe_dev_rss_reta_query, +#ifdef RTE_NIC_BYPASS + .bypass_init = ixgbe_bypass_init, + .bypass_state_set = ixgbe_bypass_state_store, + .bypass_state_show = ixgbe_bypass_state_show, + .bypass_event_set = ixgbe_bypass_event_store, + .bypass_event_show = ixgbe_bypass_event_show, + .bypass_wd_timeout_set = ixgbe_bypass_wd_timeout_store, + .bypass_wd_timeout_show = ixgbe_bypass_wd_timeout_show, + .bypass_ver_show = ixgbe_bypass_ver_show, + .bypass_wd_reset = ixgbe_bypass_wd_reset, +#endif /* RTE_NIC_BYPASS */ + .rss_hash_update = ixgbe_dev_rss_hash_update, + .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get, + 
.filter_ctrl = ixgbe_dev_filter_ctrl, + .set_mc_addr_list = ixgbe_dev_set_mc_addr_list, + .rxq_info_get = ixgbe_rxq_info_get, + .txq_info_get = ixgbe_txq_info_get, + .timesync_enable = ixgbe_timesync_enable, + .timesync_disable = ixgbe_timesync_disable, + .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp, + .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp, + .get_reg_length = ixgbe_get_reg_length, + .get_reg = ixgbe_get_regs, + .get_eeprom_length = ixgbe_get_eeprom_length, + .get_eeprom = ixgbe_get_eeprom, + .set_eeprom = ixgbe_set_eeprom, + .get_dcb_info = ixgbe_dev_get_dcb_info, + .timesync_adjust_time = ixgbe_timesync_adjust_time, + .timesync_read_time = ixgbe_timesync_read_time, + .timesync_write_time = ixgbe_timesync_write_time, + .l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf, + .l2_tunnel_offload_set = ixgbe_dev_l2_tunnel_offload_set, + .udp_tunnel_port_add = ixgbe_dev_udp_tunnel_port_add, + .udp_tunnel_port_del = ixgbe_dev_udp_tunnel_port_del, +}; + +/* + * dev_ops for virtual function, bare necessities for basic vf + * operation have been implemented + */ +static const struct eth_dev_ops ixgbevf_eth_dev_ops = { + .dev_configure = ixgbevf_dev_configure, + .dev_start = ixgbevf_dev_start, + .dev_stop = ixgbevf_dev_stop, + .link_update = ixgbe_dev_link_update, + .stats_get = ixgbevf_dev_stats_get, + .xstats_get = ixgbevf_dev_xstats_get, + .stats_reset = ixgbevf_dev_stats_reset, + .xstats_reset = ixgbevf_dev_stats_reset, + .dev_close = ixgbevf_dev_close, + .allmulticast_enable = ixgbevf_dev_allmulticast_enable, + .allmulticast_disable = ixgbevf_dev_allmulticast_disable, + .dev_infos_get = ixgbevf_dev_info_get, + .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get, + .mtu_set = ixgbevf_dev_set_mtu, + .vlan_filter_set = ixgbevf_vlan_filter_set, + .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set, + .vlan_offload_set = ixgbevf_vlan_offload_set, + .rx_queue_setup = ixgbe_dev_rx_queue_setup, + .rx_queue_release = ixgbe_dev_rx_queue_release, + .rx_descriptor_done = ixgbe_dev_rx_descriptor_done, + .tx_queue_setup = ixgbe_dev_tx_queue_setup, + .tx_queue_release = ixgbe_dev_tx_queue_release, + .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable, + .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable, + .mac_addr_add = ixgbevf_add_mac_addr, + .mac_addr_remove = ixgbevf_remove_mac_addr, + .set_mc_addr_list = ixgbe_dev_set_mc_addr_list, + .rxq_info_get = ixgbe_rxq_info_get, + .txq_info_get = ixgbe_txq_info_get, + .mac_addr_set = ixgbevf_set_default_mac_addr, + .get_reg_length = ixgbevf_get_reg_length, + .get_reg = ixgbevf_get_regs, + .reta_update = ixgbe_dev_rss_reta_update, + .reta_query = ixgbe_dev_rss_reta_query, + .rss_hash_update = ixgbe_dev_rss_hash_update, + .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get, +}; + +/* store statistics names and its offset in stats structure */ +struct rte_ixgbe_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + unsigned offset; +}; + +static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = { + {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)}, + {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)}, + {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)}, + {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)}, + {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)}, + {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)}, + {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)}, + {"rx_xon_packets", offsetof(struct 
ixgbe_hw_stats, lxonrxc)}, + {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)}, + {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)}, + {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)}, + {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)}, + {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)}, + {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)}, + {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats, + prc1023)}, + {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats, + prc1522)}, + {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)}, + {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)}, + {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)}, + {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)}, + {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)}, + {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)}, + {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)}, + {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)}, + {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)}, + {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)}, + {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)}, + {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)}, + {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)}, + {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)}, + {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)}, + {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)}, + {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats, + ptc1023)}, + {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats, + ptc1522)}, + {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)}, + {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)}, + {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)}, + {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)}, + + {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats, + fdirustat_add)}, + {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats, + fdirustat_remove)}, + {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats, + fdirfstat_fadd)}, + {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats, + fdirfstat_fremove)}, + {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats, + fdirmatch)}, + {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats, + fdirmiss)}, + + {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)}, + {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)}, + {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, + fclast)}, + {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)}, + {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)}, + {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)}, + {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)}, + {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats, + fcoe_noddp)}, + {"rx_fcoe_no_direct_data_placement_ext_buff", + offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)}, + + {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, + lxontxc)}, + {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, + lxonrxc)}, + {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, + lxofftxc)}, + 
{"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, + lxoffrxc)}, + {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)}, +}; + +#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \ + sizeof(rte_ixgbe_stats_strings[0])) + +/* Per-queue statistics */ +static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = { + {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)}, + {"dropped", offsetof(struct ixgbe_hw_stats, mpc)}, + {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)}, + {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)}, +}; + +#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \ + sizeof(rte_ixgbe_rxq_strings[0])) + +static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = { + {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)}, + {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)}, + {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats, + pxon2offc)}, +}; + +#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \ + sizeof(rte_ixgbe_txq_strings[0])) + +static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = { + {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)}, +}; + +#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \ + sizeof(rte_ixgbevf_stats_strings[0])) + +/** + * Atomically reads the link status information from global + * structure rte_eth_dev. + * + * @param dev + * - Pointer to the structure rte_eth_dev to read from. + * - Pointer to the buffer to be saved with the link status. + * + * @return + * - On success, zero. + * - On failure, negative value. + */ +static inline int +rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = link; + struct rte_eth_link *src = &(dev->data->dev_link); + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +/** + * Atomically writes the link status information into global + * structure rte_eth_dev. + * + * @param dev + * - Pointer to the structure rte_eth_dev to read from. + * - Pointer to the buffer to be saved with the link status. + * + * @return + * - On success, zero. + * - On failure, negative value. + */ +static inline int +rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = &(dev->data->dev_link); + struct rte_eth_link *src = link; + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +/* + * This function is the same as ixgbe_is_sfp() in base/ixgbe.h. 
+ */ +static inline int +ixgbe_is_sfp(struct ixgbe_hw *hw) +{ + switch (hw->phy.type) { + case ixgbe_phy_sfp_avago: + case ixgbe_phy_sfp_ftl: + case ixgbe_phy_sfp_intel: + case ixgbe_phy_sfp_unknown: + case ixgbe_phy_sfp_passive_tyco: + case ixgbe_phy_sfp_passive_unknown: + return 1; + default: + return 0; + } +} + +static inline int32_t +ixgbe_pf_reset_hw(struct ixgbe_hw *hw) +{ + uint32_t ctrl_ext; + int32_t status; + + status = ixgbe_reset_hw(hw); + + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + IXGBE_WRITE_FLUSH(hw); + + return status; +} + +static inline void +ixgbe_enable_intr(struct rte_eth_dev *dev) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask); + IXGBE_WRITE_FLUSH(hw); +} + +/* + * This function is based on ixgbe_disable_intr() in base/ixgbe.h. + */ +static void +ixgbe_disable_intr(struct ixgbe_hw *hw) +{ + PMD_INIT_FUNC_TRACE(); + + if (hw->mac.type == ixgbe_mac_82598EB) { + IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0); + } else { + IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000); + IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0); + IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0); + } + IXGBE_WRITE_FLUSH(hw); +} + +/* + * This function resets queue statistics mapping registers. + * From Niantic datasheet, Initialization of Statistics section: + * "...if software requires the queue counters, the RQSMR and TQSM registers + * must be re-programmed following a device reset. + */ +static void +ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw) +{ + uint32_t i; + + for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0); + } +} + + +static int +ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, + uint16_t queue_id, + uint8_t stat_idx, + uint8_t is_rx) +{ +#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8 +#define NB_QMAP_FIELDS_PER_QSM_REG 4 +#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f + + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct ixgbe_stat_mapping_registers *stat_mappings = + IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private); + uint32_t qsmr_mask = 0; + uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK; + uint32_t q_map; + uint8_t n, offset; + + if ((hw->mac.type != ixgbe_mac_82599EB) && + (hw->mac.type != ixgbe_mac_X540) && + (hw->mac.type != ixgbe_mac_X550) && + (hw->mac.type != ixgbe_mac_X550EM_x) && + (hw->mac.type != ixgbe_mac_X550EM_a)) + return -ENOSYS; + + PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d", + (int)(eth_dev->data->port_id), is_rx ? 
"RX" : "TX", + queue_id, stat_idx); + + n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG); + if (n >= IXGBE_NB_STAT_MAPPING_REGS) { + PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded"); + return -EIO; + } + offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG); + + /* Now clear any previous stat_idx set */ + clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset); + if (!is_rx) + stat_mappings->tqsm[n] &= ~clearing_mask; + else + stat_mappings->rqsmr[n] &= ~clearing_mask; + + q_map = (uint32_t)stat_idx; + q_map &= QMAP_FIELD_RESERVED_BITS_MASK; + qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset); + if (!is_rx) + stat_mappings->tqsm[n] |= qsmr_mask; + else + stat_mappings->rqsmr[n] |= qsmr_mask; + + PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d", + (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", + queue_id, stat_idx); + PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n, + is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]); + + /* Now write the mapping in the appropriate register */ + if (is_rx) { + PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d", + stat_mappings->rqsmr[n], n); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]); + } + else { + PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d", + stat_mappings->tqsm[n], n); + IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]); + } + return 0; +} + +static void +ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev) +{ + struct ixgbe_stat_mapping_registers *stat_mappings = + IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private); + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int i; + + /* write whatever was in stat mapping table to the NIC */ + for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) { + /* rx */ + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]); + + /* tx */ + IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]); + } +} + +static void +ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config) +{ + uint8_t i; + struct ixgbe_dcb_tc_config *tc; + uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS; + + dcb_config->num_tcs.pg_tcs = dcb_max_tc; + dcb_config->num_tcs.pfc_tcs = dcb_max_tc; + for (i = 0; i < dcb_max_tc; i++) { + tc = &dcb_config->tc_config[i]; + tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i; + tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = + (uint8_t)(100/dcb_max_tc + (i & 1)); + tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i; + tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = + (uint8_t)(100/dcb_max_tc + (i & 1)); + tc->pfc = ixgbe_dcb_pfc_disabled; + } + + /* Initialize default user to priority mapping, UPx->TC0 */ + tc = &dcb_config->tc_config[0]; + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; + for (i = 0; i< IXGBE_DCB_MAX_BW_GROUP; i++) { + dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100; + dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100; + } + dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal; + dcb_config->pfc_mode_enable = false; + dcb_config->vt_mode = true; + dcb_config->round_robin_enable = false; + /* support all DCB capabilities in 82599 */ + dcb_config->support.capabilities = 0xFF; + + /*we only support 4 Tcs for X540, X550 */ + if (hw->mac.type == ixgbe_mac_X540 || + hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) { + dcb_config->num_tcs.pg_tcs = 4; + dcb_config->num_tcs.pfc_tcs = 4; + } +} + +/* 
+ * Ensure that all locks are released before first NVM or PHY access + */ +static void +ixgbe_swfw_lock_reset(struct ixgbe_hw *hw) +{ + uint16_t mask; + + /* + * Phy lock should not fail in this early stage. If this is the case, + * it is due to an improper exit of the application. + * So force the release of the faulty lock. Release of common lock + * is done automatically by swfw_sync function. + */ + mask = IXGBE_GSSR_PHY0_SM << hw->bus.func; + if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) { + PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func); + } + ixgbe_release_swfw_semaphore(hw, mask); + + /* + * These ones are more tricky since they are common to all ports; but + * swfw_sync retries last long enough (1s) to be almost sure that if + * lock can not be taken it is due to an improper lock of the + * semaphore. + */ + mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM; + if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) { + PMD_DRV_LOG(DEBUG, "SWFW common locks released"); + } + ixgbe_release_swfw_semaphore(hw, mask); +} + +/* + * This function is based on code in ixgbe_attach() in base/ixgbe.c. + * It returns 0 on success. + */ +static int +eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct ixgbe_vfta * shadow_vfta = + IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); + struct ixgbe_hwstrip *hwstrip = + IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); + struct ixgbe_dcb_config *dcb_config = + IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); + uint32_t ctrl_ext; + uint16_t csum; + int diag, i; + + PMD_INIT_FUNC_TRACE(); + + eth_dev->dev_ops = &ixgbe_eth_dev_ops; + eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; + eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; + + /* + * For secondary processes, we don't initialise any further as primary + * has already done this work. Only check we don't need a different + * RX and TX function. + */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY){ + struct ixgbe_tx_queue *txq; + /* TX queue function in primary, set by last queue initialized + * Tx queue may not initialized by primary process */ + if (eth_dev->data->tx_queues) { + txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1]; + ixgbe_set_tx_function(eth_dev, txq); + } else { + /* Use default TX function if we get here */ + PMD_INIT_LOG(NOTICE, "No TX queues configured yet. 
" + "Using default TX function."); + } + + ixgbe_set_rx_function(eth_dev); + + return 0; + } + pci_dev = eth_dev->pci_dev; + + rte_eth_copy_pci_info(eth_dev, pci_dev); + + /* Vendor and Device ID need to be set before init of shared code */ + hw->device_id = pci_dev->id.device_id; + hw->vendor_id = pci_dev->id.vendor_id; + hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; + hw->allow_unsupported_sfp = 1; + + /* Initialize the shared code (base driver) */ +#ifdef RTE_NIC_BYPASS + diag = ixgbe_bypass_init_shared_code(hw); +#else + diag = ixgbe_init_shared_code(hw); +#endif /* RTE_NIC_BYPASS */ + + if (diag != IXGBE_SUCCESS) { + PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag); + return -EIO; + } + + /* pick up the PCI bus settings for reporting later */ + ixgbe_get_bus_info(hw); + + /* Unlock any pending hardware semaphore */ + ixgbe_swfw_lock_reset(hw); + + /* Initialize DCB configuration*/ + memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config)); + ixgbe_dcb_init(hw,dcb_config); + /* Get Hardware Flow Control setting */ + hw->fc.requested_mode = ixgbe_fc_full; + hw->fc.current_mode = ixgbe_fc_full; + hw->fc.pause_time = IXGBE_FC_PAUSE; + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + hw->fc.low_water[i] = IXGBE_FC_LO; + hw->fc.high_water[i] = IXGBE_FC_HI; + } + hw->fc.send_xon = 1; + + /* Make sure we have a good EEPROM before we read from it */ + diag = ixgbe_validate_eeprom_checksum(hw, &csum); + if (diag != IXGBE_SUCCESS) { + PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag); + return -EIO; + } + +#ifdef RTE_NIC_BYPASS + diag = ixgbe_bypass_init_hw(hw); +#else + diag = ixgbe_init_hw(hw); +#endif /* RTE_NIC_BYPASS */ + + /* + * Devices with copper phys will fail to initialise if ixgbe_init_hw() + * is called too soon after the kernel driver unbinding/binding occurs. + * The failure occurs in ixgbe_identify_phy_generic() for all devices, + * but for non-copper devies, ixgbe_identify_sfp_module_generic() is + * also called. See ixgbe_identify_phy_82599(). The reason for the + * failure is not known, and only occuts when virtualisation features + * are disabled in the bios. A delay of 100ms was found to be enough by + * trial-and-error, and is doubled to be safe. + */ + if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) { + rte_delay_ms(200); + diag = ixgbe_init_hw(hw); + } + + if (diag == IXGBE_ERR_EEPROM_VERSION) { + PMD_INIT_LOG(ERR, "This device is a pre-production adapter/" + "LOM. 
Please be aware there may be issues associated " + "with your hardware."); + PMD_INIT_LOG(ERR, "If you are experiencing problems " + "please contact your Intel or hardware representative " + "who provided you with this hardware."); + } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED) + PMD_INIT_LOG(ERR, "Unsupported SFP+ Module"); + if (diag) { + PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag); + return -EIO; + } + + /* Reset the hw statistics */ + ixgbe_dev_stats_reset(eth_dev); + + /* disable interrupt */ + ixgbe_disable_intr(hw); + + /* reset mappings for queue statistics hw counters*/ + ixgbe_reset_qstat_mappings(hw); + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN * + hw->mac.num_rar_entries, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, + "Failed to allocate %u bytes needed to store " + "MAC addresses", + ETHER_ADDR_LEN * hw->mac.num_rar_entries); + return -ENOMEM; + } + /* Copy the permanent MAC address */ + ether_addr_copy((struct ether_addr *) hw->mac.perm_addr, + ð_dev->data->mac_addrs[0]); + + /* Allocate memory for storing hash filter MAC addresses */ + eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN * + IXGBE_VMDQ_NUM_UC_MAC, 0); + if (eth_dev->data->hash_mac_addrs == NULL) { + PMD_INIT_LOG(ERR, + "Failed to allocate %d bytes needed to store MAC addresses", + ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC); + return -ENOMEM; + } + + /* initialize the vfta */ + memset(shadow_vfta, 0, sizeof(*shadow_vfta)); + + /* initialize the hw strip bitmap*/ + memset(hwstrip, 0, sizeof(*hwstrip)); + + /* initialize PF if max_vfs not zero */ + ixgbe_pf_host_init(eth_dev); + + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + /* let hardware know driver is loaded */ + ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + IXGBE_WRITE_FLUSH(hw); + + if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) + PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d", + (int) hw->mac.type, (int) hw->phy.type, + (int) hw->phy.sfp_type); + else + PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d", + (int) hw->mac.type, (int) hw->phy.type); + + PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x", + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id); + + rte_intr_callback_register(&pci_dev->intr_handle, + ixgbe_dev_interrupt_handler, + (void *)eth_dev); + + /* enable uio/vfio intr/eventfd mapping */ + rte_intr_enable(&pci_dev->intr_handle); + + /* enable support intr */ + ixgbe_enable_intr(eth_dev); + + /* initialize 5tuple filter list */ + TAILQ_INIT(&filter_info->fivetuple_list); + memset(filter_info->fivetuple_mask, 0, + sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE); + + return 0; +} + +static int +eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev; + struct ixgbe_hw *hw; + + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -EPERM; + + hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + pci_dev = eth_dev->pci_dev; + + if (hw->adapter_stopped == 0) + ixgbe_dev_close(eth_dev); + + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + + /* Unlock any pending hardware semaphore */ + ixgbe_swfw_lock_reset(hw); + + /* disable uio intr before callback unregister */ + rte_intr_disable(&(pci_dev->intr_handle)); + 
rte_intr_callback_unregister(&(pci_dev->intr_handle), + ixgbe_dev_interrupt_handler, (void *)eth_dev); + + /* uninitialize PF if max_vfs not zero */ + ixgbe_pf_host_uninit(eth_dev); + + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + + rte_free(eth_dev->data->hash_mac_addrs); + eth_dev->data->hash_mac_addrs = NULL; + + return 0; +} + +/* + * Negotiate mailbox API version with the PF. + * After reset API version is always set to the basic one (ixgbe_mbox_api_10). + * Then we try to negotiate starting with the most recent one. + * If all negotiation attempts fail, then we will proceed with + * the default one (ixgbe_mbox_api_10). + */ +static void +ixgbevf_negotiate_api(struct ixgbe_hw *hw) +{ + int32_t i; + + /* start with highest supported, proceed down */ + static const enum ixgbe_pfvf_api_rev sup_ver[] = { + ixgbe_mbox_api_12, + ixgbe_mbox_api_11, + ixgbe_mbox_api_10, + }; + + for (i = 0; + i != RTE_DIM(sup_ver) && + ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0; + i++) + ; +} + +static void +generate_random_mac_addr(struct ether_addr *mac_addr) +{ + uint64_t random; + + /* Set Organizationally Unique Identifier (OUI) prefix. */ + mac_addr->addr_bytes[0] = 0x00; + mac_addr->addr_bytes[1] = 0x09; + mac_addr->addr_bytes[2] = 0xC0; + /* Force indication of locally assigned MAC address. */ + mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR; + /* Generate the last 3 bytes of the MAC address with a random number. */ + random = rte_rand(); + memcpy(&mac_addr->addr_bytes[3], &random, 3); +} + +/* + * Virtual Function device init + */ +static int +eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) +{ + int diag; + uint32_t tc, tcs; + struct rte_pci_device *pci_dev; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct ixgbe_vfta * shadow_vfta = + IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); + struct ixgbe_hwstrip *hwstrip = + IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); + struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr; + + PMD_INIT_FUNC_TRACE(); + + eth_dev->dev_ops = &ixgbevf_eth_dev_ops; + eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; + eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; + + /* for secondary processes, we don't initialise any further as primary + * has already done this work. Only check we don't need a different + * RX function */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY){ + struct ixgbe_tx_queue *txq; + /* TX queue function in primary, set by last queue initialized + * Tx queue may not initialized by primary process + */ + if (eth_dev->data->tx_queues) { + txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1]; + ixgbe_set_tx_function(eth_dev, txq); + } else { + /* Use default TX function if we get here */ + PMD_INIT_LOG(NOTICE, + "No TX queues configured yet. 
Using default TX function."); + } + + ixgbe_set_rx_function(eth_dev); + + return 0; + } + + pci_dev = eth_dev->pci_dev; + + rte_eth_copy_pci_info(eth_dev, pci_dev); + + hw->device_id = pci_dev->id.device_id; + hw->vendor_id = pci_dev->id.vendor_id; + hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; + + /* initialize the vfta */ + memset(shadow_vfta, 0, sizeof(*shadow_vfta)); + + /* initialize the hw strip bitmap*/ + memset(hwstrip, 0, sizeof(*hwstrip)); + + /* Initialize the shared code (base driver) */ + diag = ixgbe_init_shared_code(hw); + if (diag != IXGBE_SUCCESS) { + PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag); + return -EIO; + } + + /* init_mailbox_params */ + hw->mbx.ops.init_params(hw); + + /* Reset the hw statistics */ + ixgbevf_dev_stats_reset(eth_dev); + + /* Disable the interrupts for VF */ + ixgbevf_intr_disable(hw); + + hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */ + diag = hw->mac.ops.reset_hw(hw); + + /* + * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when + * the underlying PF driver has not assigned a MAC address to the VF. + * In this case, assign a random MAC address. + */ + if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) { + PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); + return diag; + } + + /* negotiate mailbox API version to use with the PF. */ + ixgbevf_negotiate_api(hw); + + /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */ + ixgbevf_get_queues(hw, &tcs, &tc); + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN * + hw->mac.num_rar_entries, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, + "Failed to allocate %u bytes needed to store " + "MAC addresses", + ETHER_ADDR_LEN * hw->mac.num_rar_entries); + return -ENOMEM; + } + + /* Generate a random MAC address, if none was assigned by PF. 
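 *
 * Illustrative aside: as generate_random_mac_addr() above shows, the
 * fallback address keeps the 00:09:C0 OUI, forces the locally-administered
 * bit (ETHER_LOCAL_ADMIN_ADDR, 0x02), and fills the last three bytes from
 * rte_rand(), giving an address of the form 02:09:C0:xx:xx:xx.  A minimal
 * plain-C sketch of the same construction (not driver code):
 *
 *     uint8_t mac[6] = { 0x00, 0x09, 0xC0, 0, 0, 0 };
 *     uint64_t r = rte_rand();      // any 64-bit random source will do
 *     mac[0] |= 0x02;               // locally administered, not globally unique
 *     memcpy(&mac[3], &r, 3);       // low three bytes of the random value
 *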
*/ + if (is_zero_ether_addr(perm_addr)) { + generate_random_mac_addr(perm_addr); + diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1); + if (diag) { + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + return diag; + } + PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF"); + PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address " + "%02x:%02x:%02x:%02x:%02x:%02x", + perm_addr->addr_bytes[0], + perm_addr->addr_bytes[1], + perm_addr->addr_bytes[2], + perm_addr->addr_bytes[3], + perm_addr->addr_bytes[4], + perm_addr->addr_bytes[5]); + } + + /* Copy the permanent MAC address */ + ether_addr_copy(perm_addr, ð_dev->data->mac_addrs[0]); + + /* reset the hardware with the new settings */ + diag = hw->mac.ops.start_hw(hw); + switch (diag) { + case 0: + break; + + default: + PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); + return -EIO; + } + + PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s", + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id, "ixgbe_mac_82599_vf"); + + return 0; +} + +/* Virtual Function device uninit */ + +static int +eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev) +{ + struct ixgbe_hw *hw; + + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -EPERM; + + hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + + if (hw->adapter_stopped == 0) + ixgbevf_dev_close(eth_dev); + + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + + /* Disable the interrupts for VF */ + ixgbevf_intr_disable(hw); + + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + + return 0; +} + +static struct eth_driver rte_ixgbe_pmd = { + .pci_drv = { + .name = "rte_ixgbe_pmd", + .id_table = pci_id_ixgbe_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | + RTE_PCI_DRV_DETACHABLE, + }, + .eth_dev_init = eth_ixgbe_dev_init, + .eth_dev_uninit = eth_ixgbe_dev_uninit, + .dev_private_size = sizeof(struct ixgbe_adapter), +}; + +/* + * virtual function driver struct + */ +static struct eth_driver rte_ixgbevf_pmd = { + .pci_drv = { + .name = "rte_ixgbevf_pmd", + .id_table = pci_id_ixgbevf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE, + }, + .eth_dev_init = eth_ixgbevf_dev_init, + .eth_dev_uninit = eth_ixgbevf_dev_uninit, + .dev_private_size = sizeof(struct ixgbe_adapter), +}; + +/* + * Driver initialization routine. + * Invoked once at EAL init time. + * Register itself as the [Poll Mode] Driver of PCI IXGBE devices. + */ +static int +rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused) +{ + PMD_INIT_FUNC_TRACE(); + + rte_eth_driver_register(&rte_ixgbe_pmd); + return 0; +} + +/* + * VF Driver initialization routine. + * Invoked one at EAL init time. + * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices. 
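 *
 * Illustrative aside: once this registration has run, matching VF PCI
 * devices are probed during rte_eal_init() and appear as ordinary ethdev
 * ports.  A hedged application-side sketch (discovery only, error handling
 * omitted; names are the generic ethdev API of this DPDK generation):
 *
 *     rte_eal_init(argc, argv);
 *     uint8_t nb_ports = rte_eth_dev_count();
 *     for (uint8_t p = 0; p < nb_ports; p++) {
 *             struct rte_eth_dev_info info;
 *             rte_eth_dev_info_get(p, &info);
 *             // for a VF port these values come from ixgbevf_dev_info_get()
 *     }
 *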
+ */ +static int +rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused) +{ + PMD_INIT_FUNC_TRACE(); + + rte_eth_driver_register(&rte_ixgbevf_pmd); + return 0; +} + +static int +ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vfta * shadow_vfta = + IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + uint32_t vfta; + uint32_t vid_idx; + uint32_t vid_bit; + + vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); + vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); + vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx)); + if (on) + vfta |= vid_bit; + else + vfta &= ~vid_bit; + IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta); + + /* update local VFTA copy */ + shadow_vfta->vfta[vid_idx] = vfta; + + return 0; +} + +static void +ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) +{ + if (on) + ixgbe_vlan_hw_strip_enable(dev, queue); + else + ixgbe_vlan_hw_strip_disable(dev, queue); +} + +static int +ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, + uint16_t tpid) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret = 0; + + switch (vlan_type) { + case ETH_VLAN_TYPE_INNER: + /* Only the high 16-bits is valid */ + IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16); + break; + default: + ret = -EINVAL; + PMD_DRV_LOG(ERR, "Unsupported vlan type %d\n", vlan_type); + break; + } + + return ret; +} + +void +ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t vlnctrl; + + PMD_INIT_FUNC_TRACE(); + + /* Filter Table Disable */ + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl &= ~IXGBE_VLNCTRL_VFE; + + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); +} + +void +ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vfta * shadow_vfta = + IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + uint32_t vlnctrl; + uint16_t i; + + PMD_INIT_FUNC_TRACE(); + + /* Filter Table Enable */ + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; + vlnctrl |= IXGBE_VLNCTRL_VFE; + + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + + /* write whatever is in local vfta copy */ + for (i = 0; i < IXGBE_VFTA_SIZE; i++) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]); +} + +static void +ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) +{ + struct ixgbe_hwstrip *hwstrip = + IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private); + + if (queue >= IXGBE_MAX_RX_QUEUE_NUM) + return; + + if (on) + IXGBE_SET_HWSTRIP(hwstrip, queue); + else + IXGBE_CLEAR_HWSTRIP(hwstrip, queue); +} + +static void +ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t ctrl; + + PMD_INIT_FUNC_TRACE(); + + if (hw->mac.type == ixgbe_mac_82598EB) { + /* No queue level support */ + PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); + return; + } + else { + /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); + ctrl &= ~IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); + } + /* record those setting for HW strip per queue */ + ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0); +} + +static void 
+ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t ctrl; + + PMD_INIT_FUNC_TRACE(); + + if (hw->mac.type == ixgbe_mac_82598EB) { + /* No queue level supported */ + PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); + return; + } + else { + /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); + ctrl |= IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); + } + /* record those setting for HW strip per queue */ + ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1); +} + +void +ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t ctrl; + uint16_t i; + + PMD_INIT_FUNC_TRACE(); + + if (hw->mac.type == ixgbe_mac_82598EB) { + ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + ctrl &= ~IXGBE_VLNCTRL_VME; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); + } + else { + /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + ctrl &= ~IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl); + + /* record those setting for HW strip per queue */ + ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0); + } + } +} + +void +ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t ctrl; + uint16_t i; + + PMD_INIT_FUNC_TRACE(); + + if (hw->mac.type == ixgbe_mac_82598EB) { + ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + ctrl |= IXGBE_VLNCTRL_VME; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); + } + else { + /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + ctrl |= IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl); + + /* record those setting for HW strip per queue */ + ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1); + } + } +} + +static void +ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t ctrl; + + PMD_INIT_FUNC_TRACE(); + + /* DMATXCTRL: Geric Double VLAN Disable */ + ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + ctrl &= ~IXGBE_DMATXCTL_GDV; + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl); + + /* CTRL_EXT: Global Double VLAN Disable */ + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + ctrl &= ~IXGBE_EXTENDED_VLAN; + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl); + +} + +static void +ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t ctrl; + + PMD_INIT_FUNC_TRACE(); + + /* DMATXCTRL: Geric Double VLAN Enable */ + ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + ctrl |= IXGBE_DMATXCTL_GDV; + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl); + + /* CTRL_EXT: Global Double VLAN Enable */ + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + ctrl |= IXGBE_EXTENDED_VLAN; + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl); + + /* Clear pooling mode of PFVTCTL. It's required by X550. 
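 *
 * Illustrative aside: this function is reached when the application sets
 * hw_vlan_extend in its Rx mode configuration; ixgbe_vlan_offload_set()
 * below translates those flags into the register writes in this file.
 * A minimal configuration sketch (values are examples only):
 *
 *     struct rte_eth_conf conf = {
 *             .rxmode = {
 *                     .hw_vlan_strip  = 1,   // per-queue RXDCTL.VME
 *                     .hw_vlan_filter = 1,   // VLNCTRL.VFE + VFTA lookup
 *                     .hw_vlan_extend = 1,   // double VLAN, this function
 *             },
 *     };
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *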
*/ + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) { + ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); + } + + /* + * VET EXT field in the EXVET register = 0x8100 by default + * So no need to change. Same to VT field of DMATXCTL register + */ +} + +static void +ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + if (mask & ETH_VLAN_STRIP_MASK) { + if (dev->data->dev_conf.rxmode.hw_vlan_strip) + ixgbe_vlan_hw_strip_enable_all(dev); + else + ixgbe_vlan_hw_strip_disable_all(dev); + } + + if (mask & ETH_VLAN_FILTER_MASK) { + if (dev->data->dev_conf.rxmode.hw_vlan_filter) + ixgbe_vlan_hw_filter_enable(dev); + else + ixgbe_vlan_hw_filter_disable(dev); + } + + if (mask & ETH_VLAN_EXTEND_MASK) { + if (dev->data->dev_conf.rxmode.hw_vlan_extend) + ixgbe_vlan_hw_extend_enable(dev); + else + ixgbe_vlan_hw_extend_disable(dev); + } +} + +static void +ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ + uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); +} + +static int +ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q) +{ + switch (nb_rx_q) { + case 1: + case 2: + RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS; + break; + case 4: + RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS; + break; + default: + return -EINVAL; + } + + RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q; + RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q; + + return 0; +} + +static int +ixgbe_check_mq_mode(struct rte_eth_dev *dev) +{ + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t nb_rx_q = dev->data->nb_rx_queues; + uint16_t nb_tx_q = dev->data->nb_tx_queues; + + if (RTE_ETH_DEV_SRIOV(dev).active != 0) { + /* check multi-queue mode */ + switch (dev_conf->rxmode.mq_mode) { + case ETH_MQ_RX_VMDQ_DCB: + case ETH_MQ_RX_VMDQ_DCB_RSS: + /* DCB/RSS VMDQ in SRIOV mode, not implement yet */ + PMD_INIT_LOG(ERR, "SRIOV active," + " unsupported mq_mode rx %d.", + dev_conf->rxmode.mq_mode); + return -EINVAL; + case ETH_MQ_RX_RSS: + case ETH_MQ_RX_VMDQ_RSS: + dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS; + if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) + if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) { + PMD_INIT_LOG(ERR, "SRIOV is active," + " invalid queue number" + " for VMDQ RSS, allowed" + " value are 1, 2 or 4."); + return -EINVAL; + } + break; + case ETH_MQ_RX_VMDQ_ONLY: + case ETH_MQ_RX_NONE: + /* if nothing mq mode configure, use default scheme */ + dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY; + if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1) + RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1; + break; + default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/ + /* SRIOV only works in VMDq enable mode */ + PMD_INIT_LOG(ERR, "SRIOV is active," + " wrong mq_mode rx %d.", + dev_conf->rxmode.mq_mode); + return -EINVAL; + } + + switch (dev_conf->txmode.mq_mode) { + case ETH_MQ_TX_VMDQ_DCB: + /* DCB VMDQ in SRIOV mode, not implement yet */ + PMD_INIT_LOG(ERR, "SRIOV is active," + " unsupported VMDQ mq_mode tx %d.", + dev_conf->txmode.mq_mode); + return -EINVAL; + default: 
/* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */ + dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY; + break; + } + + /* check valid queue number */ + if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) || + (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) { + PMD_INIT_LOG(ERR, "SRIOV is active," + " nb_rx_q=%d nb_tx_q=%d queue number" + " must be less than or equal to %d.", + nb_rx_q, nb_tx_q, + RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool); + return -EINVAL; + } + } else { + if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) { + PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is" + " not supported."); + return -EINVAL; + } + /* check configuration for vmdb+dcb mode */ + if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) { + const struct rte_eth_vmdq_dcb_conf *conf; + + if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { + PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.", + IXGBE_VMDQ_DCB_NB_QUEUES); + return -EINVAL; + } + conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf; + if (!(conf->nb_queue_pools == ETH_16_POOLS || + conf->nb_queue_pools == ETH_32_POOLS)) { + PMD_INIT_LOG(ERR, "VMDQ+DCB selected," + " nb_queue_pools must be %d or %d.", + ETH_16_POOLS, ETH_32_POOLS); + return -EINVAL; + } + } + if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) { + const struct rte_eth_vmdq_dcb_tx_conf *conf; + + if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { + PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d", + IXGBE_VMDQ_DCB_NB_QUEUES); + return -EINVAL; + } + conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf; + if (!(conf->nb_queue_pools == ETH_16_POOLS || + conf->nb_queue_pools == ETH_32_POOLS)) { + PMD_INIT_LOG(ERR, "VMDQ+DCB selected," + " nb_queue_pools != %d and" + " nb_queue_pools != %d.", + ETH_16_POOLS, ETH_32_POOLS); + return -EINVAL; + } + } + + /* For DCB mode check our configuration before we go further */ + if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) { + const struct rte_eth_dcb_rx_conf *conf; + + if (nb_rx_q != IXGBE_DCB_NB_QUEUES) { + PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.", + IXGBE_DCB_NB_QUEUES); + return -EINVAL; + } + conf = &dev_conf->rx_adv_conf.dcb_rx_conf; + if (!(conf->nb_tcs == ETH_4_TCS || + conf->nb_tcs == ETH_8_TCS)) { + PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" + " and nb_tcs != %d.", + ETH_4_TCS, ETH_8_TCS); + return -EINVAL; + } + } + + if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) { + const struct rte_eth_dcb_tx_conf *conf; + + if (nb_tx_q != IXGBE_DCB_NB_QUEUES) { + PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.", + IXGBE_DCB_NB_QUEUES); + return -EINVAL; + } + conf = &dev_conf->tx_adv_conf.dcb_tx_conf; + if (!(conf->nb_tcs == ETH_4_TCS || + conf->nb_tcs == ETH_8_TCS)) { + PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" + " and nb_tcs != %d.", + ETH_4_TCS, ETH_8_TCS); + return -EINVAL; + } + } + + /* + * When DCB/VT is off, maximum number of queues changes, + * except for 82598EB, which remains constant. 
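 *
 * Worked example (illustration only): with SR-IOV active, ETH_MQ_RX_VMDQ_RSS
 * and nb_rx_q = 2, ixgbe_check_vf_rss_rxq_num() above selects ETH_64_POOLS
 * and nb_q_per_pool = 2, so with 16 VFs the PF's default pool queues start
 * at def_pool_q_idx = 16 * 2 = 32, and any nb_rx_q or nb_tx_q above 2 is
 * rejected.  Without SR-IOV, ETH_MQ_TX_NONE on a MAC other than 82598EB
 * caps nb_tx_q at IXGBE_NONE_MODE_TX_NB_QUEUES in the check right below.
 *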
+ */ + if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && + hw->mac.type != ixgbe_mac_82598EB) { + if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) { + PMD_INIT_LOG(ERR, + "Neither VT nor DCB are enabled, " + "nb_tx_q > %d.", + IXGBE_NONE_MODE_TX_NB_QUEUES); + return -EINVAL; + } + } + } + return 0; +} + +static int +ixgbe_dev_configure(struct rte_eth_dev *dev) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; + int ret; + + PMD_INIT_FUNC_TRACE(); + /* multipe queue mode checking */ + ret = ixgbe_check_mq_mode(dev); + if (ret != 0) { + PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.", + ret); + return ret; + } + + /* set flag to update link status after init */ + intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + + /* + * Initialize to TRUE. If any of Rx queues doesn't meet the bulk + * allocation or vector Rx preconditions we will reset it. + */ + adapter->rx_bulk_alloc_allowed = true; + adapter->rx_vec_allowed = true; + + return 0; +} + +static void +ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + uint32_t gpie; + + /* only set up it on X550EM_X */ + if (hw->mac.type == ixgbe_mac_X550EM_x) { + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + gpie |= IXGBE_SDP0_GPIEN_X550EM_x; + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); + if (hw->phy.type == ixgbe_phy_x550em_ext_t) + intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x; + } +} + +/* + * Configure device link speed and setup link. + * It returns 0 on success. + */ +static int +ixgbe_dev_start(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vf_info *vfinfo = + *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + uint32_t intr_vector = 0; + int err, link_up = 0, negotiate = 0; + uint32_t speed = 0; + int mask = 0; + int status; + uint16_t vf, idx; + uint32_t *link_speeds; + + PMD_INIT_FUNC_TRACE(); + + /* IXGBE devices don't support: + * - half duplex (checked afterwards for valid speeds) + * - fixed speed: TODO implement + */ + if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) { + PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; fix speed not supported", + dev->data->port_id); + return -EINVAL; + } + + /* disable uio/vfio intr/eventfd mapping */ + rte_intr_disable(intr_handle); + + /* stop adapter */ + hw->adapter_stopped = 0; + ixgbe_stop_adapter(hw); + + /* reinitialize adapter + * this calls reset and start */ + status = ixgbe_pf_reset_hw(hw); + if (status != 0) + return -1; + hw->mac.ops.start_hw(hw); + hw->mac.get_link_status = true; + + /* configure PF module if SRIOV enabled */ + ixgbe_pf_host_configure(dev); + + ixgbe_dev_phy_intr_setup(dev); + + /* check and configure queue intr-vector mapping */ + if ((rte_intr_cap_multiple(intr_handle) || + !RTE_ETH_DEV_SRIOV(dev).active) && + dev->data->dev_conf.intr_conf.rxq != 0) { + intr_vector = dev->data->nb_rx_queues; + if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) { + PMD_INIT_LOG(ERR, "At most %d intr queues supported", + IXGBE_MAX_INTR_QUEUE_NUM); + return -ENOTSUP; + } + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -1; + } + + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + 
dev->data->nb_rx_queues * sizeof(int), 0); + if (intr_handle->intr_vec == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec\n", dev->data->nb_rx_queues); + return -ENOMEM; + } + } + + /* confiugre msix for sleep until rx interrupt */ + ixgbe_configure_msix(dev); + + /* initialize transmission unit */ + ixgbe_dev_tx_init(dev); + + /* This can fail when allocating mbufs for descriptor rings */ + err = ixgbe_dev_rx_init(dev); + if (err) { + PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); + goto error; + } + + err = ixgbe_dev_rxtx_start(dev); + if (err < 0) { + PMD_INIT_LOG(ERR, "Unable to start rxtx queues"); + goto error; + } + + /* Skip link setup if loopback mode is enabled for 82599. */ + if (hw->mac.type == ixgbe_mac_82599EB && + dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX) + goto skip_link_setup; + + if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) { + err = hw->mac.ops.setup_sfp(hw); + if (err) + goto error; + } + + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { + /* Turn on the copper */ + ixgbe_set_phy_power(hw, true); + } else { + /* Turn on the laser */ + ixgbe_enable_tx_laser(hw); + } + + err = ixgbe_check_link(hw, &speed, &link_up, 0); + if (err) + goto error; + dev->data->dev_link.link_status = link_up; + + err = ixgbe_get_link_capabilities(hw, &speed, &negotiate); + if (err) + goto error; + + link_speeds = &dev->data->dev_conf.link_speeds; + if (*link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_10G)) { + PMD_INIT_LOG(ERR, "Invalid link setting"); + goto error; + } + + speed = 0x0; + if (*link_speeds == ETH_LINK_SPEED_AUTONEG) { + speed = (hw->mac.type != ixgbe_mac_82598EB) ? + IXGBE_LINK_SPEED_82599_AUTONEG : + IXGBE_LINK_SPEED_82598_AUTONEG; + } else { + if (*link_speeds & ETH_LINK_SPEED_10G) + speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (*link_speeds & ETH_LINK_SPEED_1G) + speed |= IXGBE_LINK_SPEED_1GB_FULL; + if (*link_speeds & ETH_LINK_SPEED_100M) + speed |= IXGBE_LINK_SPEED_100_FULL; + } + + err = ixgbe_setup_link(hw, speed, link_up); + if (err) + goto error; + +skip_link_setup: + + if (rte_intr_allow_others(intr_handle)) { + /* check if lsc interrupt is enabled */ + if (dev->data->dev_conf.intr_conf.lsc != 0) + ixgbe_dev_lsc_interrupt_setup(dev); + } else { + rte_intr_callback_unregister(intr_handle, + ixgbe_dev_interrupt_handler, + (void *)dev); + if (dev->data->dev_conf.intr_conf.lsc != 0) + PMD_INIT_LOG(INFO, "lsc won't enable because of" + " no intr multiplex\n"); + } + + /* check if rxq interrupt is enabled */ + if (dev->data->dev_conf.intr_conf.rxq != 0 && + rte_intr_dp_is_en(intr_handle)) + ixgbe_dev_rxq_interrupt_setup(dev); + + /* enable uio/vfio intr/eventfd mapping */ + rte_intr_enable(intr_handle); + + /* resume enabled intr since hw reset */ + ixgbe_enable_intr(dev); + + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \ + ETH_VLAN_EXTEND_MASK; + ixgbe_vlan_offload_set(dev, mask); + + if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) { + /* Enable vlan filtering for VMDq */ + ixgbe_vmdq_vlan_hw_filter_enable(dev); + } + + /* Configure DCB hw */ + ixgbe_configure_dcb(dev); + + if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) { + err = ixgbe_fdir_configure(dev); + if (err) + goto error; + } + + /* Restore vf rate limit */ + if (vfinfo != NULL) { + for (vf = 0; vf < dev->pci_dev->max_vfs; vf++) + for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++) + if (vfinfo[vf].tx_rate[idx] != 0) + ixgbe_set_vf_rate_limit(dev, vf, + vfinfo[vf].tx_rate[idx], + 
1 << idx); + } + + ixgbe_restore_statistics_mapping(dev); + + return 0; + +error: + PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err); + ixgbe_dev_clear_queues(dev); + return -EIO; +} + +/* + * Stop device: disable rx and tx functions to allow for reconfiguring. + */ +static void +ixgbe_dev_stop(struct rte_eth_dev *dev) +{ + struct rte_eth_link link; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vf_info *vfinfo = + *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next; + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + int vf; + + PMD_INIT_FUNC_TRACE(); + + /* disable interrupts */ + ixgbe_disable_intr(hw); + + /* reset the NIC */ + ixgbe_pf_reset_hw(hw); + hw->adapter_stopped = 0; + + /* stop adapter */ + ixgbe_stop_adapter(hw); + + for (vf = 0; vfinfo != NULL && + vf < dev->pci_dev->max_vfs; vf++) + vfinfo[vf].clear_to_send = false; + + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { + /* Turn off the copper */ + ixgbe_set_phy_power(hw, false); + } else { + /* Turn off the laser */ + ixgbe_disable_tx_laser(hw); + } + + ixgbe_dev_clear_queues(dev); + + /* Clear stored conf */ + dev->data->scattered_rx = 0; + dev->data->lro = 0; + + /* Clear recorded link status */ + memset(&link, 0, sizeof(link)); + rte_ixgbe_dev_atomic_write_link_status(dev, &link); + + /* Remove all ntuple filters of the device */ + for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list); + p_5tuple != NULL; p_5tuple = p_5tuple_next) { + p_5tuple_next = TAILQ_NEXT(p_5tuple, entries); + TAILQ_REMOVE(&filter_info->fivetuple_list, + p_5tuple, entries); + rte_free(p_5tuple); + } + memset(filter_info->fivetuple_mask, 0, + sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE); + + if (!rte_intr_allow_others(intr_handle)) + /* resume to the default handler */ + rte_intr_callback_register(intr_handle, + ixgbe_dev_interrupt_handler, + (void *)dev); + + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec != NULL) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } +} + +/* + * Set device link up: enable tx. + */ +static int +ixgbe_dev_set_link_up(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (hw->mac.type == ixgbe_mac_82599EB) { +#ifdef RTE_NIC_BYPASS + if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { + /* Not suported in bypass mode */ + PMD_INIT_LOG(ERR, "Set link up is not supported " + "by device id 0x%x", hw->device_id); + return -ENOTSUP; + } +#endif + } + + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { + /* Turn on the copper */ + ixgbe_set_phy_power(hw, true); + } else { + /* Turn on the laser */ + ixgbe_enable_tx_laser(hw); + } + + return 0; +} + +/* + * Set device link down: disable tx. 
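 *
 * Illustrative aside: both directions are exposed through the generic
 * ethdev API; a minimal sketch, with an example port number:
 *
 *     rte_eth_dev_set_link_down(port_id);   // copper PHY power / laser off
 *     ...
 *     rte_eth_dev_set_link_up(port_id);     // re-enable transmission
 *
 * As the code below shows, 82599 bypass adapters reject both requests
 * with -ENOTSUP.
 *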
+ */ +static int +ixgbe_dev_set_link_down(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (hw->mac.type == ixgbe_mac_82599EB) { +#ifdef RTE_NIC_BYPASS + if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { + /* Not suported in bypass mode */ + PMD_INIT_LOG(ERR, "Set link down is not supported " + "by device id 0x%x", hw->device_id); + return -ENOTSUP; + } +#endif + } + + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { + /* Turn off the copper */ + ixgbe_set_phy_power(hw, false); + } else { + /* Turn off the laser */ + ixgbe_disable_tx_laser(hw); + } + + return 0; +} + +/* + * Reest and stop device. + */ +static void +ixgbe_dev_close(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + ixgbe_pf_reset_hw(hw); + + ixgbe_dev_stop(dev); + hw->adapter_stopped = 1; + + ixgbe_dev_free_queues(dev); + + ixgbe_disable_pcie_master(hw); + + /* reprogram the RAR[0] in case user changed it. */ + ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); +} + +static void +ixgbe_read_stats_registers(struct ixgbe_hw *hw, + struct ixgbe_hw_stats *hw_stats, + uint64_t *total_missed_rx, uint64_t *total_qbrc, + uint64_t *total_qprc, uint64_t *total_qprdc) +{ + uint32_t bprc, lxon, lxoff, total; + uint32_t delta_gprc = 0; + unsigned i; + /* Workaround for RX byte count not including CRC bytes when CRC ++ * strip is enabled. CRC bytes are removed from counters when crc_strip + * is disabled. ++ */ + int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) & + IXGBE_HLREG0_RXCRCSTRP); + + hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); + hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); + hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); + hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); + + for (i = 0; i < 8; i++) { + uint32_t mp; + mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); + /* global total per queue */ + hw_stats->mpc[i] += mp; + /* Running comprehensive total for stats display */ + *total_missed_rx += hw_stats->mpc[i]; + if (hw->mac.type == ixgbe_mac_82598EB) { + hw_stats->rnbc[i] += + IXGBE_READ_REG(hw, IXGBE_RNBC(i)); + hw_stats->pxonrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); + hw_stats->pxoffrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + } else { + hw_stats->pxonrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); + hw_stats->pxoffrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); + hw_stats->pxon2offc[i] += + IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); + } + hw_stats->pxontxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); + hw_stats->pxofftxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); + } + for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { + uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i)); + uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i)); + uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); + + delta_gprc += delta_qprc; + + hw_stats->qprc[i] += delta_qprc; + hw_stats->qptc[i] += delta_qptc; + + hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); + hw_stats->qbrc[i] += + ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32); + if (crc_strip == 0) + hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN; + + hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); + hw_stats->qbtc[i] += + ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32); + + hw_stats->qprdc[i] += delta_qprdc; + *total_qprdc += hw_stats->qprdc[i]; + + *total_qprc += hw_stats->qprc[i]; + *total_qbrc += hw_stats->qbrc[i]; + } + hw_stats->mlfc 
+= IXGBE_READ_REG(hw, IXGBE_MLFC); + hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); + hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); + + /* + * An errata states that gprc actually counts good + missed packets: + * Workaround to set gprc to summated queue packet receives + */ + hw_stats->gprc = *total_qprc; + + if (hw->mac.type != ixgbe_mac_82598EB) { + hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); + hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); + hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); + hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32); + hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); + hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); + hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); + hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); + } else { + hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); + hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + /* 82598 only has a counter in the high register */ + hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); + hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); + hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); + } + uint64_t old_tpr = hw_stats->tpr; + + hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); + hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); + + if (crc_strip == 0) + hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN; + + uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC); + hw_stats->gptc += delta_gptc; + hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN; + hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN; + + /* + * Workaround: mprc hardware is incorrectly counting + * broadcasts, so for now we subtract those. + */ + bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); + hw_stats->bprc += bprc; + hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); + if (hw->mac.type == ixgbe_mac_82598EB) + hw_stats->mprc -= bprc; + + hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); + hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); + hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); + hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); + hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); + hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); + + lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); + hw_stats->lxontxc += lxon; + lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); + hw_stats->lxofftxc += lxoff; + total = lxon + lxoff; + + hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); + hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); + hw_stats->gptc -= total; + hw_stats->mptc -= total; + hw_stats->ptc64 -= total; + hw_stats->gotc -= total * ETHER_MIN_LEN; + + hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); + hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); + hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); + hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); + hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); + hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); + hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); + hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); + hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); + hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); + hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); + hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); + hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); + hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC); + hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); + hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); + /* Only read FCOE on 82599 */ + if (hw->mac.type != 
ixgbe_mac_82598EB) { + hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); + hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); + hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); + hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); + hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); + } + + /* Flow Director Stats registers */ + hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); + hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); +} + +/* + * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c + */ +static void +ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw_stats *hw_stats = + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; + unsigned i; + + total_missed_rx = 0; + total_qbrc = 0; + total_qprc = 0; + total_qprdc = 0; + + ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc, + &total_qprc, &total_qprdc); + + if (stats == NULL) + return; + + /* Fill out the rte_eth_stats statistics structure */ + stats->ipackets = total_qprc; + stats->ibytes = total_qbrc; + stats->opackets = hw_stats->gptc; + stats->obytes = hw_stats->gotc; + + for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { + stats->q_ipackets[i] = hw_stats->qprc[i]; + stats->q_opackets[i] = hw_stats->qptc[i]; + stats->q_ibytes[i] = hw_stats->qbrc[i]; + stats->q_obytes[i] = hw_stats->qbtc[i]; + stats->q_errors[i] = hw_stats->qprdc[i]; + } + + /* Rx Errors */ + stats->imissed = total_missed_rx; + stats->ierrors = hw_stats->crcerrs + + hw_stats->mspdc + + hw_stats->rlec + + hw_stats->ruc + + hw_stats->roc + + hw_stats->illerrc + + hw_stats->errbc + + hw_stats->rfc + + hw_stats->fccrc + + hw_stats->fclast; + + /* Tx Errors */ + stats->oerrors = 0; +} + +static void +ixgbe_dev_stats_reset(struct rte_eth_dev *dev) +{ + struct ixgbe_hw_stats *stats = + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + /* HW registers are cleared on read */ + ixgbe_dev_stats_get(dev, NULL); + + /* Reset software totals */ + memset(stats, 0, sizeof(*stats)); +} + +/* This function calculates the number of xstats based on the current config */ +static unsigned +ixgbe_xstats_calc_num(void) { + return IXGBE_NB_HW_STATS + (IXGBE_NB_RXQ_PRIO_STATS * 8) + + (IXGBE_NB_TXQ_PRIO_STATS * 8); +} + +static int +ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, + unsigned n) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw_stats *hw_stats = + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; + unsigned i, stat, count = 0; + + count = ixgbe_xstats_calc_num(); + + if (n < count) + return count; + + total_missed_rx = 0; + total_qbrc = 0; + total_qprc = 0; + total_qprdc = 0; + + ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc, + &total_qprc, &total_qprdc); + + /* If this is a reset xstats is NULL, and we have cleared the + * registers by reading them. 
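 *
 * Illustrative aside: callers usually size the array first, since any n
 * smaller than the required count makes this function return that count.
 * A hedged usage sketch through the generic ethdev wrapper (assuming it
 * forwards the same sizing convention; error handling omitted):
 *
 *     int n = rte_eth_xstats_get(port_id, NULL, 0);
 *     struct rte_eth_xstats *xs = malloc(n * sizeof(*xs));
 *     n = rte_eth_xstats_get(port_id, xs, n);
 *     // xs[i].name and xs[i].value as filled in below
 *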
+ */ + if (!xstats) + return 0; + + /* Extended stats from ixgbe_hw_stats */ + count = 0; + for (i = 0; i < IXGBE_NB_HW_STATS; i++) { + snprintf(xstats[count].name, sizeof(xstats[count].name), "%s", + rte_ixgbe_stats_strings[i].name); + xstats[count].value = *(uint64_t *)(((char *)hw_stats) + + rte_ixgbe_stats_strings[i].offset); + count++; + } + + /* RX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { + for (i = 0; i < 8; i++) { + snprintf(xstats[count].name, sizeof(xstats[count].name), + "rx_priority%u_%s", i, + rte_ixgbe_rxq_strings[stat].name); + xstats[count].value = *(uint64_t *)(((char *)hw_stats) + + rte_ixgbe_rxq_strings[stat].offset + + (sizeof(uint64_t) * i)); + count++; + } + } + + /* TX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { + for (i = 0; i < 8; i++) { + snprintf(xstats[count].name, sizeof(xstats[count].name), + "tx_priority%u_%s", i, + rte_ixgbe_txq_strings[stat].name); + xstats[count].value = *(uint64_t *)(((char *)hw_stats) + + rte_ixgbe_txq_strings[stat].offset + + (sizeof(uint64_t) * i)); + count++; + } + } + + return count; +} + +static void +ixgbe_dev_xstats_reset(struct rte_eth_dev *dev) +{ + struct ixgbe_hw_stats *stats = + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + unsigned count = ixgbe_xstats_calc_num(); + + /* HW registers are cleared on read */ + ixgbe_dev_xstats_get(dev, NULL, count); + + /* Reset software totals */ + memset(stats, 0, sizeof(*stats)); +} + +static void +ixgbevf_update_stats(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*) + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + /* Good Rx packet, include VF loopback */ + UPDATE_VF_STAT(IXGBE_VFGPRC, + hw_stats->last_vfgprc, hw_stats->vfgprc); + + /* Good Rx octets, include VF loopback */ + UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, + hw_stats->last_vfgorc, hw_stats->vfgorc); + + /* Good Tx packet, include VF loopback */ + UPDATE_VF_STAT(IXGBE_VFGPTC, + hw_stats->last_vfgptc, hw_stats->vfgptc); + + /* Good Tx octets, include VF loopback */ + UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, + hw_stats->last_vfgotc, hw_stats->vfgotc); + + /* Rx Multicst Packet */ + UPDATE_VF_STAT(IXGBE_VFMPRC, + hw_stats->last_vfmprc, hw_stats->vfmprc); +} + +static int +ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, + unsigned n) +{ + struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + unsigned i; + + if (n < IXGBEVF_NB_XSTATS) + return IXGBEVF_NB_XSTATS; + + ixgbevf_update_stats(dev); + + if (!xstats) + return 0; + + /* Extended stats */ + for (i = 0; i < IXGBEVF_NB_XSTATS; i++) { + snprintf(xstats[i].name, sizeof(xstats[i].name), + "%s", rte_ixgbevf_stats_strings[i].name); + xstats[i].value = *(uint64_t *)(((char *)hw_stats) + + rte_ixgbevf_stats_strings[i].offset); + } + + return IXGBEVF_NB_XSTATS; +} + +static void +ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + ixgbevf_update_stats(dev); + + if (stats == NULL) + return; + + stats->ipackets = hw_stats->vfgprc; + stats->ibytes = hw_stats->vfgorc; + stats->opackets = hw_stats->vfgptc; + stats->obytes = hw_stats->vfgotc; + stats->imcasts = hw_stats->vfmprc; + /* stats->imcasts should be removed as 
imcasts is deprecated */ +} + +static void +ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) +{ + struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*) + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + /* Sync HW register to the last stats */ + ixgbevf_dev_stats_get(dev, NULL); + + /* reset HW current stats*/ + hw_stats->vfgprc = 0; + hw_stats->vfgorc = 0; + hw_stats->vfgptc = 0; + hw_stats->vfgotc = 0; + hw_stats->vfmprc = 0; + +} + +static void +ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + + dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; + dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; + if (RTE_ETH_DEV_SRIOV(dev).active == 0) { + /* + * When DCB/VT is off, maximum number of queues changes, + * except for 82598EB, which remains constant. + */ + if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && + hw->mac.type != ixgbe_mac_82598EB) + dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES; + } + dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */ + dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */ + dev_info->max_mac_addrs = hw->mac.num_rar_entries; + dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; + dev_info->max_vfs = dev->pci_dev->max_vfs; + if (hw->mac.type == ixgbe_mac_82598EB) + dev_info->max_vmdq_pools = ETH_16_POOLS; + else + dev_info->max_vmdq_pools = ETH_64_POOLS; + dev_info->vmdq_queue_num = dev_info->max_rx_queues; + dev_info->rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + + /* + * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV + * mode. 
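 *
 * Illustrative aside: applications can discover this at run time rather
 * than checking MAC types themselves, since DEV_RX_OFFLOAD_TCP_LRO is only
 * advertised when the condition below holds.  Example (port number is an
 * example only):
 *
 *     struct rte_eth_dev_info info;
 *     rte_eth_dev_info_get(port_id, &info);
 *     if (info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
 *             ;   // LRO (RSC) can be requested on this port
 *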
+ */ + if ((hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540) && + !RTE_ETH_DEV_SRIOV(dev).active) + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO; + + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; + + dev_info->tx_offload_capa = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO; + + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) + dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = IXGBE_DEFAULT_RX_PTHRESH, + .hthresh = IXGBE_DEFAULT_RX_HTHRESH, + .wthresh = IXGBE_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = IXGBE_DEFAULT_TX_PTHRESH, + .hthresh = IXGBE_DEFAULT_TX_HTHRESH, + .wthresh = IXGBE_DEFAULT_TX_WTHRESH, + }, + .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, + .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, + .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | + ETH_TXQ_FLAGS_NOOFFLOADS, + }; + + dev_info->rx_desc_lim = rx_desc_lim; + dev_info->tx_desc_lim = tx_desc_lim; + + dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); + dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); + dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; + + dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G; + if (hw->mac.type == ixgbe_mac_X540 || + hw->mac.type == ixgbe_mac_X540_vf || + hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550_vf) { + dev_info->speed_capa |= ETH_LINK_SPEED_100M; + } +} + +static const uint32_t * +ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + /* For non-vec functions, + * refers to ixgbe_rxd_pkt_info_to_pkt_type(); + * for vec functions, + * refers to _recv_raw_pkts_vec(). 
+ */ + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_TUNNEL_IP, + RTE_PTYPE_INNER_L3_IPV6, + RTE_PTYPE_INNER_L3_IPV6_EXT, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == ixgbe_recv_pkts || + dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc || + dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc || + dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc) + return ptypes; + return NULL; +} + +static void +ixgbevf_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; + dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; + dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */ + dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */ + dev_info->max_mac_addrs = hw->mac.num_rar_entries; + dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; + dev_info->max_vfs = dev->pci_dev->max_vfs; + if (hw->mac.type == ixgbe_mac_82598EB) + dev_info->max_vmdq_pools = ETH_16_POOLS; + else + dev_info->max_vmdq_pools = ETH_64_POOLS; + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = IXGBE_DEFAULT_RX_PTHRESH, + .hthresh = IXGBE_DEFAULT_RX_HTHRESH, + .wthresh = IXGBE_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = IXGBE_DEFAULT_TX_PTHRESH, + .hthresh = IXGBE_DEFAULT_TX_HTHRESH, + .wthresh = IXGBE_DEFAULT_TX_WTHRESH, + }, + .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, + .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, + .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | + ETH_TXQ_FLAGS_NOOFFLOADS, + }; + + dev_info->rx_desc_lim = rx_desc_lim; + dev_info->tx_desc_lim = tx_desc_lim; +} + +/* return 0 means link status changed, -1 means not changed */ +static int +ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_link link, old; + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; + int link_up; + int diag; + + link.link_status = ETH_LINK_DOWN; + link.link_speed = 0; + link.link_duplex = ETH_LINK_HALF_DUPLEX; + memset(&old, 0, sizeof(old)); + rte_ixgbe_dev_atomic_read_link_status(dev, &old); + + hw->mac.get_link_status = true; + + /* check if it needs to wait to complete, if lsc interrupt is enabled */ + if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) + diag = ixgbe_check_link(hw, &link_speed, &link_up, 0); + else + diag = ixgbe_check_link(hw, &link_speed, &link_up, 1); + + if (diag != 0) { + link.link_speed = ETH_SPEED_NUM_100M; + link.link_duplex = ETH_LINK_FULL_DUPLEX; + rte_ixgbe_dev_atomic_write_link_status(dev, &link); + if (link.link_status == old.link_status) + return -1; + return 0; + } + + if (link_up == 0) { + rte_ixgbe_dev_atomic_write_link_status(dev, &link); + if (link.link_status == 
old.link_status) + return -1; + return 0; + } + link.link_status = ETH_LINK_UP; + link.link_duplex = ETH_LINK_FULL_DUPLEX; + + switch (link_speed) { + default: + case IXGBE_LINK_SPEED_UNKNOWN: + link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_speed = ETH_SPEED_NUM_100M; + break; + + case IXGBE_LINK_SPEED_100_FULL: + link.link_speed = ETH_SPEED_NUM_100M; + break; + + case IXGBE_LINK_SPEED_1GB_FULL: + link.link_speed = ETH_SPEED_NUM_1G; + break; + + case IXGBE_LINK_SPEED_10GB_FULL: + link.link_speed = ETH_SPEED_NUM_10G; + break; + } + rte_ixgbe_dev_atomic_write_link_status(dev, &link); + + if (link.link_status == old.link_status) + return -1; + + return 0; +} + +static void +ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t fctrl; + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); +} + +static void +ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t fctrl; + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl &= (~IXGBE_FCTRL_UPE); + if (dev->data->all_multicast == 1) + fctrl |= IXGBE_FCTRL_MPE; + else + fctrl &= (~IXGBE_FCTRL_MPE); + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); +} + +static void +ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t fctrl; + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_MPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); +} + +static void +ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t fctrl; + + if (dev->data->promiscuous == 1) + return; /* must remain in all_multicast mode */ + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl &= (~IXGBE_FCTRL_MPE); + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); +} + +/** + * It clears the interrupt causes and enables the interrupt. + * It will be called once only during nic initialized. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + ixgbe_dev_link_status_print(dev); + intr->mask |= IXGBE_EICR_LSC; + + return 0; +} + +/** + * It clears the interrupt causes and enables the interrupt. + * It will be called once only during nic initialized. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + intr->mask |= IXGBE_EICR_RTX_QUEUE; + + return 0; +} + +/* + * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. 
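 *
 * Illustrative aside: the LSC flag latched here is what eventually reaches
 * the application's RTE_ETH_EVENT_INTR_LSC callback.  A hedged sketch of
 * the application side (callback and port number are examples):
 *
 *     static void link_cb(uint8_t port, enum rte_eth_event_type ev, void *arg)
 *     {
 *             struct rte_eth_link link;
 *             rte_eth_link_get_nowait(port, &link);
 *     }
 *     ...
 *     conf.intr_conf.lsc = 1;   // in struct rte_eth_conf before configure
 *     rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                   link_cb, NULL);
 *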
+ */ +static int +ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) +{ + uint32_t eicr; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + /* clear all cause mask */ + ixgbe_disable_intr(hw); + + /* read-on-clear nic registers here */ + eicr = IXGBE_READ_REG(hw, IXGBE_EICR); + PMD_DRV_LOG(DEBUG, "eicr %x", eicr); + + intr->flags = 0; + + /* set flag for async link update */ + if (eicr & IXGBE_EICR_LSC) + intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + + if (eicr & IXGBE_EICR_MAILBOX) + intr->flags |= IXGBE_FLAG_MAILBOX; + + if (hw->mac.type == ixgbe_mac_X550EM_x && + hw->phy.type == ixgbe_phy_x550em_ext_t && + (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x)) + intr->flags |= IXGBE_FLAG_PHY_INTERRUPT; + + return 0; +} + +/** + * It gets and then prints the link status. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static void +ixgbe_dev_link_status_print(struct rte_eth_dev *dev) +{ + struct rte_eth_link link; + + memset(&link, 0, sizeof(link)); + rte_ixgbe_dev_atomic_read_link_status(dev, &link); + if (link.link_status) { + PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", + (int)(dev->data->port_id), + (unsigned)link.link_speed, + link.link_duplex == ETH_LINK_FULL_DUPLEX ? + "full-duplex" : "half-duplex"); + } else { + PMD_INIT_LOG(INFO, " Port %d: Link Down", + (int)(dev->data->port_id)); + } + PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d", + dev->pci_dev->addr.domain, + dev->pci_dev->addr.bus, + dev->pci_dev->addr.devid, + dev->pci_dev->addr.function); +} + +/* + * It executes link_update after knowing an interrupt occurred. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. 
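 *
 * Illustrative aside: instead of reporting the link change immediately,
 * this handler arms an EAL alarm (1 s when the link is likely coming up,
 * 4 s when it is likely going down) and lets the delayed handler finish
 * the job once the state is stable.  Generic sketch of the same alarm API
 * (callback name and delay are examples):
 *
 *     static void my_alarm_cb(void *arg) { }
 *     rte_eal_alarm_set(4 * 1000 * 1000, my_alarm_cb, NULL);  // ~4 s, in us
 *     rte_eal_alarm_cancel(my_alarm_cb, NULL);   // if no longer needed
 *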
+ */ +static int +ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + int64_t timeout; + struct rte_eth_link link; + int intr_enable_delay = false; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags); + + if (intr->flags & IXGBE_FLAG_MAILBOX) { + ixgbe_pf_mbx_process(dev); + intr->flags &= ~IXGBE_FLAG_MAILBOX; + } + + if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { + ixgbe_handle_lasi(hw); + intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; + } + + if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { + /* get the link status before link update, for predicting later */ + memset(&link, 0, sizeof(link)); + rte_ixgbe_dev_atomic_read_link_status(dev, &link); + + ixgbe_dev_link_update(dev, 0); + + /* likely to up */ + if (!link.link_status) + /* handle it 1 sec later, wait it being stable */ + timeout = IXGBE_LINK_UP_CHECK_TIMEOUT; + /* likely to down */ + else + /* handle it 4 sec later, wait it being stable */ + timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT; + + ixgbe_dev_link_status_print(dev); + + intr_enable_delay = true; + } + + if (intr_enable_delay) { + if (rte_eal_alarm_set(timeout * 1000, + ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0) + PMD_DRV_LOG(ERR, "Error setting alarm"); + } else { + PMD_DRV_LOG(DEBUG, "enable intr immediately"); + ixgbe_enable_intr(dev); + rte_intr_enable(&(dev->pci_dev->intr_handle)); + } + + + return 0; +} + +/** + * Interrupt handler which shall be registered for alarm callback for delayed + * handling specific interrupt to wait for the stable nic state. As the + * NIC interrupt state is not stable for ixgbe after link is just down, + * it needs to wait 4 seconds to get the stable status. + * + * @param handle + * Pointer to interrupt handle. + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. + * + * @return + * void + */ +static void +ixgbe_dev_interrupt_delayed_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t eicr; + + eicr = IXGBE_READ_REG(hw, IXGBE_EICR); + if (eicr & IXGBE_EICR_MAILBOX) + ixgbe_pf_mbx_process(dev); + + if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { + ixgbe_handle_lasi(hw); + intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; + } + + if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { + ixgbe_dev_link_update(dev, 0); + intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; + ixgbe_dev_link_status_print(dev); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); + } + + PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr); + ixgbe_enable_intr(dev); + rte_intr_enable(&(dev->pci_dev->intr_handle)); +} + +/** + * Interrupt handler triggered by NIC for handling + * specific interrupt. + * + * @param handle + * Pointer to interrupt handle. + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. 
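+ *
+ * It simply refreshes the cached interrupt causes via
+ * ixgbe_dev_interrupt_get_status() and then services them through
+ * ixgbe_dev_interrupt_action().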
+ * + * @return + * void + */ +static void +ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, + void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + ixgbe_dev_interrupt_get_status(dev); + ixgbe_dev_interrupt_action(dev); +} + +static int +ixgbe_dev_led_on(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; +} + +static int +ixgbe_dev_led_off(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; +} + +static int +ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct ixgbe_hw *hw; + uint32_t mflcn_reg; + uint32_t fccfg_reg; + int rx_pause; + int tx_pause; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + fc_conf->pause_time = hw->fc.pause_time; + fc_conf->high_water = hw->fc.high_water[0]; + fc_conf->low_water = hw->fc.low_water[0]; + fc_conf->send_xon = hw->fc.send_xon; + fc_conf->autoneg = !hw->fc.disable_fc_autoneg; + + /* + * Return rx_pause status according to actual setting of + * MFLCN register. + */ + mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); + if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE)) + rx_pause = 1; + else + rx_pause = 0; + + /* + * Return tx_pause status according to actual setting of + * FCCFG register. + */ + fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); + if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY)) + tx_pause = 1; + else + tx_pause = 0; + + if (rx_pause && tx_pause) + fc_conf->mode = RTE_FC_FULL; + else if (rx_pause) + fc_conf->mode = RTE_FC_RX_PAUSE; + else if (tx_pause) + fc_conf->mode = RTE_FC_TX_PAUSE; + else + fc_conf->mode = RTE_FC_NONE; + + return 0; +} + +static int +ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct ixgbe_hw *hw; + int err; + uint32_t rx_buf_size; + uint32_t max_high_water; + uint32_t mflcn; + enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { + ixgbe_fc_none, + ixgbe_fc_rx_pause, + ixgbe_fc_tx_pause, + ixgbe_fc_full + }; + + PMD_INIT_FUNC_TRACE(); + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)); + PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); + + /* + * At least reserve one Ethernet frame for watermark + * high_water/low_water in kilo bytes for ixgbe + */ + max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; + if ((fc_conf->high_water > max_high_water) || + (fc_conf->high_water < fc_conf->low_water)) { + PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); + PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); + return -EINVAL; + } + + hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode]; + hw->fc.pause_time = fc_conf->pause_time; + hw->fc.high_water[0] = fc_conf->high_water; + hw->fc.low_water[0] = fc_conf->low_water; + hw->fc.send_xon = fc_conf->send_xon; + hw->fc.disable_fc_autoneg = !fc_conf->autoneg; + + err = ixgbe_fc_enable(hw); + + /* Not negotiated is not an error case */ + if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) { + + /* check if we want to forward MAC frames - driver doesn't have native + * capability to do that, so we'll write the registers ourselves */ + + mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); + + /* set or clear MFLCN.PMCF bit depending on configuration */ + if 
(fc_conf->mac_ctrl_frame_fwd != 0) + mflcn |= IXGBE_MFLCN_PMCF; + else + mflcn &= ~IXGBE_MFLCN_PMCF; + + IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn); + IXGBE_WRITE_FLUSH(hw); + + return 0; + } + + PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err); + return -EIO; +} + +/** + * ixgbe_pfc_enable_generic - Enable flow control + * @hw: pointer to hardware structure + * @tc_num: traffic class number + * Enable flow control according to the current settings. + */ +static int +ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) +{ + int ret_val = 0; + uint32_t mflcn_reg, fccfg_reg; + uint32_t reg; + uint32_t fcrtl, fcrth; + uint8_t i; + uint8_t nb_rx_en; + + /* Validate the water mark configuration */ + if (!hw->fc.pause_time) { + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* Low water mark of zero causes XOFF floods */ + if (hw->fc.current_mode & ixgbe_fc_tx_pause) { + /* High/Low water can not be 0 */ + if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) { + PMD_INIT_LOG(ERR, "Invalid water mark configuration"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) { + PMD_INIT_LOG(ERR, "Invalid water mark configuration"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + } + /* Negotiate the fc mode to use */ + ixgbe_fc_autoneg(hw); + + /* Disable any previous flow control settings */ + mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); + mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE); + + fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); + fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); + + switch (hw->fc.current_mode) { + case ixgbe_fc_none: + /* + * If the count of enabled RX Priority Flow control >1, + * and the TX pause can not be disabled + */ + nb_rx_en = 0; + for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); + if (reg & IXGBE_FCRTH_FCEN) + nb_rx_en++; + } + if (nb_rx_en > 1) + fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY; + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + mflcn_reg |= IXGBE_MFLCN_RPFCE; + /* + * If the count of enabled RX Priority Flow control >1, + * and the TX pause can not be disabled + */ + nb_rx_en = 0; + for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); + if (reg & IXGBE_FCRTH_FCEN) + nb_rx_en++; + } + if (nb_rx_en > 1) + fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY; + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY; + break; + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + mflcn_reg |= IXGBE_MFLCN_RPFCE; + fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; + break; + default: + PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; + } + + /* Set 802.3x based flow control settings. */ + mflcn_reg |= IXGBE_MFLCN_DPF; + IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); + IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); + + /* Set up and enable Rx high/low water mark thresholds, enable XON. 
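+ *
+ * The water marks are kept in KB, so the "<< 10" below converts them to the
+ * byte counts written into FCRTL/FCRTH; e.g. (illustrative only) a low_water
+ * of 32 KB is programmed as (32 << 10) | IXGBE_FCRTL_XONE.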
*/ + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && + hw->fc.high_water[tc_num]) { + fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl); + fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN; + } else { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0); + /* + * In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the maximum FCRTH value. This allows the Tx + * switch to function even under heavy Rx workloads. + */ + fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32; + } + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth); + + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time * 0x00010001; + for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); + +out: + return ret_val; +} + +static int +ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int32_t ret_val = IXGBE_NOT_IMPLEMENTED; + + if (hw->mac.type != ixgbe_mac_82598EB) { + ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num); + } + return ret_val; +} + +static int +ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf) +{ + int err; + uint32_t rx_buf_size; + uint32_t max_high_water; + uint8_t tc_num; + uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_dcb_config *dcb_config = + IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); + + enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { + ixgbe_fc_none, + ixgbe_fc_rx_pause, + ixgbe_fc_tx_pause, + ixgbe_fc_full + }; + + PMD_INIT_FUNC_TRACE(); + + ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map); + tc_num = map[pfc_conf->priority]; + rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)); + PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); + /* + * At least reserve one Ethernet frame for watermark + * high_water/low_water in kilo bytes for ixgbe + */ + max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; + if ((pfc_conf->fc.high_water > max_high_water) || + (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) { + PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); + PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); + return -EINVAL; + } + + hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode]; + hw->fc.pause_time = pfc_conf->fc.pause_time; + hw->fc.send_xon = pfc_conf->fc.send_xon; + hw->fc.low_water[tc_num] = pfc_conf->fc.low_water; + hw->fc.high_water[tc_num] = pfc_conf->fc.high_water; + + err = ixgbe_dcb_pfc_enable(dev,tc_num); + + /* Not negotiated is not an error case */ + if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) + return 0; + + PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err); + return -EIO; +} + +static int +ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + uint16_t i, sp_reta_size; + uint8_t j, mask; + uint32_t reta, r; + uint16_t idx, shift; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reta_reg; + + PMD_INIT_FUNC_TRACE(); + + if (!ixgbe_rss_update_sp(hw->mac.type)) { + PMD_DRV_LOG(ERR, "RSS reta update is not supported on 
this " + "NIC."); + return -ENOTSUP; + } + + sp_reta_size = ixgbe_reta_size_get(hw->mac.type); + if (reta_size != sp_reta_size) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)\n", reta_size, sp_reta_size); + return -EINVAL; + } + + for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & + IXGBE_4_BIT_MASK); + if (!mask) + continue; + reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); + if (mask == IXGBE_4_BIT_MASK) + r = 0; + else + r = IXGBE_READ_REG(hw, reta_reg); + for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) { + if (mask & (0x1 << j)) + reta |= reta_conf[idx].reta[shift + j] << + (CHAR_BIT * j); + else + reta |= r & (IXGBE_8_BIT_MASK << + (CHAR_BIT * j)); + } + IXGBE_WRITE_REG(hw, reta_reg, reta); + } + + return 0; +} + +static int +ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + uint16_t i, sp_reta_size; + uint8_t j, mask; + uint32_t reta; + uint16_t idx, shift; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reta_reg; + + PMD_INIT_FUNC_TRACE(); + sp_reta_size = ixgbe_reta_size_get(hw->mac.type); + if (reta_size != sp_reta_size) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)\n", reta_size, sp_reta_size); + return -EINVAL; + } + + for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & + IXGBE_4_BIT_MASK); + if (!mask) + continue; + + reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); + reta = IXGBE_READ_REG(hw, reta_reg); + for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) { + if (mask & (0x1 << j)) + reta_conf[idx].reta[shift + j] = + ((reta >> (CHAR_BIT * j)) & + IXGBE_8_BIT_MASK); + } + } + + return 0; +} + +static void +ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + uint32_t index, uint32_t pool) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t enable_addr = 1; + + ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr); +} + +static void +ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + ixgbe_clear_rar(hw, index); +} + +static void +ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) +{ + ixgbe_remove_rar(dev, 0); + + ixgbe_add_rar(dev, addr, 0, 0); +} + +static int +ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + uint32_t hlreg0; + uint32_t maxfrs; + struct ixgbe_hw *hw; + struct rte_eth_dev_info dev_info; + uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + + ixgbe_dev_info_get(dev, &dev_info); + + /* check that mtu is within the allowed range */ + if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) + return -EINVAL; + + /* refuse mtu that requires the support of scattered packets when this + * feature has not been enabled before. 
*/ + if (!dev->data->scattered_rx && + (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > + dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + + /* switch to jumbo mode if needed */ + if (frame_size > ETHER_MAX_LEN) { + dev->data->dev_conf.rxmode.jumbo_frame = 1; + hlreg0 |= IXGBE_HLREG0_JUMBOEN; + } else { + dev->data->dev_conf.rxmode.jumbo_frame = 0; + hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; + } + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + + /* update max frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + + maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); + maxfrs &= 0x0000FFFF; + maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16); + IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs); + + return 0; +} + +/* + * Virtual Function operations + */ +static void +ixgbevf_intr_disable(struct ixgbe_hw *hw) +{ + PMD_INIT_FUNC_TRACE(); + + /* Clear interrupt mask to stop from interrupts being generated */ + IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); + + IXGBE_WRITE_FLUSH(hw); +} + +static void +ixgbevf_intr_enable(struct ixgbe_hw *hw) +{ + PMD_INIT_FUNC_TRACE(); + + /* VF enable interrupt autoclean */ + IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK); + IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK); + + IXGBE_WRITE_FLUSH(hw); +} + +static int +ixgbevf_dev_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_conf* conf = &dev->data->dev_conf; + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; + + PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", + dev->data->port_id); + + /* + * VF has no ability to enable/disable HW CRC + * Keep the persistent behavior the same as Host PF + */ +#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC + if (!conf->rxmode.hw_strip_crc) { + PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); + conf->rxmode.hw_strip_crc = 1; + } +#else + if (conf->rxmode.hw_strip_crc) { + PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); + conf->rxmode.hw_strip_crc = 0; + } +#endif + + /* + * Initialize to TRUE. If any of Rx queues doesn't meet the bulk + * allocation or vector Rx preconditions we will reset it. + */ + adapter->rx_bulk_alloc_allowed = true; + adapter->rx_vec_allowed = true; + + return 0; +} + +static int +ixgbevf_dev_start(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t intr_vector = 0; + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + + int err, mask = 0; + + PMD_INIT_FUNC_TRACE(); + + hw->mac.ops.reset_hw(hw); + hw->mac.get_link_status = true; + + /* negotiate mailbox API version to use with the PF. 
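+ * The result is not checked here; if the PF only speaks an older mailbox
+ * API the VF is expected to simply keep using that older version.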
*/ + ixgbevf_negotiate_api(hw); + + ixgbevf_dev_tx_init(dev); + + /* This can fail when allocating mbufs for descriptor rings */ + err = ixgbevf_dev_rx_init(dev); + if (err) { + PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err); + ixgbe_dev_clear_queues(dev); + return err; + } + + /* Set vfta */ + ixgbevf_set_vfta_all(dev,1); + + /* Set HW strip */ + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \ + ETH_VLAN_EXTEND_MASK; + ixgbevf_vlan_offload_set(dev, mask); + + ixgbevf_dev_rxtx_start(dev); + + /* check and configure queue intr-vector mapping */ + if (dev->data->dev_conf.intr_conf.rxq != 0) { + intr_vector = dev->data->nb_rx_queues; + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -1; + } + + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), 0); + if (intr_handle->intr_vec == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec\n", dev->data->nb_rx_queues); + return -ENOMEM; + } + } + ixgbevf_configure_msix(dev); + + rte_intr_enable(intr_handle); + + /* Re-enable interrupt for VF */ + ixgbevf_intr_enable(hw); + + return 0; +} + +static void +ixgbevf_dev_stop(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + + PMD_INIT_FUNC_TRACE(); + + hw->adapter_stopped = 1; + ixgbe_stop_adapter(hw); + + /* + * Clear what we set, but we still keep shadow_vfta to + * restore after device starts + */ + ixgbevf_set_vfta_all(dev,0); + + /* Clear stored conf */ + dev->data->scattered_rx = 0; + + ixgbe_dev_clear_queues(dev); + + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec != NULL) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } +} + +static void +ixgbevf_dev_close(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + ixgbe_reset_hw(hw); + + ixgbevf_dev_stop(dev); + + ixgbe_dev_free_queues(dev); + + /** + * Remove the VF MAC address ro ensure + * that the VF traffic goes to the PF + * after stop, close and detach of the VF + **/ + ixgbevf_remove_mac_addr(dev, 0); +} + +static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vfta * shadow_vfta = + IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + int i = 0, j = 0, vfta = 0, mask = 1; + + for (i = 0; i < IXGBE_VFTA_SIZE; i++){ + vfta = shadow_vfta->vfta[i]; + if (vfta) { + mask = 1; + for (j = 0; j < 32; j++){ + if (vfta & mask) + ixgbe_set_vfta(hw, (i<<5)+j, 0, on); + mask<<=1; + } + } + } + +} + +static int +ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vfta * shadow_vfta = + IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + uint32_t vid_idx = 0; + uint32_t vid_bit = 0; + int ret = 0; + + PMD_INIT_FUNC_TRACE(); + + /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */ + ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on); + if (ret) { + PMD_INIT_LOG(ERR, "Unable to set VF vlan"); + return ret; + } + vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); + vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); + + /* Save what we set and retore it after device reset */ + if (on) + 
shadow_vfta->vfta[vid_idx] |= vid_bit; + else + shadow_vfta->vfta[vid_idx] &= ~vid_bit; + + return 0; +} + +static void +ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t ctrl; + + PMD_INIT_FUNC_TRACE(); + + if (queue >= hw->mac.max_rx_queues) + return; + + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); + if (on) + ctrl |= IXGBE_RXDCTL_VME; + else + ctrl &= ~IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); + + ixgbe_vlan_hw_strip_bitmap_set( dev, queue, on); +} + +static void +ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t i; + int on = 0; + + /* VF function only support hw strip feature, others are not support */ + if (mask & ETH_VLAN_STRIP_MASK) { + on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip); + + for (i = 0; i < hw->mac.max_rx_queues; i++) + ixgbevf_vlan_strip_queue_set(dev,i,on); + } +} + +static int +ixgbe_vmdq_mode_check(struct ixgbe_hw *hw) +{ + uint32_t reg_val; + + /* we only need to do this if VMDq is enabled */ + reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) { + PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting"); + return -1; + } + + return 0; +} + +static uint32_t +ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr) +{ + uint32_t vector = 0; + switch (hw->mac.mc_filter_type) { + case 0: /* use bits [47:36] of the address */ + vector = ((uc_addr->addr_bytes[4] >> 4) | + (((uint16_t)uc_addr->addr_bytes[5]) << 4)); + break; + case 1: /* use bits [46:35] of the address */ + vector = ((uc_addr->addr_bytes[4] >> 3) | + (((uint16_t)uc_addr->addr_bytes[5]) << 5)); + break; + case 2: /* use bits [45:34] of the address */ + vector = ((uc_addr->addr_bytes[4] >> 2) | + (((uint16_t)uc_addr->addr_bytes[5]) << 6)); + break; + case 3: /* use bits [43:32] of the address */ + vector = ((uc_addr->addr_bytes[4]) | + (((uint16_t)uc_addr->addr_bytes[5]) << 8)); + break; + default: /* Invalid mc_filter_type */ + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +static int +ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr, + uint8_t on) +{ + uint32_t vector; + uint32_t uta_idx; + uint32_t reg_val; + uint32_t uta_shift; + uint32_t rc; + const uint32_t ixgbe_uta_idx_mask = 0x7F; + const uint32_t ixgbe_uta_bit_shift = 5; + const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1; + const uint32_t bit1 = 0x1; + + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_uta_info *uta_info = + IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); + + /* The UTA table only exists on 82599 hardware and newer */ + if (hw->mac.type < ixgbe_mac_82599EB) + return -ENOTSUP; + + vector = ixgbe_uta_vector(hw,mac_addr); + uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask; + uta_shift = vector & ixgbe_uta_bit_mask; + + rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0); + if (rc == on) + return 0; + + reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx)); + if (on) { + uta_info->uta_in_use++; + reg_val |= (bit1 << uta_shift); + uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift); + } else { + uta_info->uta_in_use--; + reg_val &= ~(bit1 << uta_shift); + uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift); + } + + IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), 
reg_val); + + if (uta_info->uta_in_use > 0) + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, + IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); + else + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type); + + return 0; +} + +static int +ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) +{ + int i; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_uta_info *uta_info = + IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); + + /* The UTA table only exists on 82599 hardware and newer */ + if (hw->mac.type < ixgbe_mac_82599EB) + return -ENOTSUP; + + if (on) { + for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { + uta_info->uta_shadow[i] = ~0; + IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); + } + } else { + for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { + uta_info->uta_shadow[i] = 0; + IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); + } + } + return 0; + +} + +uint32_t +ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val) +{ + uint32_t new_val = orig_val; + + if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) + new_val |= IXGBE_VMOLR_AUPE; + if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC) + new_val |= IXGBE_VMOLR_ROMPE; + if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC) + new_val |= IXGBE_VMOLR_ROPE; + if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST) + new_val |= IXGBE_VMOLR_BAM; + if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST) + new_val |= IXGBE_VMOLR_MPE; + + return new_val; +} + +static int +ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool, + uint16_t rx_mask, uint8_t on) +{ + int val = 0; + + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); + + if (hw->mac.type == ixgbe_mac_82598EB) { + PMD_INIT_LOG(ERR, "setting VF receive mode set should be done" + " on 82599 hardware and newer"); + return -ENOTSUP; + } + if (ixgbe_vmdq_mode_check(hw) < 0) + return -ENOTSUP; + + val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val); + + if (on) + vmolr |= val; + else + vmolr &= ~val; + + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); + + return 0; +} + +static int +ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on) +{ + uint32_t reg,addr; + uint32_t val; + const uint8_t bit1 = 0x1; + + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (ixgbe_vmdq_mode_check(hw) < 0) + return -ENOTSUP; + + addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2); + reg = IXGBE_READ_REG(hw, addr); + val = bit1 << pool; + + if (on) + reg |= val; + else + reg &= ~val; + + IXGBE_WRITE_REG(hw, addr,reg); + + return 0; +} + +static int +ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on) +{ + uint32_t reg,addr; + uint32_t val; + const uint8_t bit1 = 0x1; + + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (ixgbe_vmdq_mode_check(hw) < 0) + return -ENOTSUP; + + addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2); + reg = IXGBE_READ_REG(hw, addr); + val = bit1 << pool; + + if (on) + reg |= val; + else + reg &= ~val; + + IXGBE_WRITE_REG(hw, addr,reg); + + return 0; +} + +static int +ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan, + uint64_t pool_mask, uint8_t vlan_on) +{ + int ret = 0; + uint16_t pool_idx; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (ixgbe_vmdq_mode_check(hw) < 0) + return -ENOTSUP; + for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) { + if (pool_mask & ((uint64_t)(1ULL << pool_idx))) { + ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on); + if (ret < 0) + return ret; + } + } + + return 
ret; +} + +#define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */ +#define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */ +#define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */ +#define IXGBE_MRCTL_VLME 0x08 /* VLAN Mirroring. */ +#define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \ + ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \ + ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN)) + +static int +ixgbe_mirror_rule_set(struct rte_eth_dev *dev, + struct rte_eth_mirror_conf *mirror_conf, + uint8_t rule_id, uint8_t on) +{ + uint32_t mr_ctl,vlvf; + uint32_t mp_lsb = 0; + uint32_t mv_msb = 0; + uint32_t mv_lsb = 0; + uint32_t mp_msb = 0; + uint8_t i = 0; + int reg_index = 0; + uint64_t vlan_mask = 0; + + const uint8_t pool_mask_offset = 32; + const uint8_t vlan_mask_offset = 32; + const uint8_t dst_pool_offset = 8; + const uint8_t rule_mr_offset = 4; + const uint8_t mirror_rule_mask= 0x0F; + + struct ixgbe_mirror_info *mr_info = + (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint8_t mirror_type = 0; + + if (ixgbe_vmdq_mode_check(hw) < 0) + return -ENOTSUP; + + if (rule_id >= IXGBE_MAX_MIRROR_RULES) + return -EINVAL; + + if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) { + PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.", + mirror_conf->rule_type); + return -EINVAL; + } + + if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { + mirror_type |= IXGBE_MRCTL_VLME; + /* Check if vlan id is valid and find conresponding VLAN ID index in VLVF */ + for (i = 0;i < IXGBE_VLVF_ENTRIES; i++) { + if (mirror_conf->vlan.vlan_mask & (1ULL << i)) { + /* search vlan id related pool vlan filter index */ + reg_index = ixgbe_find_vlvf_slot(hw, + mirror_conf->vlan.vlan_id[i]); + if (reg_index < 0) + return -EINVAL; + vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index)); + if ((vlvf & IXGBE_VLVF_VIEN) && + ((vlvf & IXGBE_VLVF_VLANID_MASK) == + mirror_conf->vlan.vlan_id[i])) + vlan_mask |= (1ULL << reg_index); + else + return -EINVAL; + } + } + + if (on) { + mv_lsb = vlan_mask & 0xFFFFFFFF; + mv_msb = vlan_mask >> vlan_mask_offset; + + mr_info->mr_conf[rule_id].vlan.vlan_mask = + mirror_conf->vlan.vlan_mask; + for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) { + if (mirror_conf->vlan.vlan_mask & (1ULL << i)) + mr_info->mr_conf[rule_id].vlan.vlan_id[i] = + mirror_conf->vlan.vlan_id[i]; + } + } else { + mv_lsb = 0; + mv_msb = 0; + mr_info->mr_conf[rule_id].vlan.vlan_mask = 0; + for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) + mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0; + } + } + + /* + * if enable pool mirror, write related pool mask register,if disable + * pool mirror, clear PFMRVM register + */ + if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) { + mirror_type |= IXGBE_MRCTL_VPME; + if (on) { + mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF; + mp_msb = mirror_conf->pool_mask >> pool_mask_offset; + mr_info->mr_conf[rule_id].pool_mask = + mirror_conf->pool_mask; + + } else { + mp_lsb = 0; + mp_msb = 0; + mr_info->mr_conf[rule_id].pool_mask = 0; + } + } + if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT) + mirror_type |= IXGBE_MRCTL_UPME; + if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT) + mirror_type |= IXGBE_MRCTL_DPME; + + /* read mirror control register and recalculate it */ + mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id)); + + if (on) { + mr_ctl |= mirror_type; + mr_ctl &= mirror_rule_mask; + mr_ctl |= mirror_conf->dst_pool << dst_pool_offset; + } else + mr_ctl &= 
~(mirror_conf->rule_type & mirror_rule_mask); + + mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type; + mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool; + + /* write mirrror control register */ + IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); + + /* write pool mirrror control register */ + if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) { + IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb); + IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), + mp_msb); + } + /* write VLAN mirrror control register */ + if (mirror_conf->rule_type == ETH_MIRROR_VLAN) { + IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb); + IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), + mv_msb); + } + + return 0; +} + +static int +ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id) +{ + int mr_ctl = 0; + uint32_t lsb_val = 0; + uint32_t msb_val = 0; + const uint8_t rule_mr_offset = 4; + + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_mirror_info *mr_info = + (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); + + if (ixgbe_vmdq_mode_check(hw) < 0) + return -ENOTSUP; + + memset(&mr_info->mr_conf[rule_id], 0, + sizeof(struct rte_eth_mirror_conf)); + + /* clear PFVMCTL register */ + IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); + + /* clear pool mask register */ + IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val); + IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val); + + /* clear vlan mask register */ + IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val); + IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val); + + return 0; +} + +static int +ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + uint32_t mask; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS); + mask |= (1 << IXGBE_MISC_VEC_ID); + RTE_SET_USED(queue_id); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); + + rte_intr_enable(&dev->pci_dev->intr_handle); + + return 0; +} + +static int +ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + uint32_t mask; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS); + mask &= ~(1 << IXGBE_MISC_VEC_ID); + RTE_SET_USED(queue_id); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); + + return 0; +} + +static int +ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + uint32_t mask; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + if (queue_id < 16) { + ixgbe_disable_intr(hw); + intr->mask |= (1 << queue_id); + ixgbe_enable_intr(dev); + } else if (queue_id < 32) { + mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); + mask &= (1 << queue_id); + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); + } else if (queue_id < 64) { + mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); + mask &= (1 << (queue_id - 32)); + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); + } + rte_intr_enable(&dev->pci_dev->intr_handle); + + return 0; +} + +static int +ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + uint32_t mask; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + if (queue_id < 16) { + ixgbe_disable_intr(hw); + intr->mask &= ~(1 << queue_id); + 
ixgbe_enable_intr(dev); + } else if (queue_id < 32) { + mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); + mask &= ~(1 << queue_id); + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); + } else if (queue_id < 64) { + mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); + mask &= ~(1 << (queue_id - 32)); + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); + } + + return 0; +} + +static void +ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, + uint8_t queue, uint8_t msix_vector) +{ + uint32_t tmp, idx; + + if (direction == -1) { + /* other causes */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); + tmp &= ~0xFF; + tmp |= msix_vector; + IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp); + } else { + /* rx or tx cause */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + idx = ((16 * (queue & 1)) + (8 * direction)); + tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); + tmp &= ~(0xFF << idx); + tmp |= (msix_vector << idx); + IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp); + } +} + +/** + * set the IVAR registers, mapping interrupt causes to vectors + * @param hw + * pointer to ixgbe_hw struct + * @direction + * 0 for Rx, 1 for Tx, -1 for other causes + * @queue + * queue to map the corresponding interrupt to + * @msix_vector + * the vector to map to the corresponding queue + */ +static void +ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, + uint8_t queue, uint8_t msix_vector) +{ + uint32_t tmp, idx; + + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + if (hw->mac.type == ixgbe_mac_82598EB) { + if (direction == -1) + direction = 0; + idx = (((direction * 64) + queue) >> 2) & 0x1F; + tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx)); + tmp &= ~(0xFF << (8 * (queue & 0x3))); + tmp |= (msix_vector << (8 * (queue & 0x3))); + IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp); + } else if ((hw->mac.type == ixgbe_mac_82599EB) || + (hw->mac.type == ixgbe_mac_X540)) { + if (direction == -1) { + /* other causes */ + idx = ((queue & 1) * 8); + tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); + tmp &= ~(0xFF << idx); + tmp |= (msix_vector << idx); + IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp); + } else { + /* rx or tx causes */ + idx = ((16 * (queue & 1)) + (8 * direction)); + tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); + tmp &= ~(0xFF << idx); + tmp |= (msix_vector << idx); + IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp); + } + } +} + +static void +ixgbevf_configure_msix(struct rte_eth_dev *dev) +{ + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t q_idx; + uint32_t vector_idx = IXGBE_MISC_VEC_ID; + + /* won't configure msix register if no mapping is done + * between intr vector and event fd. 
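+ *
+ * (All VF Rx queues are then mapped onto the same vector, as the
+ * IXGBE_VF_MAXMSIVECOTR comment below notes that only one MSI-X vector is
+ * usable on the VF.)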
+ */ + if (!rte_intr_dp_is_en(intr_handle)) + return; + + /* Configure all RX queues of VF */ + for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) { + /* Force all queue use vector 0, + * as IXGBE_VF_MAXMSIVECOTR = 1 + */ + ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx); + intr_handle->intr_vec[q_idx] = vector_idx; + } + + /* Configure VF other cause ivar */ + ixgbevf_set_ivar_map(hw, -1, 1, vector_idx); +} + +/** + * Sets up the hardware to properly generate MSI-X interrupts + * @hw + * board private structure + */ +static void +ixgbe_configure_msix(struct rte_eth_dev *dev) +{ + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t queue_id, base = IXGBE_MISC_VEC_ID; + uint32_t vec = IXGBE_MISC_VEC_ID; + uint32_t mask; + uint32_t gpie; + + /* won't configure msix register if no mapping is done + * between intr vector and event fd + */ + if (!rte_intr_dp_is_en(intr_handle)) + return; + + if (rte_intr_allow_others(intr_handle)) + vec = base = IXGBE_RX_VEC_START; + + /* setup GPIE for MSI-x mode */ + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | + IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME; + /* auto clearing and auto setting corresponding bits in EIMS + * when MSI-X interrupt is triggered + */ + if (hw->mac.type == ixgbe_mac_82598EB) { + IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); + } else { + IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); + } + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); + + /* Populate the IVAR table and set the ITR values to the + * corresponding register. + */ + for (queue_id = 0; queue_id < dev->data->nb_rx_queues; + queue_id++) { + /* by default, 1:1 mapping */ + ixgbe_set_ivar_map(hw, 0, queue_id, vec); + intr_handle->intr_vec[queue_id] = vec; + if (vec < base + intr_handle->nb_efd - 1) + vec++; + } + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, + IXGBE_MISC_VEC_ID); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); + break; + default: + break; + } + IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID), + IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF); + + /* set up to autoclear timer, and the vectors */ + mask = IXGBE_EIMS_ENABLE_MASK; + mask &= ~(IXGBE_EIMS_OTHER | + IXGBE_EIMS_MAILBOX | + IXGBE_EIMS_LSC); + + IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); +} + +static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t tx_rate) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rf_dec, rf_int; + uint32_t bcnrc_val; + uint16_t link_speed = dev->data->dev_link.link_speed; + + if (queue_idx >= hw->mac.max_tx_queues) + return -EINVAL; + + if (tx_rate != 0) { + /* Calculate the rate factor values to set */ + rf_int = (uint32_t)link_speed / (uint32_t)tx_rate; + rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate; + rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate; + + bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; + bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) & + IXGBE_RTTBCNRC_RF_INT_MASK_M); + bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); + } else { + bcnrc_val = 0; + } + + /* + * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise + * set as 0x4. 
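+ *
+ * (Rate-factor example, purely illustrative: with link_speed = 10000 Mb/s
+ * and tx_rate = 2500 Mb/s the code above computes rf_int = 4, rf_dec = 0,
+ * i.e. a rate factor of 4.0 is programmed into RTTBCNRC along with RS_ENA.)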
+ */ + if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) && + (dev->data->dev_conf.rxmode.max_rx_pkt_len >= + IXGBE_MAX_JUMBO_FRAME_SIZE)) + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, + IXGBE_MMW_SIZE_JUMBO_FRAME); + else + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, + IXGBE_MMW_SIZE_DEFAULT); + + /* Set RTTBCNRC of queue X */ + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx); + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, + uint16_t tx_rate, uint64_t q_msk) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vf_info *vfinfo = + *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + uint8_t nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; + uint32_t queue_stride = + IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; + uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx; + uint32_t queue_end = queue_idx + nb_q_per_pool - 1; + uint16_t total_rate = 0; + + if (queue_end >= hw->mac.max_tx_queues) + return -EINVAL; + + if (vfinfo != NULL) { + for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) { + if (vf_idx == vf) + continue; + for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate); + idx++) + total_rate += vfinfo[vf_idx].tx_rate[idx]; + } + } else + return -EINVAL; + + /* Store tx_rate for this vf. */ + for (idx = 0; idx < nb_q_per_pool; idx++) { + if (((uint64_t)0x1 << idx) & q_msk) { + if (vfinfo[vf].tx_rate[idx] != tx_rate) + vfinfo[vf].tx_rate[idx] = tx_rate; + total_rate += tx_rate; + } + } + + if (total_rate > dev->data->dev_link.link_speed) { + /* + * Reset stored TX rate of the VF if it causes exceed + * link speed. + */ + memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate)); + return -EINVAL; + } + + /* Set RTTBCNRC of each queue/pool for vf X */ + for (; queue_idx <= queue_end; queue_idx++) { + if (0x1 & q_msk) + ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate); + q_msk = q_msk >> 1; + } + + return 0; +} + +static void +ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + __attribute__((unused)) uint32_t index, + __attribute__((unused)) uint32_t pool) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int diag; + + /* + * On a 82599 VF, adding again the same MAC addr is not an idempotent + * operation. Trap this case to avoid exhausting the [very limited] + * set of PF resources used to store VF MAC addresses. + */ + if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0) + return; + diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); + if (diag == 0) + return; + PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag); +} + +static void +ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr; + struct ether_addr *mac_addr; + uint32_t i; + int diag; + + /* + * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does + * not support the deletion of a given MAC address. + * Instead, it imposes to delete all MAC addresses, then to add again + * all MAC addresses with the exception of the one to be deleted. + */ + (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL); + + /* + * Add again all MAC addresses, with the exception of the deleted one + * and of the permanent MAC address. 
+ */ + for (i = 0, mac_addr = dev->data->mac_addrs; + i < hw->mac.num_rar_entries; i++, mac_addr++) { + /* Skip the deleted MAC address */ + if (i == index) + continue; + /* Skip NULL MAC addresses */ + if (is_zero_ether_addr(mac_addr)) + continue; + /* Skip the permanent MAC address */ + if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0) + continue; + diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); + if (diag != 0) + PMD_DRV_LOG(ERR, + "Adding again MAC address " + "%02x:%02x:%02x:%02x:%02x:%02x failed " + "diag=%d", + mac_addr->addr_bytes[0], + mac_addr->addr_bytes[1], + mac_addr->addr_bytes[2], + mac_addr->addr_bytes[3], + mac_addr->addr_bytes[4], + mac_addr->addr_bytes[5], + diag); + } +} + +static void +ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0); +} + +#define MAC_TYPE_FILTER_SUP(type) do {\ + if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\ + (type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\ + (type) != ixgbe_mac_X550EM_a)\ + return -ENOTSUP;\ +} while (0) + +static int +ixgbe_syn_filter_set(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter, + bool add) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t synqf; + + if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) + return -EINVAL; + + synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); + + if (add) { + if (synqf & IXGBE_SYN_FILTER_ENABLE) + return -EINVAL; + synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) & + IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE); + + if (filter->hig_pri) + synqf |= IXGBE_SYN_FILTER_SYNQFP; + else + synqf &= ~IXGBE_SYN_FILTER_SYNQFP; + } else { + if (!(synqf & IXGBE_SYN_FILTER_ENABLE)) + return -ENOENT; + synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE); + } + IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); + IXGBE_WRITE_FLUSH(hw); + return 0; +} + +static int +ixgbe_syn_filter_get(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); + + if (synqf & IXGBE_SYN_FILTER_ENABLE) { + filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 
1 : 0; + filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1); + return 0; + } + return -ENOENT; +} + +static int +ixgbe_syn_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + MAC_TYPE_FILTER_SUP(hw->mac.type); + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = ixgbe_syn_filter_set(dev, + (struct rte_eth_syn_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = ixgbe_syn_filter_set(dev, + (struct rte_eth_syn_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = ixgbe_syn_filter_get(dev, + (struct rte_eth_syn_filter *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op); + ret = -EINVAL; + break; + } + + return ret; +} + + +static inline enum ixgbe_5tuple_protocol +convert_protocol_type(uint8_t protocol_value) +{ + if (protocol_value == IPPROTO_TCP) + return IXGBE_FILTER_PROTOCOL_TCP; + else if (protocol_value == IPPROTO_UDP) + return IXGBE_FILTER_PROTOCOL_UDP; + else if (protocol_value == IPPROTO_SCTP) + return IXGBE_FILTER_PROTOCOL_SCTP; + else + return IXGBE_FILTER_PROTOCOL_NONE; +} + +/* + * add a 5tuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * index: the index the filter allocates. + * filter: ponter to the filter that will be added. + * rx_queue: the queue id the filter assigned to. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, + struct ixgbe_5tuple_filter *filter) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + int i, idx, shift; + uint32_t ftqf, sdpqf; + uint32_t l34timir = 0; + uint8_t mask = 0xff; + + /* + * look for an unused 5tuple filter index, + * and insert the filter to list. + */ + for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) { + idx = i / (sizeof(uint32_t) * NBBY); + shift = i % (sizeof(uint32_t) * NBBY); + if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) { + filter_info->fivetuple_mask[idx] |= 1 << shift; + filter->index = i; + TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, + filter, + entries); + break; + } + } + if (i >= IXGBE_MAX_FTQF_FILTERS) { + PMD_DRV_LOG(ERR, "5tuple filters are full."); + return -ENOSYS; + } + + sdpqf = (uint32_t)(filter->filter_info.dst_port << + IXGBE_SDPQF_DSTPORT_SHIFT); + sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT); + + ftqf = (uint32_t)(filter->filter_info.proto & + IXGBE_FTQF_PROTOCOL_MASK); + ftqf |= (uint32_t)((filter->filter_info.priority & + IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT); + if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. 
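+ * (Convention note: in ixgbe_5tuple_filter_info a mask of 0 means "match
+ * this field" and 1 means "ignore it"; ntuple_filter_to_5tuple() below maps
+ * the generic UINT32_MAX / 0 masks onto this inverted encoding.)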
*/ + mask &= IXGBE_FTQF_SOURCE_ADDR_MASK; + if (filter->filter_info.dst_ip_mask == 0) + mask &= IXGBE_FTQF_DEST_ADDR_MASK; + if (filter->filter_info.src_port_mask == 0) + mask &= IXGBE_FTQF_SOURCE_PORT_MASK; + if (filter->filter_info.dst_port_mask == 0) + mask &= IXGBE_FTQF_DEST_PORT_MASK; + if (filter->filter_info.proto_mask == 0) + mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK; + ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT; + ftqf |= IXGBE_FTQF_POOL_MASK_EN; + ftqf |= IXGBE_FTQF_QUEUE_ENABLE; + + IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip); + IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip); + IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf); + IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf); + + l34timir |= IXGBE_L34T_IMIR_RESERVE; + l34timir |= (uint32_t)(filter->queue << + IXGBE_L34T_IMIR_QUEUE_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir); + return 0; +} + +/* + * remove a 5tuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * filter: the pointer of the filter will be removed. + */ +static void +ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, + struct ixgbe_5tuple_filter *filter) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint16_t index = filter->index; + + filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &= + ~(1 << (index % (sizeof(uint32_t) * NBBY))); + TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); + rte_free(filter); + + IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0); +} + +static int +ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct ixgbe_hw *hw; + uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN)) + return -EINVAL; + + /* refuse mtu that requires the support of scattered packets when this + * feature has not been enabled before. */ + if (!dev->data->scattered_rx && + (max_frame + 2 * IXGBE_VLAN_TAG_SIZE > + dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) + return -EINVAL; + + /* + * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU + * request of the version 2.0 of the mailbox API. + * For now, use the IXGBE_VF_SET_LPE request of the version 1.0 + * of the mailbox API. 
+ * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers + * prior to 3.11.33 which contains the following change: + * "ixgbe: Enable jumbo frames support w/ SR-IOV" + */ + ixgbevf_rlpml_set_vf(hw, max_frame); + + /* update max frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame; + return 0; +} + +#define MAC_TYPE_FILTER_SUP_EXT(type) do {\ + if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\ + return -ENOTSUP;\ +} while (0) + +static inline struct ixgbe_5tuple_filter * +ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list, + struct ixgbe_5tuple_filter_info *key) +{ + struct ixgbe_5tuple_filter *it; + + TAILQ_FOREACH(it, filter_list, entries) { + if (memcmp(key, &it->filter_info, + sizeof(struct ixgbe_5tuple_filter_info)) == 0) { + return it; + } + } + return NULL; +} + +/* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info*/ +static inline int +ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, + struct ixgbe_5tuple_filter_info *filter_info) +{ + if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM || + filter->priority > IXGBE_5TUPLE_MAX_PRI || + filter->priority < IXGBE_5TUPLE_MIN_PRI) + return -EINVAL; + + switch (filter->dst_ip_mask) { + case UINT32_MAX: + filter_info->dst_ip_mask = 0; + filter_info->dst_ip = filter->dst_ip; + break; + case 0: + filter_info->dst_ip_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_ip mask."); + return -EINVAL; + } + + switch (filter->src_ip_mask) { + case UINT32_MAX: + filter_info->src_ip_mask = 0; + filter_info->src_ip = filter->src_ip; + break; + case 0: + filter_info->src_ip_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_ip mask."); + return -EINVAL; + } + + switch (filter->dst_port_mask) { + case UINT16_MAX: + filter_info->dst_port_mask = 0; + filter_info->dst_port = filter->dst_port; + break; + case 0: + filter_info->dst_port_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_port mask."); + return -EINVAL; + } + + switch (filter->src_port_mask) { + case UINT16_MAX: + filter_info->src_port_mask = 0; + filter_info->src_port = filter->src_port; + break; + case 0: + filter_info->src_port_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_port mask."); + return -EINVAL; + } + + switch (filter->proto_mask) { + case UINT8_MAX: + filter_info->proto_mask = 0; + filter_info->proto = + convert_protocol_type(filter->proto); + break; + case 0: + filter_info->proto_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid protocol mask."); + return -EINVAL; + } + + filter_info->priority = (uint8_t)filter->priority; + return 0; +} + +/* + * add or delete a ntuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * ntuple_filter: Pointer to struct rte_eth_ntuple_filter + * add: if true, add filter, if false, remove filter + * + * @return + * - On success, zero. + * - On failure, a negative value. 
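+ *
+ * Adding a filter that already exists fails with -EEXIST, deleting one that
+ * does not exist fails with -ENOENT, and -ENOSYS is returned once all
+ * IXGBE_MAX_FTQF_FILTERS hardware slots are in use.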
+ */ +static int +ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter, + bool add) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct ixgbe_5tuple_filter_info filter_5tuple; + struct ixgbe_5tuple_filter *filter; + int ret; + + if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { + PMD_DRV_LOG(ERR, "only 5tuple is supported."); + return -EINVAL; + } + + memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); + ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); + if (ret < 0) + return ret; + + filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, + &filter_5tuple); + if (filter != NULL && add) { + PMD_DRV_LOG(ERR, "filter exists."); + return -EEXIST; + } + if (filter == NULL && !add) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + return -ENOENT; + } + + if (add) { + filter = rte_zmalloc("ixgbe_5tuple_filter", + sizeof(struct ixgbe_5tuple_filter), 0); + if (filter == NULL) + return -ENOMEM; + (void)rte_memcpy(&filter->filter_info, + &filter_5tuple, + sizeof(struct ixgbe_5tuple_filter_info)); + filter->queue = ntuple_filter->queue; + ret = ixgbe_add_5tuple_filter(dev, filter); + if (ret < 0) { + rte_free(filter); + return ret; + } + } else + ixgbe_remove_5tuple_filter(dev, filter); + + return 0; +} + +/* + * get a ntuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * ntuple_filter: Pointer to struct rte_eth_ntuple_filter + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +ixgbe_get_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct ixgbe_5tuple_filter_info filter_5tuple; + struct ixgbe_5tuple_filter *filter; + int ret; + + if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { + PMD_DRV_LOG(ERR, "only 5tuple is supported."); + return -EINVAL; + } + + memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); + ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); + if (ret < 0) + return ret; + + filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, + &filter_5tuple); + if (filter == NULL) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + return -ENOENT; + } + ntuple_filter->queue = filter->queue; + return 0; +} + +/* + * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter. + * @dev: pointer to rte_eth_dev structure + * @filter_op:operation will be taken. + * @arg: a pointer to specific structure corresponding to the filter_op + * + * @return + * - On success, zero. + * - On failure, a negative value. 
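+ *
+ * Hedged usage sketch: this handler is normally reached through the
+ * generic ethdev filter-control API rather than called directly, e.g.
+ * (port_id and an initialized rte_eth_ntuple_filter supplied by the
+ * caller):
+ *
+ *   ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
+ *                                 RTE_ETH_FILTER_ADD, &ntuple_filter);
+ *
+ * On MACs other than 82599/X540 the MAC_TYPE_FILTER_SUP_EXT() check
+ * below makes it return -ENOTSUP.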
+ */ +static int +ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = ixgbe_add_del_ntuple_filter(dev, + (struct rte_eth_ntuple_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = ixgbe_add_del_ntuple_filter(dev, + (struct rte_eth_ntuple_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = ixgbe_get_ntuple_filter(dev, + (struct rte_eth_ntuple_filter *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; +} + +static inline int +ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info, + uint16_t ethertype) +{ + int i; + + for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { + if (filter_info->ethertype_filters[i] == ethertype && + (filter_info->ethertype_mask & (1 << i))) + return i; + } + return -1; +} + +static inline int +ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info, + uint16_t ethertype) +{ + int i; + + for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { + if (!(filter_info->ethertype_mask & (1 << i))) { + filter_info->ethertype_mask |= 1 << i; + filter_info->ethertype_filters[i] = ethertype; + return i; + } + } + return -1; +} + +static inline int +ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info, + uint8_t idx) +{ + if (idx >= IXGBE_MAX_ETQF_FILTERS) + return -1; + filter_info->ethertype_mask &= ~(1 << idx); + filter_info->ethertype_filters[idx] = 0; + return idx; +} + +static int +ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter, + bool add) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint32_t etqf = 0; + uint32_t etqs = 0; + int ret; + + if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) + return -EINVAL; + + if (filter->ether_type == ETHER_TYPE_IPv4 || + filter->ether_type == ETHER_TYPE_IPv6) { + PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" + " ethertype filter.", filter->ether_type); + return -EINVAL; + } + + if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { + PMD_DRV_LOG(ERR, "mac compare is unsupported."); + return -EINVAL; + } + if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { + PMD_DRV_LOG(ERR, "drop option is unsupported."); + return -EINVAL; + } + + ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); + if (ret >= 0 && add) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", + filter->ether_type); + return -EEXIST; + } + if (ret < 0 && !add) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", + filter->ether_type); + return -ENOENT; + } + + if (add) { + ret = ixgbe_ethertype_filter_insert(filter_info, + filter->ether_type); + if (ret < 0) { + PMD_DRV_LOG(ERR, "ethertype filters are full."); + return -ENOSYS; + } + etqf = IXGBE_ETQF_FILTER_EN; + etqf |= (uint32_t)filter->ether_type; + etqs |= (uint32_t)((filter->queue << + IXGBE_ETQS_RX_QUEUE_SHIFT) & + IXGBE_ETQS_RX_QUEUE); + etqs |= IXGBE_ETQS_QUEUE_EN; + } else { + ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret); + if (ret < 0) + return -ENOSYS; 
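+		/*
+		 * Note: on the delete path etqf/etqs keep their initial
+		 * value of 0, so the ETQF/ETQS writes that follow clear
+		 * the hardware entry for this filter index.
+		 */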
+ } + IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf); + IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +static int +ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint32_t etqf, etqs; + int ret; + + ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); + if (ret < 0) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", + filter->ether_type); + return -ENOENT; + } + + etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret)); + if (etqf & IXGBE_ETQF_FILTER_EN) { + etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret)); + filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE; + filter->flags = 0; + filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >> + IXGBE_ETQS_RX_QUEUE_SHIFT; + return 0; + } + return -ENOENT; +} + +/* + * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter. + * @dev: pointer to rte_eth_dev structure + * @filter_op:operation will be taken. + * @arg: a pointer to specific structure corresponding to the filter_op + */ +static int +ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + MAC_TYPE_FILTER_SUP(hw->mac.type); + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = ixgbe_add_del_ethertype_filter(dev, + (struct rte_eth_ethertype_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = ixgbe_add_del_ethertype_filter(dev, + (struct rte_eth_ethertype_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = ixgbe_get_ethertype_filter(dev, + (struct rte_eth_ethertype_filter *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; +} + +static int +ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + int ret = -EINVAL; + + switch (filter_type) { + case RTE_ETH_FILTER_NTUPLE: + ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_ETHERTYPE: + ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_SYN: + ret = ixgbe_syn_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_FDIR: + ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_L2_TUNNEL: + ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg); + break; + default: + PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", + filter_type); + break; + } + + return ret; +} + +static u8 * +ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw, + u8 **mc_addr_ptr, u32 *vmdq) +{ + u8 *mc_addr; + + *vmdq = 0; + mc_addr = *mc_addr_ptr; + *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr)); + return mc_addr; +} + +static int +ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, + struct ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +{ + struct ixgbe_hw *hw; + u8 *mc_addr_list; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + mc_addr_list = (u8 *)mc_addr_set; + return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr, + 
ixgbe_dev_addr_list_itr, TRUE); +} + +static uint64_t +ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t systime_cycles; + + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */ + systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); + systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) + * NSEC_PER_SEC; + break; + default: + systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); + systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) + << 32; + } + + return systime_cycles; +} + +static uint64_t +ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t rx_tstamp_cycles; + + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + /* RXSTMPL stores ns and RXSTMPH stores seconds. */ + rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); + rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) + * NSEC_PER_SEC; + break; + default: + /* RXSTMPL stores ns and RXSTMPH stores seconds. */ + rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); + rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) + << 32; + } + + return rx_tstamp_cycles; +} + +static uint64_t +ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t tx_tstamp_cycles; + + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + /* TXSTMPL stores ns and TXSTMPH stores seconds. */ + tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); + tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) + * NSEC_PER_SEC; + break; + default: + /* TXSTMPL stores ns and TXSTMPH stores seconds. */ + tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); + tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) + << 32; + } + + return tx_tstamp_cycles; +} + +static void +ixgbe_start_timecounters(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; + struct rte_eth_link link; + uint32_t incval = 0; + uint32_t shift = 0; + + /* Get current link speed. */ + memset(&link, 0, sizeof(link)); + ixgbe_dev_link_update(dev, 1); + rte_ixgbe_dev_atomic_read_link_status(dev, &link); + + switch (link.link_speed) { + case ETH_SPEED_NUM_100M: + incval = IXGBE_INCVAL_100; + shift = IXGBE_INCVAL_SHIFT_100; + break; + case ETH_SPEED_NUM_1G: + incval = IXGBE_INCVAL_1GB; + shift = IXGBE_INCVAL_SHIFT_1GB; + break; + case ETH_SPEED_NUM_10G: + default: + incval = IXGBE_INCVAL_10GB; + shift = IXGBE_INCVAL_SHIFT_10GB; + break; + } + + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + /* Independent of link speed. */ + incval = 1; + /* Cycles read will be interpreted as ns. */ + shift = 0; + /* Fall-through */ + case ixgbe_mac_X540: + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); + break; + case ixgbe_mac_82599EB: + incval >>= IXGBE_INCVAL_SHIFT_82599; + shift -= IXGBE_INCVAL_SHIFT_82599; + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, + (1 << IXGBE_INCPER_SHIFT_82599) | incval); + break; + default: + /* Not supported. 
*/ + return; + } + + memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); + memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); + memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); + + adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; + adapter->systime_tc.cc_shift = shift; + adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; + + adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; + adapter->rx_tstamp_tc.cc_shift = shift; + adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; + + adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; + adapter->tx_tstamp_tc.cc_shift = shift; + adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; +} + +static int +ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) +{ + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; + + adapter->systime_tc.nsec += delta; + adapter->rx_tstamp_tc.nsec += delta; + adapter->tx_tstamp_tc.nsec += delta; + + return 0; +} + +static int +ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) +{ + uint64_t ns; + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; + + ns = rte_timespec_to_ns(ts); + /* Set the timecounters to a new value. */ + adapter->systime_tc.nsec = ns; + adapter->rx_tstamp_tc.nsec = ns; + adapter->tx_tstamp_tc.nsec = ns; + + return 0; +} + +static int +ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) +{ + uint64_t ns, systime_cycles; + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; + + systime_cycles = ixgbe_read_systime_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); + *ts = rte_ns_to_timespec(ns); + + return 0; +} + +static int +ixgbe_timesync_enable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t tsync_ctl; + uint32_t tsauxc; + + /* Stop the timesync system time. */ + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0); + /* Reset the timesync system time value. */ + IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0); + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0); + + /* Enable system time for platforms where it isn't on by default. */ + tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); + tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME; + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); + + ixgbe_start_timecounters(dev); + + /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), + (ETHER_TYPE_1588 | + IXGBE_ETQF_FILTER_EN | + IXGBE_ETQF_1588)); + + /* Enable timestamping of received PTP packets. */ + tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED; + IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); + + /* Enable timestamping of transmitted PTP packets. */ + tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); + tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED; + IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); + + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +static int +ixgbe_timesync_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t tsync_ctl; + + /* Disable timestamping of transmitted PTP packets. */ + tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); + tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED; + IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); + + /* Disable timestamping of received PTP packets. 
*/ + tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED; + IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); + + /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); + + /* Stop incrementating the System Time registers. */ + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0); + + return 0; +} + +static int +ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp, + uint32_t flags __rte_unused) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; + uint32_t tsync_rxctl; + uint64_t rx_tstamp_cycles; + uint64_t ns; + + tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0) + return -EINVAL; + + rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); + *timestamp = rte_ns_to_timespec(ns); + + return 0; +} + +static int +ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; + uint32_t tsync_txctl; + uint64_t tx_tstamp_cycles; + uint64_t ns; + + tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); + if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0) + return -EINVAL; + + tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); + *timestamp = rte_ns_to_timespec(ns); + + return 0; +} + +static int +ixgbe_get_reg_length(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int count = 0; + int g_ind = 0; + const struct reg_info *reg_group; + const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? + ixgbe_regs_mac_82598EB : ixgbe_regs_others; + + while ((reg_group = reg_set[g_ind++])) + count += ixgbe_regs_group_count(reg_group); + + return count; +} + +static int +ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused) +{ + int count = 0; + int g_ind = 0; + const struct reg_info *reg_group; + + while ((reg_group = ixgbevf_regs[g_ind++])) + count += ixgbe_regs_group_count(reg_group); + + return count; +} + +static int +ixgbe_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t *data = regs->data; + int g_ind = 0; + int count = 0; + const struct reg_info *reg_group; + const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 
+ ixgbe_regs_mac_82598EB : ixgbe_regs_others; + + /* Support only full register dump */ + if ((regs->length == 0) || + (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) { + regs->version = hw->mac.type << 24 | hw->revision_id << 16 | + hw->device_id; + while ((reg_group = reg_set[g_ind++])) + count += ixgbe_read_regs_group(dev, &data[count], + reg_group); + return 0; + } + + return -ENOTSUP; +} + +static int +ixgbevf_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t *data = regs->data; + int g_ind = 0; + int count = 0; + const struct reg_info *reg_group; + + /* Support only full register dump */ + if ((regs->length == 0) || + (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) { + regs->version = hw->mac.type << 24 | hw->revision_id << 16 | + hw->device_id; + while ((reg_group = ixgbevf_regs[g_ind++])) + count += ixgbe_read_regs_group(dev, &data[count], + reg_group); + return 0; + } + + return -ENOTSUP; +} + +static int +ixgbe_get_eeprom_length(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Return unit is byte count */ + return hw->eeprom.word_size * 2; +} + +static int +ixgbe_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *in_eeprom) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + uint16_t *data = in_eeprom->data; + int first, length; + + first = in_eeprom->offset >> 1; + length = in_eeprom->length >> 1; + if ((first > hw->eeprom.word_size) || + ((first + length) > hw->eeprom.word_size)) + return -EINVAL; + + in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + return eeprom->ops.read_buffer(hw, first, length, data); +} + +static int +ixgbe_set_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *in_eeprom) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + uint16_t *data = in_eeprom->data; + int first, length; + + first = in_eeprom->offset >> 1; + length = in_eeprom->length >> 1; + if ((first > hw->eeprom.word_size) || + ((first + length) > hw->eeprom.word_size)) + return -EINVAL; + + in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + return eeprom->ops.write_buffer(hw, first, length, data); +} + +uint16_t +ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) { + switch (mac_type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + return ETH_RSS_RETA_SIZE_512; + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: + return ETH_RSS_RETA_SIZE_64; + default: + return ETH_RSS_RETA_SIZE_128; + } +} + +uint32_t +ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) { + switch (mac_type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + if (reta_idx < ETH_RSS_RETA_SIZE_128) + return IXGBE_RETA(reta_idx >> 2); + else + return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2); + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: + return IXGBE_VFRETA(reta_idx >> 2); + default: + return IXGBE_RETA(reta_idx >> 2); + } +} + +uint32_t +ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) { + switch (mac_type) { + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: + return IXGBE_VFMRQC; + default: + return IXGBE_MRQC; + } +} + +uint32_t +ixgbe_rssrk_reg_get(enum 
ixgbe_mac_type mac_type, uint8_t i) { + switch (mac_type) { + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: + return IXGBE_VFRSSRK(i); + default: + return IXGBE_RSSRK(i); + } +} + +bool +ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) { + switch (mac_type) { + case ixgbe_mac_82599_vf: + case ixgbe_mac_X540_vf: + return 0; + default: + return 1; + } +} + +static int +ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, + struct rte_eth_dcb_info *dcb_info) +{ + struct ixgbe_dcb_config *dcb_config = + IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); + struct ixgbe_dcb_tc_config *tc; + uint8_t i, j; + + if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG) + dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs; + else + dcb_info->nb_tcs = 1; + + if (dcb_config->vt_mode) { /* vt is enabled*/ + struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = + &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) + dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i]; + for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) { + for (j = 0; j < dcb_info->nb_tcs; j++) { + dcb_info->tc_queue.tc_rxq[i][j].base = + i * dcb_info->nb_tcs + j; + dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1; + dcb_info->tc_queue.tc_txq[i][j].base = + i * dcb_info->nb_tcs + j; + dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1; + } + } + } else { /* vt is disabled*/ + struct rte_eth_dcb_rx_conf *rx_conf = + &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) + dcb_info->prio_tc[i] = rx_conf->dcb_tc[i]; + if (dcb_info->nb_tcs == ETH_4_TCS) { + for (i = 0; i < dcb_info->nb_tcs; i++) { + dcb_info->tc_queue.tc_rxq[0][i].base = i * 32; + dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; + } + dcb_info->tc_queue.tc_txq[0][0].base = 0; + dcb_info->tc_queue.tc_txq[0][1].base = 64; + dcb_info->tc_queue.tc_txq[0][2].base = 96; + dcb_info->tc_queue.tc_txq[0][3].base = 112; + dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64; + dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; + dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; + dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; + } else if (dcb_info->nb_tcs == ETH_8_TCS) { + for (i = 0; i < dcb_info->nb_tcs; i++) { + dcb_info->tc_queue.tc_rxq[0][i].base = i * 16; + dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; + } + dcb_info->tc_queue.tc_txq[0][0].base = 0; + dcb_info->tc_queue.tc_txq[0][1].base = 32; + dcb_info->tc_queue.tc_txq[0][2].base = 64; + dcb_info->tc_queue.tc_txq[0][3].base = 80; + dcb_info->tc_queue.tc_txq[0][4].base = 96; + dcb_info->tc_queue.tc_txq[0][5].base = 104; + dcb_info->tc_queue.tc_txq[0][6].base = 112; + dcb_info->tc_queue.tc_txq[0][7].base = 120; + dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32; + dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; + dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; + dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; + dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8; + dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8; + dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8; + dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8; + } + } + for (i = 0; i < dcb_info->nb_tcs; i++) { + tc = &dcb_config->tc_config[i]; + dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent; + } + return 0; +} + +/* Update e-tag ether type */ +static int +ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw, + uint16_t ether_type) +{ + uint32_t etag_etype; + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } 
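+	/*
+	 * Hedged usage sketch: this helper is reached from
+	 * ixgbe_dev_l2_tunnel_eth_type_conf() below with a configuration
+	 * such as (values illustrative; 0x893F is the IEEE 802.1BR E-Tag
+	 * EtherType):
+	 *
+	 *   struct rte_eth_l2_tunnel_conf conf = {
+	 *       .l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
+	 *       .ether_type     = 0x893F,
+	 *   };
+	 *
+	 * Only the EtherType field of ETAG_ETYPE is rewritten here; the
+	 * VALID bit is managed by ixgbe_e_tag_enable()/_disable().
+	 */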
+ + etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); + etag_etype &= ~IXGBE_ETAG_ETYPE_MASK; + etag_etype |= ether_type; + IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/* Config l2 tunnel ether type */ +static int +ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) +{ + int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (l2_tunnel == NULL) + return -EINVAL; + + switch (l2_tunnel->l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Enable e-tag tunnel */ +static int +ixgbe_e_tag_enable(struct ixgbe_hw *hw) +{ + uint32_t etag_etype; + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); + etag_etype |= IXGBE_ETAG_ETYPE_VALID; + IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/* Enable l2 tunnel */ +static int +ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) +{ + int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + switch (l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_enable(hw); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Disable e-tag tunnel */ +static int +ixgbe_e_tag_disable(struct ixgbe_hw *hw) +{ + uint32_t etag_etype; + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); + etag_etype &= ~IXGBE_ETAG_ETYPE_VALID; + IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/* Disable l2 tunnel */ +static int +ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) +{ + int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + switch (l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_disable(hw); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +static int +ixgbe_e_tag_filter_del(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) +{ + int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t i, rar_entries; + uint32_t rar_low, rar_high; + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + rar_entries = ixgbe_get_num_rx_addrs(hw); + + for (i = 1; i < rar_entries; i++) { + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); + rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(i)); + if ((rar_high & IXGBE_RAH_AV) && + (rar_high & IXGBE_RAH_ADTYPE) && + ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) == + l2_tunnel->tunnel_id)) { + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); + + ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL); + + return ret; + } + } + + return ret; +} + +static int +ixgbe_e_tag_filter_add(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) +{ + int ret = 0; + struct ixgbe_hw *hw 
= IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t i, rar_entries; + uint32_t rar_low, rar_high; + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + /* One entry for one tunnel. Try to remove potential existing entry. */ + ixgbe_e_tag_filter_del(dev, l2_tunnel); + + rar_entries = ixgbe_get_num_rx_addrs(hw); + + for (i = 1; i < rar_entries; i++) { + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); + if (rar_high & IXGBE_RAH_AV) { + continue; + } else { + ixgbe_set_vmdq(hw, i, l2_tunnel->pool); + rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE; + rar_low = l2_tunnel->tunnel_id; + + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low); + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high); + + return ret; + } + } + + PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full." + " Please remove a rule before adding a new one."); + return -EINVAL; +} + +/* Add l2 tunnel filter */ +static int +ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) +{ + int ret = 0; + + switch (l2_tunnel->l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_filter_add(dev, l2_tunnel); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Delete l2 tunnel filter */ +static int +ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) +{ + int ret = 0; + + switch (l2_tunnel->l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_filter_del(dev, l2_tunnel); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/** + * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter. + * @dev: pointer to rte_eth_dev structure + * @filter_op:operation will be taken. 
+ * @arg: a pointer to specific structure corresponding to the filter_op + */ +static int +ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + int ret = 0; + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = ixgbe_dev_l2_tunnel_filter_add + (dev, + (struct rte_eth_l2_tunnel_conf *)arg); + break; + case RTE_ETH_FILTER_DELETE: + ret = ixgbe_dev_l2_tunnel_filter_del + (dev, + (struct rte_eth_l2_tunnel_conf *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; +} + +static int +ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en) +{ + int ret = 0; + uint32_t ctrl; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; + if (en) + ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG; + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); + + return ret; +} + +/* Enable l2 tunnel forwarding */ +static int +ixgbe_dev_l2_tunnel_forwarding_enable + (struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) +{ + int ret = 0; + + switch (l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_forwarding_en_dis(dev, 1); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Disable l2 tunnel forwarding */ +static int +ixgbe_dev_l2_tunnel_forwarding_disable + (struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) +{ + int ret = 0; + + switch (l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_forwarding_en_dis(dev, 0); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +static int +ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel, + bool en) +{ + int ret = 0; + uint32_t vmtir, vmvir; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (l2_tunnel->vf_id >= dev->pci_dev->max_vfs) { + PMD_DRV_LOG(ERR, + "VF id %u should be less than %u", + l2_tunnel->vf_id, + dev->pci_dev->max_vfs); + return -EINVAL; + } + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + if (en) + vmtir = l2_tunnel->tunnel_id; + else + vmtir = 0; + + IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir); + + vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id)); + vmvir &= ~IXGBE_VMVIR_TAGA_MASK; + if (en) + vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT; + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir); + + return ret; +} + +/* Enable l2 tunnel tag insertion */ +static int +ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) +{ + int ret = 0; + + switch (l2_tunnel->l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Disable l2 tunnel tag insertion */ +static int +ixgbe_dev_l2_tunnel_insertion_disable + (struct 
rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) +{ + int ret = 0; + + switch (l2_tunnel->l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +static int +ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev, + bool en) +{ + int ret = 0; + uint32_t qde; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + qde = IXGBE_READ_REG(hw, IXGBE_QDE); + if (en) + qde |= IXGBE_QDE_STRIP_TAG; + else + qde &= ~IXGBE_QDE_STRIP_TAG; + qde &= ~IXGBE_QDE_READ; + qde |= IXGBE_QDE_WRITE; + IXGBE_WRITE_REG(hw, IXGBE_QDE, qde); + + return ret; +} + +/* Enable l2 tunnel tag stripping */ +static int +ixgbe_dev_l2_tunnel_stripping_enable + (struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) +{ + int ret = 0; + + switch (l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_stripping_en_dis(dev, 1); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Disable l2 tunnel tag stripping */ +static int +ixgbe_dev_l2_tunnel_stripping_disable + (struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) +{ + int ret = 0; + + switch (l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_stripping_en_dis(dev, 0); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Enable/disable l2 tunnel offload functions */ +static int +ixgbe_dev_l2_tunnel_offload_set + (struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel, + uint32_t mask, + uint8_t en) +{ + int ret = 0; + + if (l2_tunnel == NULL) + return -EINVAL; + + ret = -EINVAL; + if (mask & ETH_L2_TUNNEL_ENABLE_MASK) { + if (en) + ret = ixgbe_dev_l2_tunnel_enable( + dev, + l2_tunnel->l2_tunnel_type); + else + ret = ixgbe_dev_l2_tunnel_disable( + dev, + l2_tunnel->l2_tunnel_type); + } + + if (mask & ETH_L2_TUNNEL_INSERTION_MASK) { + if (en) + ret = ixgbe_dev_l2_tunnel_insertion_enable( + dev, + l2_tunnel); + else + ret = ixgbe_dev_l2_tunnel_insertion_disable( + dev, + l2_tunnel); + } + + if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) { + if (en) + ret = ixgbe_dev_l2_tunnel_stripping_enable( + dev, + l2_tunnel->l2_tunnel_type); + else + ret = ixgbe_dev_l2_tunnel_stripping_disable( + dev, + l2_tunnel->l2_tunnel_type); + } + + if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) { + if (en) + ret = ixgbe_dev_l2_tunnel_forwarding_enable( + dev, + l2_tunnel->l2_tunnel_type); + else + ret = ixgbe_dev_l2_tunnel_forwarding_disable( + dev, + l2_tunnel->l2_tunnel_type); + } + + return ret; +} + +static int +ixgbe_update_vxlan_port(struct ixgbe_hw *hw, + uint16_t port) +{ + IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/* There's only one register for VxLAN UDP port. + * So, we cannot add several ports. Will update it. + */ +static int +ixgbe_add_vxlan_port(struct ixgbe_hw *hw, + uint16_t port) +{ + if (port == 0) { + PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed."); + return -EINVAL; + } + + return ixgbe_update_vxlan_port(hw, port); +} + +/* We cannot delete the VxLAN port. For there's a register for VxLAN + * UDP port, it must have a value. + * So, will reset it to the original value 0. 
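+ *
+ * Hedged behaviour sketch (port numbers illustrative; 4789 is the
+ * IANA-assigned VXLAN port):
+ *   - ixgbe_add_vxlan_port(hw, 4789) programs VXLANCTRL, silently
+ *     replacing whatever port was configured before;
+ *   - ixgbe_del_vxlan_port(hw, 4790) then fails with -EINVAL, because
+ *     4790 is not the currently programmed port;
+ *   - ixgbe_del_vxlan_port(hw, 4789) resets VXLANCTRL back to 0.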
+ */ +static int +ixgbe_del_vxlan_port(struct ixgbe_hw *hw, + uint16_t port) +{ + uint16_t cur_port; + + cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL); + + if (cur_port != port) { + PMD_DRV_LOG(ERR, "Port %u does not exist.", port); + return -EINVAL; + } + + return ixgbe_update_vxlan_port(hw, 0); +} + +/* Add UDP tunneling port */ +static int +ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel) +{ + int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + if (udp_tunnel == NULL) + return -EINVAL; + + switch (udp_tunnel->prot_type) { + case RTE_TUNNEL_TYPE_VXLAN: + ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port); + break; + + case RTE_TUNNEL_TYPE_GENEVE: + case RTE_TUNNEL_TYPE_TEREDO: + PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); + ret = -EINVAL; + break; + + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Remove UDP tunneling port */ +static int +ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel) +{ + int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + if (udp_tunnel == NULL) + return -EINVAL; + + switch (udp_tunnel->prot_type) { + case RTE_TUNNEL_TYPE_VXLAN: + ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port); + break; + case RTE_TUNNEL_TYPE_GENEVE: + case RTE_TUNNEL_TYPE_TEREDO: + PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); + ret = -EINVAL; + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/* ixgbevf_update_xcast_mode - Update Multicast mode + * @hw: pointer to the HW structure + * @netdev: pointer to net device structure + * @xcast_mode: new multicast mode + * + * Updates the Multicast Mode of VF. 
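+ *
+ * Only mailbox API version 1.2 (ixgbe_mbox_api_12) supports this
+ * request; with an older PF the function returns -EOPNOTSUPP, and a
+ * NACKed reply is reported as -EPERM.  The allmulticast ethdev ops
+ * below use it as, for example:
+ *
+ *   ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);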
+ */ +static int ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, + int xcast_mode) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[2]; + s32 err; + + switch (hw->api_version) { + case ixgbe_mbox_api_12: + break; + default: + return -EOPNOTSUPP; + } + + msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE; + msgbuf[1] = xcast_mode; + + err = mbx->ops.write_posted(hw, msgbuf, 2, 0); + if (err) + return err; + + err = mbx->ops.read_posted(hw, msgbuf, 2, 0); + if (err) + return err; + + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK)) + return -EPERM; + + return 0; +} + +static void +ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI); +} + +static void +ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE); +} + +static struct rte_driver rte_ixgbe_driver = { + .type = PMD_PDEV, + .init = rte_ixgbe_pmd_init, +}; + +static struct rte_driver rte_ixgbevf_driver = { + .type = PMD_PDEV, + .init = rte_ixgbevf_pmd_init, +}; + +PMD_REGISTER_DRIVER(rte_ixgbe_driver); +PMD_REGISTER_DRIVER(rte_ixgbevf_driver); diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h new file mode 100644 index 00000000..4ff6338e --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_ethdev.h @@ -0,0 +1,445 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _IXGBE_ETHDEV_H_ +#define _IXGBE_ETHDEV_H_ +#include "base/ixgbe_dcb.h" +#include "base/ixgbe_dcb_82599.h" +#include "base/ixgbe_dcb_82598.h" +#include "ixgbe_bypass.h" +#include <rte_time.h> + +/* need update link, bit flag */ +#define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0) +#define IXGBE_FLAG_MAILBOX (uint32_t)(1 << 1) +#define IXGBE_FLAG_PHY_INTERRUPT (uint32_t)(1 << 2) + +/* + * Defines that were not part of ixgbe_type.h as they are not used by the + * FreeBSD driver. + */ +#define IXGBE_ADVTXD_MAC_1588 0x00080000 /* IEEE1588 Timestamp packet */ +#define IXGBE_RXD_STAT_TMST 0x10000 /* Timestamped Packet indication */ +#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* L4 Packet TYPE, resvd */ +#define IXGBE_RXDADV_ERR_CKSUM_BIT 30 +#define IXGBE_RXDADV_ERR_CKSUM_MSK 3 +#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Bit shift for l2_len */ +#define IXGBE_NB_STAT_MAPPING_REGS 32 +#define IXGBE_EXTENDED_VLAN (uint32_t)(1 << 26) /* EXTENDED VLAN ENABLE */ +#define IXGBE_VFTA_SIZE 128 +#define IXGBE_VLAN_TAG_SIZE 4 +#define IXGBE_MAX_RX_QUEUE_NUM 128 +#define IXGBE_MAX_INTR_QUEUE_NUM 15 +#define IXGBE_VMDQ_DCB_NB_QUEUES IXGBE_MAX_RX_QUEUE_NUM +#define IXGBE_DCB_NB_QUEUES IXGBE_MAX_RX_QUEUE_NUM +#define IXGBE_NONE_MODE_TX_NB_QUEUES 64 + +#ifndef NBBY +#define NBBY 8 /* number of bits in a byte */ +#endif +#define IXGBE_HWSTRIP_BITMAP_SIZE (IXGBE_MAX_RX_QUEUE_NUM / (sizeof(uint32_t) * NBBY)) + +/* EITR Inteval is in 2048ns uinits for 1G and 10G link */ +#define IXGBE_EITR_INTERVAL_UNIT_NS 2048 +#define IXGBE_EITR_ITR_INT_SHIFT 3 +#define IXGBE_EITR_INTERVAL_US(us) \ + (((us) * 1000 / IXGBE_EITR_INTERVAL_UNIT_NS << IXGBE_EITR_ITR_INT_SHIFT) & \ + IXGBE_EITR_ITR_INT_MASK) + + +/* Loopback operation modes */ +/* 82599 specific loopback operation types */ +#define IXGBE_LPBK_82599_NONE 0x0 /* Default value. Loopback is disabled. */ +#define IXGBE_LPBK_82599_TX_RX 0x1 /* Tx->Rx loopback operation is enabled. */ + +#define IXGBE_MAX_JUMBO_FRAME_SIZE 0x2600 /* Maximum Jumbo frame size. */ + +#define IXGBE_RTTBCNRC_RF_INT_MASK_BASE 0x000003FF +#define IXGBE_RTTBCNRC_RF_INT_MASK_M \ + (IXGBE_RTTBCNRC_RF_INT_MASK_BASE << IXGBE_RTTBCNRC_RF_INT_SHIFT) + +#define IXGBE_MAX_QUEUE_NUM_PER_VF 8 + +#define IXGBE_SYN_FILTER_ENABLE 0x00000001 /* syn filter enable field */ +#define IXGBE_SYN_FILTER_QUEUE 0x000000FE /* syn filter queue field */ +#define IXGBE_SYN_FILTER_QUEUE_SHIFT 1 /* syn filter queue field shift */ +#define IXGBE_SYN_FILTER_SYNQFP 0x80000000 /* syn filter SYNQFP */ + +#define IXGBE_ETQF_UP 0x00070000 /* ethertype filter priority field */ +#define IXGBE_ETQF_SHIFT 16 +#define IXGBE_ETQF_UP_EN 0x00080000 +#define IXGBE_ETQF_ETHERTYPE 0x0000FFFF /* ethertype filter ethertype field */ +#define IXGBE_ETQF_MAX_PRI 7 + +#define IXGBE_SDPQF_DSTPORT 0xFFFF0000 /* dst port field */ +#define IXGBE_SDPQF_DSTPORT_SHIFT 16 /* dst port field shift */ +#define IXGBE_SDPQF_SRCPORT 0x0000FFFF /* src port field */ + +#define IXGBE_L34T_IMIR_SIZE_BP 0x00001000 +#define IXGBE_L34T_IMIR_RESERVE 0x00080000 /* bit 13 to 19 must be set to 1000000b. 
*/ +#define IXGBE_L34T_IMIR_LLI 0x00100000 +#define IXGBE_L34T_IMIR_QUEUE 0x0FE00000 +#define IXGBE_L34T_IMIR_QUEUE_SHIFT 21 +#define IXGBE_5TUPLE_MAX_PRI 7 +#define IXGBE_5TUPLE_MIN_PRI 1 + +#define IXGBE_RSS_OFFLOAD_ALL ( \ + ETH_RSS_IPV4 | \ + ETH_RSS_NONFRAG_IPV4_TCP | \ + ETH_RSS_NONFRAG_IPV4_UDP | \ + ETH_RSS_IPV6 | \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + ETH_RSS_NONFRAG_IPV6_UDP | \ + ETH_RSS_IPV6_EX | \ + ETH_RSS_IPV6_TCP_EX | \ + ETH_RSS_IPV6_UDP_EX) + +#define IXGBE_VF_IRQ_ENABLE_MASK 3 /* vf irq enable mask */ +#define IXGBE_VF_MAXMSIVECTOR 1 + +#define IXGBE_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET +#define IXGBE_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET + +/* + * Information about the fdir mode. + */ + +struct ixgbe_hw_fdir_mask { + uint16_t vlan_tci_mask; + uint32_t src_ipv4_mask; + uint32_t dst_ipv4_mask; + uint16_t src_ipv6_mask; + uint16_t dst_ipv6_mask; + uint16_t src_port_mask; + uint16_t dst_port_mask; + uint16_t flex_bytes_mask; + uint8_t mac_addr_byte_mask; + uint32_t tunnel_id_mask; + uint8_t tunnel_type_mask; +}; + +struct ixgbe_hw_fdir_info { + struct ixgbe_hw_fdir_mask mask; + uint8_t flex_bytes_offset; + uint16_t collision; + uint16_t free; + uint16_t maxhash; + uint8_t maxlen; + uint64_t add; + uint64_t remove; + uint64_t f_add; + uint64_t f_remove; +}; + +/* structure for interrupt relative data */ +struct ixgbe_interrupt { + uint32_t flags; + uint32_t mask; +}; + +struct ixgbe_stat_mapping_registers { + uint32_t tqsm[IXGBE_NB_STAT_MAPPING_REGS]; + uint32_t rqsmr[IXGBE_NB_STAT_MAPPING_REGS]; +}; + +struct ixgbe_vfta { + uint32_t vfta[IXGBE_VFTA_SIZE]; +}; + +struct ixgbe_hwstrip { + uint32_t bitmap[IXGBE_HWSTRIP_BITMAP_SIZE]; +}; + +/* + * VF data which used by PF host only + */ +#define IXGBE_MAX_VF_MC_ENTRIES 30 +#define IXGBE_MAX_MR_RULE_ENTRIES 4 /* number of mirroring rules supported */ +#define IXGBE_MAX_UTA 128 + +struct ixgbe_uta_info { + uint8_t uc_filter_type; + uint16_t uta_in_use; + uint32_t uta_shadow[IXGBE_MAX_UTA]; +}; + +#define IXGBE_MAX_MIRROR_RULES 4 /* Maximum nb. of mirror rules. */ + +struct ixgbe_mirror_info { + struct rte_eth_mirror_conf mr_conf[IXGBE_MAX_MIRROR_RULES]; + /**< store PF mirror rules configuration*/ +}; + +struct ixgbe_vf_info { + uint8_t vf_mac_addresses[ETHER_ADDR_LEN]; + uint16_t vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES]; + uint16_t num_vf_mc_hashes; + uint16_t default_vf_vlan_id; + uint16_t vlans_enabled; + bool clear_to_send; + uint16_t tx_rate[IXGBE_MAX_QUEUE_NUM_PER_VF]; + uint16_t vlan_count; + uint8_t spoofchk_enabled; + uint8_t api_version; +}; + +/* + * Possible l4type of 5tuple filters. + */ +enum ixgbe_5tuple_protocol { + IXGBE_FILTER_PROTOCOL_TCP = 0, + IXGBE_FILTER_PROTOCOL_UDP, + IXGBE_FILTER_PROTOCOL_SCTP, + IXGBE_FILTER_PROTOCOL_NONE, +}; + +TAILQ_HEAD(ixgbe_5tuple_filter_list, ixgbe_5tuple_filter); + +struct ixgbe_5tuple_filter_info { + uint32_t dst_ip; + uint32_t src_ip; + uint16_t dst_port; + uint16_t src_port; + enum ixgbe_5tuple_protocol proto; /* l4 protocol. */ + uint8_t priority; /* seven levels (001b-111b), 111b is highest, + used when more than one filter matches. */ + uint8_t dst_ip_mask:1, /* if mask is 1b, do not compare dst ip. */ + src_ip_mask:1, /* if mask is 1b, do not compare src ip. */ + dst_port_mask:1, /* if mask is 1b, do not compare dst port. */ + src_port_mask:1, /* if mask is 1b, do not compare src port. */ + proto_mask:1; /* if mask is 1b, do not compare protocol. 
*/ +}; + +/* 5tuple filter structure */ +struct ixgbe_5tuple_filter { + TAILQ_ENTRY(ixgbe_5tuple_filter) entries; + uint16_t index; /* the index of 5tuple filter */ + struct ixgbe_5tuple_filter_info filter_info; + uint16_t queue; /* rx queue assigned to */ +}; + +#define IXGBE_5TUPLE_ARRAY_SIZE \ + (RTE_ALIGN(IXGBE_MAX_FTQF_FILTERS, (sizeof(uint32_t) * NBBY)) / \ + (sizeof(uint32_t) * NBBY)) + +/* + * Structure to store filters' info. + */ +struct ixgbe_filter_info { + uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */ + /* store used ethertype filters*/ + uint16_t ethertype_filters[IXGBE_MAX_ETQF_FILTERS]; + /* Bit mask for every used 5tuple filter */ + uint32_t fivetuple_mask[IXGBE_5TUPLE_ARRAY_SIZE]; + struct ixgbe_5tuple_filter_list fivetuple_list; +}; + +/* + * Structure to store private data for each driver instance (for each port). + */ +struct ixgbe_adapter { + struct ixgbe_hw hw; + struct ixgbe_hw_stats stats; + struct ixgbe_hw_fdir_info fdir; + struct ixgbe_interrupt intr; + struct ixgbe_stat_mapping_registers stat_mappings; + struct ixgbe_vfta shadow_vfta; + struct ixgbe_hwstrip hwstrip; + struct ixgbe_dcb_config dcb_config; + struct ixgbe_mirror_info mr_data; + struct ixgbe_vf_info *vfdata; + struct ixgbe_uta_info uta_info; +#ifdef RTE_NIC_BYPASS + struct ixgbe_bypass_info bps; +#endif /* RTE_NIC_BYPASS */ + struct ixgbe_filter_info filter; + + bool rx_bulk_alloc_allowed; + bool rx_vec_allowed; + struct rte_timecounter systime_tc; + struct rte_timecounter rx_tstamp_tc; + struct rte_timecounter tx_tstamp_tc; +}; + +#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\ + (&((struct ixgbe_adapter *)adapter)->hw) + +#define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \ + (&((struct ixgbe_adapter *)adapter)->stats) + +#define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \ + (&((struct ixgbe_adapter *)adapter)->intr) + +#define IXGBE_DEV_PRIVATE_TO_FDIR_INFO(adapter) \ + (&((struct ixgbe_adapter *)adapter)->fdir) + +#define IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(adapter) \ + (&((struct ixgbe_adapter *)adapter)->stat_mappings) + +#define IXGBE_DEV_PRIVATE_TO_VFTA(adapter) \ + (&((struct ixgbe_adapter *)adapter)->shadow_vfta) + +#define IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(adapter) \ + (&((struct ixgbe_adapter *)adapter)->hwstrip) + +#define IXGBE_DEV_PRIVATE_TO_DCB_CFG(adapter) \ + (&((struct ixgbe_adapter *)adapter)->dcb_config) + +#define IXGBE_DEV_PRIVATE_TO_P_VFDATA(adapter) \ + (&((struct ixgbe_adapter *)adapter)->vfdata) + +#define IXGBE_DEV_PRIVATE_TO_PFDATA(adapter) \ + (&((struct ixgbe_adapter *)adapter)->mr_data) + +#define IXGBE_DEV_PRIVATE_TO_UTA(adapter) \ + (&((struct ixgbe_adapter *)adapter)->uta_info) + +#define IXGBE_DEV_PRIVATE_TO_FILTER_INFO(adapter) \ + (&((struct ixgbe_adapter *)adapter)->filter) + +/* + * RX/TX function prototypes + */ +void ixgbe_dev_clear_queues(struct rte_eth_dev *dev); + +void ixgbe_dev_free_queues(struct rte_eth_dev *dev); + +void ixgbe_dev_rx_queue_release(void *rxq); + +void ixgbe_dev_tx_queue_release(void *txq); + +int ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool); + +int ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); + +uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, + uint16_t rx_queue_id); + +int ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset); +int 
ixgbevf_dev_rx_descriptor_done(void *rx_queue, uint16_t offset); + +int ixgbe_dev_rx_init(struct rte_eth_dev *dev); + +void ixgbe_dev_tx_init(struct rte_eth_dev *dev); + +int ixgbe_dev_rxtx_start(struct rte_eth_dev *dev); + +int ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id); + +int ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id); + +int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id); + +int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id); + +void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo); + +void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo); + +int ixgbevf_dev_rx_init(struct rte_eth_dev *dev); + +void ixgbevf_dev_tx_init(struct rte_eth_dev *dev); + +void ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev); + +uint16_t ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +uint16_t ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +uint16_t ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts); +uint16_t ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts); + +uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +uint16_t ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +int ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); + +int ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); + +uint16_t ixgbe_reta_size_get(enum ixgbe_mac_type mac_type); + +uint32_t ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx); + +uint32_t ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type); + +uint32_t ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i); + +bool ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type); + +/* + * Flow director function prototypes + */ +int ixgbe_fdir_configure(struct rte_eth_dev *dev); + +void ixgbe_configure_dcb(struct rte_eth_dev *dev); + +/* + * misc function prototypes + */ +void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev); + +void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev); + +void ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev); + +void ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev); + +void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev); + +void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev); + +void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev); + +int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev); + +uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val); + +int ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, void *arg); +#endif /* _IXGBE_ETHDEV_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c new file mode 100644 index 00000000..2e4c353a --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_fdir.c @@ -0,0 +1,1376 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <stdio.h> +#include <stdint.h> +#include <stdarg.h> +#include <errno.h> +#include <sys/queue.h> + +#include <rte_interrupts.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_pci.h> +#include <rte_ether.h> +#include <rte_ethdev.h> + +#include "ixgbe_logs.h" +#include "base/ixgbe_api.h" +#include "base/ixgbe_common.h" +#include "ixgbe_ethdev.h" + +/* To get PBALLOC (Packet Buffer Allocation) bits from FDIRCTRL value */ +#define FDIRCTRL_PBALLOC_MASK 0x03 + +/* For calculating memory required for FDIR filters */ +#define PBALLOC_SIZE_SHIFT 15 + +/* Number of bits used to mask bucket hash for different pballoc sizes */ +#define PERFECT_BUCKET_64KB_HASH_MASK 0x07FF /* 11 bits */ +#define PERFECT_BUCKET_128KB_HASH_MASK 0x0FFF /* 12 bits */ +#define PERFECT_BUCKET_256KB_HASH_MASK 0x1FFF /* 13 bits */ +#define SIG_BUCKET_64KB_HASH_MASK 0x1FFF /* 13 bits */ +#define SIG_BUCKET_128KB_HASH_MASK 0x3FFF /* 14 bits */ +#define SIG_BUCKET_256KB_HASH_MASK 0x7FFF /* 15 bits */ +#define IXGBE_DEFAULT_FLEXBYTES_OFFSET 12 /* default flexbytes offset in bytes */ +#define IXGBE_FDIR_MAX_FLEX_LEN 2 /* len in bytes of flexbytes */ +#define IXGBE_MAX_FLX_SOURCE_OFF 62 +#define IXGBE_FDIRCTRL_FLEX_MASK (0x1F << IXGBE_FDIRCTRL_FLEX_SHIFT) +#define IXGBE_FDIRCMD_CMD_INTERVAL_US 10 + +#define IXGBE_FDIR_FLOW_TYPES ( \ + (1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \ + (1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \ + (1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \ + (1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \ + (1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \ + (1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \ + (1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \ + (1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)) + +#define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \ + uint8_t ipv6_addr[16]; \ + uint8_t i; \ + rte_memcpy(ipv6_addr, (ipaddr), sizeof(ipv6_addr));\ + (ipv6m) = 0; \ + for (i = 0; i < sizeof(ipv6_addr); i++) { \ + if (ipv6_addr[i] == UINT8_MAX) \ + (ipv6m) |= 1 << i; \ + else if (ipv6_addr[i] != 0) { \ + PMD_DRV_LOG(ERR, " invalid IPv6 address mask."); \ + return -EINVAL; \ + } \ + } \ +} while (0) + +#define IPV6_MASK_TO_ADDR(ipv6m, ipaddr) do { \ + uint8_t ipv6_addr[16]; \ + uint8_t i; \ + for (i = 0; i < sizeof(ipv6_addr); i++) { \ + if ((ipv6m) & (1 << i)) \ + ipv6_addr[i] = UINT8_MAX; \ + else \ + ipv6_addr[i] = 0; \ + } \ + 
rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\ +} while (0) + +#define DEFAULT_VXLAN_PORT 4789 +#define IXGBE_FDIRIP6M_INNER_MAC_SHIFT 4 + +static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash); +static int fdir_set_input_mask(struct rte_eth_dev *dev, + const struct rte_eth_fdir_masks *input_mask); +static int fdir_set_input_mask_82599(struct rte_eth_dev *dev, + const struct rte_eth_fdir_masks *input_mask); +static int fdir_set_input_mask_x550(struct rte_eth_dev *dev, + const struct rte_eth_fdir_masks *input_mask); +static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev, + const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl); +static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl); +static int ixgbe_fdir_filter_to_atr_input( + const struct rte_eth_fdir_filter *fdir_filter, + union ixgbe_atr_input *input, + enum rte_fdir_mode mode); +static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input, + uint32_t key); +static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input, + enum rte_fdir_pballoc_type pballoc); +static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, + enum rte_fdir_pballoc_type pballoc); +static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, uint8_t queue, + uint32_t fdircmd, uint32_t fdirhash, + enum rte_fdir_mode mode); +static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd, + uint32_t fdirhash); +static int ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev, + const struct rte_eth_fdir_filter *fdir_filter, + bool del, + bool update); +static int ixgbe_fdir_flush(struct rte_eth_dev *dev); +static void ixgbe_fdir_info_get(struct rte_eth_dev *dev, + struct rte_eth_fdir_info *fdir_info); +static void ixgbe_fdir_stats_get(struct rte_eth_dev *dev, + struct rte_eth_fdir_stats *fdir_stats); + +/** + * This function is based on ixgbe_fdir_enable_82599() in base/ixgbe_82599.c. + * It adds extra configuration of fdirctrl that is common for all filter types. + * + * Initialize Flow Director control registers + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register + **/ +static int +fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl) +{ + int i; + + PMD_INIT_FUNC_TRACE(); + + /* Prime the keys for hashing */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); + + /* + * Continue setup of fdirctrl register bits: + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 filters are left + */ + fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | + (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); + + /* + * Poll init-done after we write the register. Estimated times: + * 10G: PBALLOC = 11b, timing is 60us + * 1G: PBALLOC = 11b, timing is 600us + * 100M: PBALLOC = 11b, timing is 6ms + * + * Multiple these timings by 4 if under full Rx load + * + * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for + * 1 msec per poll time. If we're at line rate and drop to 100M, then + * this might not finish in our poll time, but we can live with that + * for now. 
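 *
 * Put differently: the loop below re-reads IXGBE_FDIRCTRL for the INIT_DONE
 * bit with a 1 ms msec_delay() between reads, so the total wait is bounded
 * by IXGBE_FDIR_INIT_DONE_POLL milliseconds before -ETIMEDOUT is returned.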
+ */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); + IXGBE_WRITE_FLUSH(hw); + for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { + if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & + IXGBE_FDIRCTRL_INIT_DONE) + break; + msec_delay(1); + } + + if (i >= IXGBE_FDIR_INIT_DONE_POLL) { + PMD_INIT_LOG(ERR, "Flow Director poll time exceeded " + "during enabling!"); + return -ETIMEDOUT; + } + return 0; +} + +/* + * Set appropriate bits in fdirctrl for: variable reporting levels, moving + * flexbytes matching field, and drop queue (only for perfect matching mode). + */ +static inline int +configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl) +{ + *fdirctrl = 0; + + switch (conf->pballoc) { + case RTE_FDIR_PBALLOC_64K: + /* 8k - 1 signature filters */ + *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K; + break; + case RTE_FDIR_PBALLOC_128K: + /* 16k - 1 signature filters */ + *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K; + break; + case RTE_FDIR_PBALLOC_256K: + /* 32k - 1 signature filters */ + *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K; + break; + default: + /* bad value */ + PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value"); + return -EINVAL; + }; + + /* status flags: write hash & swindex in the rx descriptor */ + switch (conf->status) { + case RTE_FDIR_NO_REPORT_STATUS: + /* do nothing, default mode */ + break; + case RTE_FDIR_REPORT_STATUS: + /* report status when the packet matches a fdir rule */ + *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS; + break; + case RTE_FDIR_REPORT_STATUS_ALWAYS: + /* always report status */ + *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS; + break; + default: + /* bad value */ + PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value"); + return -EINVAL; + }; + + *fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) << + IXGBE_FDIRCTRL_FLEX_SHIFT; + + if (conf->mode >= RTE_FDIR_MODE_PERFECT && + conf->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) { + *fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH; + *fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT); + if (conf->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) + *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN + << IXGBE_FDIRCTRL_FILTERMODE_SHIFT); + else if (conf->mode == RTE_FDIR_MODE_PERFECT_TUNNEL) + *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD + << IXGBE_FDIRCTRL_FILTERMODE_SHIFT); + } + + return 0; +} + +/** + * Reverse the bits in FDIR registers that store 2 x 16 bit masks. + * + * @hi_dword: Bits 31:16 mask to be bit swapped. + * @lo_dword: Bits 15:0 mask to be bit swapped. + * + * Flow director uses several registers to store 2 x 16 bit masks with the + * bits reversed such as FDIRTCPM, FDIRUDPM. The LS bit of the + * mask affects the MS bit/byte of the target. This function reverses the + * bits in these masks. + * **/ +static inline uint32_t +reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword) +{ + uint32_t mask = hi_dword << 16; + mask |= lo_dword; + mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); + mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); + mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); + return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8); +} + +/* + * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c, + * but makes use of the rte_fdir_masks structure to see which bits to set. 
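 *
 * As a sketch of the semantics checked below (the mask values come from the
 * application via rte_eth_conf.fdir_conf.mask, in network byte order):
 *
 *   .vlan_tci_mask = rte_cpu_to_be_16(0xEFFF) -> match VLAN ID and priority
 *   .vlan_tci_mask = 0                        -> wildcard the whole VLAN tag
 *   .src_port_mask = .dst_port_mask = 0       -> wildcard the L4 ports (FDIRM_L4P)
 *   .ipv4_mask.src_ip = 0xFFFFFFFF            -> match the full IPv4 source address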
+ */ +static int +fdir_set_input_mask_82599(struct rte_eth_dev *dev, + const struct rte_eth_fdir_masks *input_mask) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw_fdir_info *info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + /* + * mask VM pool and DIPv6 since there are currently not supported + * mask FLEX byte, it will be set in flex_conf + */ + uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | IXGBE_FDIRM_FLEX; + uint32_t fdirtcpm; /* TCP source and destination port masks. */ + uint32_t fdiripv6m; /* IPv6 source and destination masks. */ + uint16_t dst_ipv6m = 0; + uint16_t src_ipv6m = 0; + volatile uint32_t *reg; + + PMD_INIT_FUNC_TRACE(); + + /* + * Program the relevant mask registers. If src/dst_port or src/dst_addr + * are zero, then assume a full mask for that field. Also assume that + * a VLAN of 0 is unspecified, so mask that out as well. L4type + * cannot be masked out in this implementation. + */ + if (input_mask->dst_port_mask == 0 && input_mask->src_port_mask == 0) + /* use the L4 protocol mask for raw IPv4/IPv6 traffic */ + fdirm |= IXGBE_FDIRM_L4P; + + if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF)) + /* mask VLAN Priority */ + fdirm |= IXGBE_FDIRM_VLANP; + else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000)) + /* mask VLAN ID */ + fdirm |= IXGBE_FDIRM_VLANID; + else if (input_mask->vlan_tci_mask == 0) + /* mask VLAN ID and Priority */ + fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP; + else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) { + PMD_INIT_LOG(ERR, "invalid vlan_tci_mask"); + return -EINVAL; + } + info->mask.vlan_tci_mask = input_mask->vlan_tci_mask; + + IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); + + /* store the TCP/UDP port masks, bit reversed from port layout */ + fdirtcpm = reverse_fdir_bitmasks( + rte_be_to_cpu_16(input_mask->dst_port_mask), + rte_be_to_cpu_16(input_mask->src_port_mask)); + + /* write all the same so that UDP, TCP and SCTP use the same mask + * (little-endian) + */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); + IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm); + info->mask.src_port_mask = input_mask->src_port_mask; + info->mask.dst_port_mask = input_mask->dst_port_mask; + + /* Store source and destination IPv4 masks (big-endian), + * can not use IXGBE_WRITE_REG. + */ + reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRSIP4M); + *reg = ~(input_mask->ipv4_mask.src_ip); + reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRDIP4M); + *reg = ~(input_mask->ipv4_mask.dst_ip); + info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip; + info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip; + + if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) { + /* + * Store source and destination IPv6 masks (bit reversed) + */ + IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m); + IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m); + fdiripv6m = (dst_ipv6m << 16) | src_ipv6m; + + IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m); + info->mask.src_ipv6_mask = src_ipv6m; + info->mask.dst_ipv6_mask = dst_ipv6m; + } + + return IXGBE_SUCCESS; +} + +/* + * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c, + * but makes use of the rte_fdir_masks structure to see which bits to set. 
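 *
 * For the MAC-VLAN and tunnel (VxLAN/NVGRE) perfect modes handled here, the
 * tunnel-related mask fields are interpreted as in the switch statements
 * below; the VxLAN UDP port is primed with the IANA-assigned default 4789
 * (DEFAULT_VXLAN_PORT):
 *
 *   .tunnel_type_mask = 1          -> compare the tunnel type
 *   .tunnel_type_mask = 0          -> wildcard the tunnel type
 *   .tunnel_id_mask   = 0xFFFFFFFF -> compare the full TNI/VNI
 *   .tunnel_id_mask   = 0          -> wildcard the TNI/VNI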
+ */ +static int +fdir_set_input_mask_x550(struct rte_eth_dev *dev, + const struct rte_eth_fdir_masks *input_mask) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw_fdir_info *info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + /* mask VM pool and DIPv6 since there are currently not supported + * mask FLEX byte, it will be set in flex_conf + */ + uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | + IXGBE_FDIRM_FLEX; + uint32_t fdiripv6m; + enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode; + uint16_t mac_mask; + + PMD_INIT_FUNC_TRACE(); + + /* set the default UDP port for VxLAN */ + if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) + IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, DEFAULT_VXLAN_PORT); + + /* some bits must be set for mac vlan or tunnel mode */ + fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P; + + if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF)) + /* mask VLAN Priority */ + fdirm |= IXGBE_FDIRM_VLANP; + else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000)) + /* mask VLAN ID */ + fdirm |= IXGBE_FDIRM_VLANID; + else if (input_mask->vlan_tci_mask == 0) + /* mask VLAN ID and Priority */ + fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP; + else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) { + PMD_INIT_LOG(ERR, "invalid vlan_tci_mask"); + return -EINVAL; + } + info->mask.vlan_tci_mask = input_mask->vlan_tci_mask; + + IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); + + fdiripv6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT); + fdiripv6m |= IXGBE_FDIRIP6M_ALWAYS_MASK; + if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) + fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE | + IXGBE_FDIRIP6M_TNI_VNI; + + mac_mask = input_mask->mac_addr_byte_mask; + fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT) + & IXGBE_FDIRIP6M_INNER_MAC; + info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask; + + if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) { + switch (input_mask->tunnel_type_mask) { + case 0: + /* Mask turnnel type */ + fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE; + break; + case 1: + break; + default: + PMD_INIT_LOG(ERR, "invalid tunnel_type_mask"); + return -EINVAL; + } + info->mask.tunnel_type_mask = + input_mask->tunnel_type_mask; + + switch (rte_be_to_cpu_32(input_mask->tunnel_id_mask)) { + case 0x0: + /* Mask vxlan id */ + fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI; + break; + case 0x00FFFFFF: + fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI_24; + break; + case 0xFFFFFFFF: + break; + default: + PMD_INIT_LOG(ERR, "invalid tunnel_id_mask"); + return -EINVAL; + } + info->mask.tunnel_id_mask = + input_mask->tunnel_id_mask; + } + + IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m); + IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF); + + return IXGBE_SUCCESS; +} + +static int +fdir_set_input_mask(struct rte_eth_dev *dev, + const struct rte_eth_fdir_masks *input_mask) +{ + enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode; + + if (mode >= RTE_FDIR_MODE_SIGNATURE && + mode <= RTE_FDIR_MODE_PERFECT) + return fdir_set_input_mask_82599(dev, input_mask); + else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN && + mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) + return fdir_set_input_mask_x550(dev, input_mask); + + PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode); + return -ENOTSUP; +} + +/* + * ixgbe_check_fdir_flex_conf -check if the flex 
payload and mask configuration + * arguments are valid + */ +static int +ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev, + const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw_fdir_info *info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + const struct rte_eth_flex_payload_cfg *flex_cfg; + const struct rte_eth_fdir_flex_mask *flex_mask; + uint32_t fdirm; + uint16_t flexbytes = 0; + uint16_t i; + + fdirm = IXGBE_READ_REG(hw, IXGBE_FDIRM); + + if (conf == NULL) { + PMD_DRV_LOG(ERR, "NULL pointer."); + return -EINVAL; + } + + for (i = 0; i < conf->nb_payloads; i++) { + flex_cfg = &conf->flex_set[i]; + if (flex_cfg->type != RTE_ETH_RAW_PAYLOAD) { + PMD_DRV_LOG(ERR, "unsupported payload type."); + return -EINVAL; + } + if (((flex_cfg->src_offset[0] & 0x1) == 0) && + (flex_cfg->src_offset[1] == flex_cfg->src_offset[0] + 1) && + (flex_cfg->src_offset[0] <= IXGBE_MAX_FLX_SOURCE_OFF)) { + *fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK; + *fdirctrl |= + (flex_cfg->src_offset[0] / sizeof(uint16_t)) << + IXGBE_FDIRCTRL_FLEX_SHIFT; + } else { + PMD_DRV_LOG(ERR, "invalid flexbytes arguments."); + return -EINVAL; + } + } + + for (i = 0; i < conf->nb_flexmasks; i++) { + flex_mask = &conf->flex_mask[i]; + if (flex_mask->flow_type != RTE_ETH_FLOW_UNKNOWN) { + PMD_DRV_LOG(ERR, "flexmask should be set globally."); + return -EINVAL; + } + flexbytes = (uint16_t)(((flex_mask->mask[0] << 8) & 0xFF00) | + ((flex_mask->mask[1]) & 0xFF)); + if (flexbytes == UINT16_MAX) + fdirm &= ~IXGBE_FDIRM_FLEX; + else if (flexbytes != 0) { + /* IXGBE_FDIRM_FLEX is set by default when set mask */ + PMD_DRV_LOG(ERR, " invalid flexbytes mask arguments."); + return -EINVAL; + } + } + IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); + info->mask.flex_bytes_mask = flexbytes ? UINT16_MAX : 0; + info->flex_bytes_offset = (uint8_t)((*fdirctrl & + IXGBE_FDIRCTRL_FLEX_MASK) >> + IXGBE_FDIRCTRL_FLEX_SHIFT); + return 0; +} + +int +ixgbe_fdir_configure(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int err; + uint32_t fdirctrl, pbsize; + int i; + enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode; + + PMD_INIT_FUNC_TRACE(); + + if (hw->mac.type != ixgbe_mac_82599EB && + hw->mac.type != ixgbe_mac_X540 && + hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) + return -ENOSYS; + + /* x550 supports mac-vlan and tunnel mode but other NICs not */ + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a && + mode != RTE_FDIR_MODE_SIGNATURE && + mode != RTE_FDIR_MODE_PERFECT) + return -ENOSYS; + + err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl); + if (err) + return err; + + /* + * Before enabling Flow Director, the Rx Packet Buffer size + * must be reduced. The new value is the current size minus + * flow director memory usage size. + */ + pbsize = (1 << (PBALLOC_SIZE_SHIFT + (fdirctrl & FDIRCTRL_PBALLOC_MASK))); + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), + (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize)); + + /* + * The defaults in the HW for RX PB 1-7 are not zero and so should be + * intialized to zero for non DCB mode otherwise actual total RX PB + * would be bigger than programmed and filter space would run into + * the PB 0 region. 
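 *
 * For reference, this function is reached when the application configures
 * the port with flow director enabled; a minimal, illustrative setup (field
 * values are examples only) looks like:
 *
 *   struct rte_eth_conf conf = { .fdir_conf = {
 *           .mode       = RTE_FDIR_MODE_PERFECT,
 *           .pballoc    = RTE_FDIR_PBALLOC_64K,
 *           .status     = RTE_FDIR_REPORT_STATUS,
 *           .drop_queue = 127,
 *   } };
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);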
+ */ + for (i = 1; i < 8; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); + + err = fdir_set_input_mask(dev, &dev->data->dev_conf.fdir_conf.mask); + if (err < 0) { + PMD_INIT_LOG(ERR, " Error on setting FD mask"); + return err; + } + err = ixgbe_set_fdir_flex_conf(dev, + &dev->data->dev_conf.fdir_conf.flex_conf, &fdirctrl); + if (err < 0) { + PMD_INIT_LOG(ERR, " Error on setting FD flexible arguments."); + return err; + } + + err = fdir_enable_82599(hw, fdirctrl); + if (err < 0) { + PMD_INIT_LOG(ERR, " Error on enabling FD."); + return err; + } + return 0; +} + +/* + * Convert DPDK rte_eth_fdir_filter struct to ixgbe_atr_input union that is used + * by the IXGBE driver code. + */ +static int +ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter, + union ixgbe_atr_input *input, enum rte_fdir_mode mode) +{ + input->formatted.vlan_id = fdir_filter->input.flow_ext.vlan_tci; + input->formatted.flex_bytes = (uint16_t)( + (fdir_filter->input.flow_ext.flexbytes[1] << 8 & 0xFF00) | + (fdir_filter->input.flow_ext.flexbytes[0] & 0xFF)); + + switch (fdir_filter->input.flow_type) { + case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: + input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; + break; + case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: + input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; + break; + case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP: + input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; + break; + case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: + input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: + input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV6; + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: + input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP: + input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV6; + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: + input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV6; + break; + default: + break; + } + + switch (fdir_filter->input.flow_type) { + case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: + case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: + input->formatted.src_port = + fdir_filter->input.flow.udp4_flow.src_port; + input->formatted.dst_port = + fdir_filter->input.flow.udp4_flow.dst_port; + /*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/ + case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP: + case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: + input->formatted.src_ip[0] = + fdir_filter->input.flow.ip4_flow.src_ip; + input->formatted.dst_ip[0] = + fdir_filter->input.flow.ip4_flow.dst_ip; + break; + + case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: + case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: + input->formatted.src_port = + fdir_filter->input.flow.udp6_flow.src_port; + input->formatted.dst_port = + fdir_filter->input.flow.udp6_flow.dst_port; + /*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/ + case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP: + case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: + rte_memcpy(input->formatted.src_ip, + fdir_filter->input.flow.ipv6_flow.src_ip, + sizeof(input->formatted.src_ip)); + rte_memcpy(input->formatted.dst_ip, + fdir_filter->input.flow.ipv6_flow.dst_ip, + sizeof(input->formatted.dst_ip)); + break; + default: + break; + } + + if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { + rte_memcpy( + input->formatted.inner_mac, + fdir_filter->input.flow.mac_vlan_flow.mac_addr.addr_bytes, + sizeof(input->formatted.inner_mac)); + } else if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) { + rte_memcpy( + input->formatted.inner_mac, + 
fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes, + sizeof(input->formatted.inner_mac)); + input->formatted.tunnel_type = + fdir_filter->input.flow.tunnel_flow.tunnel_type; + input->formatted.tni_vni = + fdir_filter->input.flow.tunnel_flow.tunnel_id; + } + + return 0; +} + +/* + * The below function is taken from the FreeBSD IXGBE drivers release + * 2.3.8. The only change is not to mask hash_result with IXGBE_ATR_HASH_MASK + * before returning, as the signature hash can use 16bits. + * + * The newer driver has optimised functions for calculating bucket and + * signature hashes. However they don't support IPv6 type packets for signature + * filters so are not used here. + * + * Note that the bkt_hash field in the ixgbe_atr_input structure is also never + * set. + * + * Compute the hashes for SW ATR + * @stream: input bitstream to compute the hash on + * @key: 32-bit hash key + **/ +static uint32_t +ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input, + uint32_t key) +{ + /* + * The algorithm is as follows: + * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350 + * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n] + * and A[n] x B[n] is bitwise AND between same length strings + * + * K[n] is 16 bits, defined as: + * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15] + * for n modulo 32 < 15, K[n] = + * K[(n % 32:0) | (31:31 - (14 - (n % 32)))] + * + * S[n] is 16 bits, defined as: + * for n >= 15, S[n] = S[n:n - 15] + * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))] + * + * To simplify for programming, the algorithm is implemented + * in software this way: + * + * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0] + * + * for (i = 0; i < 352; i+=32) + * hi_hash_dword[31:0] ^= Stream[(i+31):i]; + * + * lo_hash_dword[15:0] ^= Stream[15:0]; + * lo_hash_dword[15:0] ^= hi_hash_dword[31:16]; + * lo_hash_dword[31:16] ^= hi_hash_dword[15:0]; + * + * hi_hash_dword[31:0] ^= Stream[351:320]; + * + * if (key[0]) + * hash[15:0] ^= Stream[15:0]; + * + * for (i = 0; i < 16; i++) { + * if (key[i]) + * hash[15:0] ^= lo_hash_dword[(i+15):i]; + * if (key[i + 16]) + * hash[15:0] ^= hi_hash_dword[(i+15):i]; + * } + * + */ + __be32 common_hash_dword = 0; + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 hash_result = 0; + u8 i; + + /* record the flow_vm_vlan bits as they are a key part to the hash */ + flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]); + + /* generate common hash dword */ + for (i = 1; i <= 13; i++) + common_hash_dword ^= atr_input->dword_stream[i]; + + hi_hash_dword = IXGBE_NTOHL(common_hash_dword); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + + /* apply flow ID/VM pool/VLAN ID bits to hash words */ + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + + /* Process bits 0 and 16 */ + if (key & 0x0001) hash_result ^= lo_hash_dword; + if (key & 0x00010000) hash_result ^= hi_hash_dword; + + /* + * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed + * so we do not add the vlan until after bit 0 was processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + + + /* process the remaining 30 bits in the key 2 bits at a time */ + for (i = 15; i; i-- ) { + if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i; + if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i; + } + + return hash_result; +} + +static uint32_t +atr_compute_perfect_hash_82599(union 
ixgbe_atr_input *input, + enum rte_fdir_pballoc_type pballoc) +{ + if (pballoc == RTE_FDIR_PBALLOC_256K) + return ixgbe_atr_compute_hash_82599(input, + IXGBE_ATR_BUCKET_HASH_KEY) & + PERFECT_BUCKET_256KB_HASH_MASK; + else if (pballoc == RTE_FDIR_PBALLOC_128K) + return ixgbe_atr_compute_hash_82599(input, + IXGBE_ATR_BUCKET_HASH_KEY) & + PERFECT_BUCKET_128KB_HASH_MASK; + else + return ixgbe_atr_compute_hash_82599(input, + IXGBE_ATR_BUCKET_HASH_KEY) & + PERFECT_BUCKET_64KB_HASH_MASK; +} + +/** + * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete + * @hw: pointer to hardware structure + */ +static inline int +ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd) +{ + int i; + + for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { + *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); + if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK)) + return 0; + rte_delay_us(IXGBE_FDIRCMD_CMD_INTERVAL_US); + } + + return -ETIMEDOUT; +} + +/* + * Calculate the hash value needed for signature-match filters. In the FreeBSD + * driver, this is done by the optimised function + * ixgbe_atr_compute_sig_hash_82599(). However that can't be used here as it + * doesn't support calculating a hash for an IPv6 filter. + */ +static uint32_t +atr_compute_sig_hash_82599(union ixgbe_atr_input *input, + enum rte_fdir_pballoc_type pballoc) +{ + uint32_t bucket_hash, sig_hash; + + if (pballoc == RTE_FDIR_PBALLOC_256K) + bucket_hash = ixgbe_atr_compute_hash_82599(input, + IXGBE_ATR_BUCKET_HASH_KEY) & + SIG_BUCKET_256KB_HASH_MASK; + else if (pballoc == RTE_FDIR_PBALLOC_128K) + bucket_hash = ixgbe_atr_compute_hash_82599(input, + IXGBE_ATR_BUCKET_HASH_KEY) & + SIG_BUCKET_128KB_HASH_MASK; + else + bucket_hash = ixgbe_atr_compute_hash_82599(input, + IXGBE_ATR_BUCKET_HASH_KEY) & + SIG_BUCKET_64KB_HASH_MASK; + + sig_hash = ixgbe_atr_compute_hash_82599(input, + IXGBE_ATR_SIGNATURE_HASH_KEY); + + return (sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT) | bucket_hash; +} + +/* + * This is based on ixgbe_fdir_write_perfect_filter_82599() in + * base/ixgbe_82599.c, with the ability to set extra flags in FDIRCMD register + * added, and IPv6 support also added. The hash value is also pre-calculated + * as the pballoc value is needed to do it. + */ +static int +fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, uint8_t queue, + uint32_t fdircmd, uint32_t fdirhash, + enum rte_fdir_mode mode) +{ + uint32_t fdirport, fdirvlan; + u32 addr_low, addr_high; + u32 tunnel_type = 0; + int err = 0; + volatile uint32_t *reg; + + if (mode == RTE_FDIR_MODE_PERFECT) { + /* record the IPv4 address (big-endian) + * can not use IXGBE_WRITE_REG. 
+ */ + reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPSA); + *reg = input->formatted.src_ip[0]; + reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPDA); + *reg = input->formatted.dst_ip[0]; + + /* record source and destination port (little-endian)*/ + fdirport = IXGBE_NTOHS(input->formatted.dst_port); + fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; + fdirport |= IXGBE_NTOHS(input->formatted.src_port); + IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); + } else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN && + mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) { + /* for mac vlan and tunnel modes */ + addr_low = ((u32)input->formatted.inner_mac[0] | + ((u32)input->formatted.inner_mac[1] << 8) | + ((u32)input->formatted.inner_mac[2] << 16) | + ((u32)input->formatted.inner_mac[3] << 24)); + addr_high = ((u32)input->formatted.inner_mac[4] | + ((u32)input->formatted.inner_mac[5] << 8)); + + if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), addr_high); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0); + } else { + /* tunnel mode */ + if (input->formatted.tunnel_type != + RTE_FDIR_TUNNEL_TYPE_NVGRE) + tunnel_type = 0x80000000; + tunnel_type |= addr_high; + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), tunnel_type); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), + input->formatted.tni_vni); + } + } + + /* record vlan (little-endian) and flex_bytes(big-endian) */ + fdirvlan = input->formatted.flex_bytes; + fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; + fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id); + IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); + + /* configure FDIRHASH register */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + + /* + * flush all previous writes to make certain registers are + * programmed prior to issuing the command + */ + IXGBE_WRITE_FLUSH(hw); + + /* configure FDIRCMD register */ + fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW | + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; + fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; + fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; + fdircmd |= (uint32_t)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT; + + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); + + PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash); + + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err < 0) + PMD_DRV_LOG(ERR, "Timeout writing flow director filter."); + + return err; +} + +/** + * This function is based on ixgbe_atr_add_signature_filter_82599() in + * base/ixgbe_82599.c, but uses a pre-calculated hash value. It also supports + * setting extra fields in the FDIRCMD register, and removes the code that was + * verifying the flow_type field. According to the documentation, a flow type of + * 00 (i.e. not TCP, UDP, or SCTP) is not supported, however it appears to + * work ok... 
+ * + * Adds a signature hash filter + * @hw: pointer to hardware structure + * @input: unique input dword + * @queue: queue index to direct traffic to + * @fdircmd: any extra flags to set in fdircmd register + * @fdirhash: pre-calculated hash value for the filter + **/ +static int +fdir_add_signature_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd, + uint32_t fdirhash) +{ + int err = 0; + + PMD_INIT_FUNC_TRACE(); + + /* configure FDIRCMD register */ + fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW | + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; + fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; + fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; + + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); + + PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash); + + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err < 0) + PMD_DRV_LOG(ERR, "Timeout writing flow director filter."); + + return err; +} + +/* + * This is based on ixgbe_fdir_erase_perfect_filter_82599() in + * base/ixgbe_82599.c. It is modified to take in the hash as a parameter so + * that it can be used for removing signature and perfect filters. + */ +static int +fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash) +{ + uint32_t fdircmd = 0; + int err = 0; + + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + + /* flush hash to HW */ + IXGBE_WRITE_FLUSH(hw); + + /* Query if filter is present */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT); + + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err < 0) { + PMD_INIT_LOG(ERR, "Timeout querying for flow director filter."); + return err; + } + + /* if filter exists in hardware then remove it */ + if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) { + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + IXGBE_FDIRCMD_CMD_REMOVE_FLOW); + } + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err < 0) + PMD_INIT_LOG(ERR, "Timeout erasing flow director filter."); + return err; + +} + +/* + * ixgbe_add_del_fdir_filter - add or remove a flow diretor filter. + * @dev: pointer to the structure rte_eth_dev + * @fdir_filter: fdir filter entry + * @del: 1 - delete, 0 - add + * @update: 1 - update + */ +static int +ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev, + const struct rte_eth_fdir_filter *fdir_filter, + bool del, + bool update) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t fdircmd_flags; + uint32_t fdirhash; + union ixgbe_atr_input input; + uint8_t queue; + bool is_perfect = FALSE; + int err; + struct ixgbe_hw_fdir_info *info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode; + + if (fdir_mode == RTE_FDIR_MODE_NONE) + return -ENOTSUP; + + /* + * Sanity check for x550. + * When adding a new filter with flow type set to IPv4-other, + * the flow director mask should be configed before, + * and the L4 protocol and ports are masked. 
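 *
 * A minimal, illustrative way to reach this function from an application
 * (values are examples only) is through the generic filter API:
 *
 *   struct rte_eth_fdir_filter f = { 0 };
 *   f.soft_id = 1;
 *   f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
 *   f.input.flow.udp4_flow.ip.src_ip = rte_cpu_to_be_32(IPv4(192, 168, 0, 1));
 *   f.input.flow.udp4_flow.src_port  = rte_cpu_to_be_16(1024);
 *   f.action.behavior = RTE_ETH_FDIR_ACCEPT;
 *   f.action.rx_queue = 3;
 *   rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *                           RTE_ETH_FILTER_ADD, &f);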
+ */ + if ((!del) && + (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) && + (fdir_filter->input.flow_type == + RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) && + (info->mask.src_port_mask != 0 || + info->mask.dst_port_mask != 0)) { + PMD_DRV_LOG(ERR, "By this device," + " IPv4-other is not supported without" + " L4 protocol and ports masked!"); + return -ENOTSUP; + } + + if (fdir_mode >= RTE_FDIR_MODE_PERFECT && + fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) + is_perfect = TRUE; + + memset(&input, 0, sizeof(input)); + + err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input, + fdir_mode); + if (err) + return err; + + if (is_perfect) { + if (input.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) { + PMD_DRV_LOG(ERR, "IPv6 is not supported in" + " perfect mode!"); + return -ENOTSUP; + } + fdirhash = atr_compute_perfect_hash_82599(&input, + dev->data->dev_conf.fdir_conf.pballoc); + fdirhash |= fdir_filter->soft_id << + IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; + } else + fdirhash = atr_compute_sig_hash_82599(&input, + dev->data->dev_conf.fdir_conf.pballoc); + + if (del) { + err = fdir_erase_filter_82599(hw, fdirhash); + if (err < 0) + PMD_DRV_LOG(ERR, "Fail to delete FDIR filter!"); + else + PMD_DRV_LOG(DEBUG, "Success to delete FDIR filter!"); + return err; + } + /* add or update an fdir filter*/ + fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0; + if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT) { + if (is_perfect) { + queue = dev->data->dev_conf.fdir_conf.drop_queue; + fdircmd_flags |= IXGBE_FDIRCMD_DROP; + } else { + PMD_DRV_LOG(ERR, "Drop option is not supported in" + " signature mode."); + return -EINVAL; + } + } else if (fdir_filter->action.behavior == RTE_ETH_FDIR_ACCEPT && + fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM) + queue = (uint8_t)fdir_filter->action.rx_queue; + else + return -EINVAL; + + if (is_perfect) { + err = fdir_write_perfect_filter_82599(hw, &input, queue, + fdircmd_flags, fdirhash, + fdir_mode); + } else { + err = fdir_add_signature_filter_82599(hw, &input, queue, + fdircmd_flags, fdirhash); + } + if (err < 0) + PMD_DRV_LOG(ERR, "Fail to add FDIR filter!"); + else + PMD_DRV_LOG(DEBUG, "Success to add FDIR filter"); + + return err; +} + +static int +ixgbe_fdir_flush(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw_fdir_info *info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + int ret; + + ret = ixgbe_reinit_fdir_tables_82599(hw); + if (ret < 0) { + PMD_INIT_LOG(ERR, "Failed to re-initialize FD table."); + return ret; + } + + info->f_add = 0; + info->f_remove = 0; + info->add = 0; + info->remove = 0; + + return ret; +} + +#define FDIRENTRIES_NUM_SHIFT 10 +static void +ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw_fdir_info *info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + uint32_t fdirctrl, max_num; + uint8_t offset; + + fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); + offset = ((fdirctrl & IXGBE_FDIRCTRL_FLEX_MASK) >> + IXGBE_FDIRCTRL_FLEX_SHIFT) * sizeof(uint16_t); + + fdir_info->mode = dev->data->dev_conf.fdir_conf.mode; + max_num = (1 << (FDIRENTRIES_NUM_SHIFT + + (fdirctrl & FDIRCTRL_PBALLOC_MASK))); + if (fdir_info->mode >= RTE_FDIR_MODE_PERFECT && + fdir_info->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) + fdir_info->guarant_spc = max_num; + else if (fdir_info->mode 
== RTE_FDIR_MODE_SIGNATURE) + fdir_info->guarant_spc = max_num * 4; + + fdir_info->mask.vlan_tci_mask = info->mask.vlan_tci_mask; + fdir_info->mask.ipv4_mask.src_ip = info->mask.src_ipv4_mask; + fdir_info->mask.ipv4_mask.dst_ip = info->mask.dst_ipv4_mask; + IPV6_MASK_TO_ADDR(info->mask.src_ipv6_mask, + fdir_info->mask.ipv6_mask.src_ip); + IPV6_MASK_TO_ADDR(info->mask.dst_ipv6_mask, + fdir_info->mask.ipv6_mask.dst_ip); + fdir_info->mask.src_port_mask = info->mask.src_port_mask; + fdir_info->mask.dst_port_mask = info->mask.dst_port_mask; + fdir_info->mask.mac_addr_byte_mask = info->mask.mac_addr_byte_mask; + fdir_info->mask.tunnel_id_mask = info->mask.tunnel_id_mask; + fdir_info->mask.tunnel_type_mask = info->mask.tunnel_type_mask; + fdir_info->max_flexpayload = IXGBE_FDIR_MAX_FLEX_LEN; + + if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN || + fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL) + fdir_info->flow_types_mask[0] = 0; + else + fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES; + + fdir_info->flex_payload_unit = sizeof(uint16_t); + fdir_info->max_flex_payload_segment_num = 1; + fdir_info->flex_payload_limit = IXGBE_MAX_FLX_SOURCE_OFF; + fdir_info->flex_conf.nb_payloads = 1; + fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD; + fdir_info->flex_conf.flex_set[0].src_offset[0] = offset; + fdir_info->flex_conf.flex_set[0].src_offset[1] = offset + 1; + fdir_info->flex_conf.nb_flexmasks = 1; + fdir_info->flex_conf.flex_mask[0].flow_type = RTE_ETH_FLOW_UNKNOWN; + fdir_info->flex_conf.flex_mask[0].mask[0] = + (uint8_t)(info->mask.flex_bytes_mask & 0x00FF); + fdir_info->flex_conf.flex_mask[0].mask[1] = + (uint8_t)((info->mask.flex_bytes_mask & 0xFF00) >> 8); +} + +static void +ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_stats) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw_fdir_info *info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + uint32_t reg, max_num; + enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode; + + /* Get the information from registers */ + reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE); + info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >> + IXGBE_FDIRFREE_COLL_SHIFT); + info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >> + IXGBE_FDIRFREE_FREE_SHIFT); + + reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN); + info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >> + IXGBE_FDIRLEN_MAXHASH_SHIFT); + info->maxlen = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >> + IXGBE_FDIRLEN_MAXLEN_SHIFT); + + reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT); + info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >> + IXGBE_FDIRUSTAT_REMOVE_SHIFT; + info->add += (reg & IXGBE_FDIRUSTAT_ADD_MASK) >> + IXGBE_FDIRUSTAT_ADD_SHIFT; + + reg = IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT) & 0xFFFF; + info->f_remove += (reg & IXGBE_FDIRFSTAT_FREMOVE_MASK) >> + IXGBE_FDIRFSTAT_FREMOVE_SHIFT; + info->f_add += (reg & IXGBE_FDIRFSTAT_FADD_MASK) >> + IXGBE_FDIRFSTAT_FADD_SHIFT; + + /* Copy the new information in the fdir parameter */ + fdir_stats->collision = info->collision; + fdir_stats->free = info->free; + fdir_stats->maxhash = info->maxhash; + fdir_stats->maxlen = info->maxlen; + fdir_stats->remove = info->remove; + fdir_stats->add = info->add; + fdir_stats->f_remove = info->f_remove; + fdir_stats->f_add = info->f_add; + + reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); + max_num = (1 << (FDIRENTRIES_NUM_SHIFT + + (reg & FDIRCTRL_PBALLOC_MASK))); + if (fdir_mode >= 
RTE_FDIR_MODE_PERFECT && + fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) + fdir_stats->guarant_cnt = max_num - fdir_stats->free; + else if (fdir_mode == RTE_FDIR_MODE_SIGNATURE) + fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free; + +} + +/* + * ixgbe_fdir_ctrl_func - deal with all operations on flow director. + * @dev: pointer to the structure rte_eth_dev + * @filter_op:operation will be taken + * @arg: a pointer to specific structure corresponding to the filter_op + */ +int +ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, void *arg) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret = 0; + + if (hw->mac.type != ixgbe_mac_82599EB && + hw->mac.type != ixgbe_mac_X540 && + hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) + return -ENOTSUP; + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH) + return -EINVAL; + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = ixgbe_add_del_fdir_filter(dev, + (struct rte_eth_fdir_filter *)arg, FALSE, FALSE); + break; + case RTE_ETH_FILTER_UPDATE: + ret = ixgbe_add_del_fdir_filter(dev, + (struct rte_eth_fdir_filter *)arg, FALSE, TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = ixgbe_add_del_fdir_filter(dev, + (struct rte_eth_fdir_filter *)arg, TRUE, FALSE); + break; + case RTE_ETH_FILTER_FLUSH: + ret = ixgbe_fdir_flush(dev); + break; + case RTE_ETH_FILTER_INFO: + ixgbe_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg); + break; + case RTE_ETH_FILTER_STATS: + ixgbe_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unknown operation %u", filter_op); + ret = -EINVAL; + break; + } + return ret; +} diff --git a/drivers/net/ixgbe/ixgbe_logs.h b/drivers/net/ixgbe/ixgbe_logs.h new file mode 100644 index 00000000..53ba42d9 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_logs.h @@ -0,0 +1,77 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _IXGBE_LOGS_H_ +#define _IXGBE_LOGS_H_ + +#define PMD_INIT_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args) + +#ifdef RTE_LIBRTE_IXGBE_DEBUG_INIT +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") +#else +#define PMD_INIT_FUNC_TRACE() do { } while(0) +#endif + +#ifdef RTE_LIBRTE_IXGBE_DEBUG_RX +#define PMD_RX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX +#define PMD_TX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE +#define PMD_TX_FREE_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_IXGBE_DEBUG_DRIVER +#define PMD_DRV_LOG_RAW(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args) +#else +#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0) +#endif + +#define PMD_DRV_LOG(level, fmt, args...) \ + PMD_DRV_LOG_RAW(level, fmt "\n", ## args) + +#endif /* _IXGBE_LOGS_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c new file mode 100644 index 00000000..a2787d90 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_pf.c @@ -0,0 +1,760 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <stdio.h> +#include <errno.h> +#include <stdint.h> +#include <stdlib.h> +#include <unistd.h> +#include <stdarg.h> +#include <inttypes.h> + +#include <rte_interrupts.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_eal.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_memcpy.h> +#include <rte_malloc.h> +#include <rte_random.h> + +#include "base/ixgbe_common.h" +#include "ixgbe_ethdev.h" + +#define IXGBE_MAX_VFTA (128) +#define IXGBE_VF_MSG_SIZE_DEFAULT 1 +#define IXGBE_VF_GET_QUEUE_MSG_SIZE 5 +#define IXGBE_ETHERTYPE_FLOW_CTRL 0x8808 + +static inline uint16_t +dev_num_vf(struct rte_eth_dev *eth_dev) +{ + return eth_dev->pci_dev->max_vfs; +} + +static inline +int ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num) +{ + unsigned char vf_mac_addr[ETHER_ADDR_LEN]; + struct ixgbe_vf_info *vfinfo = + *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); + uint16_t vfn; + + for (vfn = 0; vfn < vf_num; vfn++) { + eth_random_addr(vf_mac_addr); + /* keep the random address as default */ + memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr, + ETHER_ADDR_LEN); + } + + return 0; +} + +static inline int +ixgbe_mb_intr_setup(struct rte_eth_dev *dev) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + intr->mask |= IXGBE_EICR_MAILBOX; + + return 0; +} + +void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev) +{ + struct ixgbe_vf_info **vfinfo = + IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private); + struct ixgbe_mirror_info *mirror_info = + IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private); + struct ixgbe_uta_info *uta_info = + IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private); + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + uint16_t vf_num; + uint8_t nb_queue; + + PMD_INIT_FUNC_TRACE(); + + RTE_ETH_DEV_SRIOV(eth_dev).active = 0; + if (0 == (vf_num = dev_num_vf(eth_dev))) + return; + + *vfinfo = rte_zmalloc("vf_info", sizeof(struct ixgbe_vf_info) * vf_num, 0); + if (*vfinfo == NULL) + rte_panic("Cannot allocate memory for private VF data\n"); + + memset(mirror_info,0,sizeof(struct ixgbe_mirror_info)); + memset(uta_info,0,sizeof(struct ixgbe_uta_info)); + hw->mac.mc_filter_type = 0; + + if (vf_num >= ETH_32_POOLS) { + nb_queue = 2; + RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS; + } else if (vf_num >= ETH_16_POOLS) { + nb_queue = 4; + RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS; + } else { + nb_queue = 8; + RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS; + } + + RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue; + RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num; + RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue); + + ixgbe_vf_perm_addr_gen(eth_dev, vf_num); + + /* init_mailbox_params */ + hw->mbx.ops.init_params(hw); + + /* set mb interrupt mask */ + ixgbe_mb_intr_setup(eth_dev); + + return; +} + +void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev) +{ + struct ixgbe_vf_info **vfinfo; + 
uint16_t vf_num; + + PMD_INIT_FUNC_TRACE(); + + vfinfo = IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private); + + RTE_ETH_DEV_SRIOV(eth_dev).active = 0; + RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = 0; + RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0; + RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = 0; + + vf_num = dev_num_vf(eth_dev); + if (vf_num == 0) + return; + + rte_free(*vfinfo); + *vfinfo = NULL; +} + +static void +ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); + uint16_t vf_num; + int i; + + if (!hw->mac.ops.set_ethertype_anti_spoofing) { + RTE_LOG(INFO, PMD, "ether type anti-spoofing is not" + " supported.\n"); + return; + } + + /* occupy an entity of ether type filter */ + for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { + if (!(filter_info->ethertype_mask & (1 << i))) { + filter_info->ethertype_mask |= 1 << i; + filter_info->ethertype_filters[i] = + IXGBE_ETHERTYPE_FLOW_CTRL; + break; + } + } + if (i == IXGBE_MAX_ETQF_FILTERS) { + RTE_LOG(ERR, PMD, "Cannot find an unused ether type filter" + " entity for flow control.\n"); + return; + } + + IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), + (IXGBE_ETQF_FILTER_EN | + IXGBE_ETQF_TX_ANTISPOOF | + IXGBE_ETHERTYPE_FLOW_CTRL)); + + vf_num = dev_num_vf(eth_dev); + for (i = 0; i < vf_num; i++) + hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i); +} + +int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev) +{ + uint32_t vtctl, fcrth; + uint32_t vfre_slot, vfre_offset; + uint16_t vf_num; + const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */ + const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1); + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + uint32_t gpie, gcr_ext; + uint32_t vlanctrl; + int i; + + if (0 == (vf_num = dev_num_vf(eth_dev))) + return -1; + + /* enable VMDq and set the default pool for PF */ + vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + vtctl |= IXGBE_VMD_CTL_VMDQ_EN; + vtctl &= ~IXGBE_VT_CTL_POOL_MASK; + vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx + << IXGBE_VT_CTL_POOL_SHIFT; + vtctl |= IXGBE_VT_CTL_REPLEN; + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl); + + vfre_offset = vf_num & VFRE_MASK; + vfre_slot = (vf_num >> VFRE_SHIFT) > 0 ? 
1 : 0; + + /* Enable pools reserved to PF only */ + IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot), (~0U) << vfre_offset); + IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot ^ 1), vfre_slot - 1); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot), (~0U) << vfre_offset); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot ^ 1), vfre_slot - 1); + + /* PFDMA Tx General Switch Control Enables VMDQ loopback */ + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + + /* clear VMDq map to perment rar 0 */ + hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); + + /* clear VMDq map to scan rar 127 */ + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(hw->mac.num_rar_entries), 0); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(hw->mac.num_rar_entries), 0); + + /* set VMDq map to default PF pool */ + hw->mac.ops.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx); + + /* + * SW msut set GCR_EXT.VT_Mode the same as GPIE.VT_Mode + */ + gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); + gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK; + + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + gpie &= ~IXGBE_GPIE_VTMODE_MASK; + gpie |= IXGBE_GPIE_MSIX_MODE; + + switch (RTE_ETH_DEV_SRIOV(eth_dev).active) { + case ETH_64_POOLS: + gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64; + gpie |= IXGBE_GPIE_VTMODE_64; + break; + case ETH_32_POOLS: + gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32; + gpie |= IXGBE_GPIE_VTMODE_32; + break; + case ETH_16_POOLS: + gcr_ext |= IXGBE_GCR_EXT_VT_MODE_16; + gpie |= IXGBE_GPIE_VTMODE_16; + break; + } + + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); + + /* + * enable vlan filtering and allow all vlan tags through + */ + vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); + + /* VFTA - enable all vlan filters */ + for (i = 0; i < IXGBE_MAX_VFTA; i++) { + IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF); + } + + /* Enable MAC Anti-Spoofing */ + hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num); + + /* set flow control threshold to max to avoid tx switch hang */ + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); + fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32; + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth); + } + + ixgbe_add_tx_flow_control_drop_filter(eth_dev); + + return 0; +} + +static void +set_rx_mode(struct rte_eth_dev *dev) +{ + struct rte_eth_dev_data *dev_data = dev->data; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; + uint16_t vfn = dev_num_vf(dev); + + /* Check for Promiscuous and All Multicast modes */ + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + + /* set all bits that we expect to always be set */ + fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */ + fctrl |= IXGBE_FCTRL_BAM; + + /* clear the bits we are changing the status of */ + fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + + if (dev_data->promiscuous) { + fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE); + } else { + if (dev_data->all_multicast) { + fctrl |= IXGBE_FCTRL_MPE; + vmolr |= IXGBE_VMOLR_MPE; + } else { + vmolr |= IXGBE_VMOLR_ROMPE; + } + } + + if (hw->mac.type != ixgbe_mac_82598EB) { + vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(vfn)) & + ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE | + IXGBE_VMOLR_ROPE); + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vfn), vmolr); + } + + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + + if (dev->data->dev_conf.rxmode.hw_vlan_strip) + 
ixgbe_vlan_hw_strip_enable_all(dev); + else + ixgbe_vlan_hw_strip_disable_all(dev); +} + +static inline void +ixgbe_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vf_info *vfinfo = + *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + int rar_entry = hw->mac.num_rar_entries - (vf + 1); + uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); + + vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_ROMPE | + IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE); + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); + + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); + + /* reset multicast table array for vf */ + vfinfo[vf].num_vf_mc_hashes = 0; + + /* reset rx mode */ + set_rx_mode(dev); + + hw->mac.ops.clear_rar(hw, rar_entry); +} + +static inline void +ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + uint32_t reg_offset, vf_shift; + const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */ + const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1); + + vf_shift = vf & VFRE_MASK; + reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0; + + /* enable transmit and receive for vf */ + reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); + reg |= (reg | (1 << vf_shift)); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); + + reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); + reg |= (reg | (1 << vf_shift)); + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); + + /* Enable counting of spoofed packets in the SSVPC register */ + reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset)); + reg |= (1 << vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); + + ixgbe_vf_reset_event(dev, vf); +} + +static int +ixgbe_enable_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t vmolr; + + vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); + + RTE_LOG(INFO, PMD, "VF %u: enabling multicast promiscuous\n", vf); + + vmolr |= IXGBE_VMOLR_MPE; + + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); + + return 0; +} + +static int +ixgbe_disable_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t vmolr; + + vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); + + RTE_LOG(INFO, PMD, "VF %u: disabling multicast promiscuous\n", vf); + + vmolr &= ~IXGBE_VMOLR_MPE; + + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); + + return 0; +} + +static int +ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vf_info *vfinfo = + *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses; + int rar_entry = hw->mac.num_rar_entries - (vf + 1); + uint8_t *new_mac = (uint8_t *)(&msgbuf[1]); + + ixgbe_vf_reset_msg(dev, vf); + + hw->mac.ops.set_rar(hw, rar_entry, vf_mac, vf, IXGBE_RAH_AV); + + /* Disable multicast promiscuous at reset */ + ixgbe_disable_vf_mc_promisc(dev, vf); + + /* reply to reset with ack and vf mac address */ + msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK; + rte_memcpy(new_mac, vf_mac, ETHER_ADDR_LEN); + /* + * Piggyback the multicast filter type so VF can compute the + * correct vectors + */ + msgbuf[3] = hw->mac.mc_filter_type; + ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf); + + return 0; +} + +static int 
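/*
 * Editor's sketch of the VF_RESET reply assembled in ixgbe_vf_reset()
 * above, as read from the code:
 *   msgbuf[0]    = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK
 *   msgbuf[1..2] = the VF's permanent MAC address (ETHER_ADDR_LEN bytes)
 *   msgbuf[3]    = hw->mac.mc_filter_type, so the VF can compute the
 *                  same multicast hash vectors as the PF
 * sent with length IXGBE_VF_PERMADDR_MSG_LEN.
 */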
+ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vf_info *vfinfo = + *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + int rar_entry = hw->mac.num_rar_entries - (vf + 1); + uint8_t *new_mac = (uint8_t *)(&msgbuf[1]); + + if (is_valid_assigned_ether_addr((struct ether_addr*)new_mac)) { + rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6); + return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf, IXGBE_RAH_AV); + } + return -1; +} + +static int +ixgbe_vf_set_multicast(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vf_info *vfinfo = + *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + int nb_entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> + IXGBE_VT_MSGINFO_SHIFT; + uint16_t *hash_list = (uint16_t *)&msgbuf[1]; + uint32_t mta_idx; + uint32_t mta_shift; + const uint32_t IXGBE_MTA_INDEX_MASK = 0x7F; + const uint32_t IXGBE_MTA_BIT_SHIFT = 5; + const uint32_t IXGBE_MTA_BIT_MASK = (0x1 << IXGBE_MTA_BIT_SHIFT) - 1; + uint32_t reg_val; + int i; + + /* Disable multicast promiscuous first */ + ixgbe_disable_vf_mc_promisc(dev, vf); + + /* only so many hash values supported */ + nb_entries = RTE_MIN(nb_entries, IXGBE_MAX_VF_MC_ENTRIES); + + /* store the mc entries */ + vfinfo->num_vf_mc_hashes = (uint16_t)nb_entries; + for (i = 0; i < nb_entries; i++) { + vfinfo->vf_mc_hashes[i] = hash_list[i]; + } + + for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { + mta_idx = (vfinfo->vf_mc_hashes[i] >> IXGBE_MTA_BIT_SHIFT) + & IXGBE_MTA_INDEX_MASK; + mta_shift = vfinfo->vf_mc_hashes[i] & IXGBE_MTA_BIT_MASK; + reg_val = IXGBE_READ_REG(hw, IXGBE_MTA(mta_idx)); + reg_val |= (1 << mta_shift); + IXGBE_WRITE_REG(hw, IXGBE_MTA(mta_idx), reg_val); + } + + return 0; +} + +static int +ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) +{ + int add, vid; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vf_info *vfinfo = + *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + + add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) + >> IXGBE_VT_MSGINFO_SHIFT; + vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); + + if (add) + vfinfo[vf].vlan_count++; + else if (vfinfo[vf].vlan_count) + vfinfo[vf].vlan_count--; + return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add); +} + +static int +ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t new_mtu = msgbuf[1]; + uint32_t max_frs; + int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + + /* X540 and X550 support jumbo frames in IOV mode */ + if (hw->mac.type != ixgbe_mac_X540 && + hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) + return -1; + + if ((max_frame < ETHER_MIN_LEN) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN)) + return -1; + + max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) & + IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT; + if (max_frs < new_mtu) { + max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs); + } + + return 0; +} + +static int +ixgbe_negotiate_vf_api(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) +{ + uint32_t api_version = msgbuf[1]; + struct ixgbe_vf_info *vfinfo = + *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); + + switch 
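/*
 * Editor's sketch (hypothetical VF-side view, not code from this patch):
 * the VF is expected to send { IXGBE_VF_API_NEGOTIATE, ixgbe_mbox_api_1x }
 * through its own mailbox ops; the switch statement here simply records
 * any version the PF recognises in vfinfo[vf].api_version and lets the
 * dispatcher NACK everything else.
 */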
(api_version) { + case ixgbe_mbox_api_10: + case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: + vfinfo[vf].api_version = (uint8_t)api_version; + return 0; + default: + break; + } + + RTE_LOG(ERR, PMD, "Negotiate invalid api version %u from VF %d\n", + api_version, vf); + + return -1; +} + +static int +ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) +{ + struct ixgbe_vf_info *vfinfo = + *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); + uint32_t default_q = vf * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; + + /* Verify if the PF supports the mbox APIs version or not */ + switch (vfinfo[vf].api_version) { + case ixgbe_mbox_api_20: + case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: + break; + default: + return -1; + } + + /* Notify VF of Rx and Tx queue number */ + msgbuf[IXGBE_VF_RX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; + msgbuf[IXGBE_VF_TX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; + + /* Notify VF of default queue */ + msgbuf[IXGBE_VF_DEF_QUEUE] = default_q; + + /* + * FIX ME if it needs fill msgbuf[IXGBE_VF_TRANS_VLAN] + * for VLAN strip or VMDQ_DCB or VMDQ_DCB_RSS + */ + + return 0; +} + +static int +ixgbe_set_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) +{ + struct ixgbe_vf_info *vfinfo = + *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + bool enable = !!msgbuf[1]; /* msgbuf contains the flag to enable */ + + switch (vfinfo[vf].api_version) { + case ixgbe_mbox_api_12: + break; + default: + return -1; + } + + if (enable) + return ixgbe_enable_vf_mc_promisc(dev, vf); + else + return ixgbe_disable_vf_mc_promisc(dev, vf); +} + +static int +ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf) +{ + uint16_t mbx_size = IXGBE_VFMAILBOX_SIZE; + uint16_t msg_size = IXGBE_VF_MSG_SIZE_DEFAULT; + uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE]; + int32_t retval; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vf_info *vfinfo = + *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); + + retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); + if (retval) { + PMD_DRV_LOG(ERR, "Error mbx recv msg from VF %d", vf); + return retval; + } + + /* do nothing with the message already been processed */ + if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) + return retval; + + /* flush the ack before we write any messages back */ + IXGBE_WRITE_FLUSH(hw); + + /* perform VF reset */ + if (msgbuf[0] == IXGBE_VF_RESET) { + int ret = ixgbe_vf_reset(dev, vf, msgbuf); + vfinfo[vf].clear_to_send = true; + return ret; + } + + /* check & process VF to PF mailbox message */ + switch ((msgbuf[0] & 0xFFFF)) { + case IXGBE_VF_SET_MAC_ADDR: + retval = ixgbe_vf_set_mac_addr(dev, vf, msgbuf); + break; + case IXGBE_VF_SET_MULTICAST: + retval = ixgbe_vf_set_multicast(dev, vf, msgbuf); + break; + case IXGBE_VF_SET_LPE: + retval = ixgbe_set_vf_lpe(dev, vf, msgbuf); + break; + case IXGBE_VF_SET_VLAN: + retval = ixgbe_vf_set_vlan(dev, vf, msgbuf); + break; + case IXGBE_VF_API_NEGOTIATE: + retval = ixgbe_negotiate_vf_api(dev, vf, msgbuf); + break; + case IXGBE_VF_GET_QUEUES: + retval = ixgbe_get_vf_queues(dev, vf, msgbuf); + msg_size = IXGBE_VF_GET_QUEUE_MSG_SIZE; + break; + case IXGBE_VF_UPDATE_XCAST_MODE: + retval = ixgbe_set_vf_mc_promisc(dev, vf, msgbuf); + break; + default: + PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]); + retval = IXGBE_ERR_MBX; + break; + } + + /* response the VF according to the message process result */ + if (retval) + msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; + else + 
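/*
 * Editor's note: the request word is echoed back with NACK or ACK OR-ed
 * in according to the handler's return value, plus CTS, and the reply
 * length stays at IXGBE_VF_MSG_SIZE_DEFAULT unless a handler (such as
 * IXGBE_VF_GET_QUEUES above) raised msg_size.
 */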
msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; + + msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS; + + ixgbe_write_mbx(hw, msgbuf, msg_size, vf); + + return retval; +} + +static inline void +ixgbe_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf) +{ + uint32_t msg = IXGBE_VT_MSGTYPE_NACK; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vf_info *vfinfo = + *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); + + if (!vfinfo[vf].clear_to_send) + ixgbe_write_mbx(hw, &msg, 1, vf); +} + +void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev) +{ + uint16_t vf; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + + for (vf = 0; vf < dev_num_vf(eth_dev); vf++) { + /* check & process vf function level reset */ + if (!ixgbe_check_for_rst(hw, vf)) + ixgbe_vf_reset_event(eth_dev, vf); + + /* check & process vf mailbox messages */ + if (!ixgbe_check_for_msg(hw, vf)) + ixgbe_rcv_msg_from_vf(eth_dev, vf); + + /* check & process acks from vf */ + if (!ixgbe_check_for_ack(hw, vf)) + ixgbe_rcv_ack_from_vf(eth_dev, vf); + } +} diff --git a/drivers/net/ixgbe/ixgbe_regs.h b/drivers/net/ixgbe/ixgbe_regs.h new file mode 100644 index 00000000..c7457a6f --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_regs.h @@ -0,0 +1,376 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef _IXGBE_REGS_H_ +#define _IXGBE_REGS_H_ + +#include "ixgbe_ethdev.h" + +struct ixgbe_hw; +struct reg_info { + uint32_t base_addr; + uint32_t count; + uint32_t stride; + const char *name; +} reg_info; + +static const struct reg_info ixgbe_regs_general[] = { + {IXGBE_CTRL, 1, 1, "IXGBE_CTRL"}, + {IXGBE_STATUS, 1, 1, "IXGBE_STATUS"}, + {IXGBE_CTRL_EXT, 1, 1, "IXGBE_CTRL_EXT"}, + {IXGBE_ESDP, 1, 1, "IXGBE_ESDP"}, + {IXGBE_EODSDP, 1, 1, "IXGBE_EODSDP"}, + {IXGBE_LEDCTL, 1, 1, "IXGBE_LEDCTL"}, + {IXGBE_FRTIMER, 1, 1, "IXGBE_FRTIMER"}, + {IXGBE_TCPTIMER, 1, 1, "IXGBE_TCPTIMER"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbevf_regs_general[] = { + {IXGBE_CTRL, 1, 1, "IXGBE_CTRL"}, + {IXGBE_STATUS, 1, 1, "IXGBE_STATUS"}, + {IXGBE_VFLINKS, 1, 1, "IXGBE_VFLINKS"}, + {IXGBE_FRTIMER, 1, 1, "IXGBE_FRTIMER"}, + {IXGBE_VFMAILBOX, 1, 1, "IXGBE_VFMAILBOX"}, + {IXGBE_VFMBMEM, 16, 4, "IXGBE_VFMBMEM"}, + {IXGBE_VFRXMEMWRAP, 1, 1, "IXGBE_VFRXMEMWRAP"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbe_regs_nvm[] = { + {IXGBE_EEC, 1, 1, "IXGBE_EEC"}, + {IXGBE_EERD, 1, 1, "IXGBE_EERD"}, + {IXGBE_FLA, 1, 1, "IXGBE_FLA"}, + {IXGBE_EEMNGCTL, 1, 1, "IXGBE_EEMNGCTL"}, + {IXGBE_EEMNGDATA, 1, 1, "IXGBE_EEMNGDATA"}, + {IXGBE_FLMNGCTL, 1, 1, "IXGBE_FLMNGCTL"}, + {IXGBE_FLMNGDATA, 1, 1, "IXGBE_FLMNGDATA"}, + {IXGBE_FLMNGCNT, 1, 1, "IXGBE_FLMNGCNT"}, + {IXGBE_FLOP, 1, 1, "IXGBE_FLOP"}, + {IXGBE_GRC, 1, 1, "IXGBE_GRC"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbe_regs_interrupt[] = { + {IXGBE_EICS, 1, 1, "IXGBE_EICS"}, + {IXGBE_EIMS, 1, 1, "IXGBE_EIMS"}, + {IXGBE_EIMC, 1, 1, "IXGBE_EIMC"}, + {IXGBE_EIAC, 1, 1, "IXGBE_EIAC"}, + {IXGBE_EIAM, 1, 1, "IXGBE_EIAM"}, + {IXGBE_EITR(0), 24, 4, "IXGBE_EITR"}, + {IXGBE_IVAR(0), 24, 4, "IXGBE_IVAR"}, + {IXGBE_MSIXT, 1, 1, "IXGBE_MSIXT"}, + {IXGBE_MSIXPBA, 1, 1, "IXGBE_MSIXPBA"}, + {IXGBE_PBACL(0), 1, 4, "IXGBE_PBACL"}, + {IXGBE_GPIE, 1, 1, ""}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbevf_regs_interrupt[] = { + {IXGBE_VTEICR, 1, 1, "IXGBE_VTEICR"}, + {IXGBE_VTEICS, 1, 1, "IXGBE_VTEICS"}, + {IXGBE_VTEIMS, 1, 1, "IXGBE_VTEIMS"}, + {IXGBE_VTEIMC, 1, 1, "IXGBE_VTEIMC"}, + {IXGBE_VTEIAM, 1, 1, "IXGBE_VTEIAM"}, + {IXGBE_VTEITR(0), 2, 4, "IXGBE_VTEITR"}, + {IXGBE_VTIVAR(0), 4, 4, "IXGBE_VTIVAR"}, + {IXGBE_VTIVAR_MISC, 1, 1, "IXGBE_VTIVAR_MISC"}, + {IXGBE_VTRSCINT(0), 2, 4, "IXGBE_VTRSCINT"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbe_regs_fctl_mac_82598EB[] = { + {IXGBE_PFCTOP, 1, 1, ""}, + {IXGBE_FCTTV(0), 4, 4, ""}, + {IXGBE_FCRTV, 1, 1, ""}, + {IXGBE_TFCS, 1, 1, ""}, + {IXGBE_FCRTL(0), 8, 8, "IXGBE_FCRTL"}, + {IXGBE_FCRTH(0), 8, 8, "IXGBE_FCRTH"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbe_regs_fctl_others[] = { + {IXGBE_PFCTOP, 1, 1, ""}, + {IXGBE_FCTTV(0), 4, 4, ""}, + {IXGBE_FCRTV, 1, 1, ""}, + {IXGBE_TFCS, 1, 1, ""}, + {IXGBE_FCRTL_82599(0), 8, 4, "IXGBE_FCRTL"}, + {IXGBE_FCRTH_82599(0), 8, 4, "IXGBE_FCRTH"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbe_regs_rxdma[] = { + {IXGBE_RDBAL(0), 64, 0x40, "IXGBE_RDBAL"}, + {IXGBE_RDBAH(0), 64, 0x40, "IXGBE_RDBAH"}, + {IXGBE_RDLEN(0), 64, 0x40, "IXGBE_RDLEN"}, + {IXGBE_RDH(0), 64, 0x40, "IXGBE_RDH"}, + {IXGBE_RDT(0), 64, 0x40, "IXGBE_RDT"}, + {IXGBE_RXDCTL(0), 64, 0x40, "IXGBE_RXDCTL"}, + {IXGBE_SRRCTL(0), 16, 0x4, "IXGBE_SRRCTL"}, + {IXGBE_DCA_RXCTRL(0), 16, 4, "IXGBE_DCA_RXCTRL"}, + {IXGBE_RDRXCTL, 1, 1, "IXGBE_RDRXCTL"}, + {IXGBE_RXPBSIZE(0), 8, 4, "IXGBE_RXPBSIZE"}, + {IXGBE_RXCTRL, 1, 1, "IXGBE_RXCTRL"}, + {IXGBE_DROPEN, 1, 1, 
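/*
 * Editor's note (illustration): a reg_info entry such as
 * {IXGBE_RDBAL(0), 64, 0x40, "IXGBE_RDBAL"} describes 64 instances of a
 * register spaced 0x40 bytes apart; the ixgbe_read_regs() helper further
 * below reads base_addr + i * stride for i in [0, count).
 */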
"IXGBE_DROPEN"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbevf_regs_rxdma[] = { + {IXGBE_RDBAL(0), 8, 0x40, "IXGBE_RDBAL"}, + {IXGBE_RDBAH(0), 8, 0x40, "IXGBE_RDBAH"}, + {IXGBE_RDLEN(0), 8, 0x40, "IXGBE_RDLEN"}, + {IXGBE_RDH(0), 8, 0x40, "IXGBE_RDH"}, + {IXGBE_RDT(0), 8, 0x40, "IXGBE_RDT"}, + {IXGBE_RXDCTL(0), 8, 0x40, "IXGBE_RXDCTL"}, + {IXGBE_SRRCTL(0), 8, 0x40, "IXGBE_SRRCTL"}, + {IXGBE_VFPSRTYPE, 1, 1, "IXGBE_VFPSRTYPE"}, + {IXGBE_VFRSCCTL(0), 8, 0x40, "IXGBE_VFRSCCTL"}, + {IXGBE_PVFDCA_RXCTRL(0), 8, 0x40, "IXGBE_PVFDCA_RXCTRL"}, + {IXGBE_PVFDCA_TXCTRL(0), 8, 0x40, "IXGBE_PVFDCA_TXCTRL"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbe_regs_rx[] = { + {IXGBE_RXCSUM, 1, 1, "IXGBE_RXCSUM"}, + {IXGBE_RFCTL, 1, 1, "IXGBE_RFCTL"}, + {IXGBE_RAL(0), 16, 8, "IXGBE_RAL"}, + {IXGBE_RAH(0), 16, 8, "IXGBE_RAH"}, + {IXGBE_PSRTYPE(0), 1, 4, "IXGBE_PSRTYPE"}, + {IXGBE_FCTRL, 1, 1, "IXGBE_FCTRL"}, + {IXGBE_VLNCTRL, 1, 1, "IXGBE_VLNCTRL"}, + {IXGBE_MCSTCTRL, 1, 1, "IXGBE_MCSTCTRL"}, + {IXGBE_MRQC, 1, 1, "IXGBE_MRQC"}, + {IXGBE_VMD_CTL, 1, 1, "IXGBE_VMD_CTL"}, + {IXGBE_IMIR(0), 8, 4, "IXGBE_IMIR"}, + {IXGBE_IMIREXT(0), 8, 4, "IXGBE_IMIREXT"}, + {IXGBE_IMIRVP, 1, 1, "IXGBE_IMIRVP"}, + {0, 0, 0, ""} +}; + +static struct reg_info ixgbe_regs_tx[] = { + {IXGBE_TDBAL(0), 32, 0x40, "IXGBE_TDBAL"}, + {IXGBE_TDBAH(0), 32, 0x40, "IXGBE_TDBAH"}, + {IXGBE_TDLEN(0), 32, 0x40, "IXGBE_TDLEN"}, + {IXGBE_TDH(0), 32, 0x40, "IXGBE_TDH"}, + {IXGBE_TDT(0), 32, 0x40, "IXGBE_TDT"}, + {IXGBE_TXDCTL(0), 32, 0x40, "IXGBE_TXDCTL"}, + {IXGBE_TDWBAL(0), 32, 0x40, "IXGBE_TDWBAL"}, + {IXGBE_TDWBAH(0), 32, 0x40, "IXGBE_TDWBAH"}, + {IXGBE_DTXCTL, 1, 1, "IXGBE_DTXCTL"}, + {IXGBE_DCA_TXCTRL(0), 16, 4, "IXGBE_DCA_TXCTRL"}, + {IXGBE_TXPBSIZE(0), 8, 4, "IXGBE_TXPBSIZE"}, + {IXGBE_MNGTXMAP, 1, 1, "IXGBE_MNGTXMAP"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbevf_regs_tx[] = { + {IXGBE_TDBAL(0), 4, 0x40, "IXGBE_TDBAL"}, + {IXGBE_TDBAH(0), 4, 0x40, "IXGBE_TDBAH"}, + {IXGBE_TDLEN(0), 4, 0x40, "IXGBE_TDLEN"}, + {IXGBE_TDH(0), 4, 0x40, "IXGBE_TDH"}, + {IXGBE_TDT(0), 4, 0x40, "IXGBE_TDT"}, + {IXGBE_TXDCTL(0), 4, 0x40, "IXGBE_TXDCTL"}, + {IXGBE_TDWBAL(0), 4, 0x40, "IXGBE_TDWBAL"}, + {IXGBE_TDWBAH(0), 4, 0x40, "IXGBE_TDWBAH"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbe_regs_wakeup[] = { + {IXGBE_WUC, 1, 1, "IXGBE_WUC"}, + {IXGBE_WUFC, 1, 1, "IXGBE_WUFC"}, + {IXGBE_WUS, 1, 1, "IXGBE_WUS"}, + {IXGBE_IPAV, 1, 1, "IXGBE_IPAV"}, + {IXGBE_IP4AT, 1, 1, "IXGBE_IP4AT"}, + {IXGBE_IP6AT, 1, 1, "IXGBE_IP6AT"}, + {IXGBE_WUPL, 1, 1, "IXGBE_WUPL"}, + {IXGBE_WUPM, 1, 1, "IXGBE_WUPM"}, + {IXGBE_FHFT(0), 1, 1, "IXGBE_FHFT"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbe_regs_dcb[] = { + {IXGBE_RMCS, 1, 1, "IXGBE_RMCS"}, + {IXGBE_DPMCS, 1, 1, "IXGBE_DPMCS"}, + {IXGBE_PDPMCS, 1, 1, "IXGBE_PDPMCS"}, + {IXGBE_RUPPBMR, 1, 1, "IXGBE_RUPPBMR"}, + {IXGBE_RT2CR(0), 8, 4, "IXGBE_RT2CR"}, + {IXGBE_RT2SR(0), 8, 4, "IXGBE_RT2SR"}, + {IXGBE_TDTQ2TCCR(0), 8, 0x40, "IXGBE_TDTQ2TCCR"}, + {IXGBE_TDTQ2TCSR(0), 8, 0x40, "IXGBE_TDTQ2TCSR"}, + {IXGBE_TDPT2TCCR(0), 8, 4, "IXGBE_TDPT2TCCR"}, + {IXGBE_TDPT2TCSR(0), 8, 4, "IXGBE_TDPT2TCSR"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbe_regs_mac[] = { + {IXGBE_PCS1GCFIG, 1, 1, "IXGBE_PCS1GCFIG"}, + {IXGBE_PCS1GLCTL, 1, 1, "IXGBE_PCS1GLCTL"}, + {IXGBE_PCS1GLSTA, 1, 1, "IXGBE_PCS1GLSTA"}, + {IXGBE_PCS1GDBG0, 1, 1, "IXGBE_PCS1GDBG0"}, + {IXGBE_PCS1GDBG1, 1, 1, "IXGBE_PCS1GDBG1"}, + {IXGBE_PCS1GANA, 1, 1, "IXGBE_PCS1GANA"}, + {IXGBE_PCS1GANLP, 1, 1, "IXGBE_PCS1GANLP"}, + 
{IXGBE_PCS1GANNP, 1, 1, "IXGBE_PCS1GANNP"}, + {IXGBE_PCS1GANLPNP, 1, 1, "IXGBE_PCS1GANLPNP"}, + {IXGBE_HLREG0, 1, 1, "IXGBE_HLREG0"}, + {IXGBE_HLREG1, 1, 1, "IXGBE_HLREG1"}, + {IXGBE_PAP, 1, 1, "IXGBE_PAP"}, + {IXGBE_MACA, 1, 1, "IXGBE_MACA"}, + {IXGBE_APAE, 1, 1, "IXGBE_APAE"}, + {IXGBE_ARD, 1, 1, "IXGBE_ARD"}, + {IXGBE_AIS, 1, 1, "IXGBE_AIS"}, + {IXGBE_MSCA, 1, 1, "IXGBE_MSCA"}, + {IXGBE_MSRWD, 1, 1, "IXGBE_MSRWD"}, + {IXGBE_MLADD, 1, 1, "IXGBE_MLADD"}, + {IXGBE_MHADD, 1, 1, "IXGBE_MHADD"}, + {IXGBE_TREG, 1, 1, "IXGBE_TREG"}, + {IXGBE_PCSS1, 1, 1, "IXGBE_PCSS1"}, + {IXGBE_PCSS2, 1, 1, "IXGBE_PCSS2"}, + {IXGBE_XPCSS, 1, 1, "IXGBE_XPCSS"}, + {IXGBE_SERDESC, 1, 1, "IXGBE_SERDESC"}, + {IXGBE_MACS, 1, 1, "IXGBE_MACS"}, + {IXGBE_AUTOC, 1, 1, "IXGBE_AUTOC"}, + {IXGBE_LINKS, 1, 1, "IXGBE_LINKS"}, + {IXGBE_AUTOC2, 1, 1, "IXGBE_AUTOC2"}, + {IXGBE_AUTOC3, 1, 1, "IXGBE_AUTOC3"}, + {IXGBE_ANLP1, 1, 1, "IXGBE_ANLP1"}, + {IXGBE_ANLP2, 1, 1, "IXGBE_ANLP2"}, + {IXGBE_ATLASCTL, 1, 1, "IXGBE_ATLASCTL"}, + {0, 0, 0, ""} +}; + +static const struct reg_info ixgbe_regs_diagnostic[] = { + {IXGBE_RDSTATCTL, 1, 1, "IXGBE_RDSTATCTL"}, + {IXGBE_RDSTAT(0), 8, 4, "IXGBE_RDSTAT"}, + {IXGBE_RDHMPN, 1, 1, "IXGBE_RDHMPN"}, + {IXGBE_RIC_DW(0), 4, 4, "IXGBE_RIC_DW"}, + {IXGBE_RDPROBE, 1, 1, "IXGBE_RDPROBE"}, + {IXGBE_TDHMPN, 1, 1, "IXGBE_TDHMPN"}, + {IXGBE_TIC_DW(0), 4, 4, "IXGBE_TIC_DW"}, + {IXGBE_TDPROBE, 1, 1, "IXGBE_TDPROBE"}, + {IXGBE_TXBUFCTRL, 1, 1, "IXGBE_TXBUFCTRL"}, + {IXGBE_TXBUFDATA0, 1, 1, "IXGBE_TXBUFDATA0"}, + {IXGBE_TXBUFDATA1, 1, 1, "IXGBE_TXBUFDATA1"}, + {IXGBE_TXBUFDATA2, 1, 1, "IXGBE_TXBUFDATA2"}, + {IXGBE_TXBUFDATA3, 1, 1, "IXGBE_TXBUFDATA3"}, + {IXGBE_RXBUFCTRL, 1, 1, "IXGBE_RXBUFCTRL"}, + {IXGBE_RXBUFDATA0, 1, 1, "IXGBE_RXBUFDATA0"}, + {IXGBE_RXBUFDATA1, 1, 1, "IXGBE_RXBUFDATA1"}, + {IXGBE_RXBUFDATA2, 1, 1, "IXGBE_RXBUFDATA2"}, + {IXGBE_RXBUFDATA3, 1, 1, "IXGBE_RXBUFDATA3"}, + {IXGBE_PCIE_DIAG(0), 8, 4, ""}, + {IXGBE_RFVAL, 1, 1, "IXGBE_RFVAL"}, + {IXGBE_MDFTC1, 1, 1, "IXGBE_MDFTC1"}, + {IXGBE_MDFTC2, 1, 1, "IXGBE_MDFTC2"}, + {IXGBE_MDFTFIFO1, 1, 1, "IXGBE_MDFTFIFO1"}, + {IXGBE_MDFTFIFO2, 1, 1, "IXGBE_MDFTFIFO2"}, + {IXGBE_MDFTS, 1, 1, "IXGBE_MDFTS"}, + {IXGBE_PCIEECCCTL, 1, 1, "IXGBE_PCIEECCCTL"}, + {IXGBE_PBTXECC, 1, 1, "IXGBE_PBTXECC"}, + {IXGBE_PBRXECC, 1, 1, "IXGBE_PBRXECC"}, + {IXGBE_MFLCN, 1, 1, "IXGBE_MFLCN"}, + {0, 0, 0, ""}, +}; + +/* PF registers */ +static const struct reg_info *ixgbe_regs_others[] = { + ixgbe_regs_general, + ixgbe_regs_nvm, ixgbe_regs_interrupt, + ixgbe_regs_fctl_others, + ixgbe_regs_rxdma, + ixgbe_regs_rx, + ixgbe_regs_tx, + ixgbe_regs_wakeup, + ixgbe_regs_dcb, + ixgbe_regs_mac, + ixgbe_regs_diagnostic, + NULL}; + +static const struct reg_info *ixgbe_regs_mac_82598EB[] = { + ixgbe_regs_general, + ixgbe_regs_nvm, + ixgbe_regs_interrupt, + ixgbe_regs_fctl_mac_82598EB, + ixgbe_regs_rxdma, + ixgbe_regs_rx, + ixgbe_regs_tx, + ixgbe_regs_wakeup, + ixgbe_regs_dcb, + ixgbe_regs_mac, + ixgbe_regs_diagnostic, + NULL}; + +/* VF registers */ +static const struct reg_info *ixgbevf_regs[] = { + ixgbevf_regs_general, + ixgbevf_regs_interrupt, + ixgbevf_regs_rxdma, + ixgbevf_regs_tx, + NULL}; + +static inline int +ixgbe_read_regs(struct ixgbe_hw *hw, const struct reg_info *reg, + uint32_t *reg_buf) +{ + unsigned int i; + + for (i = 0; i < reg->count; i++) + reg_buf[i] = IXGBE_READ_REG(hw, + reg->base_addr + i * reg->stride); + return reg->count; +}; + +static inline int +ixgbe_regs_group_count(const struct reg_info *regs) +{ + int count = 0; + int i = 0; + + while (regs[i].count) + 
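/*
 * Editor's usage sketch (assumed caller, e.g. the ethdev register-dump
 * path): the total dump size is the sum of ixgbe_regs_group_count() over
 * one of the NULL-terminated group lists above (ixgbe_regs_others,
 * ixgbe_regs_mac_82598EB or ixgbevf_regs), after which the buffer is
 * filled group by group with ixgbe_read_regs_group().
 */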
count += regs[i++].count; + return count; +}; + +static inline int +ixgbe_read_regs_group(struct rte_eth_dev *dev, uint32_t *reg_buf, + const struct reg_info *regs) +{ + int count = 0; + int i = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + while (regs[i].count) + count += ixgbe_read_regs(hw, ®s[i++], ®_buf[count]); + return count; +}; + +#endif /* _IXGBE_REGS_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c new file mode 100644 index 00000000..9fb38a6c --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_rxtx.c @@ -0,0 +1,5247 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * Copyright 2014 6WIND S.A. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <sys/queue.h> + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <errno.h> +#include <stdint.h> +#include <stdarg.h> +#include <unistd.h> +#include <inttypes.h> + +#include <rte_byteorder.h> +#include <rte_common.h> +#include <rte_cycles.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_interrupts.h> +#include <rte_pci.h> +#include <rte_memory.h> +#include <rte_memzone.h> +#include <rte_launch.h> +#include <rte_eal.h> +#include <rte_per_lcore.h> +#include <rte_lcore.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_ring.h> +#include <rte_mempool.h> +#include <rte_malloc.h> +#include <rte_mbuf.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_prefetch.h> +#include <rte_udp.h> +#include <rte_tcp.h> +#include <rte_sctp.h> +#include <rte_string_fns.h> +#include <rte_errno.h> +#include <rte_ip.h> + +#include "ixgbe_logs.h" +#include "base/ixgbe_api.h" +#include "base/ixgbe_vf.h" +#include "ixgbe_ethdev.h" +#include "base/ixgbe_dcb.h" +#include "base/ixgbe_common.h" +#include "ixgbe_rxtx.h" + +/* Bit Mask to indicate what bits required for building TX context */ +#define IXGBE_TX_OFFLOAD_MASK ( \ + PKT_TX_VLAN_PKT | \ + PKT_TX_IP_CKSUM | \ + PKT_TX_L4_MASK | \ + PKT_TX_TCP_SEG | \ + PKT_TX_OUTER_IP_CKSUM) + +static inline struct rte_mbuf * +rte_rxmbuf_alloc(struct rte_mempool *mp) +{ + struct rte_mbuf *m; + + m = __rte_mbuf_raw_alloc(mp); + __rte_mbuf_sanity_check_raw(m, 0); + return m; +} + + +#if 1 +#define RTE_PMD_USE_PREFETCH +#endif + +#ifdef RTE_PMD_USE_PREFETCH +/* + * Prefetch a cache line into all cache levels. + */ +#define rte_ixgbe_prefetch(p) rte_prefetch0(p) +#else +#define rte_ixgbe_prefetch(p) do {} while (0) +#endif + +/********************************************************************* + * + * TX functions + * + **********************************************************************/ + +/* + * Check for descriptors with their DD bit set and free mbufs. + * Return the total number of buffers freed. 
+ */ +static inline int __attribute__((always_inline)) +ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq) +{ + struct ixgbe_tx_entry *txep; + uint32_t status; + int i, nb_free = 0; + struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ]; + + /* check DD bit on threshold descriptor */ + status = txq->tx_ring[txq->tx_next_dd].wb.status; + if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))) + return 0; + + /* + * first buffer to free from S/W ring is at index + * tx_next_dd - (tx_rs_thresh-1) + */ + txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]); + + for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) { + /* free buffers one at a time */ + m = __rte_pktmbuf_prefree_seg(txep->mbuf); + txep->mbuf = NULL; + + if (unlikely(m == NULL)) + continue; + + if (nb_free >= RTE_IXGBE_TX_MAX_FREE_BUF_SZ || + (nb_free > 0 && m->pool != free[0]->pool)) { + rte_mempool_put_bulk(free[0]->pool, + (void **)free, nb_free); + nb_free = 0; + } + + free[nb_free++] = m; + } + + if (nb_free > 0) + rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free); + + /* buffers were freed, update counters */ + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh); + txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh); + if (txq->tx_next_dd >= txq->nb_tx_desc) + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + + return txq->tx_rs_thresh; +} + +/* Populate 4 descriptors with data from 4 mbufs */ +static inline void +tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts) +{ + uint64_t buf_dma_addr; + uint32_t pkt_len; + int i; + + for (i = 0; i < 4; ++i, ++txdp, ++pkts) { + buf_dma_addr = rte_mbuf_data_dma_addr(*pkts); + pkt_len = (*pkts)->data_len; + + /* write data to descriptor */ + txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr); + + txdp->read.cmd_type_len = + rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len); + + txdp->read.olinfo_status = + rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT); + + rte_prefetch0(&(*pkts)->pool); + } +} + +/* Populate 1 descriptor with data from 1 mbuf */ +static inline void +tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts) +{ + uint64_t buf_dma_addr; + uint32_t pkt_len; + + buf_dma_addr = rte_mbuf_data_dma_addr(*pkts); + pkt_len = (*pkts)->data_len; + + /* write data to descriptor */ + txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr); + txdp->read.cmd_type_len = + rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len); + txdp->read.olinfo_status = + rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT); + rte_prefetch0(&(*pkts)->pool); +} + +/* + * Fill H/W descriptor ring with mbuf data. + * Copy mbuf pointers to the S/W ring. + */ +static inline void +ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts, + uint16_t nb_pkts) +{ + volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]); + struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]); + const int N_PER_LOOP = 4; + const int N_PER_LOOP_MASK = N_PER_LOOP-1; + int mainpart, leftover; + int i, j; + + /* + * Process most of the packets in chunks of N pkts. Any + * leftover packets will get processed one at a time. 
+ */ + mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK)); + leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK)); + for (i = 0; i < mainpart; i += N_PER_LOOP) { + /* Copy N mbuf pointers to the S/W ring */ + for (j = 0; j < N_PER_LOOP; ++j) { + (txep + i + j)->mbuf = *(pkts + i + j); + } + tx4(txdp + i, pkts + i); + } + + if (unlikely(leftover > 0)) { + for (i = 0; i < leftover; ++i) { + (txep + mainpart + i)->mbuf = *(pkts + mainpart + i); + tx1(txdp + mainpart + i, pkts + mainpart + i); + } + } +} + +static inline uint16_t +tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue; + volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring; + uint16_t n = 0; + + /* + * Begin scanning the H/W ring for done descriptors when the + * number of available descriptors drops below tx_free_thresh. For + * each done descriptor, free the associated buffer. + */ + if (txq->nb_tx_free < txq->tx_free_thresh) + ixgbe_tx_free_bufs(txq); + + /* Only use descriptors that are available */ + nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); + if (unlikely(nb_pkts == 0)) + return 0; + + /* Use exactly nb_pkts descriptors */ + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); + + /* + * At this point, we know there are enough descriptors in the + * ring to transmit all the packets. This assumes that each + * mbuf contains a single segment, and that no new offloads + * are expected, which would require a new context descriptor. + */ + + /* + * See if we're going to wrap-around. If so, handle the top + * of the descriptor ring first, then do the bottom. If not, + * the processing looks just like the "bottom" part anyway... + */ + if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) { + n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail); + ixgbe_tx_fill_hw_ring(txq, tx_pkts, n); + + /* + * We know that the last descriptor in the ring will need to + * have its RS bit set because tx_rs_thresh has to be + * a divisor of the ring size + */ + tx_r[txq->tx_next_rs].read.cmd_type_len |= + rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS); + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + + txq->tx_tail = 0; + } + + /* Fill H/W descriptor ring with mbuf data */ + ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n)); + txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n)); + + /* + * Determine if RS bit should be set + * This is what we actually want: + * if ((txq->tx_tail - 1) >= txq->tx_next_rs) + * but instead of subtracting 1 and doing >=, we can just do + * greater than without subtracting. + */ + if (txq->tx_tail > txq->tx_next_rs) { + tx_r[txq->tx_next_rs].read.cmd_type_len |= + rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS); + txq->tx_next_rs = (uint16_t)(txq->tx_next_rs + + txq->tx_rs_thresh); + if (txq->tx_next_rs >= txq->nb_tx_desc) + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + } + + /* + * Check for wrap-around. This would only happen if we used + * up to the last descriptor in the ring, no more, no less. 
+ */ + if (txq->tx_tail >= txq->nb_tx_desc) + txq->tx_tail = 0; + + /* update tail pointer */ + rte_wmb(); + IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail); + + return nb_pkts; +} + +uint16_t +ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_tx; + + /* Try to transmit at least chunks of TX_MAX_BURST pkts */ + if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST)) + return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts); + + /* transmit more than the max burst, in chunks of TX_MAX_BURST */ + nb_tx = 0; + while (nb_pkts) { + uint16_t ret, n; + n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST); + ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n); + nb_tx = (uint16_t)(nb_tx + ret); + nb_pkts = (uint16_t)(nb_pkts - ret); + if (ret < n) + break; + } + + return nb_tx; +} + +static inline void +ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq, + volatile struct ixgbe_adv_tx_context_desc *ctx_txd, + uint64_t ol_flags, union ixgbe_tx_offload tx_offload) +{ + uint32_t type_tucmd_mlhl; + uint32_t mss_l4len_idx = 0; + uint32_t ctx_idx; + uint32_t vlan_macip_lens; + union ixgbe_tx_offload tx_offload_mask; + uint32_t seqnum_seed = 0; + + ctx_idx = txq->ctx_curr; + tx_offload_mask.data[0] = 0; + tx_offload_mask.data[1] = 0; + type_tucmd_mlhl = 0; + + /* Specify which HW CTX to upload. */ + mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT); + + if (ol_flags & PKT_TX_VLAN_PKT) { + tx_offload_mask.vlan_tci |= ~0; + } + + /* check if TCP segmentation required for this packet */ + if (ol_flags & PKT_TX_TCP_SEG) { + /* implies IP cksum in IPv4 */ + if (ol_flags & PKT_TX_IP_CKSUM) + type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 | + IXGBE_ADVTXD_TUCMD_L4T_TCP | + IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; + else + type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 | + IXGBE_ADVTXD_TUCMD_L4T_TCP | + IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; + + tx_offload_mask.l2_len |= ~0; + tx_offload_mask.l3_len |= ~0; + tx_offload_mask.l4_len |= ~0; + tx_offload_mask.tso_segsz |= ~0; + mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT; + mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT; + } else { /* no TSO, check if hardware checksum is needed */ + if (ol_flags & PKT_TX_IP_CKSUM) { + type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4; + tx_offload_mask.l2_len |= ~0; + tx_offload_mask.l3_len |= ~0; + } + + switch (ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_UDP_CKSUM: + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP | + IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; + mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT; + tx_offload_mask.l2_len |= ~0; + tx_offload_mask.l3_len |= ~0; + break; + case PKT_TX_TCP_CKSUM: + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP | + IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; + mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT; + tx_offload_mask.l2_len |= ~0; + tx_offload_mask.l3_len |= ~0; + break; + case PKT_TX_SCTP_CKSUM: + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP | + IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; + mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT; + tx_offload_mask.l2_len |= ~0; + tx_offload_mask.l3_len |= ~0; + break; + default: + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV | + IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; + break; + } + } + + if (ol_flags & PKT_TX_OUTER_IP_CKSUM) { + tx_offload_mask.outer_l2_len |= ~0; + tx_offload_mask.outer_l3_len |= ~0; + tx_offload_mask.l2_len |= ~0; + seqnum_seed |= 
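/*
 * Editor's note: when PKT_TX_OUTER_IP_CKSUM is requested, the outer L3
 * length and l2_len (tunnel plus inner L2 headers under the usual mbuf
 * convention) are folded into seqnum_seed via the IXGBE_ADVTXD_OUTER_IPLEN
 * and IXGBE_ADVTXD_TUNNEL_LEN shifts, while vlan_macip_lens further below
 * then carries outer_l2_len in the MACLEN field instead of l2_len.
 */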
tx_offload.outer_l3_len + << IXGBE_ADVTXD_OUTER_IPLEN; + seqnum_seed |= tx_offload.l2_len + << IXGBE_ADVTXD_TUNNEL_LEN; + } + + txq->ctx_cache[ctx_idx].flags = ol_flags; + txq->ctx_cache[ctx_idx].tx_offload.data[0] = + tx_offload_mask.data[0] & tx_offload.data[0]; + txq->ctx_cache[ctx_idx].tx_offload.data[1] = + tx_offload_mask.data[1] & tx_offload.data[1]; + txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask; + + ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl); + vlan_macip_lens = tx_offload.l3_len; + if (ol_flags & PKT_TX_OUTER_IP_CKSUM) + vlan_macip_lens |= (tx_offload.outer_l2_len << + IXGBE_ADVTXD_MACLEN_SHIFT); + else + vlan_macip_lens |= (tx_offload.l2_len << + IXGBE_ADVTXD_MACLEN_SHIFT); + vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT); + ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens); + ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx); + ctx_txd->seqnum_seed = seqnum_seed; +} + +/* + * Check which hardware context can be used. Use the existing match + * or create a new context descriptor. + */ +static inline uint32_t +what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags, + union ixgbe_tx_offload tx_offload) +{ + /* If match with the current used context */ + if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) && + (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0] + & tx_offload.data[0])) && + (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1] + & tx_offload.data[1])))) { + return txq->ctx_curr; + } + + /* What if match with the next context */ + txq->ctx_curr ^= 1; + if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) && + (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0] + & tx_offload.data[0])) && + (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1] + & tx_offload.data[1])))) { + return txq->ctx_curr; + } + + /* Mismatch, use the previous context */ + return IXGBE_CTX_NUM; +} + +static inline uint32_t +tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags) +{ + uint32_t tmp = 0; + if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM) + tmp |= IXGBE_ADVTXD_POPTS_TXSM; + if (ol_flags & PKT_TX_IP_CKSUM) + tmp |= IXGBE_ADVTXD_POPTS_IXSM; + if (ol_flags & PKT_TX_TCP_SEG) + tmp |= IXGBE_ADVTXD_POPTS_TXSM; + return tmp; +} + +static inline uint32_t +tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags) +{ + uint32_t cmdtype = 0; + if (ol_flags & PKT_TX_VLAN_PKT) + cmdtype |= IXGBE_ADVTXD_DCMD_VLE; + if (ol_flags & PKT_TX_TCP_SEG) + cmdtype |= IXGBE_ADVTXD_DCMD_TSE; + if (ol_flags & PKT_TX_OUTER_IP_CKSUM) + cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT); + return cmdtype; +} + +/* Default RS bit threshold values */ +#ifndef DEFAULT_TX_RS_THRESH +#define DEFAULT_TX_RS_THRESH 32 +#endif +#ifndef DEFAULT_TX_FREE_THRESH +#define DEFAULT_TX_FREE_THRESH 32 +#endif + +/* Reset transmit descriptors after they have been used */ +static inline int +ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq) +{ + struct ixgbe_tx_entry *sw_ring = txq->sw_ring; + volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring; + uint16_t last_desc_cleaned = txq->last_desc_cleaned; + uint16_t nb_tx_desc = txq->nb_tx_desc; + uint16_t desc_to_clean_to; + uint16_t nb_tx_to_clean; + uint32_t status; + + /* Determine the last descriptor needing to be cleaned */ + desc_to_clean_to = (uint16_t)(last_desc_cleaned + 
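/*
 * Editor's example (illustration only): with nb_tx_desc = 512,
 * tx_rs_thresh = 32 and last_desc_cleaned = 500, the target computed here
 * is 532, which the range check just below wraps to 532 - 512 = 20 before
 * it is remapped through sw_ring[].last_id and its DD bit is tested.
 */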
txq->tx_rs_thresh); + if (desc_to_clean_to >= nb_tx_desc) + desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc); + + /* Check to make sure the last descriptor to clean is done */ + desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; + status = txr[desc_to_clean_to].wb.status; + if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) + { + PMD_TX_FREE_LOG(DEBUG, + "TX descriptor %4u is not done" + "(port=%d queue=%d)", + desc_to_clean_to, + txq->port_id, txq->queue_id); + /* Failed to clean any descriptors, better luck next time */ + return -(1); + } + + /* Figure out how many descriptors will be cleaned */ + if (last_desc_cleaned > desc_to_clean_to) + nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) + + desc_to_clean_to); + else + nb_tx_to_clean = (uint16_t)(desc_to_clean_to - + last_desc_cleaned); + + PMD_TX_FREE_LOG(DEBUG, + "Cleaning %4u TX descriptors: %4u to %4u " + "(port=%d queue=%d)", + nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to, + txq->port_id, txq->queue_id); + + /* + * The last descriptor to clean is done, so that means all the + * descriptors from the last descriptor that was cleaned + * up to the last descriptor with the RS bit set + * are done. Only reset the threshold descriptor. + */ + txr[desc_to_clean_to].wb.status = 0; + + /* Update the txq to reflect the last descriptor that was cleaned */ + txq->last_desc_cleaned = desc_to_clean_to; + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean); + + /* No Error */ + return 0; +} + +uint16_t +ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct ixgbe_tx_queue *txq; + struct ixgbe_tx_entry *sw_ring; + struct ixgbe_tx_entry *txe, *txn; + volatile union ixgbe_adv_tx_desc *txr; + volatile union ixgbe_adv_tx_desc *txd, *txp; + struct rte_mbuf *tx_pkt; + struct rte_mbuf *m_seg; + uint64_t buf_dma_addr; + uint32_t olinfo_status; + uint32_t cmd_type_len; + uint32_t pkt_len; + uint16_t slen; + uint64_t ol_flags; + uint16_t tx_id; + uint16_t tx_last; + uint16_t nb_tx; + uint16_t nb_used; + uint64_t tx_ol_req; + uint32_t ctx = 0; + uint32_t new_ctx; + union ixgbe_tx_offload tx_offload; + + tx_offload.data[0] = 0; + tx_offload.data[1] = 0; + txq = tx_queue; + sw_ring = txq->sw_ring; + txr = txq->tx_ring; + tx_id = txq->tx_tail; + txe = &sw_ring[tx_id]; + txp = NULL; + + /* Determine if the descriptor ring needs to be cleaned. */ + if (txq->nb_tx_free < txq->tx_free_thresh) + ixgbe_xmit_cleanup(txq); + + rte_prefetch0(&txe->mbuf->pool); + + /* TX loop */ + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + new_ctx = 0; + tx_pkt = *tx_pkts++; + pkt_len = tx_pkt->pkt_len; + + /* + * Determine how many (if any) context descriptors + * are needed for offload functionality. + */ + ol_flags = tx_pkt->ol_flags; + + /* If hardware offload required */ + tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK; + if (tx_ol_req) { + tx_offload.l2_len = tx_pkt->l2_len; + tx_offload.l3_len = tx_pkt->l3_len; + tx_offload.l4_len = tx_pkt->l4_len; + tx_offload.vlan_tci = tx_pkt->vlan_tci; + tx_offload.tso_segsz = tx_pkt->tso_segsz; + tx_offload.outer_l2_len = tx_pkt->outer_l2_len; + tx_offload.outer_l3_len = tx_pkt->outer_l3_len; + + /* If new context need be built or reuse the exist ctx. 
*/ + ctx = what_advctx_update(txq, tx_ol_req, + tx_offload); + /* Only allocate context descriptor if required*/ + new_ctx = (ctx == IXGBE_CTX_NUM); + ctx = txq->ctx_curr; + } + + /* + * Keep track of how many descriptors are used this loop + * This will always be the number of segments + the number of + * Context descriptors required to transmit the packet + */ + nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx); + + if (txp != NULL && + nb_used + txq->nb_tx_used >= txq->tx_rs_thresh) + /* set RS on the previous packet in the burst */ + txp->read.cmd_type_len |= + rte_cpu_to_le_32(IXGBE_TXD_CMD_RS); + + /* + * The number of descriptors that must be allocated for a + * packet is the number of segments of that packet, plus 1 + * Context Descriptor for the hardware offload, if any. + * Determine the last TX descriptor to allocate in the TX ring + * for the packet, starting from the current position (tx_id) + * in the ring. + */ + tx_last = (uint16_t) (tx_id + nb_used - 1); + + /* Circular ring */ + if (tx_last >= txq->nb_tx_desc) + tx_last = (uint16_t) (tx_last - txq->nb_tx_desc); + + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u" + " tx_first=%u tx_last=%u", + (unsigned) txq->port_id, + (unsigned) txq->queue_id, + (unsigned) pkt_len, + (unsigned) tx_id, + (unsigned) tx_last); + + /* + * Make sure there are enough TX descriptors available to + * transmit the entire packet. + * nb_used better be less than or equal to txq->tx_rs_thresh + */ + if (nb_used > txq->nb_tx_free) { + PMD_TX_FREE_LOG(DEBUG, + "Not enough free TX descriptors " + "nb_used=%4u nb_free=%4u " + "(port=%d queue=%d)", + nb_used, txq->nb_tx_free, + txq->port_id, txq->queue_id); + + if (ixgbe_xmit_cleanup(txq) != 0) { + /* Could not clean any descriptors */ + if (nb_tx == 0) + return 0; + goto end_of_tx; + } + + /* nb_used better be <= txq->tx_rs_thresh */ + if (unlikely(nb_used > txq->tx_rs_thresh)) { + PMD_TX_FREE_LOG(DEBUG, + "The number of descriptors needed to " + "transmit the packet exceeds the " + "RS bit threshold. This will impact " + "performance." + "nb_used=%4u nb_free=%4u " + "tx_rs_thresh=%4u. " + "(port=%d queue=%d)", + nb_used, txq->nb_tx_free, + txq->tx_rs_thresh, + txq->port_id, txq->queue_id); + /* + * Loop here until there are enough TX + * descriptors or until the ring cannot be + * cleaned. + */ + while (nb_used > txq->nb_tx_free) { + if (ixgbe_xmit_cleanup(txq) != 0) { + /* + * Could not clean any + * descriptors + */ + if (nb_tx == 0) + return 0; + goto end_of_tx; + } + } + } + } + + /* + * By now there are enough free TX descriptors to transmit + * the packet. + */ + + /* + * Set common flags of all TX Data Descriptors. 
+ * + * The following bits must be set in all Data Descriptors: + * - IXGBE_ADVTXD_DTYP_DATA + * - IXGBE_ADVTXD_DCMD_DEXT + * + * The following bits must be set in the first Data Descriptor + * and are ignored in the other ones: + * - IXGBE_ADVTXD_DCMD_IFCS + * - IXGBE_ADVTXD_MAC_1588 + * - IXGBE_ADVTXD_DCMD_VLE + * + * The following bits must only be set in the last Data + * Descriptor: + * - IXGBE_TXD_CMD_EOP + * + * The following bits can be set in any Data Descriptor, but + * are only set in the last Data Descriptor: + * - IXGBE_TXD_CMD_RS + */ + cmd_type_len = IXGBE_ADVTXD_DTYP_DATA | + IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; + +#ifdef RTE_LIBRTE_IEEE1588 + if (ol_flags & PKT_TX_IEEE1588_TMST) + cmd_type_len |= IXGBE_ADVTXD_MAC_1588; +#endif + + olinfo_status = 0; + if (tx_ol_req) { + + if (ol_flags & PKT_TX_TCP_SEG) { + /* when TSO is on, paylen in descriptor is the + * not the packet len but the tcp payload len */ + pkt_len -= (tx_offload.l2_len + + tx_offload.l3_len + tx_offload.l4_len); + } + + /* + * Setup the TX Advanced Context Descriptor if required + */ + if (new_ctx) { + volatile struct ixgbe_adv_tx_context_desc * + ctx_txd; + + ctx_txd = (volatile struct + ixgbe_adv_tx_context_desc *) + &txr[tx_id]; + + txn = &sw_ring[txe->next_id]; + rte_prefetch0(&txn->mbuf->pool); + + if (txe->mbuf != NULL) { + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = NULL; + } + + ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, + tx_offload); + + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + } + + /* + * Setup the TX Advanced Data Descriptor, + * This path will go through + * whatever new/reuse the context descriptor + */ + cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags); + olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags); + olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT; + } + + olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT); + + m_seg = tx_pkt; + do { + txd = &txr[tx_id]; + txn = &sw_ring[txe->next_id]; + rte_prefetch0(&txn->mbuf->pool); + + if (txe->mbuf != NULL) + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = m_seg; + + /* + * Set up Transmit Data Descriptor. 
+ */ + slen = m_seg->data_len; + buf_dma_addr = rte_mbuf_data_dma_addr(m_seg); + txd->read.buffer_addr = + rte_cpu_to_le_64(buf_dma_addr); + txd->read.cmd_type_len = + rte_cpu_to_le_32(cmd_type_len | slen); + txd->read.olinfo_status = + rte_cpu_to_le_32(olinfo_status); + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + m_seg = m_seg->next; + } while (m_seg != NULL); + + /* + * The last packet data descriptor needs End Of Packet (EOP) + */ + cmd_type_len |= IXGBE_TXD_CMD_EOP; + txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used); + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used); + + /* Set RS bit only on threshold packets' last descriptor */ + if (txq->nb_tx_used >= txq->tx_rs_thresh) { + PMD_TX_FREE_LOG(DEBUG, + "Setting RS bit on TXD id=" + "%4u (port=%d queue=%d)", + tx_last, txq->port_id, txq->queue_id); + + cmd_type_len |= IXGBE_TXD_CMD_RS; + + /* Update txq RS bit counters */ + txq->nb_tx_used = 0; + txp = NULL; + } else + txp = txd; + + txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len); + } + +end_of_tx: + /* set RS on last packet in the burst */ + if (txp != NULL) + txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS); + + rte_wmb(); + + /* + * Set the Transmit Descriptor Tail (TDT) + */ + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u", + (unsigned) txq->port_id, (unsigned) txq->queue_id, + (unsigned) tx_id, (unsigned) nb_tx); + IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id); + txq->tx_tail = tx_id; + + return nb_tx; +} + +/********************************************************************* + * + * RX functions + * + **********************************************************************/ +#define IXGBE_PACKET_TYPE_IPV4 0X01 +#define IXGBE_PACKET_TYPE_IPV4_TCP 0X11 +#define IXGBE_PACKET_TYPE_IPV4_UDP 0X21 +#define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41 +#define IXGBE_PACKET_TYPE_IPV4_EXT 0X03 +#define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43 +#define IXGBE_PACKET_TYPE_IPV6 0X04 +#define IXGBE_PACKET_TYPE_IPV6_TCP 0X14 +#define IXGBE_PACKET_TYPE_IPV6_UDP 0X24 +#define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C +#define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C +#define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C +#define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05 +#define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15 +#define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25 +#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D +#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D +#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D + +#define IXGBE_PACKET_TYPE_NVGRE 0X00 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4 0X01 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP 0X11 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP 0X21 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP 0X41 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT 0X03 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP 0X43 +#define IXGBE_PACKET_TYPE_NVGRE_IPV6 0X04 +#define IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP 0X14 +#define IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP 0X24 +#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT 0X0C +#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP 0X1C +#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP 0X2C +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6 0X05 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP 0X15 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP 0X25 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT 0X0D +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP 0X1D +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP 0X2D + +#define IXGBE_PACKET_TYPE_VXLAN 0X80 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4 0X81 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP 0x91 +#define 
IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP 0xA1 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP 0xC1 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT 0x83 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP 0XC3 +#define IXGBE_PACKET_TYPE_VXLAN_IPV6 0X84 +#define IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP 0X94 +#define IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP 0XA4 +#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT 0X8C +#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP 0X9C +#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP 0XAC +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6 0X85 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP 0X95 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP 0XA5 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT 0X8D +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP 0X9D +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP 0XAD + +#define IXGBE_PACKET_TYPE_MAX 0X80 +#define IXGBE_PACKET_TYPE_TN_MAX 0X100 +#define IXGBE_PACKET_TYPE_SHIFT 0X04 + +/* @note: fix ixgbe_dev_supported_ptypes_get() if any change here. */ +static inline uint32_t +ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask) +{ + /** + * Use 2 different table for normal packet and tunnel packet + * to save the space. + */ + static const uint32_t + ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = { + [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4, + [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT, + [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6, + [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6, + [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP, + }; + + static const uint32_t + ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = { + [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER, + [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN 
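/*
 * Editor's note: the hex indexes are the packet-type bits taken from the
 * RX descriptor's pkt_info field (masked with ptype_mask), e.g. 0x11
 * selects the plain IPv4+TCP entry in the first table, while this second
 * table is consulted for tunnelled (NVGRE/VXLAN) packets.
 */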
| RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT, + [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 | + RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 | + RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT | + RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] = + RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 | + RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 | + RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT | + RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] = + RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 | + RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_SCTP, + + [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER, + [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + 
RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT, + [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] = + RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] = + RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + 
RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP, + }; + + if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF)) + return RTE_PTYPE_UNKNOWN; + + pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) & ptype_mask; + + /* For tunnel packet */ + if (pkt_info & IXGBE_PACKET_TYPE_TUNNEL_BIT) { + /* Remove the tunnel bit to save the space. */ + pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL; + return ptype_table_tn[pkt_info]; + } + + /** + * For x550, if it's not tunnel, + * tunnel type bit should be set to 0. + * Reuse 82599's mask. + */ + pkt_info &= IXGBE_PACKET_TYPE_MASK_82599; + + return ptype_table[pkt_info]; +} + +static inline uint64_t +ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info) +{ + static uint64_t ip_rss_types_map[16] __rte_cache_aligned = { + 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, + 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, + PKT_RX_RSS_HASH, 0, 0, 0, + 0, 0, 0, PKT_RX_FDIR, + }; +#ifdef RTE_LIBRTE_IEEE1588 + static uint64_t ip_pkt_etqf_map[8] = { + 0, 0, 0, PKT_RX_IEEE1588_PTP, + 0, 0, 0, 0, + }; + + if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF)) + return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] | + ip_rss_types_map[pkt_info & 0XF]; + else + return ip_rss_types_map[pkt_info & 0XF]; +#else + return ip_rss_types_map[pkt_info & 0XF]; +#endif +} + +static inline uint64_t +rx_desc_status_to_pkt_flags(uint32_t rx_status) +{ + uint64_t pkt_flags; + + /* + * Check if VLAN present only. + * Do not check whether L3/L4 rx checksum done by NIC or not, + * That can be found from rte_eth_rxmode.hw_ip_checksum flag + */ + pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0; + +#ifdef RTE_LIBRTE_IEEE1588 + if (rx_status & IXGBE_RXD_STAT_TMST) + pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST; +#endif + return pkt_flags; +} + +static inline uint64_t +rx_desc_error_to_pkt_flags(uint32_t rx_status) +{ + uint64_t pkt_flags; + + /* + * Bit 31: IPE, IPv4 checksum error + * Bit 30: L4I, L4I integrity error + */ + static uint64_t error_to_pkt_flags_map[4] = { + 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD, + PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD + }; + pkt_flags = error_to_pkt_flags_map[(rx_status >> + IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK]; + + if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) && + (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) { + pkt_flags |= PKT_RX_EIP_CKSUM_BAD; + } + + return pkt_flags; +} + +/* + * LOOK_AHEAD defines how many desc statuses to check beyond the + * current descriptor. + * It must be a pound define for optimal performance. + * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring + * function only works with LOOK_AHEAD=8. 
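+ *
+ * In outline, the scan below walks the ring in groups of LOOK_AHEAD
+ * descriptors (a simplified sketch of the loop that follows, not a
+ * literal copy of it):
+ *
+ *   for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST; i += LOOK_AHEAD) {
+ *       read the LOOK_AHEAD status words, last one first, to avoid
+ *       racing the hardware write-back;
+ *       nb_dd = number of DD bits set in the group;
+ *       translate the first nb_dd descriptors into mbufs and stage
+ *       them in rx_stage[];
+ *       if (nb_dd != LOOK_AHEAD)
+ *           break;
+ *   }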
+ */ +#define LOOK_AHEAD 8 +#if (LOOK_AHEAD != 8) +#error "PMD IXGBE: LOOK_AHEAD must be 8\n" +#endif +static inline int +ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) +{ + volatile union ixgbe_adv_rx_desc *rxdp; + struct ixgbe_rx_entry *rxep; + struct rte_mbuf *mb; + uint16_t pkt_len; + uint64_t pkt_flags; + int nb_dd; + uint32_t s[LOOK_AHEAD]; + uint32_t pkt_info[LOOK_AHEAD]; + int i, j, nb_rx = 0; + uint32_t status; + + /* get references to current descriptor and S/W ring entry */ + rxdp = &rxq->rx_ring[rxq->rx_tail]; + rxep = &rxq->sw_ring[rxq->rx_tail]; + + status = rxdp->wb.upper.status_error; + /* check to make sure there is at least 1 packet to receive */ + if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) + return 0; + + /* + * Scan LOOK_AHEAD descriptors at a time to determine which descriptors + * reference packets that are ready to be received. + */ + for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST; + i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) + { + /* Read desc statuses backwards to avoid race condition */ + for (j = LOOK_AHEAD-1; j >= 0; --j) + s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error); + + for (j = LOOK_AHEAD - 1; j >= 0; --j) + pkt_info[j] = rte_le_to_cpu_32(rxdp[j].wb.lower. + lo_dword.data); + + /* Compute how many status bits were set */ + nb_dd = 0; + for (j = 0; j < LOOK_AHEAD; ++j) + nb_dd += s[j] & IXGBE_RXDADV_STAT_DD; + + nb_rx += nb_dd; + + /* Translate descriptor info to mbuf format */ + for (j = 0; j < nb_dd; ++j) { + mb = rxep[j].mbuf; + pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) - + rxq->crc_len; + mb->data_len = pkt_len; + mb->pkt_len = pkt_len; + mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan); + + /* convert descriptor fields to rte mbuf flags */ + pkt_flags = rx_desc_status_to_pkt_flags(s[j]); + pkt_flags |= rx_desc_error_to_pkt_flags(s[j]); + pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags + ((uint16_t)pkt_info[j]); + mb->ol_flags = pkt_flags; + mb->packet_type = + ixgbe_rxd_pkt_info_to_pkt_type + (pkt_info[j], rxq->pkt_type_mask); + + if (likely(pkt_flags & PKT_RX_RSS_HASH)) + mb->hash.rss = rte_le_to_cpu_32( + rxdp[j].wb.lower.hi_dword.rss); + else if (pkt_flags & PKT_RX_FDIR) { + mb->hash.fdir.hash = rte_le_to_cpu_16( + rxdp[j].wb.lower.hi_dword.csum_ip.csum) & + IXGBE_ATR_HASH_MASK; + mb->hash.fdir.id = rte_le_to_cpu_16( + rxdp[j].wb.lower.hi_dword.csum_ip.ip_id); + } + } + + /* Move mbuf pointers from the S/W ring to the stage */ + for (j = 0; j < LOOK_AHEAD; ++j) { + rxq->rx_stage[i + j] = rxep[j].mbuf; + } + + /* stop if all requested packets could not be received */ + if (nb_dd != LOOK_AHEAD) + break; + } + + /* clear software ring entries so we can cleanup correctly */ + for (i = 0; i < nb_rx; ++i) { + rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL; + } + + + return nb_rx; +} + +static inline int +ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf) +{ + volatile union ixgbe_adv_rx_desc *rxdp; + struct ixgbe_rx_entry *rxep; + struct rte_mbuf *mb; + uint16_t alloc_idx; + __le64 dma_addr; + int diag, i; + + /* allocate buffers in bulk directly into the S/W ring */ + alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1); + rxep = &rxq->sw_ring[alloc_idx]; + diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep, + rxq->rx_free_thresh); + if (unlikely(diag != 0)) + return -ENOMEM; + + rxdp = &rxq->rx_ring[alloc_idx]; + for (i = 0; i < rxq->rx_free_thresh; ++i) { + /* populate the static rte mbuf fields */ + mb = rxep[i].mbuf; + if (reset_mbuf) { + mb->next = NULL; + mb->nb_segs = 1; + 
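/*
+ * Note: reset_mbuf is true on the plain bulk-alloc receive path
+ * (rx_recv_pkts() calls this helper with "true"), where these static
+ * fields must already be valid when the mbuf is handed out; the
+ * LRO/RSC path passes "false" and initializes the per-packet fields
+ * itself once the descriptor completes.
+ */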
mb->port = rxq->port_id; + } + + rte_mbuf_refcnt_set(mb, 1); + mb->data_off = RTE_PKTMBUF_HEADROOM; + + /* populate the descriptors */ + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mb)); + rxdp[i].read.hdr_addr = 0; + rxdp[i].read.pkt_addr = dma_addr; + } + + /* update state of internal queue structure */ + rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh; + if (rxq->rx_free_trigger >= rxq->nb_rx_desc) + rxq->rx_free_trigger = rxq->rx_free_thresh - 1; + + /* no errors */ + return 0; +} + +static inline uint16_t +ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail]; + int i; + + /* how many packets are ready to return? */ + nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail); + + /* copy mbuf pointers to the application's packet list */ + for (i = 0; i < nb_pkts; ++i) + rx_pkts[i] = stage[i]; + + /* update internal queue state */ + rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts); + rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts); + + return nb_pkts; +} + +static inline uint16_t +rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue; + uint16_t nb_rx = 0; + + /* Any previously recv'd pkts will be returned from the Rx stage */ + if (rxq->rx_nb_avail) + return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts); + + /* Scan the H/W ring for packets to receive */ + nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq); + + /* update internal queue state */ + rxq->rx_next_avail = 0; + rxq->rx_nb_avail = nb_rx; + rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx); + + /* if required, allocate new buffers to replenish descriptors */ + if (rxq->rx_tail > rxq->rx_free_trigger) { + uint16_t cur_free_trigger = rxq->rx_free_trigger; + + if (ixgbe_rx_alloc_bufs(rxq, true) != 0) { + int i, j; + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " + "queue_id=%u", (unsigned) rxq->port_id, + (unsigned) rxq->queue_id); + + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += + rxq->rx_free_thresh; + + /* + * Need to rewind any previous receives if we cannot + * allocate new buffers to replenish the old ones. + */ + rxq->rx_nb_avail = 0; + rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx); + for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j) + rxq->sw_ring[j].mbuf = rxq->rx_stage[i]; + + return 0; + } + + /* update tail pointer */ + rte_wmb(); + IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, cur_free_trigger); + } + + if (rxq->rx_tail >= rxq->nb_rx_desc) + rxq->rx_tail = 0; + + /* received any packets this loop? 
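+ * If so, they were staged above by ixgbe_rx_scan_hw_ring() and are
+ * handed to the caller straight out of rx_stage[]; otherwise return 0
+ * and let the caller try again on the next poll.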
*/ + if (rxq->rx_nb_avail) + return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts); + + return 0; +} + +/* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */ +uint16_t +ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_rx; + + if (unlikely(nb_pkts == 0)) + return 0; + + if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST)) + return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts); + + /* request is relatively large, chunk it up */ + nb_rx = 0; + while (nb_pkts) { + uint16_t ret, n; + n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST); + ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n); + nb_rx = (uint16_t)(nb_rx + ret); + nb_pkts = (uint16_t)(nb_pkts - ret); + if (ret < n) + break; + } + + return nb_rx; +} + +uint16_t +ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct ixgbe_rx_queue *rxq; + volatile union ixgbe_adv_rx_desc *rx_ring; + volatile union ixgbe_adv_rx_desc *rxdp; + struct ixgbe_rx_entry *sw_ring; + struct ixgbe_rx_entry *rxe; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + union ixgbe_adv_rx_desc rxd; + uint64_t dma_addr; + uint32_t staterr; + uint32_t pkt_info; + uint16_t pkt_len; + uint16_t rx_id; + uint16_t nb_rx; + uint16_t nb_hold; + uint64_t pkt_flags; + + nb_rx = 0; + nb_hold = 0; + rxq = rx_queue; + rx_id = rxq->rx_tail; + rx_ring = rxq->rx_ring; + sw_ring = rxq->sw_ring; + while (nb_rx < nb_pkts) { + /* + * The order of operations here is important as the DD status + * bit must not be read after any other descriptor fields. + * rx_ring and rxdp are pointing to volatile data so the order + * of accesses cannot be reordered by the compiler. If they were + * not volatile, they could be reordered which could lead to + * using invalid descriptor fields when read from rxd. + */ + rxdp = &rx_ring[rx_id]; + staterr = rxdp->wb.upper.status_error; + if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) + break; + rxd = *rxdp; + + /* + * End of packet. + * + * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet + * is likely to be invalid and to be dropped by the various + * validation checks performed by the network stack. + * + * Allocate a new mbuf to replenish the RX ring descriptor. + * If the allocation fails: + * - arrange for that RX descriptor to be the first one + * being parsed the next time the receive function is + * invoked [on the same queue]. + * + * - Stop parsing the RX ring and return immediately. + * + * This policy do not drop the packet received in the RX + * descriptor for which the allocation of a new mbuf failed. + * Thus, it allows that packet to be later retrieved if + * mbuf have been freed in the mean time. + * As a side effect, holding RX descriptors instead of + * systematically giving them back to the NIC may lead to + * RX ring exhaustion situations. + * However, the NIC can gracefully prevent such situations + * to happen by sending specific "back-pressure" flow control + * frames to its peer(s). 
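+ * When the allocation does fail, rx_mbuf_alloc_failed is incremented
+ * for the port and the packets already received in this call are
+ * still returned, so the application can both observe the mempool
+ * pressure and keep making forward progress.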
+ */ + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u " + "ext_err_stat=0x%08x pkt_len=%u", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) staterr, + (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length)); + + nmb = rte_rxmbuf_alloc(rxq->mb_pool); + if (nmb == NULL) { + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " + "queue_id=%u", (unsigned) rxq->port_id, + (unsigned) rxq->queue_id); + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; + break; + } + + nb_hold++; + rxe = &sw_ring[rx_id]; + rx_id++; + if (rx_id == rxq->nb_rx_desc) + rx_id = 0; + + /* Prefetch next mbuf while processing current one. */ + rte_ixgbe_prefetch(sw_ring[rx_id].mbuf); + + /* + * When next RX descriptor is on a cache-line boundary, + * prefetch the next 4 RX descriptors and the next 8 pointers + * to mbufs. + */ + if ((rx_id & 0x3) == 0) { + rte_ixgbe_prefetch(&rx_ring[rx_id]); + rte_ixgbe_prefetch(&sw_ring[rx_id]); + } + + rxm = rxe->mbuf; + rxe->mbuf = nmb; + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb)); + rxdp->read.hdr_addr = 0; + rxdp->read.pkt_addr = dma_addr; + + /* + * Initialize the returned mbuf. + * 1) setup generic mbuf fields: + * - number of segments, + * - next segment, + * - packet length, + * - RX port identifier. + * 2) integrate hardware offload data, if any: + * - RSS flag & hash, + * - IP checksum flag, + * - VLAN TCI, if any, + * - error flags. + */ + pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) - + rxq->crc_len); + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off); + rxm->nb_segs = 1; + rxm->next = NULL; + rxm->pkt_len = pkt_len; + rxm->data_len = pkt_len; + rxm->port = rxq->port_id; + + pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data); + /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */ + rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan); + + pkt_flags = rx_desc_status_to_pkt_flags(staterr); + pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); + pkt_flags = pkt_flags | + ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info); + rxm->ol_flags = pkt_flags; + rxm->packet_type = + ixgbe_rxd_pkt_info_to_pkt_type(pkt_info, + rxq->pkt_type_mask); + + if (likely(pkt_flags & PKT_RX_RSS_HASH)) + rxm->hash.rss = rte_le_to_cpu_32( + rxd.wb.lower.hi_dword.rss); + else if (pkt_flags & PKT_RX_FDIR) { + rxm->hash.fdir.hash = rte_le_to_cpu_16( + rxd.wb.lower.hi_dword.csum_ip.csum) & + IXGBE_ATR_HASH_MASK; + rxm->hash.fdir.id = rte_le_to_cpu_16( + rxd.wb.lower.hi_dword.csum_ip.ip_id); + } + /* + * Store the mbuf address into the next entry of the array + * of returned packets. + */ + rx_pkts[nb_rx++] = rxm; + } + rxq->rx_tail = rx_id; + + /* + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the Receive Descriptor Tail (RDT) + * register. + * Update the RDT with the value of the last processed RX descriptor + * minus 1, to guarantee that the RDT register is never equal to the + * RDH register, which creates a "full" ring situtation from the + * hardware point of view... + */ + nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold); + if (nb_hold > rxq->rx_free_thresh) { + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u " + "nb_hold=%u nb_rx=%u", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) nb_hold, + (unsigned) nb_rx); + rx_id = (uint16_t) ((rx_id == 0) ? 
+ (rxq->nb_rx_desc - 1) : (rx_id - 1)); + IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + return nb_rx; +} + +/** + * Detect an RSC descriptor. + */ +static inline uint32_t +ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx) +{ + return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) & + IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT; +} + +/** + * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet + * + * Fill the following info in the HEAD buffer of the Rx cluster: + * - RX port identifier + * - hardware offload data, if any: + * - RSS flag & hash + * - IP checksum flag + * - VLAN TCI, if any + * - error flags + * @head HEAD of the packet cluster + * @desc HW descriptor to get data from + * @rxq Pointer to the Rx queue + */ +static inline void +ixgbe_fill_cluster_head_buf( + struct rte_mbuf *head, + union ixgbe_adv_rx_desc *desc, + struct ixgbe_rx_queue *rxq, + uint32_t staterr) +{ + uint32_t pkt_info; + uint64_t pkt_flags; + + head->port = rxq->port_id; + + /* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is + * set in the pkt_flags field. + */ + head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan); + pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data); + pkt_flags = rx_desc_status_to_pkt_flags(staterr); + pkt_flags |= rx_desc_error_to_pkt_flags(staterr); + pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info); + head->ol_flags = pkt_flags; + head->packet_type = + ixgbe_rxd_pkt_info_to_pkt_type(pkt_info, rxq->pkt_type_mask); + + if (likely(pkt_flags & PKT_RX_RSS_HASH)) + head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss); + else if (pkt_flags & PKT_RX_FDIR) { + head->hash.fdir.hash = + rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum) + & IXGBE_ATR_HASH_MASK; + head->hash.fdir.id = + rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id); + } +} + +/** + * ixgbe_recv_pkts_lro - receive handler for and LRO case. + * + * @rx_queue Rx queue handle + * @rx_pkts table of received packets + * @nb_pkts size of rx_pkts table + * @bulk_alloc if TRUE bulk allocation is used for a HW ring refilling + * + * Handles the Rx HW ring completions when RSC feature is configured. Uses an + * additional ring of ixgbe_rsc_entry's that will hold the relevant RSC info. + * + * We use the same logic as in Linux and in FreeBSD ixgbe drivers: + * 1) When non-EOP RSC completion arrives: + * a) Update the HEAD of the current RSC aggregation cluster with the new + * segment's data length. + * b) Set the "next" pointer of the current segment to point to the segment + * at the NEXTP index. + * c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry + * in the sw_rsc_ring. + * 2) When EOP arrives we just update the cluster's total length and offload + * flags and deliver the cluster up to the upper layers. In our case - put it + * in the rx_pkts table. + * + * Returns the number of received packets/clusters (according to the "bulk + * receive" interface). 
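+ *
+ * For example, a cluster spread over three descriptors is delivered as
+ * follows: the first two completions are non-EOP, so each segment is
+ * chained to the mbuf found at its NEXTP index and the HEAD pointer is
+ * carried forward in sw_sc_ring; the third completion carries EOP, so
+ * the HEAD's pkt_len/nb_segs are finalized (and the CRC length stripped
+ * when needed) and the HEAD mbuf is stored in rx_pkts[].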
+ */ +static inline uint16_t +ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts, + bool bulk_alloc) +{ + struct ixgbe_rx_queue *rxq = rx_queue; + volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring; + struct ixgbe_rx_entry *sw_ring = rxq->sw_ring; + struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring; + uint16_t rx_id = rxq->rx_tail; + uint16_t nb_rx = 0; + uint16_t nb_hold = rxq->nb_rx_hold; + uint16_t prev_id = rxq->rx_tail; + + while (nb_rx < nb_pkts) { + bool eop; + struct ixgbe_rx_entry *rxe; + struct ixgbe_scattered_rx_entry *sc_entry; + struct ixgbe_scattered_rx_entry *next_sc_entry; + struct ixgbe_rx_entry *next_rxe = NULL; + struct rte_mbuf *first_seg; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + union ixgbe_adv_rx_desc rxd; + uint16_t data_len; + uint16_t next_id; + volatile union ixgbe_adv_rx_desc *rxdp; + uint32_t staterr; + +next_desc: + /* + * The code in this whole file uses the volatile pointer to + * ensure the read ordering of the status and the rest of the + * descriptor fields (on the compiler level only!!!). This is so + * UGLY - why not to just use the compiler barrier instead? DPDK + * even has the rte_compiler_barrier() for that. + * + * But most importantly this is just wrong because this doesn't + * ensure memory ordering in a general case at all. For + * instance, DPDK is supposed to work on Power CPUs where + * compiler barrier may just not be enough! + * + * I tried to write only this function properly to have a + * starting point (as a part of an LRO/RSC series) but the + * compiler cursed at me when I tried to cast away the + * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm + * keeping it the way it is for now. + * + * The code in this file is broken in so many other places and + * will just not work on a big endian CPU anyway therefore the + * lines below will have to be revisited together with the rest + * of the ixgbe PMD. + * + * TODO: + * - Get rid of "volatile" crap and let the compiler do its + * job. + * - Use the proper memory barrier (rte_rmb()) to ensure the + * memory ordering below. + */ + rxdp = &rx_ring[rx_id]; + staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error); + + if (!(staterr & IXGBE_RXDADV_STAT_DD)) + break; + + rxd = *rxdp; + + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u " + "staterr=0x%x data_len=%u", + rxq->port_id, rxq->queue_id, rx_id, staterr, + rte_le_to_cpu_16(rxd.wb.upper.length)); + + if (!bulk_alloc) { + nmb = rte_rxmbuf_alloc(rxq->mb_pool); + if (nmb == NULL) { + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed " + "port_id=%u queue_id=%u", + rxq->port_id, rxq->queue_id); + + rte_eth_devices[rxq->port_id].data-> + rx_mbuf_alloc_failed++; + break; + } + } + else if (nb_hold > rxq->rx_free_thresh) { + uint16_t next_rdt = rxq->rx_free_trigger; + + if (!ixgbe_rx_alloc_bufs(rxq, false)) { + rte_wmb(); + IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, + next_rdt); + nb_hold -= rxq->rx_free_thresh; + } else { + PMD_RX_LOG(DEBUG, "RX bulk alloc failed " + "port_id=%u queue_id=%u", + rxq->port_id, rxq->queue_id); + + rte_eth_devices[rxq->port_id].data-> + rx_mbuf_alloc_failed++; + break; + } + } + + nb_hold++; + rxe = &sw_ring[rx_id]; + eop = staterr & IXGBE_RXDADV_STAT_EOP; + + next_id = rx_id + 1; + if (next_id == rxq->nb_rx_desc) + next_id = 0; + + /* Prefetch next mbuf while processing current one. 
*/ + rte_ixgbe_prefetch(sw_ring[next_id].mbuf); + + /* + * When next RX descriptor is on a cache-line boundary, + * prefetch the next 4 RX descriptors and the next 4 pointers + * to mbufs. + */ + if ((next_id & 0x3) == 0) { + rte_ixgbe_prefetch(&rx_ring[next_id]); + rte_ixgbe_prefetch(&sw_ring[next_id]); + } + + rxm = rxe->mbuf; + + if (!bulk_alloc) { + __le64 dma = + rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb)); + /* + * Update RX descriptor with the physical address of the + * new data buffer of the new allocated mbuf. + */ + rxe->mbuf = nmb; + + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rxdp->read.hdr_addr = 0; + rxdp->read.pkt_addr = dma; + } else + rxe->mbuf = NULL; + + /* + * Set data length & data buffer address of mbuf. + */ + data_len = rte_le_to_cpu_16(rxd.wb.upper.length); + rxm->data_len = data_len; + + if (!eop) { + uint16_t nextp_id; + /* + * Get next descriptor index: + * - For RSC it's in the NEXTP field. + * - For a scattered packet - it's just a following + * descriptor. + */ + if (ixgbe_rsc_count(&rxd)) + nextp_id = + (staterr & IXGBE_RXDADV_NEXTP_MASK) >> + IXGBE_RXDADV_NEXTP_SHIFT; + else + nextp_id = next_id; + + next_sc_entry = &sw_sc_ring[nextp_id]; + next_rxe = &sw_ring[nextp_id]; + rte_ixgbe_prefetch(next_rxe); + } + + sc_entry = &sw_sc_ring[rx_id]; + first_seg = sc_entry->fbuf; + sc_entry->fbuf = NULL; + + /* + * If this is the first buffer of the received packet, + * set the pointer to the first mbuf of the packet and + * initialize its context. + * Otherwise, update the total length and the number of segments + * of the current scattered packet, and update the pointer to + * the last mbuf of the current packet. + */ + if (first_seg == NULL) { + first_seg = rxm; + first_seg->pkt_len = data_len; + first_seg->nb_segs = 1; + } else { + first_seg->pkt_len += data_len; + first_seg->nb_segs++; + } + + prev_id = rx_id; + rx_id = next_id; + + /* + * If this is not the last buffer of the received packet, update + * the pointer to the first mbuf at the NEXTP entry in the + * sw_sc_ring and continue to parse the RX ring. + */ + if (!eop && next_rxe) { + rxm->next = next_rxe->mbuf; + next_sc_entry->fbuf = first_seg; + goto next_desc; + } + + /* + * This is the last buffer of the received packet - return + * the current cluster to the user. + */ + rxm->next = NULL; + + /* Initialize the first mbuf of the returned packet */ + ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr); + + /* + * Deal with the case, when HW CRC srip is disabled. + * That can't happen when LRO is enabled, but still could + * happen for scattered RX mode. + */ + first_seg->pkt_len -= rxq->crc_len; + if (unlikely(rxm->data_len <= rxq->crc_len)) { + struct rte_mbuf *lp; + + for (lp = first_seg; lp->next != rxm; lp = lp->next) + ; + + first_seg->nb_segs--; + lp->data_len -= rxq->crc_len - rxm->data_len; + lp->next = NULL; + rte_pktmbuf_free_seg(rxm); + } else + rxm->data_len -= rxq->crc_len; + + /* Prefetch data of first segment, if configured to do so. */ + rte_packet_prefetch((char *)first_seg->buf_addr + + first_seg->data_off); + + /* + * Store the mbuf address into the next entry of the array + * of returned packets. + */ + rx_pkts[nb_rx++] = first_seg; + } + + /* + * Record index of the next RX descriptor to probe. + */ + rxq->rx_tail = rx_id; + + /* + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the Receive Descriptor Tail (RDT) + * register. 
+ * Update the RDT with the value of the last processed RX descriptor + * minus 1, to guarantee that the RDT register is never equal to the + * RDH register, which creates a "full" ring situtation from the + * hardware point of view... + */ + if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) { + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u " + "nb_hold=%u nb_rx=%u", + rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx); + + rte_wmb(); + IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, prev_id); + nb_hold = 0; + } + + rxq->nb_rx_hold = nb_hold; + return nb_rx; +} + +uint16_t +ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false); +} + +uint16_t +ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true); +} + +/********************************************************************* + * + * Queue management functions + * + **********************************************************************/ + +static void __attribute__((cold)) +ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq) +{ + unsigned i; + + if (txq->sw_ring != NULL) { + for (i = 0; i < txq->nb_tx_desc; i++) { + if (txq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + } + } +} + +static void __attribute__((cold)) +ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq) +{ + if (txq != NULL && + txq->sw_ring != NULL) + rte_free(txq->sw_ring); +} + +static void __attribute__((cold)) +ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq) +{ + if (txq != NULL && txq->ops != NULL) { + txq->ops->release_mbufs(txq); + txq->ops->free_swring(txq); + rte_free(txq); + } +} + +void __attribute__((cold)) +ixgbe_dev_tx_queue_release(void *txq) +{ + ixgbe_tx_queue_release(txq); +} + +/* (Re)set dynamic ixgbe_tx_queue fields to defaults */ +static void __attribute__((cold)) +ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq) +{ + static const union ixgbe_adv_tx_desc zeroed_desc = {{0}}; + struct ixgbe_tx_entry *txe = txq->sw_ring; + uint16_t prev, i; + + /* Zero out HW ring memory */ + for (i = 0; i < txq->nb_tx_desc; i++) { + txq->tx_ring[i] = zeroed_desc; + } + + /* Initialize SW ring entries */ + prev = (uint16_t) (txq->nb_tx_desc - 1); + for (i = 0; i < txq->nb_tx_desc; i++) { + volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i]; + txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD); + txe[i].mbuf = NULL; + txe[i].last_id = i; + txe[prev].next_id = i; + prev = i; + } + + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + + txq->tx_tail = 0; + txq->nb_tx_used = 0; + /* + * Always allow 1 descriptor to be un-allocated to avoid + * a H/W race condition + */ + txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1); + txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1); + txq->ctx_curr = 0; + memset((void*)&txq->ctx_cache, 0, + IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info)); +} + +static const struct ixgbe_txq_ops def_txq_ops = { + .release_mbufs = ixgbe_tx_queue_release_mbufs, + .free_swring = ixgbe_tx_free_swring, + .reset = ixgbe_reset_tx_queue, +}; + +/* Takes an ethdev and a queue and sets up the tx function to be used based on + * the queue parameters. Used in tx_queue_setup by primary process and then + * in dev_init by secondary process when attaching to an existing ethdev. 
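+ *
+ * In short (see the body below): when every IXGBE_SIMPLE_FLAGS bit is
+ * set in txq_flags and tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST, the
+ * simple Tx path is chosen, and within it the vector routine is used
+ * if tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ and the vector setup
+ * succeeds; otherwise the full-featured ixgbe_xmit_pkts() handles
+ * offloads and multi-segment packets.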
+ */ +void __attribute__((cold)) +ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq) +{ + /* Use a simple Tx queue (no offloads, no multi segs) if possible */ + if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) + && (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) { + PMD_INIT_LOG(DEBUG, "Using simple tx code path"); +#ifdef RTE_IXGBE_INC_VECTOR + if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ && + (rte_eal_process_type() != RTE_PROC_PRIMARY || + ixgbe_txq_vec_setup(txq) == 0)) { + PMD_INIT_LOG(DEBUG, "Vector tx enabled."); + dev->tx_pkt_burst = ixgbe_xmit_pkts_vec; + } else +#endif + dev->tx_pkt_burst = ixgbe_xmit_pkts_simple; + } else { + PMD_INIT_LOG(DEBUG, "Using full-featured tx code path"); + PMD_INIT_LOG(DEBUG, + " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]", + (unsigned long)txq->txq_flags, + (unsigned long)IXGBE_SIMPLE_FLAGS); + PMD_INIT_LOG(DEBUG, + " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]", + (unsigned long)txq->tx_rs_thresh, + (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST); + dev->tx_pkt_burst = ixgbe_xmit_pkts; + } +} + +int __attribute__((cold)) +ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + const struct rte_memzone *tz; + struct ixgbe_tx_queue *txq; + struct ixgbe_hw *hw; + uint16_t tx_rs_thresh, tx_free_thresh; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * Validate number of transmit descriptors. + * It must not exceed hardware maximum, and must be multiple + * of IXGBE_ALIGN. + */ + if (nb_desc % IXGBE_TXD_ALIGN != 0 || + (nb_desc > IXGBE_MAX_RING_DESC) || + (nb_desc < IXGBE_MIN_RING_DESC)) { + return -EINVAL; + } + + /* + * The following two parameters control the setting of the RS bit on + * transmit descriptors. + * TX descriptors will have their RS bit set after txq->tx_rs_thresh + * descriptors have been used. + * The TX descriptor ring will be cleaned after txq->tx_free_thresh + * descriptors are used or if the number of descriptors required + * to transmit a packet is greater than the number of free TX + * descriptors. + * The following constraints must be satisfied: + * tx_rs_thresh must be greater than 0. + * tx_rs_thresh must be less than the size of the ring minus 2. + * tx_rs_thresh must be less than or equal to tx_free_thresh. + * tx_rs_thresh must be a divisor of the ring size. + * tx_free_thresh must be greater than 0. + * tx_free_thresh must be less than the size of the ring minus 3. + * One descriptor in the TX ring is used as a sentinel to avoid a + * H/W race condition, hence the maximum threshold constraints. + * When set to zero use default values. + */ + tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ? + tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH); + tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ? + tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH); + if (tx_rs_thresh >= (nb_desc - 2)) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number " + "of TX descriptors minus 2. (tx_rs_thresh=%u " + "port=%d queue=%d)", (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, (int)queue_idx); + return -(EINVAL); + } + if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be less or equal than %u. 
" + "(tx_rs_thresh=%u port=%d queue=%d)", + DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, (int)queue_idx); + return -(EINVAL); + } + if (tx_free_thresh >= (nb_desc - 3)) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the " + "tx_free_thresh must be less than the number of " + "TX descriptors minus 3. (tx_free_thresh=%u " + "port=%d queue=%d)", + (unsigned int)tx_free_thresh, + (int)dev->data->port_id, (int)queue_idx); + return -(EINVAL); + } + if (tx_rs_thresh > tx_free_thresh) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to " + "tx_free_thresh. (tx_free_thresh=%u " + "tx_rs_thresh=%u port=%d queue=%d)", + (unsigned int)tx_free_thresh, + (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, + (int)queue_idx); + return -(EINVAL); + } + if ((nb_desc % tx_rs_thresh) != 0) { + PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the " + "number of TX descriptors. (tx_rs_thresh=%u " + "port=%d queue=%d)", (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, (int)queue_idx); + return -(EINVAL); + } + + /* + * If rs_bit_thresh is greater than 1, then TX WTHRESH should be + * set to 0. If WTHRESH is greater than zero, the RS bit is ignored + * by the NIC and all descriptors are written back after the NIC + * accumulates WTHRESH descriptors. + */ + if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) { + PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if " + "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u " + "port=%d queue=%d)", (unsigned int)tx_rs_thresh, + (int)dev->data->port_id, (int)queue_idx); + return -(EINVAL); + } + + /* Free memory prior to re-allocation if needed... */ + if (dev->data->tx_queues[queue_idx] != NULL) { + ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]); + dev->data->tx_queues[queue_idx] = NULL; + } + + /* First allocate the tx queue data structure */ + txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue), + RTE_CACHE_LINE_SIZE, socket_id); + if (txq == NULL) + return -ENOMEM; + + /* + * Allocate TX ring hardware descriptors. A memzone large enough to + * handle the maximum ring size is allocated in order to allow for + * resizing in later calls to the queue setup function. + */ + tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, + sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC, + IXGBE_ALIGN, socket_id); + if (tz == NULL) { + ixgbe_tx_queue_release(txq); + return -ENOMEM; + } + + txq->nb_tx_desc = nb_desc; + txq->tx_rs_thresh = tx_rs_thresh; + txq->tx_free_thresh = tx_free_thresh; + txq->pthresh = tx_conf->tx_thresh.pthresh; + txq->hthresh = tx_conf->tx_thresh.hthresh; + txq->wthresh = tx_conf->tx_thresh.wthresh; + txq->queue_id = queue_idx; + txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ? 
+ queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx); + txq->port_id = dev->data->port_id; + txq->txq_flags = tx_conf->txq_flags; + txq->ops = &def_txq_ops; + txq->tx_deferred_start = tx_conf->tx_deferred_start; + + /* + * Modification to set VFTDT for virtual function if vf is detected + */ + if (hw->mac.type == ixgbe_mac_82599_vf || + hw->mac.type == ixgbe_mac_X540_vf || + hw->mac.type == ixgbe_mac_X550_vf || + hw->mac.type == ixgbe_mac_X550EM_x_vf || + hw->mac.type == ixgbe_mac_X550EM_a_vf) + txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx)); + else + txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx)); + + txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr); + txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr; + + /* Allocate software ring */ + txq->sw_ring = rte_zmalloc_socket("txq->sw_ring", + sizeof(struct ixgbe_tx_entry) * nb_desc, + RTE_CACHE_LINE_SIZE, socket_id); + if (txq->sw_ring == NULL) { + ixgbe_tx_queue_release(txq); + return -ENOMEM; + } + PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64, + txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); + + /* set up vector or scalar TX function as appropriate */ + ixgbe_set_tx_function(dev, txq); + + txq->ops->reset(txq); + + dev->data->tx_queues[queue_idx] = txq; + + + return 0; +} + +/** + * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster + * + * The "next" pointer of the last segment of (not-yet-completed) RSC clusters + * in the sw_rsc_ring is not set to NULL but rather points to the next + * mbuf of this RSC aggregation (that has not been completed yet and still + * resides on the HW ring). So, instead of calling for rte_pktmbuf_free() we + * will just free first "nb_segs" segments of the cluster explicitly by calling + * an rte_pktmbuf_free_seg(). + * + * @m scattered cluster head + */ +static void __attribute__((cold)) +ixgbe_free_sc_cluster(struct rte_mbuf *m) +{ + uint8_t i, nb_segs = m->nb_segs; + struct rte_mbuf *next_seg; + + for (i = 0; i < nb_segs; i++) { + next_seg = m->next; + rte_pktmbuf_free_seg(m); + m = next_seg; + } +} + +static void __attribute__((cold)) +ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq) +{ + unsigned i; + +#ifdef RTE_IXGBE_INC_VECTOR + /* SSE Vector driver has a different way of releasing mbufs. */ + if (rxq->rx_using_sse) { + ixgbe_rx_queue_release_mbufs_vec(rxq); + return; + } +#endif + + if (rxq->sw_ring != NULL) { + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (rxq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + rxq->sw_ring[i].mbuf = NULL; + } + } + if (rxq->rx_nb_avail) { + for (i = 0; i < rxq->rx_nb_avail; ++i) { + struct rte_mbuf *mb; + mb = rxq->rx_stage[rxq->rx_next_avail + i]; + rte_pktmbuf_free_seg(mb); + } + rxq->rx_nb_avail = 0; + } + } + + if (rxq->sw_sc_ring) + for (i = 0; i < rxq->nb_rx_desc; i++) + if (rxq->sw_sc_ring[i].fbuf) { + ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf); + rxq->sw_sc_ring[i].fbuf = NULL; + } +} + +static void __attribute__((cold)) +ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq) +{ + if (rxq != NULL) { + ixgbe_rx_queue_release_mbufs(rxq); + rte_free(rxq->sw_ring); + rte_free(rxq->sw_sc_ring); + rte_free(rxq); + } +} + +void __attribute__((cold)) +ixgbe_dev_rx_queue_release(void *rxq) +{ + ixgbe_rx_queue_release(rxq); +} + +/* + * Check if Rx Burst Bulk Alloc function can be used. + * Return + * 0: the preconditions are satisfied and the bulk allocation function + * can be used. 
+ * -EINVAL: the preconditions are NOT satisfied and the default Rx burst + * function must be used. + */ +static inline int __attribute__((cold)) +check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq) +{ + int ret = 0; + + /* + * Make sure the following pre-conditions are satisfied: + * rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST + * rxq->rx_free_thresh < rxq->nb_rx_desc + * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0 + * rxq->nb_rx_desc<(IXGBE_MAX_RING_DESC-RTE_PMD_IXGBE_RX_MAX_BURST) + * Scattered packets are not supported. This should be checked + * outside of this function. + */ + if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->rx_free_thresh=%d, " + "RTE_PMD_IXGBE_RX_MAX_BURST=%d", + rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST); + ret = -EINVAL; + } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->rx_free_thresh=%d, " + "rxq->nb_rx_desc=%d", + rxq->rx_free_thresh, rxq->nb_rx_desc); + ret = -EINVAL; + } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->nb_rx_desc=%d, " + "rxq->rx_free_thresh=%d", + rxq->nb_rx_desc, rxq->rx_free_thresh); + ret = -EINVAL; + } else if (!(rxq->nb_rx_desc < + (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST))) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->nb_rx_desc=%d, " + "IXGBE_MAX_RING_DESC=%d, " + "RTE_PMD_IXGBE_RX_MAX_BURST=%d", + rxq->nb_rx_desc, IXGBE_MAX_RING_DESC, + RTE_PMD_IXGBE_RX_MAX_BURST); + ret = -EINVAL; + } + + return ret; +} + +/* Reset dynamic ixgbe_rx_queue fields back to defaults */ +static void __attribute__((cold)) +ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq) +{ + static const union ixgbe_adv_rx_desc zeroed_desc = {{0}}; + unsigned i; + uint16_t len = rxq->nb_rx_desc; + + /* + * By default, the Rx queue setup function allocates enough memory for + * IXGBE_MAX_RING_DESC. The Rx Burst bulk allocation function requires + * extra memory at the end of the descriptor ring to be zero'd out. A + * pre-condition for using the Rx burst bulk alloc function is that the + * number of descriptors is less than or equal to + * (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST). Check all the + * constraints here to see if we need to zero out memory after the end + * of the H/W descriptor ring. + */ + if (adapter->rx_bulk_alloc_allowed) + /* zero out extra memory */ + len += RTE_PMD_IXGBE_RX_MAX_BURST; + + /* + * Zero out HW ring memory. Zero out extra memory at the end of + * the H/W ring so look-ahead logic in Rx Burst bulk alloc function + * reads extra memory as zeros. + */ + for (i = 0; i < len; i++) { + rxq->rx_ring[i] = zeroed_desc; + } + + /* + * initialize extra software ring entries. 
Space for these extra + * entries is always allocated + */ + memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf)); + for (i = rxq->nb_rx_desc; i < len; ++i) { + rxq->sw_ring[i].mbuf = &rxq->fake_mbuf; + } + + rxq->rx_nb_avail = 0; + rxq->rx_next_avail = 0; + rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); + rxq->rx_tail = 0; + rxq->nb_rx_hold = 0; + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; + +#ifdef RTE_IXGBE_INC_VECTOR + rxq->rxrearm_start = 0; + rxq->rxrearm_nb = 0; +#endif +} + +int __attribute__((cold)) +ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + const struct rte_memzone *rz; + struct ixgbe_rx_queue *rxq; + struct ixgbe_hw *hw; + uint16_t len; + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * Validate number of receive descriptors. + * It must not exceed hardware maximum, and must be multiple + * of IXGBE_ALIGN. + */ + if (nb_desc % IXGBE_RXD_ALIGN != 0 || + (nb_desc > IXGBE_MAX_RING_DESC) || + (nb_desc < IXGBE_MIN_RING_DESC)) { + return -EINVAL; + } + + /* Free memory prior to re-allocation if needed... */ + if (dev->data->rx_queues[queue_idx] != NULL) { + ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]); + dev->data->rx_queues[queue_idx] = NULL; + } + + /* First allocate the rx queue data structure */ + rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue), + RTE_CACHE_LINE_SIZE, socket_id); + if (rxq == NULL) + return -ENOMEM; + rxq->mb_pool = mp; + rxq->nb_rx_desc = nb_desc; + rxq->rx_free_thresh = rx_conf->rx_free_thresh; + rxq->queue_id = queue_idx; + rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ? + queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx); + rxq->port_id = dev->data->port_id; + rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? + 0 : ETHER_CRC_LEN); + rxq->drop_en = rx_conf->rx_drop_en; + rxq->rx_deferred_start = rx_conf->rx_deferred_start; + + /* + * The packet type in RX descriptor is different for different NICs. + * Some bits are used for x550 but reserved for other NICS. + * So set different masks for different NICs. + */ + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a || + hw->mac.type == ixgbe_mac_X550_vf || + hw->mac.type == ixgbe_mac_X550EM_x_vf || + hw->mac.type == ixgbe_mac_X550EM_a_vf) + rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_X550; + else + rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_82599; + + /* + * Allocate RX ring hardware descriptors. A memzone large enough to + * handle the maximum ring size is allocated in order to allow for + * resizing in later calls to the queue setup function. + */ + rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, + RX_RING_SZ, IXGBE_ALIGN, socket_id); + if (rz == NULL) { + ixgbe_rx_queue_release(rxq); + return -ENOMEM; + } + + /* + * Zero init all the descriptors in the ring. 
+ */ + memset (rz->addr, 0, RX_RING_SZ); + + /* + * Modified to setup VFRDT for Virtual Function + */ + if (hw->mac.type == ixgbe_mac_82599_vf || + hw->mac.type == ixgbe_mac_X540_vf || + hw->mac.type == ixgbe_mac_X550_vf || + hw->mac.type == ixgbe_mac_X550EM_x_vf || + hw->mac.type == ixgbe_mac_X550EM_a_vf) { + rxq->rdt_reg_addr = + IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx)); + rxq->rdh_reg_addr = + IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx)); + } + else { + rxq->rdt_reg_addr = + IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx)); + rxq->rdh_reg_addr = + IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx)); + } + + rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr); + rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr; + + /* + * Certain constraints must be met in order to use the bulk buffer + * allocation Rx burst function. If any of Rx queues doesn't meet them + * the feature should be disabled for the whole port. + */ + if (check_rx_burst_bulk_alloc_preconditions(rxq)) { + PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc " + "preconditions - canceling the feature for " + "the whole port[%d]", + rxq->queue_id, rxq->port_id); + adapter->rx_bulk_alloc_allowed = false; + } + + /* + * Allocate software ring. Allow for space at the end of the + * S/W ring to make sure look-ahead logic in bulk alloc Rx burst + * function does not access an invalid memory region. + */ + len = nb_desc; + if (adapter->rx_bulk_alloc_allowed) + len += RTE_PMD_IXGBE_RX_MAX_BURST; + + rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring", + sizeof(struct ixgbe_rx_entry) * len, + RTE_CACHE_LINE_SIZE, socket_id); + if (!rxq->sw_ring) { + ixgbe_rx_queue_release(rxq); + return -ENOMEM; + } + + /* + * Always allocate even if it's not going to be needed in order to + * simplify the code. + * + * This ring is used in LRO and Scattered Rx cases and Scattered Rx may + * be requested in ixgbe_dev_rx_init(), which is called later from + * dev_start() flow. 
+ */ + rxq->sw_sc_ring = + rte_zmalloc_socket("rxq->sw_sc_ring", + sizeof(struct ixgbe_scattered_rx_entry) * len, + RTE_CACHE_LINE_SIZE, socket_id); + if (!rxq->sw_sc_ring) { + ixgbe_rx_queue_release(rxq); + return -ENOMEM; + } + + PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p " + "dma_addr=0x%"PRIx64, + rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring, + rxq->rx_ring_phys_addr); + + if (!rte_is_power_of_2(nb_desc)) { + PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx " + "preconditions - canceling the feature for " + "the whole port[%d]", + rxq->queue_id, rxq->port_id); + adapter->rx_vec_allowed = false; + } else + ixgbe_rxq_vec_setup(rxq); + + dev->data->rx_queues[queue_idx] = rxq; + + ixgbe_reset_rx_queue(adapter, rxq); + + return 0; +} + +uint32_t +ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ +#define IXGBE_RXQ_SCAN_INTERVAL 4 + volatile union ixgbe_adv_rx_desc *rxdp; + struct ixgbe_rx_queue *rxq; + uint32_t desc = 0; + + if (rx_queue_id >= dev->data->nb_rx_queues) { + PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id); + return 0; + } + + rxq = dev->data->rx_queues[rx_queue_id]; + rxdp = &(rxq->rx_ring[rxq->rx_tail]); + + while ((desc < rxq->nb_rx_desc) && + (rxdp->wb.upper.status_error & + rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) { + desc += IXGBE_RXQ_SCAN_INTERVAL; + rxdp += IXGBE_RXQ_SCAN_INTERVAL; + if (rxq->rx_tail + desc >= rxq->nb_rx_desc) + rxdp = &(rxq->rx_ring[rxq->rx_tail + + desc - rxq->nb_rx_desc]); + } + + return desc; +} + +int +ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset) +{ + volatile union ixgbe_adv_rx_desc *rxdp; + struct ixgbe_rx_queue *rxq = rx_queue; + uint32_t desc; + + if (unlikely(offset >= rxq->nb_rx_desc)) + return 0; + desc = rxq->rx_tail + offset; + if (desc >= rxq->nb_rx_desc) + desc -= rxq->nb_rx_desc; + + rxdp = &rxq->rx_ring[desc]; + return !!(rxdp->wb.upper.status_error & + rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)); +} + +void __attribute__((cold)) +ixgbe_dev_clear_queues(struct rte_eth_dev *dev) +{ + unsigned i; + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct ixgbe_tx_queue *txq = dev->data->tx_queues[i]; + if (txq != NULL) { + txq->ops->release_mbufs(txq); + txq->ops->reset(txq); + } + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i]; + if (rxq != NULL) { + ixgbe_rx_queue_release_mbufs(rxq); + ixgbe_reset_rx_queue(adapter, rxq); + } + } +} + +void +ixgbe_dev_free_queues(struct rte_eth_dev *dev) +{ + unsigned i; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]); + dev->data->rx_queues[i] = NULL; + } + dev->data->nb_rx_queues = 0; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = NULL; + } + dev->data->nb_tx_queues = 0; +} + +/********************************************************************* + * + * Device RX/TX init functions + * + **********************************************************************/ + +/** + * Receive Side Scaling (RSS) + * See section 7.1.2.8 in the following document: + * "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009 + * + * Principles: + * The source and destination IP addresses of the IP header and the source + * and destination ports of TCP/UDP headers, if any, of received packets are + * 
hashed against a configurable random key to compute a 32-bit RSS hash result. + * The seven (7) LSBs of the 32-bit hash result are used as an index into a + * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit + * RSS output index which is used as the RX queue index where to store the + * received packets. + * The following output is supplied in the RX write-back descriptor: + * - 32-bit result of the Microsoft RSS hash function, + * - 4-bit RSS type field. + */ + +/* + * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet. + * Used as the default key. + */ +static uint8_t rss_intel_key[40] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, + 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, + 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA, +}; + +static void +ixgbe_rss_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + uint32_t mrqc; + uint32_t mrqc_reg; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type); + mrqc = IXGBE_READ_REG(hw, mrqc_reg); + mrqc &= ~IXGBE_MRQC_RSSEN; + IXGBE_WRITE_REG(hw, mrqc_reg, mrqc); +} + +static void +ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf) +{ + uint8_t *hash_key; + uint32_t mrqc; + uint32_t rss_key; + uint64_t rss_hf; + uint16_t i; + uint32_t mrqc_reg; + uint32_t rssrk_reg; + + mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type); + rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0); + + hash_key = rss_conf->rss_key; + if (hash_key != NULL) { + /* Fill in RSS hash key */ + for (i = 0; i < 10; i++) { + rss_key = hash_key[(i * 4)]; + rss_key |= hash_key[(i * 4) + 1] << 8; + rss_key |= hash_key[(i * 4) + 2] << 16; + rss_key |= hash_key[(i * 4) + 3] << 24; + IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key); + } + } + + /* Set configured hashing protocols in MRQC register */ + rss_hf = rss_conf->rss_hf; + mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */ + if (rss_hf & ETH_RSS_IPV4) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4; + if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP; + if (rss_hf & ETH_RSS_IPV6) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6; + if (rss_hf & ETH_RSS_IPV6_EX) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX; + if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP; + if (rss_hf & ETH_RSS_IPV6_TCP_EX) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP; + if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; + if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; + if (rss_hf & ETH_RSS_IPV6_UDP_EX) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; + IXGBE_WRITE_REG(hw, mrqc_reg, mrqc); +} + +int +ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct ixgbe_hw *hw; + uint32_t mrqc; + uint64_t rss_hf; + uint32_t mrqc_reg; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!ixgbe_rss_update_sp(hw->mac.type)) { + PMD_DRV_LOG(ERR, "RSS hash update is not supported on this " + "NIC."); + return -ENOTSUP; + } + mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type); + + /* + * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS): + * "RSS enabling cannot be done dynamically while it must be + * preceded by a software reset" + * Before changing anything, first check that the update RSS operation + * does not attempt to disable RSS, if RSS was enabled at + * initialization time, or does not attempt to enable 
RSS, if RSS was + * disabled at initialization time. + */ + rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL; + mrqc = IXGBE_READ_REG(hw, mrqc_reg); + if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */ + if (rss_hf != 0) /* Enable RSS */ + return -(EINVAL); + return 0; /* Nothing to do */ + } + /* RSS enabled */ + if (rss_hf == 0) /* Disable RSS */ + return -(EINVAL); + ixgbe_hw_rss_hash_set(hw, rss_conf); + return 0; +} + +int +ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct ixgbe_hw *hw; + uint8_t *hash_key; + uint32_t mrqc; + uint32_t rss_key; + uint64_t rss_hf; + uint16_t i; + uint32_t mrqc_reg; + uint32_t rssrk_reg; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type); + rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0); + hash_key = rss_conf->rss_key; + if (hash_key != NULL) { + /* Return RSS hash key */ + for (i = 0; i < 10; i++) { + rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i); + hash_key[(i * 4)] = rss_key & 0x000000FF; + hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF; + hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF; + hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF; + } + } + + /* Get RSS functions configured in MRQC register */ + mrqc = IXGBE_READ_REG(hw, mrqc_reg); + if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */ + rss_conf->rss_hf = 0; + return 0; + } + rss_hf = 0; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4) + rss_hf |= ETH_RSS_IPV4; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP) + rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6) + rss_hf |= ETH_RSS_IPV6; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX) + rss_hf |= ETH_RSS_IPV6_EX; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP) + rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP) + rss_hf |= ETH_RSS_IPV6_TCP_EX; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP) + rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP) + rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; + if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP) + rss_hf |= ETH_RSS_IPV6_UDP_EX; + rss_conf->rss_hf = rss_hf; + return 0; +} + +static void +ixgbe_rss_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_rss_conf rss_conf; + struct ixgbe_hw *hw; + uint32_t reta; + uint16_t i; + uint16_t j; + uint16_t sp_reta_size; + uint32_t reta_reg; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + sp_reta_size = ixgbe_reta_size_get(hw->mac.type); + + /* + * Fill in redirection table + * The byte-swap is needed because NIC registers are in + * little-endian order. + */ + reta = 0; + for (i = 0, j = 0; i < sp_reta_size; i++, j++) { + reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); + + if (j == dev->data->nb_rx_queues) + j = 0; + reta = (reta << 8) | j; + if ((i & 3) == 3) + IXGBE_WRITE_REG(hw, reta_reg, + rte_bswap32(reta)); + } + + /* + * Configure the RSS key and the RSS protocols used to compute + * the RSS hash of input packets. 
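+	 * For illustration: with 4 Rx queues the loop above fills the
+	 * 128-entry RETA described earlier as 0,1,2,3,0,1,2,3,..., so a
+	 * received packet whose 32-bit RSS hash is H is steered to queue
+	 * RETA[H & 0x7F] (the 7 hash LSBs index the table).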
+ */ + rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf; + if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) { + ixgbe_rss_disable(dev); + return; + } + if (rss_conf.rss_key == NULL) + rss_conf.rss_key = rss_intel_key; /* Default hash key */ + ixgbe_hw_rss_hash_set(hw, &rss_conf); +} + +#define NUM_VFTA_REGISTERS 128 +#define NIC_RX_BUFFER_SIZE 0x200 +#define X550_RX_BUFFER_SIZE 0x180 + +static void +ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_vmdq_dcb_conf *cfg; + struct ixgbe_hw *hw; + enum rte_eth_nb_pools num_pools; + uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl; + uint16_t pbsize; + uint8_t nb_tcs; /* number of traffic classes */ + int i; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; + num_pools = cfg->nb_queue_pools; + /* Check we have a valid number of pools */ + if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) { + ixgbe_rss_disable(dev); + return; + } + /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */ + nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools); + + /* + * RXPBSIZE + * split rx buffer up into sections, each for 1 traffic class + */ + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs); + break; + default: + pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs); + break; + } + for (i = 0; i < nb_tcs; i++) { + uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); + rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT)); + /* clear 10 bits. */ + rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */ + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize); + } + /* zero alloc all unused TCs */ + for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); + rxpbsize &= (~( 0x3FF << IXGBE_RXPBSIZE_SHIFT )); + /* clear 10 bits. */ + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize); + } + + /* MRQC: enable vmdq and dcb */ + mrqc = ((num_pools == ETH_16_POOLS) ? \ + IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN ); + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + + /* PFVTCTL: turn on virtualisation and set the default pool */ + vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; + if (cfg->enable_default_pool) { + vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT); + } else { + vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL; + } + + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl); + + /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */ + queue_mapping = 0; + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) + /* + * mapping is done with 3 bits per priority, + * so shift by i*3 each time + */ + queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3)); + + IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping); + + /* RTRPCS: DCB related */ + IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM); + + /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ + vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); + + /* VFTA - enable all vlan filters */ + for (i = 0; i < NUM_VFTA_REGISTERS; i++) { + IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF); + } + + /* VFRE: pool enabling for receive - 16 or 32 */ + IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), \ + num_pools == ETH_16_POOLS ? 
0xFFFF : 0xFFFFFFFF); + + /* + * MPSAR - allow pools to read specific mac addresses + * In this case, all pools should be able to read from mac addr 0 + */ + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF); + + /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */ + for (i = 0; i < cfg->nb_pool_maps; i++) { + /* set vlan id in VF register and set the valid bit */ + IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \ + (cfg->pool_map[i].vlan_id & 0xFFF))); + /* + * Put the allowed pools in VFB reg. As we only have 16 or 32 + * pools, we only need to use the first half of the register + * i.e. bits 0-31 + */ + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools); + } +} + +/** + * ixgbe_dcb_config_tx_hw_config - Configure general DCB TX parameters + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + */ +static void +ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + uint32_t reg; + uint32_t q; + + PMD_INIT_FUNC_TRACE(); + if (hw->mac.type != ixgbe_mac_82598EB) { + /* Disable the Tx desc arbiter so that MTQC can be changed */ + reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + reg |= IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + + /* Enable DCB for Tx with 8 TCs */ + if (dcb_config->num_tcs.pg_tcs == 8) { + reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; + } + else { + reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; + } + if (dcb_config->vt_mode) + reg |= IXGBE_MTQC_VT_ENA; + IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); + + /* Disable drop for all queues */ + for (q = 0; q < 128; q++) + IXGBE_WRITE_REG(hw, IXGBE_QDE, + (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT))); + + /* Enable the Tx desc arbiter */ + reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + reg &= ~IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + + /* Enable Security TX Buffer IFG for DCB */ + reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); + reg |= IXGBE_SECTX_DCB; + IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); + } + return; +} + +/** + * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters + * @dev: pointer to rte_eth_dev structure + * @dcb_config: pointer to ixgbe_dcb_config structure + */ +static void +ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev, + struct ixgbe_dcb_config *dcb_config) +{ + struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = + &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + if (hw->mac.type != ixgbe_mac_82598EB) + /*PF VF Transmit Enable*/ + IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), + vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 
0xFFFF : 0xFFFFFFFF); + + /*Configure general DCB TX parameters*/ + ixgbe_dcb_tx_hw_config(hw,dcb_config); + return; +} + +static void +ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev, + struct ixgbe_dcb_config *dcb_config) +{ + struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = + &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; + struct ixgbe_dcb_tc_config *tc; + uint8_t i,j; + + /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */ + if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS ) { + dcb_config->num_tcs.pg_tcs = ETH_8_TCS; + dcb_config->num_tcs.pfc_tcs = ETH_8_TCS; + } + else { + dcb_config->num_tcs.pg_tcs = ETH_4_TCS; + dcb_config->num_tcs.pfc_tcs = ETH_4_TCS; + } + /* User Priority to Traffic Class mapping */ + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + j = vmdq_rx_conf->dcb_tc[i]; + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = + (uint8_t)(1 << j); + } +} + +static void +ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev, + struct ixgbe_dcb_config *dcb_config) +{ + struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = + &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf; + struct ixgbe_dcb_tc_config *tc; + uint8_t i,j; + + /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */ + if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ) { + dcb_config->num_tcs.pg_tcs = ETH_8_TCS; + dcb_config->num_tcs.pfc_tcs = ETH_8_TCS; + } + else { + dcb_config->num_tcs.pg_tcs = ETH_4_TCS; + dcb_config->num_tcs.pfc_tcs = ETH_4_TCS; + } + + /* User Priority to Traffic Class mapping */ + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + j = vmdq_tx_conf->dcb_tc[i]; + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = + (uint8_t)(1 << j); + } + return; +} + +static void +ixgbe_dcb_rx_config(struct rte_eth_dev *dev, + struct ixgbe_dcb_config *dcb_config) +{ + struct rte_eth_dcb_rx_conf *rx_conf = + &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; + struct ixgbe_dcb_tc_config *tc; + uint8_t i,j; + + dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs; + dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs; + + /* User Priority to Traffic Class mapping */ + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + j = rx_conf->dcb_tc[i]; + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = + (uint8_t)(1 << j); + } +} + +static void +ixgbe_dcb_tx_config(struct rte_eth_dev *dev, + struct ixgbe_dcb_config *dcb_config) +{ + struct rte_eth_dcb_tx_conf *tx_conf = + &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf; + struct ixgbe_dcb_tc_config *tc; + uint8_t i,j; + + dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs; + dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs; + + /* User Priority to Traffic Class mapping */ + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + j = tx_conf->dcb_tc[i]; + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = + (uint8_t)(1 << j); + } +} + +/** + * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + */ +static void +ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + uint32_t reg; + uint32_t vlanctrl; + uint8_t i; + + PMD_INIT_FUNC_TRACE(); + /* + * Disable the arbiter before changing parameters + * (always enable recycle mode; WSP) + */ + reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); + + if (hw->mac.type != ixgbe_mac_82598EB) 
{ + reg = IXGBE_READ_REG(hw, IXGBE_MRQC); + if (dcb_config->num_tcs.pg_tcs == 4) { + if (dcb_config->vt_mode) + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_VMDQRT4TCEN; + else { + /* no matter the mode is DCB or DCB_RSS, just + * set the MRQE to RSSXTCEN. RSS is controlled + * by RSS_FIELD + */ + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0); + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_RTRSS4TCEN; + } + } + if (dcb_config->num_tcs.pg_tcs == 8) { + if (dcb_config->vt_mode) + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_VMDQRT8TCEN; + else { + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0); + reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | + IXGBE_MRQC_RTRSS8TCEN; + } + } + + IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg); + } + + /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ + vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); + + /* VFTA - enable all vlan filters */ + for (i = 0; i < NUM_VFTA_REGISTERS; i++) { + IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF); + } + + /* + * Configure Rx packet plane (recycle mode; WSP) and + * enable arbiter + */ + reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC; + IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); + + return; +} + +static void +ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill, + uint16_t *max,uint8_t *bwg_id, uint8_t *tsa, uint8_t *map) +{ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, + tsa, map); + break; + default: + break; + } +} + +static void +ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max, + uint8_t *bwg_id, uint8_t *tsa, uint8_t *map) +{ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,tsa); + ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,tsa); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,tsa); + ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,tsa, map); + break; + default: + break; + } +} + +#define DCB_RX_CONFIG 1 +#define DCB_TX_CONFIG 1 +#define DCB_TX_PB 1024 +/** + * ixgbe_dcb_hw_configure - Enable DCB and configure + * general DCB in VT mode and non-VT mode parameters + * @dev: pointer to rte_eth_dev structure + * @dcb_config: pointer to ixgbe_dcb_config structure + */ +static int +ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, + struct ixgbe_dcb_config *dcb_config) +{ + int ret = 0; + uint8_t i,pfc_en,nb_tcs; + uint16_t pbsize, rx_buffer_size; + uint8_t config_dcb_rx = 0; + uint8_t config_dcb_tx = 0; + uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0}; + uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0}; + uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0}; + uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0}; + uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0}; + struct ixgbe_dcb_tc_config *tc; + uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + switch(dev->data->dev_conf.rxmode.mq_mode){ + case ETH_MQ_RX_VMDQ_DCB: + dcb_config->vt_mode = true; + if (hw->mac.type != ixgbe_mac_82598EB) { 
+ config_dcb_rx = DCB_RX_CONFIG; + /* + *get dcb and VT rx configuration parameters + *from rte_eth_conf + */ + ixgbe_vmdq_dcb_rx_config(dev, dcb_config); + /*Configure general VMDQ and DCB RX parameters*/ + ixgbe_vmdq_dcb_configure(dev); + } + break; + case ETH_MQ_RX_DCB: + case ETH_MQ_RX_DCB_RSS: + dcb_config->vt_mode = false; + config_dcb_rx = DCB_RX_CONFIG; + /* Get dcb TX configuration parameters from rte_eth_conf */ + ixgbe_dcb_rx_config(dev, dcb_config); + /*Configure general DCB RX parameters*/ + ixgbe_dcb_rx_hw_config(hw, dcb_config); + break; + default: + PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration"); + break; + } + switch (dev->data->dev_conf.txmode.mq_mode) { + case ETH_MQ_TX_VMDQ_DCB: + dcb_config->vt_mode = true; + config_dcb_tx = DCB_TX_CONFIG; + /* get DCB and VT TX configuration parameters from rte_eth_conf */ + ixgbe_dcb_vt_tx_config(dev,dcb_config); + /*Configure general VMDQ and DCB TX parameters*/ + ixgbe_vmdq_dcb_hw_tx_config(dev,dcb_config); + break; + + case ETH_MQ_TX_DCB: + dcb_config->vt_mode = false; + config_dcb_tx = DCB_TX_CONFIG; + /*get DCB TX configuration parameters from rte_eth_conf*/ + ixgbe_dcb_tx_config(dev, dcb_config); + /*Configure general DCB TX parameters*/ + ixgbe_dcb_tx_hw_config(hw, dcb_config); + break; + default: + PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration"); + break; + } + + nb_tcs = dcb_config->num_tcs.pfc_tcs; + /* Unpack map */ + ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map); + if (nb_tcs == ETH_4_TCS) { + /* Avoid un-configured priority mapping to TC0 */ + uint8_t j = 4; + uint8_t mask = 0xFF; + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++) + mask = (uint8_t)(mask & (~ (1 << map[i]))); + for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) { + if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES)) + map[j++] = i; + mask >>= 1; + } + /* Re-configure 4 TCs BW */ + for (i = 0; i < nb_tcs; i++) { + tc = &dcb_config->tc_config[i]; + tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = + (uint8_t)(100 / nb_tcs); + tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = + (uint8_t)(100 / nb_tcs); + } + for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + tc = &dcb_config->tc_config[i]; + tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0; + tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0; + } + } + + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + rx_buffer_size = X550_RX_BUFFER_SIZE; + break; + default: + rx_buffer_size = NIC_RX_BUFFER_SIZE; + break; + } + + if (config_dcb_rx) { + /* Set RX buffer size */ + pbsize = (uint16_t)(rx_buffer_size / nb_tcs); + uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT; + for (i = 0; i < nb_tcs; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize); + } + /* zero alloc all unused TCs */ + for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); + } + } + if (config_dcb_tx) { + /* Only support an equally distributed Tx packet buffer strategy. 
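+		 * Each of the nb_tcs traffic classes gets an equal share of
+		 * the Tx packet buffer (IXGBE_TXPBSIZE_MAX / nb_tcs); the
+		 * matching TXPBTHRESH value is that share expressed in KB
+		 * units (DCB_TX_PB is 1024) less IXGBE_TXPKT_SIZE_MAX, i.e.
+		 * leaving headroom for a maximum-sized packet. Unused TCs are
+		 * zeroed below, mirroring the RXPBSIZE split done for Rx.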
*/ + uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs; + uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX; + for (i = 0; i < nb_tcs; i++) { + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); + IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); + } + /* Clear unused TCs, if any, to zero buffer size*/ + for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); + } + } + + /*Calculates traffic class credits*/ + ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame, + IXGBE_DCB_TX_CONFIG); + ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame, + IXGBE_DCB_RX_CONFIG); + + if (config_dcb_rx) { + /* Unpack CEE standard containers */ + ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill); + ixgbe_dcb_unpack_max_cee(dcb_config, max); + ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid); + ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa); + /* Configure PG(ETS) RX */ + ixgbe_dcb_hw_arbite_rx_config(hw,refill,max,bwgid,tsa,map); + } + + if (config_dcb_tx) { + /* Unpack CEE standard containers */ + ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max_cee(dcb_config, max); + ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); + ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); + /* Configure PG(ETS) TX */ + ixgbe_dcb_hw_arbite_tx_config(hw,refill,max,bwgid,tsa,map); + } + + /*Configure queue statistics registers*/ + ixgbe_dcb_config_tc_stats_82599(hw, dcb_config); + + /* Check if the PFC is supported */ + if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) { + pbsize = (uint16_t)(rx_buffer_size / nb_tcs); + for (i = 0; i < nb_tcs; i++) { + /* + * If the TC count is 8,and the default high_water is 48, + * the low_water is 16 as default. + */ + hw->fc.high_water[i] = (pbsize * 3 ) / 4; + hw->fc.low_water[i] = pbsize / 4; + /* Enable pfc for this TC */ + tc = &dcb_config->tc_config[i]; + tc->pfc = ixgbe_dcb_pfc_enabled; + } + ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en); + if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS) + pfc_en &= 0x0F; + ret = ixgbe_dcb_config_pfc(hw, pfc_en, map); + } + + return ret; +} + +/** + * ixgbe_configure_dcb - Configure DCB Hardware + * @dev: pointer to rte_eth_dev + */ +void ixgbe_configure_dcb(struct rte_eth_dev *dev) +{ + struct ixgbe_dcb_config *dcb_cfg = + IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); + struct rte_eth_conf *dev_conf = &(dev->data->dev_conf); + + PMD_INIT_FUNC_TRACE(); + + /* check support mq_mode for DCB */ + if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) && + (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) && + (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS)) + return; + + if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES) + return; + + /** Configure DCB hardware **/ + ixgbe_dcb_hw_configure(dev, dcb_cfg); + + return; +} + +/* + * VMDq only support for 10 GbE NIC. 
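+ * The function below programs pure VMDq pooling (no DCB, RSS disabled):
+ * MRQC selects VMDQEN, VT_CTL picks the default pool, the per-pool VMOLR
+ * values set the Rx filtering mode, and VLVF/VLVFB map each configured
+ * VLAN id to the pools that are allowed to receive it.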
+ */ +static void +ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_vmdq_rx_conf *cfg; + struct ixgbe_hw *hw; + enum rte_eth_nb_pools num_pools; + uint32_t mrqc, vt_ctl, vlanctrl; + uint32_t vmolr = 0; + int i; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf; + num_pools = cfg->nb_queue_pools; + + ixgbe_rss_disable(dev); + + /* MRQC: enable vmdq */ + mrqc = IXGBE_MRQC_VMDQEN; + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + + /* PFVTCTL: turn on virtualisation and set the default pool */ + vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; + if (cfg->enable_default_pool) + vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT); + else + vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL; + + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl); + + for (i = 0; i < (int)num_pools; i++) { + vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr); + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr); + } + + /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ + vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); + + /* VFTA - enable all vlan filters */ + for (i = 0; i < NUM_VFTA_REGISTERS; i++) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX); + + /* VFRE: pool enabling for receive - 64 */ + IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX); + if (num_pools == ETH_64_POOLS) + IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX); + + /* + * MPSAR - allow pools to read specific mac addresses + * In this case, all pools should be able to read from mac addr 0 + */ + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX); + + /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */ + for (i = 0; i < cfg->nb_pool_maps; i++) { + /* set vlan id in VF register and set the valid bit */ + IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \ + (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK))); + /* + * Put the allowed pools in VFB reg. As we only have 16 or 64 + * pools, we only need to use the first half of the register + * i.e. 
bits 0-31 + */ + if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0) + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), \ + (cfg->pool_map[i].pools & UINT32_MAX)); + else + IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i*2+1)), \ + ((cfg->pool_map[i].pools >> 32) \ + & UINT32_MAX)); + + } + + /* PFDMA Tx General Switch Control Enables VMDQ loopback */ + if (cfg->enable_loop_back) { + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++) + IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX); + } + + IXGBE_WRITE_FLUSH(hw); +} + +/* + * ixgbe_dcb_config_tx_hw_config - Configure general VMDq TX parameters + * @hw: pointer to hardware structure + */ +static void +ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw) +{ + uint32_t reg; + uint32_t q; + + PMD_INIT_FUNC_TRACE(); + /*PF VF Transmit Enable*/ + IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX); + + /* Disable the Tx desc arbiter so that MTQC can be changed */ + reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + reg |= IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + + reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF; + IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); + + /* Disable drop for all queues */ + for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++) + IXGBE_WRITE_REG(hw, IXGBE_QDE, + (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT))); + + /* Enable the Tx desc arbiter */ + reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + reg &= ~IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + + IXGBE_WRITE_FLUSH(hw); + + return; +} + +static int __attribute__((cold)) +ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq) +{ + struct ixgbe_rx_entry *rxe = rxq->sw_ring; + uint64_t dma_addr; + unsigned i; + + /* Initialize software ring entries */ + for (i = 0; i < rxq->nb_rx_desc; i++) { + volatile union ixgbe_adv_rx_desc *rxd; + struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool); + if (mbuf == NULL) { + PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u", + (unsigned) rxq->queue_id); + return -ENOMEM; + } + + rte_mbuf_refcnt_set(mbuf, 1); + mbuf->next = NULL; + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + mbuf->nb_segs = 1; + mbuf->port = rxq->port_id; + + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf)); + rxd = &rxq->rx_ring[i]; + rxd->read.hdr_addr = 0; + rxd->read.pkt_addr = dma_addr; + rxe[i].mbuf = mbuf; + } + + return 0; +} + +static int +ixgbe_config_vf_rss(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + uint32_t mrqc; + + ixgbe_rss_configure(dev); + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* MRQC: enable VF RSS */ + mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); + mrqc &= ~IXGBE_MRQC_MRQE_MASK; + switch (RTE_ETH_DEV_SRIOV(dev).active) { + case ETH_64_POOLS: + mrqc |= IXGBE_MRQC_VMDQRSS64EN; + break; + + case ETH_32_POOLS: + mrqc |= IXGBE_MRQC_VMDQRSS32EN; + break; + + default: + PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS"); + return -EINVAL; + } + + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + + return 0; +} + +static int +ixgbe_config_vf_default(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + switch (RTE_ETH_DEV_SRIOV(dev).active) { + case ETH_64_POOLS: + IXGBE_WRITE_REG(hw, IXGBE_MRQC, + IXGBE_MRQC_VMDQEN); + break; + + case ETH_32_POOLS: + IXGBE_WRITE_REG(hw, IXGBE_MRQC, + IXGBE_MRQC_VMDQRT4TCEN); + break; + + case ETH_16_POOLS: + IXGBE_WRITE_REG(hw, IXGBE_MRQC, + IXGBE_MRQC_VMDQRT8TCEN); + break; + default: + PMD_INIT_LOG(ERR, + "invalid pool 
number in IOV mode"); + break; + } + return 0; +} + +static int +ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->mac.type == ixgbe_mac_82598EB) + return 0; + + if (RTE_ETH_DEV_SRIOV(dev).active == 0) { + /* + * SRIOV inactive scheme + * any DCB/RSS w/o VMDq multi-queue setting + */ + switch (dev->data->dev_conf.rxmode.mq_mode) { + case ETH_MQ_RX_RSS: + case ETH_MQ_RX_DCB_RSS: + case ETH_MQ_RX_VMDQ_RSS: + ixgbe_rss_configure(dev); + break; + + case ETH_MQ_RX_VMDQ_DCB: + ixgbe_vmdq_dcb_configure(dev); + break; + + case ETH_MQ_RX_VMDQ_ONLY: + ixgbe_vmdq_rx_hw_configure(dev); + break; + + case ETH_MQ_RX_NONE: + default: + /* if mq_mode is none, disable rss mode.*/ + ixgbe_rss_disable(dev); + break; + } + } else { + /* + * SRIOV active scheme + * Support RSS together with VMDq & SRIOV + */ + switch (dev->data->dev_conf.rxmode.mq_mode) { + case ETH_MQ_RX_RSS: + case ETH_MQ_RX_VMDQ_RSS: + ixgbe_config_vf_rss(dev); + break; + + /* FIXME if support DCB/RSS together with VMDq & SRIOV */ + case ETH_MQ_RX_VMDQ_DCB: + case ETH_MQ_RX_VMDQ_DCB_RSS: + PMD_INIT_LOG(ERR, + "Could not support DCB with VMDq & SRIOV"); + return -1; + default: + ixgbe_config_vf_default(dev); + break; + } + } + + return 0; +} + +static int +ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t mtqc; + uint32_t rttdcs; + + if (hw->mac.type == ixgbe_mac_82598EB) + return 0; + + /* disable arbiter before setting MTQC */ + rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + rttdcs |= IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); + + if (RTE_ETH_DEV_SRIOV(dev).active == 0) { + /* + * SRIOV inactive scheme + * any DCB w/o VMDq multi-queue setting + */ + if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY) + ixgbe_vmdq_tx_hw_configure(hw); + else { + mtqc = IXGBE_MTQC_64Q_1PB; + IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc); + } + } else { + switch (RTE_ETH_DEV_SRIOV(dev).active) { + + /* + * SRIOV active scheme + * FIXME if support DCB together with VMDq & SRIOV + */ + case ETH_64_POOLS: + mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF; + break; + case ETH_32_POOLS: + mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF; + break; + case ETH_16_POOLS: + mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA | + IXGBE_MTQC_8TC_8TQ; + break; + default: + mtqc = IXGBE_MTQC_64Q_1PB; + PMD_INIT_LOG(ERR, "invalid pool number in IOV mode"); + } + IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc); + } + + /* re-enable arbiter */ + rttdcs &= ~IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); + + return 0; +} + +/** + * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF + * + * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the + * spec rev. 3.0 chapter 8.2.3.8.13. 
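+ *
+ * For illustration, assuming the common 2 KB of mbuf data room left after
+ * RTE_PKTMBUF_HEADROOM: IPV4_MAX_PKT_LEN / 2048 is roughly 32, so
+ * IXGBE_RSCCTL_MAXDESC_16 is returned and 16 * 2 KB = 32 KB stays below the
+ * 64 KB minus one limit noted in the code below.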
+ * + * @pool Memory pool of the Rx queue + */ +static inline uint32_t +ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool) +{ + struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool); + + /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */ + uint16_t maxdesc = + IPV4_MAX_PKT_LEN / + (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM); + + if (maxdesc >= 16) + return IXGBE_RSCCTL_MAXDESC_16; + else if (maxdesc >= 8) + return IXGBE_RSCCTL_MAXDESC_8; + else if (maxdesc >= 4) + return IXGBE_RSCCTL_MAXDESC_4; + else + return IXGBE_RSCCTL_MAXDESC_1; +} + +/** + * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX + * interrupt + * + * (Taken from FreeBSD tree) + * (yes this is all very magic and confusing :) + * + * @dev port handle + * @entry the register array entry + * @vector the MSIX vector for this queue + * @type RX/TX/MISC + */ +static void +ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + u32 ivar, index; + + vector |= IXGBE_IVAR_ALLOC_VAL; + + switch (hw->mac.type) { + + case ixgbe_mac_82598EB: + if (type == -1) + entry = IXGBE_IVAR_OTHER_CAUSES_INDEX; + else + entry += (type * 64); + index = (entry >> 2) & 0x1F; + ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); + ivar &= ~(0xFF << (8 * (entry & 0x3))); + ivar |= (vector << (8 * (entry & 0x3))); + IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); + break; + + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + if (type == -1) { /* MISC IVAR */ + index = (entry & 1) * 8; + ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); + ivar &= ~(0xFF << index); + ivar |= (vector << index); + IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); + } else { /* RX/TX IVARS */ + index = (16 * (entry & 1)) + (8 * type); + ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1)); + ivar &= ~(0xFF << index); + ivar |= (vector << index); + IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar); + } + + break; + + default: + break; + } +} + +void __attribute__((cold)) +ixgbe_set_rx_function(struct rte_eth_dev *dev) +{ + uint16_t i, rx_using_sse; + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; + + /* + * In order to allow Vector Rx there are a few configuration + * conditions to be met and Rx Bulk Allocation should be allowed. + */ + if (ixgbe_rx_vec_dev_conf_condition_check(dev) || + !adapter->rx_bulk_alloc_allowed) { + PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx " + "preconditions or RTE_IXGBE_INC_VECTOR is " + "not enabled", + dev->data->port_id); + + adapter->rx_vec_allowed = false; + } + + /* + * Initialize the appropriate LRO callback. + * + * If all queues satisfy the bulk allocation preconditions + * (hw->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation. + * Otherwise use a single allocation version. + */ + if (dev->data->lro) { + if (adapter->rx_bulk_alloc_allowed) { + PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk " + "allocation version"); + dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc; + } else { + PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single " + "allocation version"); + dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc; + } + } else if (dev->data->scattered_rx) { + /* + * Set the non-LRO scattered callback: there are Vector and + * single allocation versions. 
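+	 * The selection order below is: Vector scattered Rx first, then the
+	 * bulk-allocation path (the LRO bulk-alloc routine doubles as the
+	 * plain scattered one), with the single-allocation LRO routine as
+	 * the final fallback.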
+ */ + if (adapter->rx_vec_allowed) { + PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx " + "callback (port=%d).", + dev->data->port_id); + + dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec; + } else if (adapter->rx_bulk_alloc_allowed) { + PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk " + "allocation callback (port=%d).", + dev->data->port_id); + dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc; + } else { + PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, " + "single allocation) " + "Scattered Rx callback " + "(port=%d).", + dev->data->port_id); + + dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc; + } + /* + * Below we set "simple" callbacks according to port/queues parameters. + * If parameters allow we are going to choose between the following + * callbacks: + * - Vector + * - Bulk Allocation + * - Single buffer allocation (the simplest one) + */ + } else if (adapter->rx_vec_allowed) { + PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX " + "burst size no less than %d (port=%d).", + RTE_IXGBE_DESCS_PER_LOOP, + dev->data->port_id); + + dev->rx_pkt_burst = ixgbe_recv_pkts_vec; + } else if (adapter->rx_bulk_alloc_allowed) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " + "satisfied. Rx Burst Bulk Alloc function " + "will be used on port=%d.", + dev->data->port_id); + + dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc; + } else { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not " + "satisfied, or Scattered Rx is requested " + "(port=%d).", + dev->data->port_id); + + dev->rx_pkt_burst = ixgbe_recv_pkts; + } + + /* Propagate information about RX function choice through all queues. */ + + rx_using_sse = + (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec || + dev->rx_pkt_burst == ixgbe_recv_pkts_vec); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i]; + rxq->rx_using_sse = rx_using_sse; + } +} + +/** + * ixgbe_set_rsc - configure RSC related port HW registers + * + * Configures the port's RSC related registers according to the 4.6.7.2 chapter + * of 82599 Spec (x540 configuration is virtually the same). + * + * @dev port handle + * + * Returns 0 in case of success or a non-zero error code + */ +static int +ixgbe_set_rsc(struct rte_eth_dev *dev) +{ + struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_dev_info dev_info = { 0 }; + bool rsc_capable = false; + uint16_t i; + uint32_t rdrxctl; + + /* Sanity check */ + dev->dev_ops->dev_infos_get(dev, &dev_info); + if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) + rsc_capable = true; + + if (!rsc_capable && rx_conf->enable_lro) { + PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't " + "support it"); + return -EINVAL; + } + + /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */ + + if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) { + /* + * According to chapter of 4.6.7.2.1 of the Spec Rev. + * 3.0 RSC configuration requires HW CRC stripping being + * enabled. If user requested both HW CRC stripping off + * and RSC on - return an error. + */ + PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC " + "is disabled"); + return -EINVAL; + } + + /* RFCTL configuration */ + if (rsc_capable) { + uint32_t rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL); + if (rx_conf->enable_lro) + /* + * Since NFS packets coalescing is not supported - clear + * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is + * enabled. 
+ */ + rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS | + IXGBE_RFCTL_NFSR_DIS); + else + rfctl |= IXGBE_RFCTL_RSC_DIS; + + IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl); + } + + /* If LRO hasn't been requested - we are done here. */ + if (!rx_conf->enable_lro) + return 0; + + /* Set RDRXCTL.RSCACKC bit */ + rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + rdrxctl |= IXGBE_RDRXCTL_RSCACKC; + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); + + /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i]; + uint32_t srrctl = + IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx)); + uint32_t rscctl = + IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx)); + uint32_t psrtype = + IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx)); + uint32_t eitr = + IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx)); + + /* + * ixgbe PMD doesn't support header-split at the moment. + * + * Following the 4.6.7.2.1 chapter of the 82599/x540 + * Spec if RSC is enabled the SRRCTL[n].BSIZEHEADER + * should be configured even if header split is not + * enabled. We will configure it 128 bytes following the + * recommendation in the spec. + */ + srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; + srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & + IXGBE_SRRCTL_BSIZEHDR_MASK; + + /* + * TODO: Consider setting the Receive Descriptor Minimum + * Threshold Size for an RSC case. This is not an obviously + * beneficiary option but the one worth considering... + */ + + rscctl |= IXGBE_RSCCTL_RSCEN; + rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool); + psrtype |= IXGBE_PSRTYPE_TCPHDR; + + /* + * RSC: Set ITR interval corresponding to 2K ints/s. + * + * Full-sized RSC aggregations for a 10Gb/s link will + * arrive at about 20K aggregation/s rate. + * + * 2K inst/s rate will make only 10% of the + * aggregations to be closed due to the interrupt timer + * expiration for a streaming at wire-speed case. + * + * For a sparse streaming case this setting will yield + * at most 500us latency for a single RSC aggregation. + */ + eitr &= ~IXGBE_EITR_ITR_INT_MASK; + eitr |= IXGBE_EITR_INTERVAL_US(500) | IXGBE_EITR_CNT_WDIS; + + IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl); + IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl); + IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype); + IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr); + + /* + * RSC requires the mapping of the queue to the + * interrupt vector. + */ + ixgbe_set_ivar(dev, rxq->reg_idx, i, 0); + } + + dev->data->lro = 1; + + PMD_INIT_LOG(DEBUG, "enabling LRO mode"); + + return 0; +} + +/* + * Initializes Receive Unit. + */ +int __attribute__((cold)) +ixgbe_dev_rx_init(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + struct ixgbe_rx_queue *rxq; + uint64_t bus_addr; + uint32_t rxctrl; + uint32_t fctrl; + uint32_t hlreg0; + uint32_t maxfrs; + uint32_t srrctl; + uint32_t rdrxctl; + uint32_t rxcsum; + uint16_t buf_size; + uint16_t i; + struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode; + int rc; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * Make sure receives are disabled while setting + * up the RX context (registers, descriptor rings, etc.). 
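+	 * The overall order is: clear RXCTRL.RXEN, program FCTRL and HLREG0,
+	 * set up each queue's descriptor ring base/length and SRRCTL, apply
+	 * the multi-queue mode (RSS/DCB/VMDq), then RXCSUM and RSC. Rx is
+	 * re-enabled afterwards from ixgbe_dev_rxtx_start().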
+ */ + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); + + /* Enable receipt of broadcasted frames */ + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_BAM; + fctrl |= IXGBE_FCTRL_DPF; + fctrl |= IXGBE_FCTRL_PMCF; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + + /* + * Configure CRC stripping, if any. + */ + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + if (rx_conf->hw_strip_crc) + hlreg0 |= IXGBE_HLREG0_RXCRCSTRP; + else + hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP; + + /* + * Configure jumbo frame support, if any. + */ + if (rx_conf->jumbo_frame == 1) { + hlreg0 |= IXGBE_HLREG0_JUMBOEN; + maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); + maxfrs &= 0x0000FFFF; + maxfrs |= (rx_conf->max_rx_pkt_len << 16); + IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs); + } else + hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; + + /* + * If loopback mode is configured for 82599, set LPBK bit. + */ + if (hw->mac.type == ixgbe_mac_82599EB && + dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX) + hlreg0 |= IXGBE_HLREG0_LPBK; + else + hlreg0 &= ~IXGBE_HLREG0_LPBK; + + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + + /* Setup RX queues */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + + /* + * Reset crc_len in case it was changed after queue setup by a + * call to configure. + */ + rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN; + + /* Setup the Base and Length of the Rx Descriptor Rings */ + bus_addr = rxq->rx_ring_phys_addr; + IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx), + (uint32_t)(bus_addr & 0x00000000ffffffffULL)); + IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx), + (uint32_t)(bus_addr >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx), + rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc)); + IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0); + + /* Configure the SRRCTL register */ +#ifdef RTE_HEADER_SPLIT_ENABLE + /* + * Configure Header Split + */ + if (rx_conf->header_split) { + if (hw->mac.type == ixgbe_mac_82599EB) { + /* Must setup the PSRTYPE register */ + uint32_t psrtype; + psrtype = IXGBE_PSRTYPE_TCPHDR | + IXGBE_PSRTYPE_UDPHDR | + IXGBE_PSRTYPE_IPV4HDR | + IXGBE_PSRTYPE_IPV6HDR; + IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype); + } + srrctl = ((rx_conf->split_hdr_size << + IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & + IXGBE_SRRCTL_BSIZEHDR_MASK); + srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; + } else +#endif + srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + + /* Set if packets are dropped when no descriptors available */ + if (rxq->drop_en) + srrctl |= IXGBE_SRRCTL_DROP_EN; + + /* + * Configure the RX buffer size in the BSIZEPACKET field of + * the SRRCTL register of the queue. + * The value is in 1 KB resolution. Valid values can be from + * 1 KB to 16 KB. + */ + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - + RTE_PKTMBUF_HEADROOM); + srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) & + IXGBE_SRRCTL_BSIZEPKT_MASK); + + IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl); + + buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) << + IXGBE_SRRCTL_BSIZEPKT_SHIFT); + + /* It adds dual VLAN length for supporting dual VLAN */ + if (dev->data->dev_conf.rxmode.max_rx_pkt_len + + 2 * IXGBE_VLAN_TAG_SIZE > buf_size) + dev->data->scattered_rx = 1; + } + + if (rx_conf->enable_scatter) + dev->data->scattered_rx = 1; + + /* + * Device configured with multiple RX queues. 
+ */ + ixgbe_dev_mq_rx_configure(dev); + + /* + * Setup the Checksum Register. + * Disable Full-Packet Checksum which is mutually exclusive with RSS. + * Enable IP/L4 checkum computation by hardware if requested to do so. + */ + rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); + rxcsum |= IXGBE_RXCSUM_PCSD; + if (rx_conf->hw_ip_checksum) + rxcsum |= IXGBE_RXCSUM_IPPCSE; + else + rxcsum &= ~IXGBE_RXCSUM_IPPCSE; + + IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); + + if (hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540) { + rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + if (rx_conf->hw_strip_crc) + rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; + else + rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP; + rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); + } + + rc = ixgbe_set_rsc(dev); + if (rc) + return rc; + + ixgbe_set_rx_function(dev); + + return 0; +} + +/* + * Initializes Transmit Unit. + */ +void __attribute__((cold)) +ixgbe_dev_tx_init(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + struct ixgbe_tx_queue *txq; + uint64_t bus_addr; + uint32_t hlreg0; + uint32_t txctrl; + uint16_t i; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Enable TX CRC (checksum offload requirement) and hw padding + * (TSO requirement) */ + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN); + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + + /* Setup the Base and Length of the Tx Descriptor Rings */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + + bus_addr = txq->tx_ring_phys_addr; + IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx), + (uint32_t)(bus_addr & 0x00000000ffffffffULL)); + IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx), + (uint32_t)(bus_addr >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx), + txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc)); + /* Setup the HW Tx Head and TX Tail descriptor pointers */ + IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0); + + /* + * Disable Tx Head Writeback RO bit, since this hoses + * bookkeeping if things aren't delivered in order. + */ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + txctrl = IXGBE_READ_REG(hw, + IXGBE_DCA_TXCTRL(txq->reg_idx)); + txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx), + txctrl); + break; + + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + default: + txctrl = IXGBE_READ_REG(hw, + IXGBE_DCA_TXCTRL_82599(txq->reg_idx)); + txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx), + txctrl); + break; + } + } + + /* Device configured with multiple TX queues. */ + ixgbe_dev_mq_tx_configure(dev); +} + +/* + * Set up link for 82599 loopback mode Tx->Rx. 
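+ * This forces AUTOC to 10G link with no auto-negotiation plus FLU (force
+ * link up) and resets the pipeline so the new AUTOC value takes effect; it
+ * pairs with the HLREG0.LPBK bit set in ixgbe_dev_rx_init() when lpbk_mode
+ * is IXGBE_LPBK_82599_TX_RX.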
+ */ +static inline void __attribute__((cold)) +ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw) +{ + PMD_INIT_FUNC_TRACE(); + + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) != + IXGBE_SUCCESS) { + PMD_INIT_LOG(ERR, "Could not enable loopback mode"); + /* ignore error */ + return; + } + } + + /* Restart link */ + IXGBE_WRITE_REG(hw, + IXGBE_AUTOC, + IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU); + ixgbe_reset_pipeline_82599(hw); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + msec_delay(50); +} + + +/* + * Start Transmit and Receive Units. + */ +int __attribute__((cold)) +ixgbe_dev_rxtx_start(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + struct ixgbe_tx_queue *txq; + struct ixgbe_rx_queue *rxq; + uint32_t txdctl; + uint32_t dmatxctl; + uint32_t rxctrl; + uint16_t i; + int ret = 0; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + /* Setup Transmit Threshold Registers */ + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx)); + txdctl |= txq->pthresh & 0x7F; + txdctl |= ((txq->hthresh & 0x7F) << 8); + txdctl |= ((txq->wthresh & 0x7F) << 16); + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl); + } + + if (hw->mac.type != ixgbe_mac_82598EB) { + dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + dmatxctl |= IXGBE_DMATXCTL_TE; + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (!txq->tx_deferred_start) { + ret = ixgbe_dev_tx_queue_start(dev, i); + if (ret < 0) + return ret; + } + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + if (!rxq->rx_deferred_start) { + ret = ixgbe_dev_rx_queue_start(dev, i); + if (ret < 0) + return ret; + } + } + + /* Enable Receive engine */ + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (hw->mac.type == ixgbe_mac_82598EB) + rxctrl |= IXGBE_RXCTRL_DMBYPS; + rxctrl |= IXGBE_RXCTRL_RXEN; + hw->mac.ops.enable_rx_dma(hw, rxctrl); + + /* If loopback mode is enabled for 82599, set up the link accordingly */ + if (hw->mac.type == ixgbe_mac_82599EB && + dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX) + ixgbe_setup_loopback_link_82599(hw); + + return 0; +} + +/* + * Start Receive Units for specified queue. 
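+ * The sequence below is: populate the software ring with mbufs, set
+ * RXDCTL.ENABLE and poll until the bit reads back, then write RDH = 0 and
+ * RDT = nb_rx_desc - 1 to hand the freshly initialized descriptors to
+ * hardware.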
+ */ +int __attribute__((cold)) +ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct ixgbe_hw *hw; + struct ixgbe_rx_queue *rxq; + uint32_t rxdctl; + int poll_ms; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (rx_queue_id < dev->data->nb_rx_queues) { + rxq = dev->data->rx_queues[rx_queue_id]; + + /* Allocate buffers for descriptor rings */ + if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) { + PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d", + rx_queue_id); + return -1; + } + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); + rxdctl |= IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl); + + /* Wait until RX Enable ready */ + poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; + do { + rte_delay_ms(1); + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); + } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", + rx_queue_id); + rte_wmb(); + IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + } else + return -1; + + return 0; +} + +/* + * Stop Receive Units for specified queue. + */ +int __attribute__((cold)) +ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct ixgbe_hw *hw; + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_rx_queue *rxq; + uint32_t rxdctl; + int poll_ms; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (rx_queue_id < dev->data->nb_rx_queues) { + rxq = dev->data->rx_queues[rx_queue_id]; + + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); + rxdctl &= ~IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl); + + /* Wait until RX Enable ready */ + poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; + do { + rte_delay_ms(1); + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); + } while (--poll_ms && (rxdctl | IXGBE_RXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", + rx_queue_id); + + rte_delay_us(RTE_IXGBE_WAIT_100_US); + + ixgbe_rx_queue_release_mbufs(rxq); + ixgbe_reset_rx_queue(adapter, rxq); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + } else + return -1; + + return 0; +} + + +/* + * Start Transmit Units for specified queue. 
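+ * Enabling sets TXDCTL.ENABLE and, on 82599, polls until the bit reads
+ * back before resetting TDH/TDT to 0; the stop path further below first
+ * waits for the queue to drain (TDH == TDT) before clearing TXDCTL.ENABLE
+ * and releasing the queue's mbufs.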
+ */ +int __attribute__((cold)) +ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct ixgbe_hw *hw; + struct ixgbe_tx_queue *txq; + uint32_t txdctl; + int poll_ms; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (tx_queue_id < dev->data->nb_tx_queues) { + txq = dev->data->tx_queues[tx_queue_id]; + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx)); + txdctl |= IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl); + + /* Wait until TX Enable ready */ + if (hw->mac.type == ixgbe_mac_82599EB) { + poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; + do { + rte_delay_ms(1); + txdctl = IXGBE_READ_REG(hw, + IXGBE_TXDCTL(txq->reg_idx)); + } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable " + "Tx Queue %d", tx_queue_id); + } + rte_wmb(); + IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + } else + return -1; + + return 0; +} + +/* + * Stop Transmit Units for specified queue. + */ +int __attribute__((cold)) +ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct ixgbe_hw *hw; + struct ixgbe_tx_queue *txq; + uint32_t txdctl; + uint32_t txtdh, txtdt; + int poll_ms; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (tx_queue_id < dev->data->nb_tx_queues) { + txq = dev->data->tx_queues[tx_queue_id]; + + /* Wait until TX queue is empty */ + if (hw->mac.type == ixgbe_mac_82599EB) { + poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; + do { + rte_delay_us(RTE_IXGBE_WAIT_100_US); + txtdh = IXGBE_READ_REG(hw, + IXGBE_TDH(txq->reg_idx)); + txtdt = IXGBE_READ_REG(hw, + IXGBE_TDT(txq->reg_idx)); + } while (--poll_ms && (txtdh != txtdt)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Tx Queue %d is not empty " + "when stopping.", tx_queue_id); + } + + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx)); + txdctl &= ~IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl); + + /* Wait until TX Enable ready */ + if (hw->mac.type == ixgbe_mac_82599EB) { + poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; + do { + rte_delay_ms(1); + txdctl = IXGBE_READ_REG(hw, + IXGBE_TXDCTL(txq->reg_idx)); + } while (--poll_ms && (txdctl | IXGBE_TXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not disable " + "Tx Queue %d", tx_queue_id); + } + + if (txq->ops != NULL) { + txq->ops->release_mbufs(txq); + txq->ops->reset(txq); + } + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + } else + return -1; + + return 0; +} + +void +ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct ixgbe_rx_queue *rxq; + + rxq = dev->data->rx_queues[queue_id]; + + qinfo->mp = rxq->mb_pool; + qinfo->scattered_rx = dev->data->scattered_rx; + qinfo->nb_desc = rxq->nb_rx_desc; + + qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; + qinfo->conf.rx_drop_en = rxq->drop_en; + qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; +} + +void +ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct ixgbe_tx_queue *txq; + + txq = dev->data->tx_queues[queue_id]; + + qinfo->nb_desc = txq->nb_tx_desc; + + qinfo->conf.tx_thresh.pthresh = txq->pthresh; + qinfo->conf.tx_thresh.hthresh = txq->hthresh; + qinfo->conf.tx_thresh.wthresh = txq->wthresh; + + 
qinfo->conf.tx_free_thresh = txq->tx_free_thresh; + qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh; + qinfo->conf.txq_flags = txq->txq_flags; + qinfo->conf.tx_deferred_start = txq->tx_deferred_start; +} + +/* + * [VF] Initializes Receive Unit. + */ +int __attribute__((cold)) +ixgbevf_dev_rx_init(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + struct ixgbe_rx_queue *rxq; + uint64_t bus_addr; + uint32_t srrctl, psrtype = 0; + uint16_t buf_size; + uint16_t i; + int ret; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) { + PMD_INIT_LOG(ERR, "The number of Rx queue invalid, " + "it should be power of 2"); + return -1; + } + + if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) { + PMD_INIT_LOG(ERR, "The number of Rx queue invalid, " + "it should be equal to or less than %d", + hw->mac.max_rx_queues); + return -1; + } + + /* + * When the VF driver issues a IXGBE_VF_RESET request, the PF driver + * disables the VF receipt of packets if the PF MTU is > 1500. + * This is done to deal with 82599 limitations that imposes + * the PF and all VFs to share the same MTU. + * Then, the PF driver enables again the VF receipt of packet when + * the VF driver issues a IXGBE_VF_SET_LPE request. + * In the meantime, the VF device cannot be used, even if the VF driver + * and the Guest VM network stack are ready to accept packets with a + * size up to the PF MTU. + * As a work-around to this PF behaviour, force the call to + * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way, + * VF packets received can work in all cases. + */ + ixgbevf_rlpml_set_vf(hw, + (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len); + + /* Setup RX queues */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + + /* Allocate buffers for descriptor rings */ + ret = ixgbe_alloc_rx_queue_mbufs(rxq); + if (ret) + return ret; + + /* Setup the Base and Length of the Rx Descriptor Rings */ + bus_addr = rxq->rx_ring_phys_addr; + + IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i), + (uint32_t)(bus_addr & 0x00000000ffffffffULL)); + IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), + (uint32_t)(bus_addr >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i), + rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc)); + IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0); + + + /* Configure the SRRCTL register */ +#ifdef RTE_HEADER_SPLIT_ENABLE + /* + * Configure Header Split + */ + if (dev->data->dev_conf.rxmode.header_split) { + srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size << + IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & + IXGBE_SRRCTL_BSIZEHDR_MASK); + srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; + } else +#endif + srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + + /* Set if packets are dropped when no descriptors available */ + if (rxq->drop_en) + srrctl |= IXGBE_SRRCTL_DROP_EN; + + /* + * Configure the RX buffer size in the BSIZEPACKET field of + * the SRRCTL register of the queue. + * The value is in 1 KB resolution. Valid values can be from + * 1 KB to 16 KB. 
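+ *
+ * Worked example, assuming a pool created with the default
+ * RTE_MBUF_DEFAULT_BUF_SIZE (2048-byte data room plus 128 bytes of
+ * headroom): rte_pktmbuf_data_room_size() returns 2176, subtracting
+ * RTE_PKTMBUF_HEADROOM leaves 2048, and
+ * 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT (10) = 2, i.e. BSIZEPACKET is
+ * programmed for 2 KB receive buffers.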
+ */ + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - + RTE_PKTMBUF_HEADROOM); + srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) & + IXGBE_SRRCTL_BSIZEPKT_MASK); + + /* + * VF modification to write virtual function SRRCTL register + */ + IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl); + + buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) << + IXGBE_SRRCTL_BSIZEPKT_SHIFT); + + if (dev->data->dev_conf.rxmode.enable_scatter || + /* It adds dual VLAN length for supporting dual VLAN */ + (dev->data->dev_conf.rxmode.max_rx_pkt_len + + 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) { + if (!dev->data->scattered_rx) + PMD_INIT_LOG(DEBUG, "forcing scatter mode"); + dev->data->scattered_rx = 1; + } + } + +#ifdef RTE_HEADER_SPLIT_ENABLE + if (dev->data->dev_conf.rxmode.header_split) + /* Must setup the PSRTYPE register */ + psrtype = IXGBE_PSRTYPE_TCPHDR | + IXGBE_PSRTYPE_UDPHDR | + IXGBE_PSRTYPE_IPV4HDR | + IXGBE_PSRTYPE_IPV6HDR; +#endif + + /* Set RQPL for VF RSS according to max Rx queue */ + psrtype |= (dev->data->nb_rx_queues >> 1) << + IXGBE_PSRTYPE_RQPL_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); + + ixgbe_set_rx_function(dev); + + return 0; +} + +/* + * [VF] Initializes Transmit Unit. + */ +void __attribute__((cold)) +ixgbevf_dev_tx_init(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + struct ixgbe_tx_queue *txq; + uint64_t bus_addr; + uint32_t txctrl; + uint16_t i; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Setup the Base and Length of the Tx Descriptor Rings */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + bus_addr = txq->tx_ring_phys_addr; + IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i), + (uint32_t)(bus_addr & 0x00000000ffffffffULL)); + IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), + (uint32_t)(bus_addr >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i), + txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc)); + /* Setup the HW Tx Head and TX Tail descriptor pointers */ + IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0); + + /* + * Disable Tx Head Writeback RO bit, since this hoses + * bookkeeping if things aren't delivered in order. + */ + txctrl = IXGBE_READ_REG(hw, + IXGBE_VFDCA_TXCTRL(i)); + txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), + txctrl); + } +} + +/* + * [VF] Start Transmit and Receive Units. 
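+ *
+ * VF counterpart of the PF start path: a VF cannot touch the
+ * device-global Rx/Tx enables, so this only sets the per-queue
+ * VFTXDCTL/VFRXDCTL enable bits, polls until the hardware
+ * acknowledges them, and then publishes the Rx tail pointers.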
+ */ +void __attribute__((cold)) +ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + struct ixgbe_tx_queue *txq; + struct ixgbe_rx_queue *rxq; + uint32_t txdctl; + uint32_t rxdctl; + uint16_t i; + int poll_ms; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + /* Setup Transmit Threshold Registers */ + txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); + txdctl |= txq->pthresh & 0x7F; + txdctl |= ((txq->hthresh & 0x7F) << 8); + txdctl |= ((txq->wthresh & 0x7F) << 16); + IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl); + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + + txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); + txdctl |= IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl); + + poll_ms = 10; + /* Wait until TX Enable ready */ + do { + rte_delay_ms(1); + txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); + } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i); + } + for (i = 0; i < dev->data->nb_rx_queues; i++) { + + rxq = dev->data->rx_queues[i]; + + rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); + rxdctl |= IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl); + + /* Wait until RX Enable ready */ + poll_ms = 10; + do { + rte_delay_ms(1); + rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); + } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i); + rte_wmb(); + IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1); + + } +} + +/* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */ +int __attribute__((weak)) +ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev) +{ + return -1; +} + +uint16_t __attribute__((weak)) +ixgbe_recv_pkts_vec( + void __rte_unused *rx_queue, + struct rte_mbuf __rte_unused **rx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} + +uint16_t __attribute__((weak)) +ixgbe_recv_scattered_pkts_vec( + void __rte_unused *rx_queue, + struct rte_mbuf __rte_unused **rx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} + +int __attribute__((weak)) +ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq) +{ + return -1; +} diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h new file mode 100644 index 00000000..3691a19d --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_rxtx.h @@ -0,0 +1,315 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _IXGBE_RXTX_H_ +#define _IXGBE_RXTX_H_ + +/* + * Rings setup and release. + * + * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be + * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will + * also optimize cache line size effect. H/W supports up to cache line size 128. + */ +#define IXGBE_ALIGN 128 + +#define IXGBE_RXD_ALIGN (IXGBE_ALIGN / sizeof(union ixgbe_adv_rx_desc)) +#define IXGBE_TXD_ALIGN (IXGBE_ALIGN / sizeof(union ixgbe_adv_tx_desc)) + +/* + * Maximum number of Ring Descriptors. + * + * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring + * descriptors should meet the following condition: + * (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0 + */ +#define IXGBE_MIN_RING_DESC 32 +#define IXGBE_MAX_RING_DESC 4096 + +#define RTE_PMD_IXGBE_TX_MAX_BURST 32 +#define RTE_PMD_IXGBE_RX_MAX_BURST 32 +#define RTE_IXGBE_TX_MAX_FREE_BUF_SZ 64 + +#define RTE_IXGBE_DESCS_PER_LOOP 4 + +#ifdef RTE_IXGBE_INC_VECTOR +#define RTE_IXGBE_RXQ_REARM_THRESH 32 +#define RTE_IXGBE_MAX_RX_BURST RTE_IXGBE_RXQ_REARM_THRESH +#endif + +#define RX_RING_SZ ((IXGBE_MAX_RING_DESC + RTE_IXGBE_DESCS_PER_LOOP - 1) * \ + sizeof(union ixgbe_adv_rx_desc)) + +#ifdef RTE_PMD_PACKET_PREFETCH +#define rte_packet_prefetch(p) rte_prefetch1(p) +#else +#define rte_packet_prefetch(p) do {} while(0) +#endif + +#define RTE_IXGBE_REGISTER_POLL_WAIT_10_MS 10 +#define RTE_IXGBE_WAIT_100_US 100 +#define RTE_IXGBE_VMTXSW_REGISTER_COUNT 2 + +#define IXGBE_PACKET_TYPE_MASK_82599 0X7F +#define IXGBE_PACKET_TYPE_MASK_X550 0X10FF +#define IXGBE_PACKET_TYPE_MASK_TUNNEL 0XFF +#define IXGBE_PACKET_TYPE_TUNNEL_BIT 0X1000 + +/** + * Structure associated with each descriptor of the RX ring of a RX queue. + */ +struct ixgbe_rx_entry { + struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */ +}; + +struct ixgbe_scattered_rx_entry { + struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */ +}; + +/** + * Structure associated with each descriptor of the TX ring of a TX queue. + */ +struct ixgbe_tx_entry { + struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */ + uint16_t next_id; /**< Index of next descriptor in ring. */ + uint16_t last_id; /**< Index of last scattered descriptor. */ +}; + +/** + * Structure associated with each descriptor of the TX ring of a TX queue. + */ +struct ixgbe_tx_entry_v { + struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */ +}; + +/** + * Structure associated with each RX queue. + */ +struct ixgbe_rx_queue { + struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */ + volatile union ixgbe_adv_rx_desc *rx_ring; /**< RX ring virtual address. 
*/ + uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */ + volatile uint32_t *rdt_reg_addr; /**< RDT register address. */ + volatile uint32_t *rdh_reg_addr; /**< RDH register address. */ + struct ixgbe_rx_entry *sw_ring; /**< address of RX software ring. */ + struct ixgbe_scattered_rx_entry *sw_sc_ring; /**< address of scattered Rx software ring. */ + struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */ + struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */ + uint64_t mbuf_initializer; /**< value to init mbufs */ + uint16_t nb_rx_desc; /**< number of RX descriptors. */ + uint16_t rx_tail; /**< current value of RDT register. */ + uint16_t nb_rx_hold; /**< number of held free RX desc. */ + uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */ + uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */ + uint16_t rx_free_trigger; /**< triggers rx buffer allocation */ + uint16_t rx_using_sse; + /**< indicates that vector RX is in use */ +#ifdef RTE_IXGBE_INC_VECTOR + uint16_t rxrearm_nb; /**< number of remaining to be re-armed */ + uint16_t rxrearm_start; /**< the idx we start the re-arming from */ +#endif + uint16_t rx_free_thresh; /**< max free RX desc to hold. */ + uint16_t queue_id; /**< RX queue index. */ + uint16_t reg_idx; /**< RX queue register index. */ + uint16_t pkt_type_mask; /**< Packet type mask for different NICs. */ + uint8_t port_id; /**< Device port identifier. */ + uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */ + uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */ + uint8_t rx_deferred_start; /**< not in global dev start. */ + /** need to alloc dummy mbuf, for wraparound when scanning hw ring */ + struct rte_mbuf fake_mbuf; + /** hold packets to return to application */ + struct rte_mbuf *rx_stage[RTE_PMD_IXGBE_RX_MAX_BURST*2]; +}; + +/** + * IXGBE CTX Constants + */ +enum ixgbe_advctx_num { + IXGBE_CTX_0 = 0, /**< CTX0 */ + IXGBE_CTX_1 = 1, /**< CTX1 */ + IXGBE_CTX_NUM = 2, /**< CTX NUMBER */ +}; + +/** Offload features */ +union ixgbe_tx_offload { + uint64_t data[2]; + struct { + uint64_t l2_len:7; /**< L2 (MAC) Header Length. */ + uint64_t l3_len:9; /**< L3 (IP) Header Length. */ + uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */ + uint64_t tso_segsz:16; /**< TCP TSO segment size */ + uint64_t vlan_tci:16; + /**< VLAN Tag Control Identifier (CPU order). */ + + /* fields for TX offloading of tunnels */ + uint64_t outer_l3_len:8; /**< Outer L3 (IP) Hdr Length. */ + uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */ + }; +}; + +/* + * Compare mask for vlan_macip_len.data, + * should be in sync with ixgbe_vlan_macip.f layout. + * */ +#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN length - 16-bits. */ +#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC length - 7-bits. */ +#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP length - 9-bits. */ +/** MAC+IP length. */ +#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK) + +/** + * Structure to check if new context need be built + */ + +struct ixgbe_advctx_info { + uint64_t flags; /**< ol_flags for context build. */ + /**< tx offload: vlan, tso, l2-l3-l4 lengths. */ + union ixgbe_tx_offload tx_offload; + /** compare mask for tx offload. */ + union ixgbe_tx_offload tx_offload_mask; +}; + +/** + * Structure associated with each TX queue. + */ +struct ixgbe_tx_queue { + /** TX ring virtual address. */ + volatile union ixgbe_adv_tx_desc *tx_ring; + uint64_t tx_ring_phys_addr; /**< TX ring DMA address. 
*/ + union { + struct ixgbe_tx_entry *sw_ring; /**< address of SW ring for scalar PMD. */ + struct ixgbe_tx_entry_v *sw_ring_v; /**< address of SW ring for vector PMD */ + }; + volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */ + uint16_t nb_tx_desc; /**< number of TX descriptors. */ + uint16_t tx_tail; /**< current value of TDT reg. */ + /**< Start freeing TX buffers if there are less free descriptors than + this value. */ + uint16_t tx_free_thresh; + /** Number of TX descriptors to use before RS bit is set. */ + uint16_t tx_rs_thresh; + /** Number of TX descriptors used since RS bit was set. */ + uint16_t nb_tx_used; + /** Index to last TX descriptor to have been cleaned. */ + uint16_t last_desc_cleaned; + /** Total number of TX descriptors ready to be allocated. */ + uint16_t nb_tx_free; + uint16_t tx_next_dd; /**< next desc to scan for DD bit */ + uint16_t tx_next_rs; /**< next desc to set RS bit */ + uint16_t queue_id; /**< TX queue index. */ + uint16_t reg_idx; /**< TX queue register index. */ + uint8_t port_id; /**< Device port identifier. */ + uint8_t pthresh; /**< Prefetch threshold register. */ + uint8_t hthresh; /**< Host threshold register. */ + uint8_t wthresh; /**< Write-back threshold reg. */ + uint32_t txq_flags; /**< Holds flags for this TXq */ + uint32_t ctx_curr; /**< Hardware context states. */ + /** Hardware context0 history. */ + struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM]; + const struct ixgbe_txq_ops *ops; /**< txq ops */ + uint8_t tx_deferred_start; /**< not in global dev start. */ +}; + +struct ixgbe_txq_ops { + void (*release_mbufs)(struct ixgbe_tx_queue *txq); + void (*free_swring)(struct ixgbe_tx_queue *txq); + void (*reset)(struct ixgbe_tx_queue *txq); +}; + +/* + * The "simple" TX queue functions require that the following + * flags are set when the TX queue is configured: + * - ETH_TXQ_FLAGS_NOMULTSEGS + * - ETH_TXQ_FLAGS_NOVLANOFFL + * - ETH_TXQ_FLAGS_NOXSUMSCTP + * - ETH_TXQ_FLAGS_NOXSUMUDP + * - ETH_TXQ_FLAGS_NOXSUMTCP + * and that the RS bit threshold (tx_rs_thresh) is at least equal to + * RTE_PMD_IXGBE_TX_MAX_BURST. + */ +#define IXGBE_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \ + ETH_TXQ_FLAGS_NOOFFLOADS) + +/* + * Populate descriptors with the following info: + * 1.) buffer_addr = phys_addr + headroom + * 2.) cmd_type_len = DCMD_DTYP_FLAGS | pkt_len + * 3.) olinfo_status = pkt_len << PAYLEN_SHIFT + */ + +/* Defines for Tx descriptor */ +#define DCMD_DTYP_FLAGS (IXGBE_ADVTXD_DTYP_DATA |\ + IXGBE_ADVTXD_DCMD_IFCS |\ + IXGBE_ADVTXD_DCMD_DEXT |\ + IXGBE_ADVTXD_DCMD_EOP) + + +/* Takes an ethdev and a queue and sets up the tx function to be used based on + * the queue parameters. Used in tx_queue_setup by primary process and then + * in dev_init by secondary process when attaching to an existing ethdev. + */ +void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq); + +/** + * Sets the rx_pkt_burst callback in the ixgbe rte_eth_dev instance. + * + * Sets the callback based on the device parameters: + * - ixgbe_hw.rx_bulk_alloc_allowed + * - rte_eth_dev_data.scattered_rx + * - rte_eth_dev_data.lro + * - conditions checked in ixgbe_rx_vec_condition_check() + * + * This means that the parameters above have to be configured prior to calling + * to this function. 
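+ *
+ * Rough selection (a sketch, not the exhaustive set of checks): a
+ * scattered/LRO variant is chosen when scattered_rx or LRO is set;
+ * otherwise vector Rx is preferred when the condition check and the
+ * per-queue vector setup succeeded, then bulk-allocation Rx when
+ * rx_bulk_alloc_allowed stayed true, with plain ixgbe_recv_pkts()
+ * as the fallback.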
+ * + * @dev rte_eth_dev handle + */ +void ixgbe_set_rx_function(struct rte_eth_dev *dev); + +uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts); +int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev); +int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq); +void ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq); + +#ifdef RTE_IXGBE_INC_VECTOR + +uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq); + +#endif /* RTE_IXGBE_INC_VECTOR */ +#endif /* _IXGBE_RXTX_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec.c b/drivers/net/ixgbe/ixgbe_rxtx_vec.c new file mode 100644 index 00000000..50407043 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_rxtx_vec.c @@ -0,0 +1,833 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <stdint.h> +#include <rte_ethdev.h> +#include <rte_malloc.h> + +#include "ixgbe_ethdev.h" +#include "ixgbe_rxtx.h" + +#include <tmmintrin.h> + +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +static inline void +ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq) +{ + int i; + uint16_t rx_id; + volatile union ixgbe_adv_rx_desc *rxdp; + struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start]; + struct rte_mbuf *mb0, *mb1; + __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM, + RTE_PKTMBUF_HEADROOM); + __m128i dma_addr0, dma_addr1; + + const __m128i hba_msk = _mm_set_epi64x(0, UINT64_MAX); + + rxdp = rxq->rx_ring + rxq->rxrearm_start; + + /* Pull 'n' more MBUFs into the software ring */ + if (rte_mempool_get_bulk(rxq->mb_pool, + (void *)rxep, + RTE_IXGBE_RXQ_REARM_THRESH) < 0) { + if (rxq->rxrearm_nb + RTE_IXGBE_RXQ_REARM_THRESH >= + rxq->nb_rx_desc) { + dma_addr0 = _mm_setzero_si128(); + for (i = 0; i < RTE_IXGBE_DESCS_PER_LOOP; i++) { + rxep[i].mbuf = &rxq->fake_mbuf; + _mm_store_si128((__m128i *)&rxdp[i].read, + dma_addr0); + } + } + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += + RTE_IXGBE_RXQ_REARM_THRESH; + return; + } + + /* Initialize the mbufs in vector, process 2 mbufs in one loop */ + for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) { + __m128i vaddr0, vaddr1; + uintptr_t p0, p1; + + mb0 = rxep[0].mbuf; + mb1 = rxep[1].mbuf; + + /* + * Flush mbuf with pkt template. + * Data to be rearmed is 6 bytes long. + * Though, RX will overwrite ol_flags that are coming next + * anyway. So overwrite whole 8 bytes with one load: + * 6 bytes of rearm_data plus first 2 bytes of ol_flags. + */ + p0 = (uintptr_t)&mb0->rearm_data; + *(uint64_t *)p0 = rxq->mbuf_initializer; + p1 = (uintptr_t)&mb1->rearm_data; + *(uint64_t *)p1 = rxq->mbuf_initializer; + + /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */ + vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr)); + vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr)); + + /* convert pa to dma_addr hdr/data */ + dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0); + dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1); + + /* add headroom to pa values */ + dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room); + dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room); + + /* set Header Buffer Address to zero */ + dma_addr0 = _mm_and_si128(dma_addr0, hba_msk); + dma_addr1 = _mm_and_si128(dma_addr1, hba_msk); + + /* flush desc with pa dma_addr */ + _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0); + _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1); + } + + rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH; + if (rxq->rxrearm_start >= rxq->nb_rx_desc) + rxq->rxrearm_start = 0; + + rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH; + + rx_id = (uint16_t) ((rxq->rxrearm_start == 0) ? + (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1)); + + /* Update the tail pointer on the NIC */ + IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); +} + +/* Handling the offload flags (olflags) field takes computation + * time when receiving packets. Therefore we provide a flag to disable + * the processing of the olflags field when they are not needed. 
This + * gives improved performance, at the cost of losing the offload info + * in the received packet + */ +#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE + +#define VTAG_SHIFT (3) + +static inline void +desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) +{ + __m128i ptype0, ptype1, vtag0, vtag1; + union { + uint16_t e[4]; + uint64_t dword; + } vol; + + /* pkt type + vlan olflags mask */ + const __m128i pkttype_msk = _mm_set_epi16( + 0x0000, 0x0000, 0x0000, 0x0000, + PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT); + + /* mask everything except rss type */ + const __m128i rsstype_msk = _mm_set_epi16( + 0x0000, 0x0000, 0x0000, 0x0000, + 0x000F, 0x000F, 0x000F, 0x000F); + + /* map rss type to rss hash flag */ + const __m128i rss_flags = _mm_set_epi8(PKT_RX_FDIR, 0, 0, 0, + 0, 0, 0, PKT_RX_RSS_HASH, + PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0, + PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0); + + ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]); + ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]); + vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]); + vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]); + + ptype0 = _mm_unpacklo_epi32(ptype0, ptype1); + ptype0 = _mm_and_si128(ptype0, rsstype_msk); + ptype0 = _mm_shuffle_epi8(rss_flags, ptype0); + + vtag1 = _mm_unpacklo_epi32(vtag0, vtag1); + vtag1 = _mm_srli_epi16(vtag1, VTAG_SHIFT); + vtag1 = _mm_and_si128(vtag1, pkttype_msk); + + vtag1 = _mm_or_si128(ptype0, vtag1); + vol.dword = _mm_cvtsi128_si64(vtag1); + + rx_pkts[0]->ol_flags = vol.e[0]; + rx_pkts[1]->ol_flags = vol.e[1]; + rx_pkts[2]->ol_flags = vol.e[2]; + rx_pkts[3]->ol_flags = vol.e[3]; +} +#else +#define desc_to_olflags_v(desc, rx_pkts) do {} while (0) +#endif + +/* + * vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP) + * + * Notice: + * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet + * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST + * numbers of DD bit + * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two + * - don't support ol_flags for rss and csum err + */ +static inline uint16_t +_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, uint8_t *split_packet) +{ + volatile union ixgbe_adv_rx_desc *rxdp; + struct ixgbe_rx_entry *sw_ring; + uint16_t nb_pkts_recd; + int pos; + uint64_t var; + __m128i shuf_msk; + __m128i crc_adjust = _mm_set_epi16( + 0, 0, 0, /* ignore non-length fields */ + -rxq->crc_len, /* sub crc on data_len */ + 0, /* ignore high-16bits of pkt_len */ + -rxq->crc_len, /* sub crc on pkt_len */ + 0, 0 /* ignore pkt_type field */ + ); + __m128i dd_check, eop_check; + + /* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */ + nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST); + + /* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP); + + /* Just the act of getting into the function from the application is + * going to cost about 7 cycles */ + rxdp = rxq->rx_ring + rxq->rx_tail; + + _mm_prefetch((const void *)rxdp, _MM_HINT_T0); + + /* See if we need to rearm the RX queue - gives the prefetch a bit + * of time to act */ + if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH) + ixgbe_rxq_rearm(rxq); + + /* Before we start moving massive data around, check to see if + * there is actually a packet available */ + if (!(rxdp->wb.upper.status_error & + rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) + return 0; + + /* 4 packets DD mask */ + dd_check = 
_mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL); + + /* 4 packets EOP mask */ + eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL); + + /* mask to shuffle from desc. to mbuf */ + shuf_msk = _mm_set_epi8( + 7, 6, 5, 4, /* octet 4~7, 32bits rss */ + 15, 14, /* octet 14~15, low 16 bits vlan_macip */ + 13, 12, /* octet 12~13, 16 bits data_len */ + 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ + 13, 12, /* octet 12~13, low 16 bits pkt_len */ + 0xFF, 0xFF, /* skip 32 bit pkt_type */ + 0xFF, 0xFF + ); + + /* Cache is empty -> need to scan the buffer rings, but first move + * the next 'n' mbufs into the cache */ + sw_ring = &rxq->sw_ring[rxq->rx_tail]; + + /* A. load 4 packet in one loop + * [A*. mask out 4 unused dirty field in desc] + * B. copy 4 mbuf point from swring to rx_pkts + * C. calc the number of DD bits among the 4 packets + * [C*. extract the end-of-packet bit, if requested] + * D. fill info. from desc to mbuf + */ + for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts; + pos += RTE_IXGBE_DESCS_PER_LOOP, + rxdp += RTE_IXGBE_DESCS_PER_LOOP) { + __m128i descs[RTE_IXGBE_DESCS_PER_LOOP]; + __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4; + __m128i zero, staterr, sterr_tmp1, sterr_tmp2; + __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */ + + /* B.1 load 1 mbuf point */ + mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]); + + /* Read desc statuses backwards to avoid race condition */ + /* A.1 load 4 pkts desc */ + descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3)); + + /* B.2 copy 2 mbuf point into rx_pkts */ + _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1); + + /* B.1 load 1 mbuf point */ + mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]); + + descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2)); + /* B.1 load 2 mbuf point */ + descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1)); + descs[0] = _mm_loadu_si128((__m128i *)(rxdp)); + + /* B.2 copy 2 mbuf point into rx_pkts */ + _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2); + + if (split_packet) { + rte_prefetch0(&rx_pkts[pos]->cacheline1); + rte_prefetch0(&rx_pkts[pos + 1]->cacheline1); + rte_prefetch0(&rx_pkts[pos + 2]->cacheline1); + rte_prefetch0(&rx_pkts[pos + 3]->cacheline1); + } + + /* avoid compiler reorder optimization */ + rte_compiler_barrier(); + + /* D.1 pkt 3,4 convert format from desc to pktmbuf */ + pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk); + pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk); + + /* D.1 pkt 1,2 convert format from desc to pktmbuf */ + pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk); + pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk); + + /* C.1 4=>2 filter staterr info only */ + sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]); + /* C.1 4=>2 filter staterr info only */ + sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]); + + /* set ol_flags with vlan packet type */ + desc_to_olflags_v(descs, &rx_pkts[pos]); + + /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */ + pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust); + pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust); + + /* C.2 get 4 pkts staterr value */ + zero = _mm_xor_si128(dd_check, dd_check); + staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2); + + /* D.3 copy final 3,4 data to rx_pkts */ + _mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1, + pkt_mb4); + _mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1, + pkt_mb3); + + /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */ + pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust); + pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust); + + /* C* extract and record 
EOP bit */ + if (split_packet) { + __m128i eop_shuf_mask = _mm_set_epi8( + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, + 0x04, 0x0C, 0x00, 0x08 + ); + + /* and with mask to extract bits, flipping 1-0 */ + __m128i eop_bits = _mm_andnot_si128(staterr, eop_check); + /* the staterr values are not in order, as the count + * count of dd bits doesn't care. However, for end of + * packet tracking, we do care, so shuffle. This also + * compresses the 32-bit values to 8-bit */ + eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask); + /* store the resulting 32-bit value */ + *(int *)split_packet = _mm_cvtsi128_si32(eop_bits); + split_packet += RTE_IXGBE_DESCS_PER_LOOP; + + /* zero-out next pointers */ + rx_pkts[pos]->next = NULL; + rx_pkts[pos + 1]->next = NULL; + rx_pkts[pos + 2]->next = NULL; + rx_pkts[pos + 3]->next = NULL; + } + + /* C.3 calc available number of desc */ + staterr = _mm_and_si128(staterr, dd_check); + staterr = _mm_packs_epi32(staterr, zero); + + /* D.3 copy final 1,2 data to rx_pkts */ + _mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1, + pkt_mb2); + _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1, + pkt_mb1); + + /* C.4 calc avaialbe number of desc */ + var = __builtin_popcountll(_mm_cvtsi128_si64(staterr)); + nb_pkts_recd += var; + if (likely(var != RTE_IXGBE_DESCS_PER_LOOP)) + break; + } + + /* Update our internal tail pointer */ + rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd); + rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1)); + rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd); + + return nb_pkts_recd; +} + +/* + * vPMD receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP) + * + * Notice: + * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet + * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST + * numbers of DD bit + * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two + * - don't support ol_flags for rss and csum err + */ +uint16_t +ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL); +} + +static inline uint16_t +reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs, + uint16_t nb_bufs, uint8_t *split_flags) +{ + struct rte_mbuf *pkts[nb_bufs]; /*finished pkts*/ + struct rte_mbuf *start = rxq->pkt_first_seg; + struct rte_mbuf *end = rxq->pkt_last_seg; + unsigned pkt_idx, buf_idx; + + for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) { + if (end != NULL) { + /* processing a split packet */ + end->next = rx_bufs[buf_idx]; + rx_bufs[buf_idx]->data_len += rxq->crc_len; + + start->nb_segs++; + start->pkt_len += rx_bufs[buf_idx]->data_len; + end = end->next; + + if (!split_flags[buf_idx]) { + /* it's the last packet of the set */ + start->hash = end->hash; + start->ol_flags = end->ol_flags; + /* we need to strip crc for the whole packet */ + start->pkt_len -= rxq->crc_len; + if (end->data_len > rxq->crc_len) + end->data_len -= rxq->crc_len; + else { + /* free up last mbuf */ + struct rte_mbuf *secondlast = start; + + start->nb_segs--; + while (secondlast->next != end) + secondlast = secondlast->next; + secondlast->data_len -= (rxq->crc_len - + end->data_len); + secondlast->next = NULL; + rte_pktmbuf_free_seg(end); + end = secondlast; + } + pkts[pkt_idx++] = start; + start = end = NULL; + } + } else { + /* not processing a split packet */ + if (!split_flags[buf_idx]) { + /* not a split packet, save and skip 
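+ * (its data_len/pkt_len were already CRC-adjusted by the
+ * descriptor-to-mbuf shuffle in _recv_raw_pkts_vec, so no further
+ * length fix-up is needed here)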
*/ + pkts[pkt_idx++] = rx_bufs[buf_idx]; + continue; + } + end = start = rx_bufs[buf_idx]; + rx_bufs[buf_idx]->data_len += rxq->crc_len; + rx_bufs[buf_idx]->pkt_len += rxq->crc_len; + } + } + + /* save the partial packet for next time */ + rxq->pkt_first_seg = start; + rxq->pkt_last_seg = end; + memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts))); + return pkt_idx; +} + +/* + * vPMD receive routine that reassembles scattered packets + * + * Notice: + * - don't support ol_flags for rss and csum err + * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet + * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST + * numbers of DD bit + * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two + */ +uint16_t +ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct ixgbe_rx_queue *rxq = rx_queue; + uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0}; + + /* get some new buffers */ + uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts, + split_flags); + if (nb_bufs == 0) + return 0; + + /* happy day case, full burst + no packets to be joined */ + const uint64_t *split_fl64 = (uint64_t *)split_flags; + if (rxq->pkt_first_seg == NULL && + split_fl64[0] == 0 && split_fl64[1] == 0 && + split_fl64[2] == 0 && split_fl64[3] == 0) + return nb_bufs; + + /* reassemble any packets that need reassembly*/ + unsigned i = 0; + if (rxq->pkt_first_seg == NULL) { + /* find the first split flag, and only reassemble then*/ + while (i < nb_bufs && !split_flags[i]) + i++; + if (i == nb_bufs) + return nb_bufs; + } + return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, + &split_flags[i]); +} + +static inline void +vtx1(volatile union ixgbe_adv_tx_desc *txdp, + struct rte_mbuf *pkt, uint64_t flags) +{ + __m128i descriptor = _mm_set_epi64x((uint64_t)pkt->pkt_len << 46 | + flags | pkt->data_len, + pkt->buf_physaddr + pkt->data_off); + _mm_store_si128((__m128i *)&txdp->read, descriptor); +} + +static inline void +vtx(volatile union ixgbe_adv_tx_desc *txdp, + struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags) +{ + int i; + for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt) + vtx1(txdp, *pkt, flags); +} + +static inline int __attribute__((always_inline)) +ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq) +{ + struct ixgbe_tx_entry_v *txep; + uint32_t status; + uint32_t n; + uint32_t i; + int nb_free = 0; + struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ]; + + /* check DD bit on threshold descriptor */ + status = txq->tx_ring[txq->tx_next_dd].wb.status; + if (!(status & IXGBE_ADVTXD_STAT_DD)) + return 0; + + n = txq->tx_rs_thresh; + + /* + * first buffer to free from S/W ring is at index + * tx_next_dd - (tx_rs_thresh-1) + */ + txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)]; + m = __rte_pktmbuf_prefree_seg(txep[0].mbuf); + if (likely(m != NULL)) { + free[0] = m; + nb_free = 1; + for (i = 1; i < n; i++) { + m = __rte_pktmbuf_prefree_seg(txep[i].mbuf); + if (likely(m != NULL)) { + if (likely(m->pool == free[0]->pool)) + free[nb_free++] = m; + else { + rte_mempool_put_bulk(free[0]->pool, + (void *)free, nb_free); + free[0] = m; + nb_free = 1; + } + } + } + rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free); + } else { + for (i = 1; i < n; i++) { + m = __rte_pktmbuf_prefree_seg(txep[i].mbuf); + if (m != NULL) + rte_mempool_put(m->pool, m); + } + } + + /* buffers were freed, update counters */ + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh); + txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + 
txq->tx_rs_thresh); + if (txq->tx_next_dd >= txq->nb_tx_desc) + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + + return txq->tx_rs_thresh; +} + +static inline void __attribute__((always_inline)) +tx_backlog_entry(struct ixgbe_tx_entry_v *txep, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + int i; + for (i = 0; i < (int)nb_pkts; ++i) + txep[i].mbuf = tx_pkts[i]; +} + +uint16_t +ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue; + volatile union ixgbe_adv_tx_desc *txdp; + struct ixgbe_tx_entry_v *txep; + uint16_t n, nb_commit, tx_id; + uint64_t flags = DCMD_DTYP_FLAGS; + uint64_t rs = IXGBE_ADVTXD_DCMD_RS|DCMD_DTYP_FLAGS; + int i; + + /* cross rx_thresh boundary is not allowed */ + nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh); + + if (txq->nb_tx_free < txq->tx_free_thresh) + ixgbe_tx_free_bufs(txq); + + nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); + if (unlikely(nb_pkts == 0)) + return 0; + + tx_id = txq->tx_tail; + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring_v[tx_id]; + + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); + + n = (uint16_t)(txq->nb_tx_desc - tx_id); + if (nb_commit >= n) { + + tx_backlog_entry(txep, tx_pkts, n); + + for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp) + vtx1(txdp, *tx_pkts, flags); + + vtx1(txdp, *tx_pkts++, rs); + + nb_commit = (uint16_t)(nb_commit - n); + + tx_id = 0; + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + + /* avoid reach the end of ring */ + txdp = &(txq->tx_ring[tx_id]); + txep = &txq->sw_ring_v[tx_id]; + } + + tx_backlog_entry(txep, tx_pkts, nb_commit); + + vtx(txdp, tx_pkts, nb_commit, flags); + + tx_id = (uint16_t)(tx_id + nb_commit); + if (tx_id > txq->tx_next_rs) { + txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |= + rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS); + txq->tx_next_rs = (uint16_t)(txq->tx_next_rs + + txq->tx_rs_thresh); + } + + txq->tx_tail = tx_id; + + IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail); + + return nb_pkts; +} + +static void __attribute__((cold)) +ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq) +{ + unsigned i; + struct ixgbe_tx_entry_v *txe; + const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1); + + if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc) + return; + + /* release the used mbufs in sw_ring */ + for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1); + i != txq->tx_tail; + i = (i + 1) & max_desc) { + txe = &txq->sw_ring_v[i]; + rte_pktmbuf_free_seg(txe->mbuf); + } + txq->nb_tx_free = max_desc; + + /* reset tx_entry */ + for (i = 0; i < txq->nb_tx_desc; i++) { + txe = &txq->sw_ring_v[i]; + txe->mbuf = NULL; + } +} + +void __attribute__((cold)) +ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq) +{ + const unsigned mask = rxq->nb_rx_desc - 1; + unsigned i; + + if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc) + return; + + /* free all mbufs that are valid in the ring */ + for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask) + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + rxq->rxrearm_nb = rxq->nb_rx_desc; + + /* set all entries to NULL */ + memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc); +} + +static void __attribute__((cold)) +ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq) +{ + if (txq == NULL) + return; + + if (txq->sw_ring != NULL) { + rte_free(txq->sw_ring_v - 1); + txq->sw_ring_v = NULL; + } +} + +static void __attribute__((cold)) +ixgbe_reset_tx_queue(struct 
ixgbe_tx_queue *txq) +{ + static const union ixgbe_adv_tx_desc zeroed_desc = {{0}}; + struct ixgbe_tx_entry_v *txe = txq->sw_ring_v; + uint16_t i; + + /* Zero out HW ring memory */ + for (i = 0; i < txq->nb_tx_desc; i++) + txq->tx_ring[i] = zeroed_desc; + + /* Initialize SW ring entries */ + for (i = 0; i < txq->nb_tx_desc; i++) { + volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i]; + txd->wb.status = IXGBE_TXD_STAT_DD; + txe[i].mbuf = NULL; + } + + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + + txq->tx_tail = 0; + txq->nb_tx_used = 0; + /* + * Always allow 1 descriptor to be un-allocated to avoid + * a H/W race condition + */ + txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1); + txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1); + txq->ctx_curr = 0; + memset((void *)&txq->ctx_cache, 0, + IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info)); +} + +static const struct ixgbe_txq_ops vec_txq_ops = { + .release_mbufs = ixgbe_tx_queue_release_mbufs_vec, + .free_swring = ixgbe_tx_free_swring, + .reset = ixgbe_reset_tx_queue, +}; + +int __attribute__((cold)) +ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq) +{ + uintptr_t p; + struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */ + + mb_def.nb_segs = 1; + mb_def.data_off = RTE_PKTMBUF_HEADROOM; + mb_def.port = rxq->port_id; + rte_mbuf_refcnt_set(&mb_def, 1); + + /* prevent compiler reordering: rearm_data covers previous fields */ + rte_compiler_barrier(); + p = (uintptr_t)&mb_def.rearm_data; + rxq->mbuf_initializer = *(uint64_t *)p; + return 0; +} + +int __attribute__((cold)) +ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq) +{ + if (txq->sw_ring_v == NULL) + return -1; + + /* leave the first one for overflow */ + txq->sw_ring_v = txq->sw_ring_v + 1; + txq->ops = &vec_txq_ops; + + return 0; +} + +int __attribute__((cold)) +ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) +{ +#ifndef RTE_LIBRTE_IEEE1588 + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf; + +#ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE + /* whithout rx ol_flags, no VP flag report */ + if (rxmode->hw_vlan_strip != 0 || + rxmode->hw_vlan_extend != 0) + return -1; +#endif + + /* no fdir support */ + if (fconf->mode != RTE_FDIR_MODE_NONE) + return -1; + + /* + * - no csum error report support + * - no header split support + */ + if (rxmode->hw_ip_checksum == 1 || + rxmode->header_split == 1) + return -1; + + return 0; +#else + RTE_SET_USED(dev); + return -1; +#endif +} diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map new file mode 100644 index 00000000..ef353984 --- /dev/null +++ b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map @@ -0,0 +1,4 @@ +DPDK_2.0 { + + local: *; +}; diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile new file mode 100644 index 00000000..d2f56927 --- /dev/null +++ b/drivers/net/mlx4/Makefile @@ -0,0 +1,126 @@ +# BSD LICENSE +# +# Copyright 2012-2015 6WIND S.A. +# Copyright 2012 Mellanox. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of 6WIND S.A. nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +include $(RTE_SDK)/mk/rte.vars.mk + +# Library name. +LIB = librte_pmd_mlx4.a + +# Sources. +SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4.c + +# Dependencies. +DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += lib/librte_eal +DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += lib/librte_mempool + +# Basic CFLAGS. +CFLAGS += -O3 +CFLAGS += -std=gnu99 -Wall -Wextra +CFLAGS += -g +CFLAGS += -I. +CFLAGS += -D_XOPEN_SOURCE=600 +CFLAGS += $(WERROR_FLAGS) +LDLIBS += -libverbs + +# A few warnings cannot be avoided in external headers. +CFLAGS += -Wno-error=cast-qual + +EXPORT_MAP := rte_pmd_mlx4_version.map +LIBABIVER := 1 + +# DEBUG which is usually provided on the command-line may enable +# CONFIG_RTE_LIBRTE_MLX4_DEBUG. +ifeq ($(DEBUG),1) +CONFIG_RTE_LIBRTE_MLX4_DEBUG := y +endif + +# User-defined CFLAGS. +ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DEBUG),y) +CFLAGS += -pedantic -UNDEBUG -DPEDANTIC +else +CFLAGS += -DNDEBUG -UPEDANTIC +endif + +ifdef CONFIG_RTE_LIBRTE_MLX4_SGE_WR_N +CFLAGS += -DMLX4_PMD_SGE_WR_N=$(CONFIG_RTE_LIBRTE_MLX4_SGE_WR_N) +endif + +ifdef CONFIG_RTE_LIBRTE_MLX4_MAX_INLINE +CFLAGS += -DMLX4_PMD_MAX_INLINE=$(CONFIG_RTE_LIBRTE_MLX4_MAX_INLINE) +endif + +ifdef CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE +CFLAGS += -DMLX4_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE) +endif + +ifdef CONFIG_RTE_LIBRTE_MLX4_SOFT_COUNTERS +CFLAGS += -DMLX4_PMD_SOFT_COUNTERS=$(CONFIG_RTE_LIBRTE_MLX4_SOFT_COUNTERS) +endif + +include $(RTE_SDK)/mk/rte.lib.mk + +# Generate and clean-up mlx4_autoconf.h. 
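+#
+# Each auto-config-h.sh run below probes the Verbs headers for one
+# type/enum and, when it is found, appends a matching "#define" (e.g.
+# roughly "#define RSS_SUPPORT 1") to mlx4_autoconf.h, which mlx4.c
+# then tests with #ifdef.  The exact lines written are whatever
+# scripts/auto-config-h.sh emits for the installed libibverbs.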
+ +export CC CFLAGS CPPFLAGS EXTRA_CFLAGS EXTRA_CPPFLAGS +export AUTO_CONFIG_CFLAGS = -Wno-error + +ifndef V +AUTOCONF_OUTPUT := >/dev/null +endif + +mlx4_autoconf.h: $(RTE_SDK)/scripts/auto-config-h.sh + $Q $(RM) -f -- '$@' + $Q sh -- '$<' '$@' \ + RSS_SUPPORT \ + infiniband/verbs.h \ + enum IBV_EXP_DEVICE_UD_RSS $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + INLINE_RECV \ + infiniband/verbs.h \ + enum IBV_EXP_DEVICE_ATTR_INLINE_RECV_SZ $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_EXP_QUERY_DEVICE \ + infiniband/verbs.h \ + type 'struct ibv_exp_device_attr' $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_EXP_QP_BURST_CREATE_DISABLE_ETH_LOOPBACK \ + infiniband/verbs.h \ + enum IBV_EXP_QP_BURST_CREATE_DISABLE_ETH_LOOPBACK \ + $(AUTOCONF_OUTPUT) + +mlx4.o: mlx4_autoconf.h + +clean_mlx4: FORCE + $Q rm -f -- mlx4_autoconf.h + +clean: clean_mlx4 diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c new file mode 100644 index 00000000..4f21dbe2 --- /dev/null +++ b/drivers/net/mlx4/mlx4.c @@ -0,0 +1,5820 @@ +/*- + * BSD LICENSE + * + * Copyright 2012-2015 6WIND S.A. + * Copyright 2012 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Known limitations: + * - RSS hash key and options cannot be modified. + * - Hardware counters aren't implemented. + */ + +/* System headers. */ +#include <stddef.h> +#include <stdio.h> +#include <stdlib.h> +#include <stdint.h> +#include <inttypes.h> +#include <string.h> +#include <errno.h> +#include <unistd.h> +#include <limits.h> +#include <assert.h> +#include <arpa/inet.h> +#include <net/if.h> +#include <dirent.h> +#include <sys/ioctl.h> +#include <sys/socket.h> +#include <netinet/in.h> +#include <linux/if.h> +#include <linux/ethtool.h> +#include <linux/sockios.h> +#include <fcntl.h> + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <infiniband/verbs.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +/* DPDK headers don't like -pedantic. 
*/ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_dev.h> +#include <rte_mbuf.h> +#include <rte_errno.h> +#include <rte_mempool.h> +#include <rte_prefetch.h> +#include <rte_malloc.h> +#include <rte_spinlock.h> +#include <rte_atomic.h> +#include <rte_version.h> +#include <rte_log.h> +#include <rte_alarm.h> +#include <rte_memory.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +/* Generated configuration header. */ +#include "mlx4_autoconf.h" + +/* PMD header. */ +#include "mlx4.h" + +/* Runtime logging through RTE_LOG() is enabled when not in debugging mode. + * Intermediate LOG_*() macros add the required end-of-line characters. */ +#ifndef NDEBUG +#define INFO(...) DEBUG(__VA_ARGS__) +#define WARN(...) DEBUG(__VA_ARGS__) +#define ERROR(...) DEBUG(__VA_ARGS__) +#else +#define LOG__(level, m, ...) \ + RTE_LOG(level, PMD, MLX4_DRIVER_NAME ": " m "%c", __VA_ARGS__) +#define LOG_(level, ...) LOG__(level, __VA_ARGS__, '\n') +#define INFO(...) LOG_(INFO, __VA_ARGS__) +#define WARN(...) LOG_(WARNING, __VA_ARGS__) +#define ERROR(...) LOG_(ERR, __VA_ARGS__) +#endif + +/* Convenience macros for accessing mbuf fields. */ +#define NEXT(m) ((m)->next) +#define DATA_LEN(m) ((m)->data_len) +#define PKT_LEN(m) ((m)->pkt_len) +#define DATA_OFF(m) ((m)->data_off) +#define SET_DATA_OFF(m, o) ((m)->data_off = (o)) +#define NB_SEGS(m) ((m)->nb_segs) +#define PORT(m) ((m)->port) + +/* Work Request ID data type (64 bit). */ +typedef union { + struct { + uint32_t id; + uint16_t offset; + } data; + uint64_t raw; +} wr_id_t; + +#define WR_ID(o) (((wr_id_t *)&(o))->data) + +/* Transpose flags. Useful to convert IBV to DPDK flags. */ +#define TRANSPOSE(val, from, to) \ + (((from) >= (to)) ? \ + (((val) & (from)) / ((from) / (to))) : \ + (((val) & (from)) * ((to) / (from)))) + +struct mlx4_rxq_stats { + unsigned int idx; /**< Mapping index. */ +#ifdef MLX4_PMD_SOFT_COUNTERS + uint64_t ipackets; /**< Total of successfully received packets. */ + uint64_t ibytes; /**< Total of successfully received bytes. */ +#endif + uint64_t idropped; /**< Total of packets dropped when RX ring full. */ + uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */ +}; + +struct mlx4_txq_stats { + unsigned int idx; /**< Mapping index. */ +#ifdef MLX4_PMD_SOFT_COUNTERS + uint64_t opackets; /**< Total of successfully sent packets. */ + uint64_t obytes; /**< Total of successfully sent bytes. */ +#endif + uint64_t odropped; /**< Total of packets not sent when TX ring full. */ +}; + +/* RX element (scattered packets). */ +struct rxq_elt_sp { + struct ibv_recv_wr wr; /* Work Request. */ + struct ibv_sge sges[MLX4_PMD_SGE_WR_N]; /* Scatter/Gather Elements. */ + struct rte_mbuf *bufs[MLX4_PMD_SGE_WR_N]; /* SGEs buffers. */ +}; + +/* RX element. */ +struct rxq_elt { + struct ibv_recv_wr wr; /* Work Request. */ + struct ibv_sge sge; /* Scatter/Gather Element. */ + /* mbuf pointer is derived from WR_ID(wr.wr_id).offset. */ +}; + +/* RX queue descriptor. */ +struct rxq { + struct priv *priv; /* Back pointer to private data. */ + struct rte_mempool *mp; /* Memory Pool for allocations. */ + struct ibv_mr *mr; /* Memory Region (for mp). */ + struct ibv_cq *cq; /* Completion Queue. */ + struct ibv_qp *qp; /* Queue Pair. */ + struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */ + struct ibv_exp_cq_family *if_cq; /* CQ interface. */ + /* + * Each VLAN ID requires a separate flow steering rule. 
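+ * Hence the two-dimensional mac_flow[] table below: one ibv_flow
+ * handle is kept per (configured MAC address, enabled VLAN filter)
+ * pair, alongside dedicated flows for promiscuous and allmulti modes.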
+ */ + BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES); + struct ibv_flow *mac_flow[MLX4_MAX_MAC_ADDRESSES][MLX4_MAX_VLAN_IDS]; + struct ibv_flow *promisc_flow; /* Promiscuous flow. */ + struct ibv_flow *allmulti_flow; /* Multicast flow. */ + unsigned int port_id; /* Port ID for incoming packets. */ + unsigned int elts_n; /* (*elts)[] length. */ + unsigned int elts_head; /* Current index in (*elts)[]. */ + union { + struct rxq_elt_sp (*sp)[]; /* Scattered RX elements. */ + struct rxq_elt (*no_sp)[]; /* RX elements. */ + } elts; + unsigned int sp:1; /* Use scattered RX elements. */ + unsigned int csum:1; /* Enable checksum offloading. */ + unsigned int csum_l2tun:1; /* Same for L2 tunnels. */ + uint32_t mb_len; /* Length of a mp-issued mbuf. */ + struct mlx4_rxq_stats stats; /* RX queue counters. */ + unsigned int socket; /* CPU socket ID for allocations. */ + struct ibv_exp_res_domain *rd; /* Resource Domain. */ +}; + +/* TX element. */ +struct txq_elt { + struct rte_mbuf *buf; +}; + +/* Linear buffer type. It is used when transmitting buffers with too many + * segments that do not fit the hardware queue (see max_send_sge). + * Extra segments are copied (linearized) in such buffers, replacing the + * last SGE during TX. + * The size is arbitrary but large enough to hold a jumbo frame with + * 8 segments considering mbuf.buf_len is about 2048 bytes. */ +typedef uint8_t linear_t[16384]; + +/* TX queue descriptor. */ +struct txq { + struct priv *priv; /* Back pointer to private data. */ + struct { + const struct rte_mempool *mp; /* Cached Memory Pool. */ + struct ibv_mr *mr; /* Memory Region (for mp). */ + uint32_t lkey; /* mr->lkey */ + } mp2mr[MLX4_PMD_TX_MP_CACHE]; /* MP to MR translation table. */ + struct ibv_cq *cq; /* Completion Queue. */ + struct ibv_qp *qp; /* Queue Pair. */ + struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */ + struct ibv_exp_cq_family *if_cq; /* CQ interface. */ +#if MLX4_PMD_MAX_INLINE > 0 + uint32_t max_inline; /* Max inline send size <= MLX4_PMD_MAX_INLINE. */ +#endif + unsigned int elts_n; /* (*elts)[] length. */ + struct txq_elt (*elts)[]; /* TX elements. */ + unsigned int elts_head; /* Current index in (*elts)[]. */ + unsigned int elts_tail; /* First element awaiting completion. */ + unsigned int elts_comp; /* Number of completion requests. */ + unsigned int elts_comp_cd; /* Countdown for next completion request. */ + unsigned int elts_comp_cd_init; /* Initial value for countdown. */ + struct mlx4_txq_stats stats; /* TX queue counters. */ + linear_t (*elts_linear)[]; /* Linearized buffers. */ + struct ibv_mr *mr_linear; /* Memory Region for linearized buffers. */ + unsigned int socket; /* CPU socket ID for allocations. */ + struct ibv_exp_res_domain *rd; /* Resource Domain. */ +}; + +struct priv { + struct rte_eth_dev *dev; /* Ethernet device. */ + struct ibv_context *ctx; /* Verbs context. */ + struct ibv_device_attr device_attr; /* Device properties. */ + struct ibv_pd *pd; /* Protection Domain. */ + /* + * MAC addresses array and configuration bit-field. + * An extra entry that cannot be modified by the DPDK is reserved + * for broadcast frames (destination MAC address ff:ff:ff:ff:ff:ff). + */ + struct ether_addr mac[MLX4_MAX_MAC_ADDRESSES]; + BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES); + /* VLAN filters. */ + struct { + unsigned int enabled:1; /* If enabled. */ + unsigned int id:12; /* VLAN ID (0-4095). */ + } vlan_filter[MLX4_MAX_VLAN_IDS]; /* VLAN filters table. */ + /* Device properties. 
*/ + uint16_t mtu; /* Configured MTU. */ + uint8_t port; /* Physical port number. */ + unsigned int started:1; /* Device started, flows enabled. */ + unsigned int promisc:1; /* Device in promiscuous mode. */ + unsigned int allmulti:1; /* Device receives all multicast packets. */ + unsigned int hw_qpg:1; /* QP groups are supported. */ + unsigned int hw_tss:1; /* TSS is supported. */ + unsigned int hw_rss:1; /* RSS is supported. */ + unsigned int hw_csum:1; /* Checksum offload is supported. */ + unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */ + unsigned int rss:1; /* RSS is enabled. */ + unsigned int vf:1; /* This is a VF device. */ + unsigned int pending_alarm:1; /* An alarm is pending. */ +#ifdef INLINE_RECV + unsigned int inl_recv_size; /* Inline recv size */ +#endif + unsigned int max_rss_tbl_sz; /* Maximum number of RSS queues. */ + /* RX/TX queues. */ + struct rxq rxq_parent; /* Parent queue when RSS is enabled. */ + unsigned int rxqs_n; /* RX queues array size. */ + unsigned int txqs_n; /* TX queues array size. */ + struct rxq *(*rxqs)[]; /* RX queues. */ + struct txq *(*txqs)[]; /* TX queues. */ + struct rte_intr_handle intr_handle; /* Interrupt handler. */ + rte_spinlock_t lock; /* Lock for control functions. */ +}; + +/* Local storage for secondary process data. */ +struct mlx4_secondary_data { + struct rte_eth_dev_data data; /* Local device data. */ + struct priv *primary_priv; /* Private structure from primary. */ + struct rte_eth_dev_data *shared_dev_data; /* Shared device data. */ + rte_spinlock_t lock; /* Port configuration lock. */ +} mlx4_secondary_data[RTE_MAX_ETHPORTS]; + +/** + * Check if running as a secondary process. + * + * @return + * Nonzero if running as a secondary process. + */ +static inline int +mlx4_is_secondary(void) +{ + return rte_eal_process_type() != RTE_PROC_PRIMARY; +} + +/** + * Return private structure associated with an Ethernet device. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * Pointer to private structure. + */ +static struct priv * +mlx4_get_priv(struct rte_eth_dev *dev) +{ + struct mlx4_secondary_data *sd; + + if (!mlx4_is_secondary()) + return dev->data->dev_private; + sd = &mlx4_secondary_data[dev->data->port_id]; + return sd->data.dev_private; +} + +/** + * Lock private structure to protect it from concurrent access in the + * control path. + * + * @param priv + * Pointer to private structure. + */ +static void +priv_lock(struct priv *priv) +{ + rte_spinlock_lock(&priv->lock); +} + +/** + * Unlock private structure. + * + * @param priv + * Pointer to private structure. + */ +static void +priv_unlock(struct priv *priv) +{ + rte_spinlock_unlock(&priv->lock); +} + +/* Allocate a buffer on the stack and fill it with a printf format string. */ +#define MKSTR(name, ...) \ + char name[snprintf(NULL, 0, __VA_ARGS__) + 1]; \ + \ + snprintf(name, sizeof(name), __VA_ARGS__) + +/** + * Get interface name from private structure. + * + * @param[in] priv + * Pointer to private structure. + * @param[out] ifname + * Interface name output buffer. + * + * @return + * 0 on success, -1 on failure and errno is set. 
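+ * + * The lookup scans the "device/net" directory under ibdev_path and keeps + * the entry whose dev_port (or dev_id on Linux kernels < 3.15) equals + * priv->port - 1; e.g. with an illustrative ibdev_path of + * /sys/class/infiniband/mlx4_0 and port 1, the netdev whose dev_port file + * contains 0 is returned.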
+ */ +static int +priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE]) +{ + DIR *dir; + struct dirent *dent; + unsigned int dev_type = 0; + unsigned int dev_port_prev = ~0u; + char match[IF_NAMESIZE] = ""; + + { + MKSTR(path, "%s/device/net", priv->ctx->device->ibdev_path); + + dir = opendir(path); + if (dir == NULL) + return -1; + } + while ((dent = readdir(dir)) != NULL) { + char *name = dent->d_name; + FILE *file; + unsigned int dev_port; + int r; + + if ((name[0] == '.') && + ((name[1] == '\0') || + ((name[1] == '.') && (name[2] == '\0')))) + continue; + + MKSTR(path, "%s/device/net/%s/%s", + priv->ctx->device->ibdev_path, name, + (dev_type ? "dev_id" : "dev_port")); + + file = fopen(path, "rb"); + if (file == NULL) { + if (errno != ENOENT) + continue; + /* + * Switch to dev_id when dev_port does not exist as + * is the case with Linux kernel versions < 3.15. + */ +try_dev_id: + match[0] = '\0'; + if (dev_type) + break; + dev_type = 1; + dev_port_prev = ~0u; + rewinddir(dir); + continue; + } + r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port); + fclose(file); + if (r != 1) + continue; + /* + * Switch to dev_id when dev_port returns the same value for + * all ports. May happen when using a MOFED release older than + * 3.0 with a Linux kernel >= 3.15. + */ + if (dev_port == dev_port_prev) + goto try_dev_id; + dev_port_prev = dev_port; + if (dev_port == (priv->port - 1u)) + snprintf(match, sizeof(match), "%s", name); + } + closedir(dir); + if (match[0] == '\0') + return -1; + strncpy(*ifname, match, sizeof(*ifname)); + return 0; +} + +/** + * Read from sysfs entry. + * + * @param[in] priv + * Pointer to private structure. + * @param[in] entry + * Entry name relative to sysfs path. + * @param[out] buf + * Data output buffer. + * @param size + * Buffer size. + * + * @return + * 0 on success, -1 on failure and errno is set. + */ +static int +priv_sysfs_read(const struct priv *priv, const char *entry, + char *buf, size_t size) +{ + char ifname[IF_NAMESIZE]; + FILE *file; + int ret; + int err; + + if (priv_get_ifname(priv, &ifname)) + return -1; + + MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path, + ifname, entry); + + file = fopen(path, "rb"); + if (file == NULL) + return -1; + ret = fread(buf, 1, size, file); + err = errno; + if (((size_t)ret < size) && (ferror(file))) + ret = -1; + else + ret = size; + fclose(file); + errno = err; + return ret; +} + +/** + * Write to sysfs entry. + * + * @param[in] priv + * Pointer to private structure. + * @param[in] entry + * Entry name relative to sysfs path. + * @param[in] buf + * Data buffer. + * @param size + * Buffer size. + * + * @return + * 0 on success, -1 on failure and errno is set. + */ +static int +priv_sysfs_write(const struct priv *priv, const char *entry, + char *buf, size_t size) +{ + char ifname[IF_NAMESIZE]; + FILE *file; + int ret; + int err; + + if (priv_get_ifname(priv, &ifname)) + return -1; + + MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path, + ifname, entry); + + file = fopen(path, "wb"); + if (file == NULL) + return -1; + ret = fwrite(buf, 1, size, file); + err = errno; + if (((size_t)ret < size) || (ferror(file))) + ret = -1; + else + ret = size; + fclose(file); + errno = err; + return ret; +} + +/** + * Get unsigned long sysfs property. + * + * @param priv + * Pointer to private structure. + * @param[in] name + * Entry name relative to sysfs path. + * @param[out] value + * Value output buffer. + * + * @return + * 0 on success, -1 on failure and errno is set. 
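+ * + * Illustrative use, mirroring priv_get_mtu() below: + * + * unsigned long mtu; + * + * if (priv_get_sysfs_ulong(priv, "mtu", &mtu) == -1) + * return -1;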
+ */ +static int +priv_get_sysfs_ulong(struct priv *priv, const char *name, unsigned long *value) +{ + int ret; + unsigned long value_ret; + char value_str[32]; + + ret = priv_sysfs_read(priv, name, value_str, (sizeof(value_str) - 1)); + if (ret == -1) { + DEBUG("cannot read %s value from sysfs: %s", + name, strerror(errno)); + return -1; + } + value_str[ret] = '\0'; + errno = 0; + value_ret = strtoul(value_str, NULL, 0); + if (errno) { + DEBUG("invalid %s value `%s': %s", name, value_str, + strerror(errno)); + return -1; + } + *value = value_ret; + return 0; +} + +/** + * Set unsigned long sysfs property. + * + * @param priv + * Pointer to private structure. + * @param[in] name + * Entry name relative to sysfs path. + * @param value + * Value to set. + * + * @return + * 0 on success, -1 on failure and errno is set. + */ +static int +priv_set_sysfs_ulong(struct priv *priv, const char *name, unsigned long value) +{ + int ret; + MKSTR(value_str, "%lu", value); + + ret = priv_sysfs_write(priv, name, value_str, (sizeof(value_str) - 1)); + if (ret == -1) { + DEBUG("cannot write %s `%s' (%lu) to sysfs: %s", + name, value_str, value, strerror(errno)); + return -1; + } + return 0; +} + +/** + * Perform ifreq ioctl() on associated Ethernet device. + * + * @param[in] priv + * Pointer to private structure. + * @param req + * Request number to pass to ioctl(). + * @param[out] ifr + * Interface request structure output buffer. + * + * @return + * 0 on success, -1 on failure and errno is set. + */ +static int +priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr) +{ + int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); + int ret = -1; + + if (sock == -1) + return ret; + if (priv_get_ifname(priv, &ifr->ifr_name) == 0) + ret = ioctl(sock, req, ifr); + close(sock); + return ret; +} + +/** + * Get device MTU. + * + * @param priv + * Pointer to private structure. + * @param[out] mtu + * MTU value output buffer. + * + * @return + * 0 on success, -1 on failure and errno is set. + */ +static int +priv_get_mtu(struct priv *priv, uint16_t *mtu) +{ + unsigned long ulong_mtu; + + if (priv_get_sysfs_ulong(priv, "mtu", &ulong_mtu) == -1) + return -1; + *mtu = ulong_mtu; + return 0; +} + +/** + * Set device MTU. + * + * @param priv + * Pointer to private structure. + * @param mtu + * MTU value to set. + * + * @return + * 0 on success, -1 on failure and errno is set. + */ +static int +priv_set_mtu(struct priv *priv, uint16_t mtu) +{ + return priv_set_sysfs_ulong(priv, "mtu", mtu); +} + +/** + * Set device flags. + * + * @param priv + * Pointer to private structure. + * @param keep + * Bitmask for flags that must remain untouched. + * @param flags + * Bitmask for flags to modify. + * + * @return + * 0 on success, -1 on failure and errno is set. + */ +static int +priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags) +{ + unsigned long tmp; + + if (priv_get_sysfs_ulong(priv, "flags", &tmp) == -1) + return -1; + tmp &= keep; + tmp |= flags; + return priv_set_sysfs_ulong(priv, "flags", tmp); +} + +/* Device configuration. */ + +static int +txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc, + unsigned int socket, const struct rte_eth_txconf *conf); + +static void +txq_cleanup(struct txq *txq); + +static int +rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc, + unsigned int socket, int inactive, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp); + +static void +rxq_cleanup(struct rxq *rxq); + +/** + * Ethernet device configuration. 
+ * + * Prepare the driver for a given number of TX and RX queues. + * Allocate parent RSS queue when several RX queues are requested. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, errno value on failure. + */ +static int +dev_configure(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + unsigned int rxqs_n = dev->data->nb_rx_queues; + unsigned int txqs_n = dev->data->nb_tx_queues; + unsigned int tmp; + int ret; + + priv->rxqs = (void *)dev->data->rx_queues; + priv->txqs = (void *)dev->data->tx_queues; + if (txqs_n != priv->txqs_n) { + INFO("%p: TX queues number update: %u -> %u", + (void *)dev, priv->txqs_n, txqs_n); + priv->txqs_n = txqs_n; + } + if (rxqs_n == priv->rxqs_n) + return 0; + if (!rte_is_power_of_2(rxqs_n)) { + unsigned n_active; + + n_active = rte_align32pow2(rxqs_n + 1) >> 1; + WARN("%p: number of RX queues must be a power" + " of 2: %u queues among %u will be active", + (void *)dev, n_active, rxqs_n); + } + + INFO("%p: RX queues number update: %u -> %u", + (void *)dev, priv->rxqs_n, rxqs_n); + /* If RSS is enabled, disable it first. */ + if (priv->rss) { + unsigned int i; + + /* Only if there are no remaining child RX queues. */ + for (i = 0; (i != priv->rxqs_n); ++i) + if ((*priv->rxqs)[i] != NULL) + return EINVAL; + rxq_cleanup(&priv->rxq_parent); + priv->rss = 0; + priv->rxqs_n = 0; + } + if (rxqs_n <= 1) { + /* Nothing else to do. */ + priv->rxqs_n = rxqs_n; + return 0; + } + /* Allocate a new RSS parent queue if supported by hardware. */ + if (!priv->hw_rss) { + ERROR("%p: only a single RX queue can be configured when" + " hardware doesn't support RSS", + (void *)dev); + return EINVAL; + } + /* Fail if hardware doesn't support that many RSS queues. */ + if (rxqs_n >= priv->max_rss_tbl_sz) { + ERROR("%p: only %u RX queues can be configured for RSS", + (void *)dev, priv->max_rss_tbl_sz); + return EINVAL; + } + priv->rss = 1; + tmp = priv->rxqs_n; + priv->rxqs_n = rxqs_n; + ret = rxq_setup(dev, &priv->rxq_parent, 0, 0, 0, NULL, NULL); + if (!ret) + return 0; + /* Failure, rollback. */ + priv->rss = 0; + priv->rxqs_n = tmp; + assert(ret > 0); + return ret; +} + +/** + * DPDK callback for Ethernet device configuration. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative errno value on failure. + */ +static int +mlx4_dev_configure(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + int ret; + + if (mlx4_is_secondary()) + return -E_RTE_SECONDARY; + priv_lock(priv); + ret = dev_configure(dev); + assert(ret >= 0); + priv_unlock(priv); + return -ret; +} + +static uint16_t mlx4_tx_burst(void *, struct rte_mbuf **, uint16_t); +static uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t); + +/** + * Configure secondary process queues from a private data pointer (primary + * or secondary) and update burst callbacks. Can take place only once. + * + * All queues must have been previously created by the primary process to + * avoid undefined behavior. + * + * @param priv + * Private data pointer from either primary or secondary process. + * + * @return + * Private data pointer from secondary process, NULL in case of error. + */ +static struct priv * +mlx4_secondary_data_setup(struct priv *priv) +{ + unsigned int port_id = 0; + struct mlx4_secondary_data *sd; + void **tx_queues; + void **rx_queues; + unsigned int nb_tx_queues; + unsigned int nb_rx_queues; + unsigned int i; + + /* priv must be valid at this point. 
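+ * The matching mlx4_secondary_data[] entry is looked up below with its + * lock held; that lock is only released on the end/error paths once the + * local queue arrays have been set up.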
*/ + assert(priv != NULL); + /* priv->dev must also be valid but may point to local memory from + * another process, possibly with the same address and must not + * be dereferenced yet. */ + assert(priv->dev != NULL); + /* Determine port ID by finding out where priv comes from. */ + while (1) { + sd = &mlx4_secondary_data[port_id]; + rte_spinlock_lock(&sd->lock); + /* Primary process? */ + if (sd->primary_priv == priv) + break; + /* Secondary process? */ + if (sd->data.dev_private == priv) + break; + rte_spinlock_unlock(&sd->lock); + if (++port_id == RTE_DIM(mlx4_secondary_data)) + port_id = 0; + } + /* Switch to secondary private structure. If private data has already + * been updated by another thread, there is nothing else to do. */ + priv = sd->data.dev_private; + if (priv->dev->data == &sd->data) + goto end; + /* Sanity checks. Secondary private structure is supposed to point + * to local eth_dev, itself still pointing to the shared device data + * structure allocated by the primary process. */ + assert(sd->shared_dev_data != &sd->data); + assert(sd->data.nb_tx_queues == 0); + assert(sd->data.tx_queues == NULL); + assert(sd->data.nb_rx_queues == 0); + assert(sd->data.rx_queues == NULL); + assert(priv != sd->primary_priv); + assert(priv->dev->data == sd->shared_dev_data); + assert(priv->txqs_n == 0); + assert(priv->txqs == NULL); + assert(priv->rxqs_n == 0); + assert(priv->rxqs == NULL); + nb_tx_queues = sd->shared_dev_data->nb_tx_queues; + nb_rx_queues = sd->shared_dev_data->nb_rx_queues; + /* Allocate local storage for queues. */ + tx_queues = rte_zmalloc("secondary ethdev->tx_queues", + sizeof(sd->data.tx_queues[0]) * nb_tx_queues, + RTE_CACHE_LINE_SIZE); + rx_queues = rte_zmalloc("secondary ethdev->rx_queues", + sizeof(sd->data.rx_queues[0]) * nb_rx_queues, + RTE_CACHE_LINE_SIZE); + if (tx_queues == NULL || rx_queues == NULL) + goto error; + /* Lock to prevent control operations during setup. */ + priv_lock(priv); + /* TX queues. */ + for (i = 0; i != nb_tx_queues; ++i) { + struct txq *primary_txq = (*sd->primary_priv->txqs)[i]; + struct txq *txq; + + if (primary_txq == NULL) + continue; + txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0, + primary_txq->socket); + if (txq != NULL) { + if (txq_setup(priv->dev, + txq, + primary_txq->elts_n * MLX4_PMD_SGE_WR_N, + primary_txq->socket, + NULL) == 0) { + txq->stats.idx = primary_txq->stats.idx; + tx_queues[i] = txq; + continue; + } + rte_free(txq); + } + while (i) { + txq = tx_queues[--i]; + txq_cleanup(txq); + rte_free(txq); + } + goto error; + } + /* RX queues. */ + for (i = 0; i != nb_rx_queues; ++i) { + struct rxq *primary_rxq = (*sd->primary_priv->rxqs)[i]; + + if (primary_rxq == NULL) + continue; + /* Not supported yet. */ + rx_queues[i] = NULL; + } + /* Update everything. */ + priv->txqs = (void *)tx_queues; + priv->txqs_n = nb_tx_queues; + priv->rxqs = (void *)rx_queues; + priv->rxqs_n = nb_rx_queues; + sd->data.rx_queues = rx_queues; + sd->data.tx_queues = tx_queues; + sd->data.nb_rx_queues = nb_rx_queues; + sd->data.nb_tx_queues = nb_tx_queues; + sd->data.dev_link = sd->shared_dev_data->dev_link; + sd->data.mtu = sd->shared_dev_data->mtu; + memcpy(sd->data.rx_queue_state, sd->shared_dev_data->rx_queue_state, + sizeof(sd->data.rx_queue_state)); + memcpy(sd->data.tx_queue_state, sd->shared_dev_data->tx_queue_state, + sizeof(sd->data.tx_queue_state)); + sd->data.dev_flags = sd->shared_dev_data->dev_flags; + /* Use local data from now on. 
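+ * The rte_mb() pair below orders the switch to the local dev->data copy + * ahead of the burst callback update, so the new callbacks see the fully + * populated local structure.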
*/ + rte_mb(); + priv->dev->data = &sd->data; + rte_mb(); + priv->dev->tx_pkt_burst = mlx4_tx_burst; + priv->dev->rx_pkt_burst = removed_rx_burst; + priv_unlock(priv); +end: + /* More sanity checks. */ + assert(priv->dev->tx_pkt_burst == mlx4_tx_burst); + assert(priv->dev->rx_pkt_burst == removed_rx_burst); + assert(priv->dev->data == &sd->data); + rte_spinlock_unlock(&sd->lock); + return priv; +error: + priv_unlock(priv); + rte_free(tx_queues); + rte_free(rx_queues); + rte_spinlock_unlock(&sd->lock); + return NULL; +} + +/* TX queues handling. */ + +/** + * Allocate TX queue elements. + * + * @param txq + * Pointer to TX queue structure. + * @param elts_n + * Number of elements to allocate. + * + * @return + * 0 on success, errno value on failure. + */ +static int +txq_alloc_elts(struct txq *txq, unsigned int elts_n) +{ + unsigned int i; + struct txq_elt (*elts)[elts_n] = + rte_calloc_socket("TXQ", 1, sizeof(*elts), 0, txq->socket); + linear_t (*elts_linear)[elts_n] = + rte_calloc_socket("TXQ", 1, sizeof(*elts_linear), 0, + txq->socket); + struct ibv_mr *mr_linear = NULL; + int ret = 0; + + if ((elts == NULL) || (elts_linear == NULL)) { + ERROR("%p: can't allocate packets array", (void *)txq); + ret = ENOMEM; + goto error; + } + mr_linear = + ibv_reg_mr(txq->priv->pd, elts_linear, sizeof(*elts_linear), + (IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE)); + if (mr_linear == NULL) { + ERROR("%p: unable to configure MR, ibv_reg_mr() failed", + (void *)txq); + ret = EINVAL; + goto error; + } + for (i = 0; (i != elts_n); ++i) { + struct txq_elt *elt = &(*elts)[i]; + + elt->buf = NULL; + } + DEBUG("%p: allocated and configured %u WRs", (void *)txq, elts_n); + txq->elts_n = elts_n; + txq->elts = elts; + txq->elts_head = 0; + txq->elts_tail = 0; + txq->elts_comp = 0; + /* Request send completion every MLX4_PMD_TX_PER_COMP_REQ packets or + * at least 4 times per ring. */ + txq->elts_comp_cd_init = + ((MLX4_PMD_TX_PER_COMP_REQ < (elts_n / 4)) ? + MLX4_PMD_TX_PER_COMP_REQ : (elts_n / 4)); + txq->elts_comp_cd = txq->elts_comp_cd_init; + txq->elts_linear = elts_linear; + txq->mr_linear = mr_linear; + assert(ret == 0); + return 0; +error: + if (mr_linear != NULL) + claim_zero(ibv_dereg_mr(mr_linear)); + + rte_free(elts_linear); + rte_free(elts); + + DEBUG("%p: failed, freed everything", (void *)txq); + assert(ret > 0); + return ret; +} + +/** + * Free TX queue elements. + * + * @param txq + * Pointer to TX queue structure. + */ +static void +txq_free_elts(struct txq *txq) +{ + unsigned int elts_n = txq->elts_n; + unsigned int elts_head = txq->elts_head; + unsigned int elts_tail = txq->elts_tail; + struct txq_elt (*elts)[elts_n] = txq->elts; + linear_t (*elts_linear)[elts_n] = txq->elts_linear; + struct ibv_mr *mr_linear = txq->mr_linear; + + DEBUG("%p: freeing WRs", (void *)txq); + txq->elts_n = 0; + txq->elts_head = 0; + txq->elts_tail = 0; + txq->elts_comp = 0; + txq->elts_comp_cd = 0; + txq->elts_comp_cd_init = 0; + txq->elts = NULL; + txq->elts_linear = NULL; + txq->mr_linear = NULL; + if (mr_linear != NULL) + claim_zero(ibv_dereg_mr(mr_linear)); + + rte_free(elts_linear); + if (elts == NULL) + return; + while (elts_tail != elts_head) { + struct txq_elt *elt = &(*elts)[elts_tail]; + + assert(elt->buf != NULL); + rte_pktmbuf_free(elt->buf); +#ifndef NDEBUG + /* Poisoning. */ + memset(elt, 0x77, sizeof(*elt)); +#endif + if (++elts_tail == elts_n) + elts_tail = 0; + } + rte_free(elts); +} + + +/** + * Clean up a TX queue. 
+ * + * Destroy objects, free allocated memory and reset the structure for reuse. + * + * @param txq + * Pointer to TX queue structure. + */ +static void +txq_cleanup(struct txq *txq) +{ + struct ibv_exp_release_intf_params params; + size_t i; + + DEBUG("cleaning up %p", (void *)txq); + txq_free_elts(txq); + if (txq->if_qp != NULL) { + assert(txq->priv != NULL); + assert(txq->priv->ctx != NULL); + assert(txq->qp != NULL); + params = (struct ibv_exp_release_intf_params){ + .comp_mask = 0, + }; + claim_zero(ibv_exp_release_intf(txq->priv->ctx, + txq->if_qp, + ¶ms)); + } + if (txq->if_cq != NULL) { + assert(txq->priv != NULL); + assert(txq->priv->ctx != NULL); + assert(txq->cq != NULL); + params = (struct ibv_exp_release_intf_params){ + .comp_mask = 0, + }; + claim_zero(ibv_exp_release_intf(txq->priv->ctx, + txq->if_cq, + ¶ms)); + } + if (txq->qp != NULL) + claim_zero(ibv_destroy_qp(txq->qp)); + if (txq->cq != NULL) + claim_zero(ibv_destroy_cq(txq->cq)); + if (txq->rd != NULL) { + struct ibv_exp_destroy_res_domain_attr attr = { + .comp_mask = 0, + }; + + assert(txq->priv != NULL); + assert(txq->priv->ctx != NULL); + claim_zero(ibv_exp_destroy_res_domain(txq->priv->ctx, + txq->rd, + &attr)); + } + for (i = 0; (i != elemof(txq->mp2mr)); ++i) { + if (txq->mp2mr[i].mp == NULL) + break; + assert(txq->mp2mr[i].mr != NULL); + claim_zero(ibv_dereg_mr(txq->mp2mr[i].mr)); + } + memset(txq, 0, sizeof(*txq)); +} + +/** + * Manage TX completions. + * + * When sending a burst, mlx4_tx_burst() posts several WRs. + * To improve performance, a completion event is only required once every + * MLX4_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information + * for other WRs, but this information would not be used anyway. + * + * @param txq + * Pointer to TX queue structure. + * + * @return + * 0 on success, -1 on failure. + */ +static int +txq_complete(struct txq *txq) +{ + unsigned int elts_comp = txq->elts_comp; + unsigned int elts_tail = txq->elts_tail; + const unsigned int elts_n = txq->elts_n; + int wcs_n; + + if (unlikely(elts_comp == 0)) + return 0; +#ifdef DEBUG_SEND + DEBUG("%p: processing %u work requests completions", + (void *)txq, elts_comp); +#endif + wcs_n = txq->if_cq->poll_cnt(txq->cq, elts_comp); + if (unlikely(wcs_n == 0)) + return 0; + if (unlikely(wcs_n < 0)) { + DEBUG("%p: ibv_poll_cq() failed (wcs_n=%d)", + (void *)txq, wcs_n); + return -1; + } + elts_comp -= wcs_n; + assert(elts_comp <= txq->elts_comp); + /* + * Assume WC status is successful as nothing can be done about it + * anyway. + */ + elts_tail += wcs_n * txq->elts_comp_cd_init; + if (elts_tail >= elts_n) + elts_tail -= elts_n; + txq->elts_tail = elts_tail; + txq->elts_comp = elts_comp; + return 0; +} + +/* For best performance, this function should not be inlined. */ +static struct ibv_mr *mlx4_mp2mr(struct ibv_pd *, const struct rte_mempool *) + __attribute__((noinline)); + +/** + * Register mempool as a memory region. + * + * @param pd + * Pointer to protection domain. + * @param mp + * Pointer to memory pool. + * + * @return + * Memory region pointer, NULL in case of error. + */ +static struct ibv_mr * +mlx4_mp2mr(struct ibv_pd *pd, const struct rte_mempool *mp) +{ + const struct rte_memseg *ms = rte_eal_get_physmem_layout(); + uintptr_t start = mp->elt_va_start; + uintptr_t end = mp->elt_va_end; + unsigned int i; + + DEBUG("mempool %p area start=%p end=%p size=%zu", + (const void *)mp, (void *)start, (void *)end, + (size_t)(end - start)); + /* Round start and end to page boundary if found in memory segments. 
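+ * Alignment uses the hugepage size of the matching segment so that the + * ibv_reg_mr() call below covers whole pages; e.g. with 2 MiB hugepages + * both bounds are expanded to 2 MiB multiples.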
*/ + for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) { + uintptr_t addr = (uintptr_t)ms[i].addr; + size_t len = ms[i].len; + unsigned int align = ms[i].hugepage_sz; + + if ((start > addr) && (start < addr + len)) + start = RTE_ALIGN_FLOOR(start, align); + if ((end > addr) && (end < addr + len)) + end = RTE_ALIGN_CEIL(end, align); + } + DEBUG("mempool %p using start=%p end=%p size=%zu for MR", + (const void *)mp, (void *)start, (void *)end, + (size_t)(end - start)); + return ibv_reg_mr(pd, + (void *)start, + end - start, + IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE); +} + +/** + * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which + * the cloned mbuf is allocated is returned instead. + * + * @param buf + * Pointer to mbuf. + * + * @return + * Memory pool where data is located for given mbuf. + */ +static struct rte_mempool * +txq_mb2mp(struct rte_mbuf *buf) +{ + if (unlikely(RTE_MBUF_INDIRECT(buf))) + return rte_mbuf_from_indirect(buf)->pool; + return buf->pool; +} + +/** + * Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[]. + * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full, + * remove an entry first. + * + * @param txq + * Pointer to TX queue structure. + * @param[in] mp + * Memory Pool for which a Memory Region lkey must be returned. + * + * @return + * mr->lkey on success, (uint32_t)-1 on failure. + */ +static uint32_t +txq_mp2mr(struct txq *txq, const struct rte_mempool *mp) +{ + unsigned int i; + struct ibv_mr *mr; + + for (i = 0; (i != elemof(txq->mp2mr)); ++i) { + if (unlikely(txq->mp2mr[i].mp == NULL)) { + /* Unknown MP, add a new MR for it. */ + break; + } + if (txq->mp2mr[i].mp == mp) { + assert(txq->mp2mr[i].lkey != (uint32_t)-1); + assert(txq->mp2mr[i].mr->lkey == txq->mp2mr[i].lkey); + return txq->mp2mr[i].lkey; + } + } + /* Add a new entry, register MR first. */ + DEBUG("%p: discovered new memory pool \"%s\" (%p)", + (void *)txq, mp->name, (const void *)mp); + mr = mlx4_mp2mr(txq->priv->pd, mp); + if (unlikely(mr == NULL)) { + DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.", + (void *)txq); + return (uint32_t)-1; + } + if (unlikely(i == elemof(txq->mp2mr))) { + /* Table is full, remove oldest entry. */ + DEBUG("%p: MR <-> MP table full, dropping oldest entry.", + (void *)txq); + --i; + claim_zero(ibv_dereg_mr(txq->mp2mr[0].mr)); + memmove(&txq->mp2mr[0], &txq->mp2mr[1], + (sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0]))); + } + /* Store the new entry. */ + txq->mp2mr[i].mp = mp; + txq->mp2mr[i].mr = mr; + txq->mp2mr[i].lkey = mr->lkey; + DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32, + (void *)txq, mp->name, (const void *)mp, txq->mp2mr[i].lkey); + return txq->mp2mr[i].lkey; +} + +struct txq_mp2mr_mbuf_check_data { + const struct rte_mempool *mp; + int ret; +}; + +/** + * Callback function for rte_mempool_obj_iter() to check whether a given + * mempool object looks like a mbuf. + * + * @param[in, out] arg + * Context data (struct txq_mp2mr_mbuf_check_data). Contains mempool pointer + * and return value. + * @param[in] start + * Object start address. + * @param[in] end + * Object end address. + * @param index + * Unused. + * + * @return + * Nonzero value when object is not a mbuf. 
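+ * (The callback itself returns void; the result is reported through + * data->ret, 0 when the object looks like a mbuf and -1 otherwise.)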
+ */ +static void +txq_mp2mr_mbuf_check(void *arg, void *start, void *end, + uint32_t index __rte_unused) +{ + struct txq_mp2mr_mbuf_check_data *data = arg; + struct rte_mbuf *buf = + (void *)((uintptr_t)start + data->mp->header_size); + + (void)index; + /* Check whether mbuf structure fits element size and whether mempool + * pointer is valid. */ + if (((uintptr_t)end >= (uintptr_t)(buf + 1)) && + (buf->pool == data->mp)) + data->ret = 0; + else + data->ret = -1; +} + +/** + * Iterator function for rte_mempool_walk() to register existing mempools and + * fill the MP to MR cache of a TX queue. + * + * @param[in] mp + * Memory Pool to register. + * @param *arg + * Pointer to TX queue structure. + */ +static void +txq_mp2mr_iter(const struct rte_mempool *mp, void *arg) +{ + struct txq *txq = arg; + struct txq_mp2mr_mbuf_check_data data = { + .mp = mp, + .ret = -1, + }; + + /* Discard empty mempools. */ + if (mp->size == 0) + return; + /* Register mempool only if the first element looks like a mbuf. */ + rte_mempool_obj_iter((void *)mp->elt_va_start, + 1, + mp->header_size + mp->elt_size + mp->trailer_size, + 1, + mp->elt_pa, + mp->pg_num, + mp->pg_shift, + txq_mp2mr_mbuf_check, + &data); + if (data.ret) + return; + txq_mp2mr(txq, mp); +} + +#if MLX4_PMD_SGE_WR_N > 1 + +/** + * Copy scattered mbuf contents to a single linear buffer. + * + * @param[out] linear + * Linear output buffer. + * @param[in] buf + * Scattered input buffer. + * + * @return + * Number of bytes copied to the output buffer or 0 if not large enough. + */ +static unsigned int +linearize_mbuf(linear_t *linear, struct rte_mbuf *buf) +{ + unsigned int size = 0; + unsigned int offset; + + do { + unsigned int len = DATA_LEN(buf); + + offset = size; + size += len; + if (unlikely(size > sizeof(*linear))) + return 0; + memcpy(&(*linear)[offset], + rte_pktmbuf_mtod(buf, uint8_t *), + len); + buf = NEXT(buf); + } while (buf != NULL); + return size; +} + +/** + * Handle scattered buffers for mlx4_tx_burst(). + * + * @param txq + * TX queue structure. + * @param segs + * Number of segments in buf. + * @param elt + * TX queue element to fill. + * @param[in] buf + * Buffer to process. + * @param elts_head + * Index of the linear buffer to use if necessary (normally txq->elts_head). + * @param[out] sges + * Array filled with SGEs on success. + * + * @return + * A structure containing the processed packet size in bytes and the + * number of SGEs. Both fields are set to (unsigned int)-1 in case of + * failure. + */ +static struct tx_burst_sg_ret { + unsigned int length; + unsigned int num; +} +tx_burst_sg(struct txq *txq, unsigned int segs, struct txq_elt *elt, + struct rte_mbuf *buf, unsigned int elts_head, + struct ibv_sge (*sges)[MLX4_PMD_SGE_WR_N]) +{ + unsigned int sent_size = 0; + unsigned int j; + int linearize = 0; + + /* When there are too many segments, extra segments are + * linearized in the last SGE. */ + if (unlikely(segs > elemof(*sges))) { + segs = (elemof(*sges) - 1); + linearize = 1; + } + /* Update element. */ + elt->buf = buf; + /* Register segments as SGEs. */ + for (j = 0; (j != segs); ++j) { + struct ibv_sge *sge = &(*sges)[j]; + uint32_t lkey; + + /* Retrieve Memory Region key for this memory pool. */ + lkey = txq_mp2mr(txq, txq_mb2mp(buf)); + if (unlikely(lkey == (uint32_t)-1)) { + /* MR does not exist. */ + DEBUG("%p: unable to get MP <-> MR association", + (void *)txq); + /* Clean up TX element. */ + elt->buf = NULL; + goto stop; + } + /* Update SGE. 
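+ * Each SGE points at the segment data and carries the lkey of the MR + * registered for that segment's mempool (looked up above through + * txq_mp2mr()).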
*/ + sge->addr = rte_pktmbuf_mtod(buf, uintptr_t); + if (txq->priv->vf) + rte_prefetch0((volatile void *) + (uintptr_t)sge->addr); + sge->length = DATA_LEN(buf); + sge->lkey = lkey; + sent_size += sge->length; + buf = NEXT(buf); + } + /* If buf is not NULL here and is not going to be linearized, + * nb_segs is not valid. */ + assert(j == segs); + assert((buf == NULL) || (linearize)); + /* Linearize extra segments. */ + if (linearize) { + struct ibv_sge *sge = &(*sges)[segs]; + linear_t *linear = &(*txq->elts_linear)[elts_head]; + unsigned int size = linearize_mbuf(linear, buf); + + assert(segs == (elemof(*sges) - 1)); + if (size == 0) { + /* Invalid packet. */ + DEBUG("%p: packet too large to be linearized.", + (void *)txq); + /* Clean up TX element. */ + elt->buf = NULL; + goto stop; + } + /* If MLX4_PMD_SGE_WR_N is 1, free mbuf immediately. */ + if (elemof(*sges) == 1) { + do { + struct rte_mbuf *next = NEXT(buf); + + rte_pktmbuf_free_seg(buf); + buf = next; + } while (buf != NULL); + elt->buf = NULL; + } + /* Update SGE. */ + sge->addr = (uintptr_t)&(*linear)[0]; + sge->length = size; + sge->lkey = txq->mr_linear->lkey; + sent_size += size; + /* Include last segment. */ + segs++; + } + return (struct tx_burst_sg_ret){ + .length = sent_size, + .num = segs, + }; +stop: + return (struct tx_burst_sg_ret){ + .length = -1, + .num = -1, + }; +} + +#endif /* MLX4_PMD_SGE_WR_N > 1 */ + +/** + * DPDK callback for TX. + * + * @param dpdk_txq + * Generic pointer to TX queue structure. + * @param[in] pkts + * Packets to transmit. + * @param pkts_n + * Number of packets in array. + * + * @return + * Number of packets successfully transmitted (<= pkts_n). + */ +static uint16_t +mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) +{ + struct txq *txq = (struct txq *)dpdk_txq; + unsigned int elts_head = txq->elts_head; + const unsigned int elts_n = txq->elts_n; + unsigned int elts_comp_cd = txq->elts_comp_cd; + unsigned int elts_comp = 0; + unsigned int i; + unsigned int max; + int err; + + assert(elts_comp_cd != 0); + txq_complete(txq); + max = (elts_n - (elts_head - txq->elts_tail)); + if (max > elts_n) + max -= elts_n; + assert(max >= 1); + assert(max <= elts_n); + /* Always leave one free entry in the ring. */ + --max; + if (max == 0) + return 0; + if (max > pkts_n) + max = pkts_n; + for (i = 0; (i != max); ++i) { + struct rte_mbuf *buf = pkts[i]; + unsigned int elts_head_next = + (((elts_head + 1) == elts_n) ? 0 : elts_head + 1); + struct txq_elt *elt_next = &(*txq->elts)[elts_head_next]; + struct txq_elt *elt = &(*txq->elts)[elts_head]; + unsigned int segs = NB_SEGS(buf); +#ifdef MLX4_PMD_SOFT_COUNTERS + unsigned int sent_size = 0; +#endif + uint32_t send_flags = 0; + + /* Clean up old buffer. */ + if (likely(elt->buf != NULL)) { + struct rte_mbuf *tmp = elt->buf; + +#ifndef NDEBUG + /* Poisoning. */ + memset(elt, 0x66, sizeof(*elt)); +#endif + /* Faster than rte_pktmbuf_free(). */ + do { + struct rte_mbuf *next = NEXT(tmp); + + rte_pktmbuf_free_seg(tmp); + tmp = next; + } while (tmp != NULL); + } + /* Request TX completion. */ + if (unlikely(--elts_comp_cd == 0)) { + elts_comp_cd = txq->elts_comp_cd_init; + ++elts_comp; + send_flags |= IBV_EXP_QP_BURST_SIGNALED; + } + /* Should we enable HW CKSUM offload */ + if (buf->ol_flags & + (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) { + send_flags |= IBV_EXP_QP_BURST_IP_CSUM; + /* HW does not support checksum offloads at arbitrary + * offsets but automatically recognizes the packet + * type. 
For inner L3/L4 checksums, only VXLAN (UDP) + * tunnels are currently supported. */ + if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type)) + send_flags |= IBV_EXP_QP_BURST_TUNNEL; + } + if (likely(segs == 1)) { + uintptr_t addr; + uint32_t length; + uint32_t lkey; + + /* Retrieve buffer information. */ + addr = rte_pktmbuf_mtod(buf, uintptr_t); + length = DATA_LEN(buf); + /* Retrieve Memory Region key for this memory pool. */ + lkey = txq_mp2mr(txq, txq_mb2mp(buf)); + if (unlikely(lkey == (uint32_t)-1)) { + /* MR does not exist. */ + DEBUG("%p: unable to get MP <-> MR" + " association", (void *)txq); + /* Clean up TX element. */ + elt->buf = NULL; + goto stop; + } + /* Update element. */ + elt->buf = buf; + if (txq->priv->vf) + rte_prefetch0((volatile void *) + (uintptr_t)addr); + RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf); + /* Put packet into send queue. */ +#if MLX4_PMD_MAX_INLINE > 0 + if (length <= txq->max_inline) + err = txq->if_qp->send_pending_inline + (txq->qp, + (void *)addr, + length, + send_flags); + else +#endif + err = txq->if_qp->send_pending + (txq->qp, + addr, + length, + lkey, + send_flags); + if (unlikely(err)) + goto stop; +#ifdef MLX4_PMD_SOFT_COUNTERS + sent_size += length; +#endif + } else { +#if MLX4_PMD_SGE_WR_N > 1 + struct ibv_sge sges[MLX4_PMD_SGE_WR_N]; + struct tx_burst_sg_ret ret; + + ret = tx_burst_sg(txq, segs, elt, buf, elts_head, + &sges); + if (ret.length == (unsigned int)-1) + goto stop; + RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf); + /* Put SG list into send queue. */ + err = txq->if_qp->send_pending_sg_list + (txq->qp, + sges, + ret.num, + send_flags); + if (unlikely(err)) + goto stop; +#ifdef MLX4_PMD_SOFT_COUNTERS + sent_size += ret.length; +#endif +#else /* MLX4_PMD_SGE_WR_N > 1 */ + DEBUG("%p: TX scattered buffers support not" + " compiled in", (void *)txq); + goto stop; +#endif /* MLX4_PMD_SGE_WR_N > 1 */ + } + elts_head = elts_head_next; +#ifdef MLX4_PMD_SOFT_COUNTERS + /* Increment sent bytes counter. */ + txq->stats.obytes += sent_size; +#endif + } +stop: + /* Take a shortcut if nothing must be sent. */ + if (unlikely(i == 0)) + return 0; +#ifdef MLX4_PMD_SOFT_COUNTERS + /* Increment sent packets counter. */ + txq->stats.opackets += i; +#endif + /* Ring QP doorbell. */ + err = txq->if_qp->send_flush(txq->qp); + if (unlikely(err)) { + /* A nonzero value is not supposed to be returned. + * Nothing can be done about it. */ + DEBUG("%p: send_flush() failed with error %d", + (void *)txq, err); + } + txq->elts_head = elts_head; + txq->elts_comp += elts_comp; + txq->elts_comp_cd = elts_comp_cd; + return i; +} + +/** + * DPDK callback for TX in secondary processes. + * + * This function configures all queues from primary process information + * if necessary before reverting to the normal TX burst callback. + * + * @param dpdk_txq + * Generic pointer to TX queue structure. + * @param[in] pkts + * Packets to transmit. + * @param pkts_n + * Number of packets in array. + * + * @return + * Number of packets successfully transmitted (<= pkts_n). + */ +static uint16_t +mlx4_tx_burst_secondary_setup(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n) +{ + struct txq *txq = dpdk_txq; + struct priv *priv = mlx4_secondary_data_setup(txq->priv); + struct priv *primary_priv; + unsigned int index; + + if (priv == NULL) + return 0; + primary_priv = + mlx4_secondary_data[priv->dev->data->port_id].primary_priv; + /* Look for queue index in both private structures. 
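+ * The queue handed to this callback may belong to either process, so it + * is searched in the primary txqs[] as well as in the local copy before + * the local queue at the same index is used.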
*/ + for (index = 0; index != priv->txqs_n; ++index) + if (((*primary_priv->txqs)[index] == txq) || + ((*priv->txqs)[index] == txq)) + break; + if (index == priv->txqs_n) + return 0; + txq = (*priv->txqs)[index]; + return priv->dev->tx_pkt_burst(txq, pkts, pkts_n); +} + +/** + * Configure a TX queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param txq + * Pointer to TX queue structure. + * @param desc + * Number of descriptors to configure in queue. + * @param socket + * NUMA socket on which memory must be allocated. + * @param[in] conf + * Thresholds parameters. + * + * @return + * 0 on success, errno value on failure. + */ +static int +txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc, + unsigned int socket, const struct rte_eth_txconf *conf) +{ + struct priv *priv = mlx4_get_priv(dev); + struct txq tmpl = { + .priv = priv, + .socket = socket + }; + union { + struct ibv_exp_query_intf_params params; + struct ibv_exp_qp_init_attr init; + struct ibv_exp_res_domain_init_attr rd; + struct ibv_exp_cq_init_attr cq; + struct ibv_exp_qp_attr mod; + } attr; + enum ibv_exp_query_intf_status status; + int ret = 0; + + (void)conf; /* Thresholds configuration (ignored). */ + if (priv == NULL) + return EINVAL; + if ((desc == 0) || (desc % MLX4_PMD_SGE_WR_N)) { + ERROR("%p: invalid number of TX descriptors (must be a" + " multiple of %d)", (void *)dev, MLX4_PMD_SGE_WR_N); + return EINVAL; + } + desc /= MLX4_PMD_SGE_WR_N; + /* MRs will be registered in mp2mr[] later. */ + attr.rd = (struct ibv_exp_res_domain_init_attr){ + .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL | + IBV_EXP_RES_DOMAIN_MSG_MODEL), + .thread_model = IBV_EXP_THREAD_SINGLE, + .msg_model = IBV_EXP_MSG_HIGH_BW, + }; + tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd); + if (tmpl.rd == NULL) { + ret = ENOMEM; + ERROR("%p: RD creation failure: %s", + (void *)dev, strerror(ret)); + goto error; + } + attr.cq = (struct ibv_exp_cq_init_attr){ + .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN, + .res_domain = tmpl.rd, + }; + tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq); + if (tmpl.cq == NULL) { + ret = ENOMEM; + ERROR("%p: CQ creation failure: %s", + (void *)dev, strerror(ret)); + goto error; + } + DEBUG("priv->device_attr.max_qp_wr is %d", + priv->device_attr.max_qp_wr); + DEBUG("priv->device_attr.max_sge is %d", + priv->device_attr.max_sge); + attr.init = (struct ibv_exp_qp_init_attr){ + /* CQ to be associated with the send queue. */ + .send_cq = tmpl.cq, + /* CQ to be associated with the receive queue. */ + .recv_cq = tmpl.cq, + .cap = { + /* Max number of outstanding WRs. */ + .max_send_wr = ((priv->device_attr.max_qp_wr < desc) ? + priv->device_attr.max_qp_wr : + desc), + /* Max number of scatter/gather elements in a WR. */ + .max_send_sge = ((priv->device_attr.max_sge < + MLX4_PMD_SGE_WR_N) ? + priv->device_attr.max_sge : + MLX4_PMD_SGE_WR_N), +#if MLX4_PMD_MAX_INLINE > 0 + .max_inline_data = MLX4_PMD_MAX_INLINE, +#endif + }, + .qp_type = IBV_QPT_RAW_PACKET, + /* Do *NOT* enable this, completions events are managed per + * TX burst. */ + .sq_sig_all = 0, + .pd = priv->pd, + .res_domain = tmpl.rd, + .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD | + IBV_EXP_QP_INIT_ATTR_RES_DOMAIN), + }; + tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init); + if (tmpl.qp == NULL) { + ret = (errno ? errno : EINVAL); + ERROR("%p: QP creation failure: %s", + (void *)dev, strerror(ret)); + goto error; + } +#if MLX4_PMD_MAX_INLINE > 0 + /* ibv_create_qp() updates this value. 
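+ * The verbs layer may adjust max_inline_data, so the inline threshold + * used by mlx4_tx_burst() is read back from the attribute instead of + * assuming MLX4_PMD_MAX_INLINE.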
*/ + tmpl.max_inline = attr.init.cap.max_inline_data; +#endif + attr.mod = (struct ibv_exp_qp_attr){ + /* Move the QP to this state. */ + .qp_state = IBV_QPS_INIT, + /* Primary port number. */ + .port_num = priv->port + }; + ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, + (IBV_EXP_QP_STATE | IBV_EXP_QP_PORT)); + if (ret) { + ERROR("%p: QP state to IBV_QPS_INIT failed: %s", + (void *)dev, strerror(ret)); + goto error; + } + ret = txq_alloc_elts(&tmpl, desc); + if (ret) { + ERROR("%p: TXQ allocation failed: %s", + (void *)dev, strerror(ret)); + goto error; + } + attr.mod = (struct ibv_exp_qp_attr){ + .qp_state = IBV_QPS_RTR + }; + ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE); + if (ret) { + ERROR("%p: QP state to IBV_QPS_RTR failed: %s", + (void *)dev, strerror(ret)); + goto error; + } + attr.mod.qp_state = IBV_QPS_RTS; + ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE); + if (ret) { + ERROR("%p: QP state to IBV_QPS_RTS failed: %s", + (void *)dev, strerror(ret)); + goto error; + } + attr.params = (struct ibv_exp_query_intf_params){ + .intf_scope = IBV_EXP_INTF_GLOBAL, + .intf = IBV_EXP_INTF_CQ, + .obj = tmpl.cq, + }; + tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status); + if (tmpl.if_cq == NULL) { + ERROR("%p: CQ interface family query failed with status %d", + (void *)dev, status); + goto error; + } + attr.params = (struct ibv_exp_query_intf_params){ + .intf_scope = IBV_EXP_INTF_GLOBAL, + .intf = IBV_EXP_INTF_QP_BURST, + .obj = tmpl.qp, +#ifdef HAVE_EXP_QP_BURST_CREATE_DISABLE_ETH_LOOPBACK + /* MC loopback must be disabled when not using a VF. */ + .family_flags = + (!priv->vf ? + IBV_EXP_QP_BURST_CREATE_DISABLE_ETH_LOOPBACK : + 0), +#endif + }; + tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status); + if (tmpl.if_qp == NULL) { + ERROR("%p: QP interface family query failed with status %d", + (void *)dev, status); + goto error; + } + /* Clean up txq in case we're reinitializing it. */ + DEBUG("%p: cleaning-up old txq just in case", (void *)txq); + txq_cleanup(txq); + *txq = tmpl; + DEBUG("%p: txq updated with %p", (void *)txq, (void *)&tmpl); + /* Pre-register known mempools. */ + rte_mempool_walk(txq_mp2mr_iter, txq); + assert(ret == 0); + return 0; +error: + txq_cleanup(&tmpl); + assert(ret > 0); + return ret; +} + +/** + * DPDK callback to configure a TX queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * TX queue index. + * @param desc + * Number of descriptors to configure in queue. + * @param socket + * NUMA socket on which memory must be allocated. + * @param[in] conf + * Thresholds parameters. + * + * @return + * 0 on success, negative errno value on failure. 
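+ * + * (The internal txq_setup() returns a positive errno value; this wrapper + * negates it to match the DPDK convention.)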
+ */ +static int +mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_txconf *conf) +{ + struct priv *priv = dev->data->dev_private; + struct txq *txq = (*priv->txqs)[idx]; + int ret; + + if (mlx4_is_secondary()) + return -E_RTE_SECONDARY; + priv_lock(priv); + DEBUG("%p: configuring queue %u for %u descriptors", + (void *)dev, idx, desc); + if (idx >= priv->txqs_n) { + ERROR("%p: queue index out of range (%u >= %u)", + (void *)dev, idx, priv->txqs_n); + priv_unlock(priv); + return -EOVERFLOW; + } + if (txq != NULL) { + DEBUG("%p: reusing already allocated queue index %u (%p)", + (void *)dev, idx, (void *)txq); + if (priv->started) { + priv_unlock(priv); + return -EEXIST; + } + (*priv->txqs)[idx] = NULL; + txq_cleanup(txq); + } else { + txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0, socket); + if (txq == NULL) { + ERROR("%p: unable to allocate queue index %u", + (void *)dev, idx); + priv_unlock(priv); + return -ENOMEM; + } + } + ret = txq_setup(dev, txq, desc, socket, conf); + if (ret) + rte_free(txq); + else { + txq->stats.idx = idx; + DEBUG("%p: adding TX queue %p to list", + (void *)dev, (void *)txq); + (*priv->txqs)[idx] = txq; + /* Update send callback. */ + dev->tx_pkt_burst = mlx4_tx_burst; + } + priv_unlock(priv); + return -ret; +} + +/** + * DPDK callback to release a TX queue. + * + * @param dpdk_txq + * Generic TX queue pointer. + */ +static void +mlx4_tx_queue_release(void *dpdk_txq) +{ + struct txq *txq = (struct txq *)dpdk_txq; + struct priv *priv; + unsigned int i; + + if (mlx4_is_secondary()) + return; + if (txq == NULL) + return; + priv = txq->priv; + priv_lock(priv); + for (i = 0; (i != priv->txqs_n); ++i) + if ((*priv->txqs)[i] == txq) { + DEBUG("%p: removing TX queue %p from list", + (void *)priv->dev, (void *)txq); + (*priv->txqs)[i] = NULL; + break; + } + txq_cleanup(txq); + rte_free(txq); + priv_unlock(priv); +} + +/* RX queues handling. */ + +/** + * Allocate RX queue elements with scattered packets support. + * + * @param rxq + * Pointer to RX queue structure. + * @param elts_n + * Number of elements to allocate. + * @param[in] pool + * If not NULL, fetch buffers from this array instead of allocating them + * with rte_pktmbuf_alloc(). + * + * @return + * 0 on success, errno value on failure. + */ +static int +rxq_alloc_elts_sp(struct rxq *rxq, unsigned int elts_n, + struct rte_mbuf **pool) +{ + unsigned int i; + struct rxq_elt_sp (*elts)[elts_n] = + rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0, + rxq->socket); + int ret = 0; + + if (elts == NULL) { + ERROR("%p: can't allocate packets array", (void *)rxq); + ret = ENOMEM; + goto error; + } + /* For each WR (packet). */ + for (i = 0; (i != elts_n); ++i) { + unsigned int j; + struct rxq_elt_sp *elt = &(*elts)[i]; + struct ibv_recv_wr *wr = &elt->wr; + struct ibv_sge (*sges)[(elemof(elt->sges))] = &elt->sges; + + /* These two arrays must have the same size. */ + assert(elemof(elt->sges) == elemof(elt->bufs)); + /* Configure WR. */ + wr->wr_id = i; + wr->next = &(*elts)[(i + 1)].wr; + wr->sg_list = &(*sges)[0]; + wr->num_sge = elemof(*sges); + /* For each SGE (segment). 
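+ * Only the first SGE keeps the headroom reserved by rte_pktmbuf_alloc(); + * subsequent segments drop theirs so each extra buffer is usable in full + * for received data.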
*/ + for (j = 0; (j != elemof(elt->bufs)); ++j) { + struct ibv_sge *sge = &(*sges)[j]; + struct rte_mbuf *buf; + + if (pool != NULL) { + buf = *(pool++); + assert(buf != NULL); + rte_pktmbuf_reset(buf); + } else + buf = rte_pktmbuf_alloc(rxq->mp); + if (buf == NULL) { + assert(pool == NULL); + ERROR("%p: empty mbuf pool", (void *)rxq); + ret = ENOMEM; + goto error; + } + elt->bufs[j] = buf; + /* Headroom is reserved by rte_pktmbuf_alloc(). */ + assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM); + /* Buffer is supposed to be empty. */ + assert(rte_pktmbuf_data_len(buf) == 0); + assert(rte_pktmbuf_pkt_len(buf) == 0); + /* sge->addr must be able to store a pointer. */ + assert(sizeof(sge->addr) >= sizeof(uintptr_t)); + if (j == 0) { + /* The first SGE keeps its headroom. */ + sge->addr = rte_pktmbuf_mtod(buf, uintptr_t); + sge->length = (buf->buf_len - + RTE_PKTMBUF_HEADROOM); + } else { + /* Subsequent SGEs lose theirs. */ + assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM); + SET_DATA_OFF(buf, 0); + sge->addr = (uintptr_t)buf->buf_addr; + sge->length = buf->buf_len; + } + sge->lkey = rxq->mr->lkey; + /* Redundant check for tailroom. */ + assert(sge->length == rte_pktmbuf_tailroom(buf)); + } + } + /* The last WR pointer must be NULL. */ + (*elts)[(i - 1)].wr.next = NULL; + DEBUG("%p: allocated and configured %u WRs (%zu segments)", + (void *)rxq, elts_n, (elts_n * elemof((*elts)[0].sges))); + rxq->elts_n = elts_n; + rxq->elts_head = 0; + rxq->elts.sp = elts; + assert(ret == 0); + return 0; +error: + if (elts != NULL) { + assert(pool == NULL); + for (i = 0; (i != elemof(*elts)); ++i) { + unsigned int j; + struct rxq_elt_sp *elt = &(*elts)[i]; + + for (j = 0; (j != elemof(elt->bufs)); ++j) { + struct rte_mbuf *buf = elt->bufs[j]; + + if (buf != NULL) + rte_pktmbuf_free_seg(buf); + } + } + rte_free(elts); + } + DEBUG("%p: failed, freed everything", (void *)rxq); + assert(ret > 0); + return ret; +} + +/** + * Free RX queue elements with scattered packets support. + * + * @param rxq + * Pointer to RX queue structure. + */ +static void +rxq_free_elts_sp(struct rxq *rxq) +{ + unsigned int i; + unsigned int elts_n = rxq->elts_n; + struct rxq_elt_sp (*elts)[elts_n] = rxq->elts.sp; + + DEBUG("%p: freeing WRs", (void *)rxq); + rxq->elts_n = 0; + rxq->elts.sp = NULL; + if (elts == NULL) + return; + for (i = 0; (i != elemof(*elts)); ++i) { + unsigned int j; + struct rxq_elt_sp *elt = &(*elts)[i]; + + for (j = 0; (j != elemof(elt->bufs)); ++j) { + struct rte_mbuf *buf = elt->bufs[j]; + + if (buf != NULL) + rte_pktmbuf_free_seg(buf); + } + } + rte_free(elts); +} + +/** + * Allocate RX queue elements. + * + * @param rxq + * Pointer to RX queue structure. + * @param elts_n + * Number of elements to allocate. + * @param[in] pool + * If not NULL, fetch buffers from this array instead of allocating them + * with rte_pktmbuf_alloc(). + * + * @return + * 0 on success, errno value on failure. + */ +static int +rxq_alloc_elts(struct rxq *rxq, unsigned int elts_n, struct rte_mbuf **pool) +{ + unsigned int i; + struct rxq_elt (*elts)[elts_n] = + rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0, + rxq->socket); + int ret = 0; + + if (elts == NULL) { + ERROR("%p: can't allocate packets array", (void *)rxq); + ret = ENOMEM; + goto error; + } + /* For each WR (packet). 
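+ * Each WR carries a single SGE; the mbuf pointer is not stored in the + * element, it is reconstructed later from WR_ID(wr_id).offset as noted in + * struct rxq_elt.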
*/ + for (i = 0; (i != elts_n); ++i) { + struct rxq_elt *elt = &(*elts)[i]; + struct ibv_recv_wr *wr = &elt->wr; + struct ibv_sge *sge = &(*elts)[i].sge; + struct rte_mbuf *buf; + + if (pool != NULL) { + buf = *(pool++); + assert(buf != NULL); + rte_pktmbuf_reset(buf); + } else + buf = rte_pktmbuf_alloc(rxq->mp); + if (buf == NULL) { + assert(pool == NULL); + ERROR("%p: empty mbuf pool", (void *)rxq); + ret = ENOMEM; + goto error; + } + /* Configure WR. Work request ID contains its own index in + * the elts array and the offset between SGE buffer header and + * its data. */ + WR_ID(wr->wr_id).id = i; + WR_ID(wr->wr_id).offset = + (((uintptr_t)buf->buf_addr + RTE_PKTMBUF_HEADROOM) - + (uintptr_t)buf); + wr->next = &(*elts)[(i + 1)].wr; + wr->sg_list = sge; + wr->num_sge = 1; + /* Headroom is reserved by rte_pktmbuf_alloc(). */ + assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM); + /* Buffer is supposed to be empty. */ + assert(rte_pktmbuf_data_len(buf) == 0); + assert(rte_pktmbuf_pkt_len(buf) == 0); + /* sge->addr must be able to store a pointer. */ + assert(sizeof(sge->addr) >= sizeof(uintptr_t)); + /* SGE keeps its headroom. */ + sge->addr = (uintptr_t) + ((uint8_t *)buf->buf_addr + RTE_PKTMBUF_HEADROOM); + sge->length = (buf->buf_len - RTE_PKTMBUF_HEADROOM); + sge->lkey = rxq->mr->lkey; + /* Redundant check for tailroom. */ + assert(sge->length == rte_pktmbuf_tailroom(buf)); + /* Make sure elts index and SGE mbuf pointer can be deduced + * from WR ID. */ + if ((WR_ID(wr->wr_id).id != i) || + ((void *)((uintptr_t)sge->addr - + WR_ID(wr->wr_id).offset) != buf)) { + ERROR("%p: cannot store index and offset in WR ID", + (void *)rxq); + sge->addr = 0; + rte_pktmbuf_free(buf); + ret = EOVERFLOW; + goto error; + } + } + /* The last WR pointer must be NULL. */ + (*elts)[(i - 1)].wr.next = NULL; + DEBUG("%p: allocated and configured %u single-segment WRs", + (void *)rxq, elts_n); + rxq->elts_n = elts_n; + rxq->elts_head = 0; + rxq->elts.no_sp = elts; + assert(ret == 0); + return 0; +error: + if (elts != NULL) { + assert(pool == NULL); + for (i = 0; (i != elemof(*elts)); ++i) { + struct rxq_elt *elt = &(*elts)[i]; + struct rte_mbuf *buf; + + if (elt->sge.addr == 0) + continue; + assert(WR_ID(elt->wr.wr_id).id == i); + buf = (void *)((uintptr_t)elt->sge.addr - + WR_ID(elt->wr.wr_id).offset); + rte_pktmbuf_free_seg(buf); + } + rte_free(elts); + } + DEBUG("%p: failed, freed everything", (void *)rxq); + assert(ret > 0); + return ret; +} + +/** + * Free RX queue elements. + * + * @param rxq + * Pointer to RX queue structure. + */ +static void +rxq_free_elts(struct rxq *rxq) +{ + unsigned int i; + unsigned int elts_n = rxq->elts_n; + struct rxq_elt (*elts)[elts_n] = rxq->elts.no_sp; + + DEBUG("%p: freeing WRs", (void *)rxq); + rxq->elts_n = 0; + rxq->elts.no_sp = NULL; + if (elts == NULL) + return; + for (i = 0; (i != elemof(*elts)); ++i) { + struct rxq_elt *elt = &(*elts)[i]; + struct rte_mbuf *buf; + + if (elt->sge.addr == 0) + continue; + assert(WR_ID(elt->wr.wr_id).id == i); + buf = (void *)((uintptr_t)elt->sge.addr - + WR_ID(elt->wr.wr_id).offset); + rte_pktmbuf_free_seg(buf); + } + rte_free(elts); +} + +/** + * Delete flow steering rule. + * + * @param rxq + * Pointer to RX queue structure. + * @param mac_index + * MAC address index. + * @param vlan_index + * VLAN index. 
+ */ +static void +rxq_del_flow(struct rxq *rxq, unsigned int mac_index, unsigned int vlan_index) +{ +#ifndef NDEBUG + struct priv *priv = rxq->priv; + const uint8_t (*mac)[ETHER_ADDR_LEN] = + (const uint8_t (*)[ETHER_ADDR_LEN]) + priv->mac[mac_index].addr_bytes; +#endif + assert(rxq->mac_flow[mac_index][vlan_index] != NULL); + DEBUG("%p: removing MAC address %02x:%02x:%02x:%02x:%02x:%02x index %u" + " (VLAN ID %" PRIu16 ")", + (void *)rxq, + (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5], + mac_index, priv->vlan_filter[vlan_index].id); + claim_zero(ibv_destroy_flow(rxq->mac_flow[mac_index][vlan_index])); + rxq->mac_flow[mac_index][vlan_index] = NULL; +} + +/** + * Unregister a MAC address from a RX queue. + * + * @param rxq + * Pointer to RX queue structure. + * @param mac_index + * MAC address index. + */ +static void +rxq_mac_addr_del(struct rxq *rxq, unsigned int mac_index) +{ + struct priv *priv = rxq->priv; + unsigned int i; + unsigned int vlans = 0; + + assert(mac_index < elemof(priv->mac)); + if (!BITFIELD_ISSET(rxq->mac_configured, mac_index)) + return; + for (i = 0; (i != elemof(priv->vlan_filter)); ++i) { + if (!priv->vlan_filter[i].enabled) + continue; + rxq_del_flow(rxq, mac_index, i); + vlans++; + } + if (!vlans) { + rxq_del_flow(rxq, mac_index, 0); + } + BITFIELD_RESET(rxq->mac_configured, mac_index); +} + +/** + * Unregister all MAC addresses from a RX queue. + * + * @param rxq + * Pointer to RX queue structure. + */ +static void +rxq_mac_addrs_del(struct rxq *rxq) +{ + struct priv *priv = rxq->priv; + unsigned int i; + + for (i = 0; (i != elemof(priv->mac)); ++i) + rxq_mac_addr_del(rxq, i); +} + +static int rxq_promiscuous_enable(struct rxq *); +static void rxq_promiscuous_disable(struct rxq *); + +/** + * Add single flow steering rule. + * + * @param rxq + * Pointer to RX queue structure. + * @param mac_index + * MAC address index to register. + * @param vlan_index + * VLAN index. Use -1 for a flow without VLAN. + * + * @return + * 0 on success, errno value on failure. + */ +static int +rxq_add_flow(struct rxq *rxq, unsigned int mac_index, unsigned int vlan_index) +{ + struct ibv_flow *flow; + struct priv *priv = rxq->priv; + const uint8_t (*mac)[ETHER_ADDR_LEN] = + (const uint8_t (*)[ETHER_ADDR_LEN]) + priv->mac[mac_index].addr_bytes; + + /* Allocate flow specification on the stack. */ + struct __attribute__((packed)) { + struct ibv_flow_attr attr; + struct ibv_flow_spec_eth spec; + } data; + struct ibv_flow_attr *attr = &data.attr; + struct ibv_flow_spec_eth *spec = &data.spec; + + assert(mac_index < elemof(priv->mac)); + assert((vlan_index < elemof(priv->vlan_filter)) || (vlan_index == -1u)); + /* + * No padding must be inserted by the compiler between attr and spec. + * This layout is expected by libibverbs. + */ + assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec); + *attr = (struct ibv_flow_attr){ + .type = IBV_FLOW_ATTR_NORMAL, + .num_of_specs = 1, + .port = priv->port, + .flags = 0 + }; + *spec = (struct ibv_flow_spec_eth){ + .type = IBV_FLOW_SPEC_ETH, + .size = sizeof(*spec), + .val = { + .dst_mac = { + (*mac)[0], (*mac)[1], (*mac)[2], + (*mac)[3], (*mac)[4], (*mac)[5] + }, + .vlan_tag = ((vlan_index != -1u) ? + htons(priv->vlan_filter[vlan_index].id) : + 0), + }, + .mask = { + .dst_mac = "\xff\xff\xff\xff\xff\xff", + .vlan_tag = ((vlan_index != -1u) ? 
htons(0xfff) : 0), + } + }; + DEBUG("%p: adding MAC address %02x:%02x:%02x:%02x:%02x:%02x index %u" + " (VLAN %s %" PRIu16 ")", + (void *)rxq, + (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5], + mac_index, + ((vlan_index != -1u) ? "ID" : "index"), + ((vlan_index != -1u) ? priv->vlan_filter[vlan_index].id : -1u)); + /* Create related flow. */ + errno = 0; + flow = ibv_create_flow(rxq->qp, attr); + if (flow == NULL) { + /* It's not clear whether errno is always set in this case. */ + ERROR("%p: flow configuration failed, errno=%d: %s", + (void *)rxq, errno, + (errno ? strerror(errno) : "Unknown error")); + if (errno) + return errno; + return EINVAL; + } + if (vlan_index == -1u) + vlan_index = 0; + assert(rxq->mac_flow[mac_index][vlan_index] == NULL); + rxq->mac_flow[mac_index][vlan_index] = flow; + return 0; +} + +/** + * Register a MAC address in a RX queue. + * + * @param rxq + * Pointer to RX queue structure. + * @param mac_index + * MAC address index to register. + * + * @return + * 0 on success, errno value on failure. + */ +static int +rxq_mac_addr_add(struct rxq *rxq, unsigned int mac_index) +{ + struct priv *priv = rxq->priv; + unsigned int i; + unsigned int vlans = 0; + int ret; + + assert(mac_index < elemof(priv->mac)); + if (BITFIELD_ISSET(rxq->mac_configured, mac_index)) + rxq_mac_addr_del(rxq, mac_index); + /* Fill VLAN specifications. */ + for (i = 0; (i != elemof(priv->vlan_filter)); ++i) { + if (!priv->vlan_filter[i].enabled) + continue; + /* Create related flow. */ + ret = rxq_add_flow(rxq, mac_index, i); + if (!ret) { + vlans++; + continue; + } + /* Failure, rollback. */ + while (i != 0) + if (priv->vlan_filter[--i].enabled) + rxq_del_flow(rxq, mac_index, i); + assert(ret > 0); + return ret; + } + /* In case there is no VLAN filter. */ + if (!vlans) { + ret = rxq_add_flow(rxq, mac_index, -1); + if (ret) + return ret; + } + BITFIELD_SET(rxq->mac_configured, mac_index); + return 0; +} + +/** + * Register all MAC addresses in a RX queue. + * + * @param rxq + * Pointer to RX queue structure. + * + * @return + * 0 on success, errno value on failure. + */ +static int +rxq_mac_addrs_add(struct rxq *rxq) +{ + struct priv *priv = rxq->priv; + unsigned int i; + int ret; + + for (i = 0; (i != elemof(priv->mac)); ++i) { + if (!BITFIELD_ISSET(priv->mac_configured, i)) + continue; + ret = rxq_mac_addr_add(rxq, i); + if (!ret) + continue; + /* Failure, rollback. */ + while (i != 0) + rxq_mac_addr_del(rxq, --i); + assert(ret > 0); + return ret; + } + return 0; +} + +/** + * Unregister a MAC address. + * + * In RSS mode, the MAC address is unregistered from the parent queue, + * otherwise it is unregistered from each queue directly. + * + * @param priv + * Pointer to private structure. + * @param mac_index + * MAC address index. + */ +static void +priv_mac_addr_del(struct priv *priv, unsigned int mac_index) +{ + unsigned int i; + + assert(mac_index < elemof(priv->mac)); + if (!BITFIELD_ISSET(priv->mac_configured, mac_index)) + return; + if (priv->rss) { + rxq_mac_addr_del(&priv->rxq_parent, mac_index); + goto end; + } + for (i = 0; (i != priv->dev->data->nb_rx_queues); ++i) + rxq_mac_addr_del((*priv->rxqs)[i], mac_index); +end: + BITFIELD_RESET(priv->mac_configured, mac_index); +} + +/** + * Register a MAC address. + * + * In RSS mode, the MAC address is registered in the parent queue, + * otherwise it is registered in each queue directly. + * + * @param priv + * Pointer to private structure. + * @param mac_index + * MAC address index to use. 
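/*
 * The registration helpers above (rxq_mac_addr_add(), rxq_mac_addrs_add())
 * follow a common rollback idiom: create one flow per enabled VLAN filter
 * and, if creation fails halfway, walk the index back down and destroy
 * what was already created so the queue is left unchanged. A generic
 * sketch of that idiom; add_one() and del_one() are hypothetical stand-ins
 * for rxq_add_flow() and rxq_del_flow().
 */
static int
add_all_with_rollback(unsigned int n,
		      int (*add_one)(unsigned int),
		      void (*del_one)(unsigned int))
{
	unsigned int i;
	int ret;

	for (i = 0; i != n; ++i) {
		ret = add_one(i);
		if (ret == 0)
			continue;
		/* Failure: undo entries [0, i) in reverse order. */
		while (i != 0)
			del_one(--i);
		return ret; /* positive errno, like the callers above */
	}
	return 0;
}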
+ * @param mac + * MAC address to register. + * + * @return + * 0 on success, errno value on failure. + */ +static int +priv_mac_addr_add(struct priv *priv, unsigned int mac_index, + const uint8_t (*mac)[ETHER_ADDR_LEN]) +{ + unsigned int i; + int ret; + + assert(mac_index < elemof(priv->mac)); + /* First, make sure this address isn't already configured. */ + for (i = 0; (i != elemof(priv->mac)); ++i) { + /* Skip this index, it's going to be reconfigured. */ + if (i == mac_index) + continue; + if (!BITFIELD_ISSET(priv->mac_configured, i)) + continue; + if (memcmp(priv->mac[i].addr_bytes, *mac, sizeof(*mac))) + continue; + /* Address already configured elsewhere, return with error. */ + return EADDRINUSE; + } + if (BITFIELD_ISSET(priv->mac_configured, mac_index)) + priv_mac_addr_del(priv, mac_index); + priv->mac[mac_index] = (struct ether_addr){ + { + (*mac)[0], (*mac)[1], (*mac)[2], + (*mac)[3], (*mac)[4], (*mac)[5] + } + }; + /* If device isn't started, this is all we need to do. */ + if (!priv->started) { +#ifndef NDEBUG + /* Verify that all queues have this index disabled. */ + for (i = 0; (i != priv->rxqs_n); ++i) { + if ((*priv->rxqs)[i] == NULL) + continue; + assert(!BITFIELD_ISSET + ((*priv->rxqs)[i]->mac_configured, mac_index)); + } +#endif + goto end; + } + if (priv->rss) { + ret = rxq_mac_addr_add(&priv->rxq_parent, mac_index); + if (ret) + return ret; + goto end; + } + for (i = 0; (i != priv->rxqs_n); ++i) { + if ((*priv->rxqs)[i] == NULL) + continue; + ret = rxq_mac_addr_add((*priv->rxqs)[i], mac_index); + if (!ret) + continue; + /* Failure, rollback. */ + while (i != 0) + if ((*priv->rxqs)[(--i)] != NULL) + rxq_mac_addr_del((*priv->rxqs)[i], mac_index); + return ret; + } +end: + BITFIELD_SET(priv->mac_configured, mac_index); + return 0; +} + +/** + * Enable allmulti mode in a RX queue. + * + * @param rxq + * Pointer to RX queue structure. + * + * @return + * 0 on success, errno value on failure. + */ +static int +rxq_allmulticast_enable(struct rxq *rxq) +{ + struct ibv_flow *flow; + struct ibv_flow_attr attr = { + .type = IBV_FLOW_ATTR_MC_DEFAULT, + .num_of_specs = 0, + .port = rxq->priv->port, + .flags = 0 + }; + + DEBUG("%p: enabling allmulticast mode", (void *)rxq); + if (rxq->allmulti_flow != NULL) + return EBUSY; + errno = 0; + flow = ibv_create_flow(rxq->qp, &attr); + if (flow == NULL) { + /* It's not clear whether errno is always set in this case. */ + ERROR("%p: flow configuration failed, errno=%d: %s", + (void *)rxq, errno, + (errno ? strerror(errno) : "Unknown error")); + if (errno) + return errno; + return EINVAL; + } + rxq->allmulti_flow = flow; + DEBUG("%p: allmulticast mode enabled", (void *)rxq); + return 0; +} + +/** + * Disable allmulti mode in a RX queue. + * + * @param rxq + * Pointer to RX queue structure. + */ +static void +rxq_allmulticast_disable(struct rxq *rxq) +{ + DEBUG("%p: disabling allmulticast mode", (void *)rxq); + if (rxq->allmulti_flow == NULL) + return; + claim_zero(ibv_destroy_flow(rxq->allmulti_flow)); + rxq->allmulti_flow = NULL; + DEBUG("%p: allmulticast mode disabled", (void *)rxq); +} + +/** + * Enable promiscuous mode in a RX queue. + * + * @param rxq + * Pointer to RX queue structure. + * + * @return + * 0 on success, errno value on failure. 
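/*
 * rxq_allmulticast_enable() above and rxq_promiscuous_enable() below are
 * both implemented as "catch-all" verbs flows: an ibv_flow_attr with zero
 * specifications and a type of IBV_FLOW_ATTR_MC_DEFAULT (all multicast)
 * or IBV_FLOW_ATTR_ALL_DEFAULT (all traffic). A minimal sketch, assuming
 * a valid raw packet QP and <infiniband/verbs.h>:
 */
#include <errno.h>
#include <stdint.h>
#include <infiniband/verbs.h>

static struct ibv_flow *
create_catch_all_flow(struct ibv_qp *qp, uint8_t port, int multicast_only)
{
	struct ibv_flow_attr attr = {
		.type = multicast_only ?
			IBV_FLOW_ATTR_MC_DEFAULT : IBV_FLOW_ATTR_ALL_DEFAULT,
		.num_of_specs = 0,
		.port = port,
		.flags = 0,
	};

	errno = 0;
	/* As noted above, errno is not guaranteed to be set on failure. */
	return ibv_create_flow(qp, &attr);
}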
 */
+static int
+rxq_promiscuous_enable(struct rxq *rxq)
+{
+ struct ibv_flow *flow;
+ struct ibv_flow_attr attr = {
+ .type = IBV_FLOW_ATTR_ALL_DEFAULT,
+ .num_of_specs = 0,
+ .port = rxq->priv->port,
+ .flags = 0
+ };
+
+ if (rxq->priv->vf)
+ return 0;
+ DEBUG("%p: enabling promiscuous mode", (void *)rxq);
+ if (rxq->promisc_flow != NULL)
+ return EBUSY;
+ errno = 0;
+ flow = ibv_create_flow(rxq->qp, &attr);
+ if (flow == NULL) {
+ /* It's not clear whether errno is always set in this case. */
+ ERROR("%p: flow configuration failed, errno=%d: %s",
+ (void *)rxq, errno,
+ (errno ? strerror(errno) : "Unknown error"));
+ if (errno)
+ return errno;
+ return EINVAL;
+ }
+ rxq->promisc_flow = flow;
+ DEBUG("%p: promiscuous mode enabled", (void *)rxq);
+ return 0;
+}
+
+/**
+ * Disable promiscuous mode in a RX queue.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ */
+static void
+rxq_promiscuous_disable(struct rxq *rxq)
+{
+ if (rxq->priv->vf)
+ return;
+ DEBUG("%p: disabling promiscuous mode", (void *)rxq);
+ if (rxq->promisc_flow == NULL)
+ return;
+ claim_zero(ibv_destroy_flow(rxq->promisc_flow));
+ rxq->promisc_flow = NULL;
+ DEBUG("%p: promiscuous mode disabled", (void *)rxq);
+}
+
+/**
+ * Clean up a RX queue.
+ *
+ * Destroy objects, free allocated memory and reset the structure for reuse.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ */
+static void
+rxq_cleanup(struct rxq *rxq)
+{
+ struct ibv_exp_release_intf_params params;
+
+ DEBUG("cleaning up %p", (void *)rxq);
+ if (rxq->sp)
+ rxq_free_elts_sp(rxq);
+ else
+ rxq_free_elts(rxq);
+ if (rxq->if_qp != NULL) {
+ assert(rxq->priv != NULL);
+ assert(rxq->priv->ctx != NULL);
+ assert(rxq->qp != NULL);
+ params = (struct ibv_exp_release_intf_params){
+ .comp_mask = 0,
+ };
+ claim_zero(ibv_exp_release_intf(rxq->priv->ctx,
+ rxq->if_qp,
+ &params));
+ }
+ if (rxq->if_cq != NULL) {
+ assert(rxq->priv != NULL);
+ assert(rxq->priv->ctx != NULL);
+ assert(rxq->cq != NULL);
+ params = (struct ibv_exp_release_intf_params){
+ .comp_mask = 0,
+ };
+ claim_zero(ibv_exp_release_intf(rxq->priv->ctx,
+ rxq->if_cq,
+ &params));
+ }
+ if (rxq->qp != NULL) {
+ rxq_promiscuous_disable(rxq);
+ rxq_allmulticast_disable(rxq);
+ rxq_mac_addrs_del(rxq);
+ claim_zero(ibv_destroy_qp(rxq->qp));
+ }
+ if (rxq->cq != NULL)
+ claim_zero(ibv_destroy_cq(rxq->cq));
+ if (rxq->rd != NULL) {
+ struct ibv_exp_destroy_res_domain_attr attr = {
+ .comp_mask = 0,
+ };
+
+ assert(rxq->priv != NULL);
+ assert(rxq->priv->ctx != NULL);
+ claim_zero(ibv_exp_destroy_res_domain(rxq->priv->ctx,
+ rxq->rd,
+ &attr));
+ }
+ if (rxq->mr != NULL)
+ claim_zero(ibv_dereg_mr(rxq->mr));
+ memset(rxq, 0, sizeof(*rxq));
+}
+
+/**
+ * Translate RX completion flags to packet type.
+ *
+ * @param flags
+ * RX completion flags returned by poll_length_flags().
+ *
+ * @note: fix mlx4_dev_supported_ptypes_get() if any change here.
+ *
+ * @return
+ * Packet type for struct rte_mbuf.
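/*
 * rxq_cq_to_pkt_type() and rxq_cq_to_ol_flags() below rely on the
 * TRANSPOSE() helper defined earlier in this file to move a single flag
 * bit from a completion-flag position to an mbuf-flag position. A
 * functionally equivalent sketch, assuming both masks are single bits:
 */
#include <stdint.h>

static inline uint64_t
transpose_bit(uint64_t val, uint64_t from, uint64_t to)
{
	/* Keep only the source bit, then rescale it to the target bit. */
	if (from >= to)
		return (val & from) / (from / to);
	return (val & from) * (to / from);
}
/* Example: a flag in bit 3 maps to bit 7:
 * transpose_bit(0x08, 0x08, 0x80) == 0x80. */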
+ */ +static inline uint32_t +rxq_cq_to_pkt_type(uint32_t flags) +{ + uint32_t pkt_type; + + if (flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) + pkt_type = + TRANSPOSE(flags, + IBV_EXP_CQ_RX_OUTER_IPV4_PACKET, RTE_PTYPE_L3_IPV4) | + TRANSPOSE(flags, + IBV_EXP_CQ_RX_OUTER_IPV6_PACKET, RTE_PTYPE_L3_IPV6) | + TRANSPOSE(flags, + IBV_EXP_CQ_RX_IPV4_PACKET, RTE_PTYPE_INNER_L3_IPV4) | + TRANSPOSE(flags, + IBV_EXP_CQ_RX_IPV6_PACKET, RTE_PTYPE_INNER_L3_IPV6); + else + pkt_type = + TRANSPOSE(flags, + IBV_EXP_CQ_RX_IPV4_PACKET, RTE_PTYPE_L3_IPV4) | + TRANSPOSE(flags, + IBV_EXP_CQ_RX_IPV6_PACKET, RTE_PTYPE_L3_IPV6); + return pkt_type; +} + +/** + * Translate RX completion flags to offload flags. + * + * @param[in] rxq + * Pointer to RX queue structure. + * @param flags + * RX completion flags returned by poll_length_flags(). + * + * @return + * Offload flags (ol_flags) for struct rte_mbuf. + */ +static inline uint32_t +rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags) +{ + uint32_t ol_flags = 0; + + if (rxq->csum) + ol_flags |= + TRANSPOSE(~flags, + IBV_EXP_CQ_RX_IP_CSUM_OK, + PKT_RX_IP_CKSUM_BAD) | + TRANSPOSE(~flags, + IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK, + PKT_RX_L4_CKSUM_BAD); + /* + * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place + * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional + * (its value is 0). + */ + if ((flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun)) + ol_flags |= + TRANSPOSE(~flags, + IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK, + PKT_RX_IP_CKSUM_BAD) | + TRANSPOSE(~flags, + IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK, + PKT_RX_L4_CKSUM_BAD); + return ol_flags; +} + +static uint16_t +mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n); + +/** + * DPDK callback for RX with scattered packets support. + * + * @param dpdk_rxq + * Generic pointer to RX queue structure. + * @param[out] pkts + * Array to store received packets. + * @param pkts_n + * Maximum number of packets in array. + * + * @return + * Number of packets successfully received (<= pkts_n). + */ +static uint16_t +mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +{ + struct rxq *rxq = (struct rxq *)dpdk_rxq; + struct rxq_elt_sp (*elts)[rxq->elts_n] = rxq->elts.sp; + const unsigned int elts_n = rxq->elts_n; + unsigned int elts_head = rxq->elts_head; + struct ibv_recv_wr head; + struct ibv_recv_wr **next = &head.next; + struct ibv_recv_wr *bad_wr; + unsigned int i; + unsigned int pkts_ret = 0; + int ret; + + if (unlikely(!rxq->sp)) + return mlx4_rx_burst(dpdk_rxq, pkts, pkts_n); + if (unlikely(elts == NULL)) /* See RTE_DEV_CMD_SET_MTU. */ + return 0; + for (i = 0; (i != pkts_n); ++i) { + struct rxq_elt_sp *elt = &(*elts)[elts_head]; + struct ibv_recv_wr *wr = &elt->wr; + uint64_t wr_id = wr->wr_id; + unsigned int len; + unsigned int pkt_buf_len; + struct rte_mbuf *pkt_buf = NULL; /* Buffer returned in pkts. */ + struct rte_mbuf **pkt_buf_next = &pkt_buf; + unsigned int seg_headroom = RTE_PKTMBUF_HEADROOM; + unsigned int j = 0; + uint32_t flags; + + /* Sanity checks. */ +#ifdef NDEBUG + (void)wr_id; +#endif + assert(wr_id < rxq->elts_n); + assert(wr->sg_list == elt->sges); + assert(wr->num_sge == elemof(elt->sges)); + assert(elts_head < rxq->elts_n); + assert(rxq->elts_head < rxq->elts_n); + ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL, + &flags); + if (unlikely(ret < 0)) { + struct ibv_wc wc; + int wcs_n; + + DEBUG("rxq=%p, poll_length() failed (ret=%d)", + (void *)rxq, ret); + /* ibv_poll_cq() must be used in case of failure. 
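/*
 * The replacement loop below distributes a received packet of "len" bytes
 * across the fixed-size segments of a scattered element: only the first
 * segment keeps RTE_PKTMBUF_HEADROOM, every non-final segment carries its
 * full tailroom, and the final segment carries the remainder. A standalone
 * sketch of that bookkeeping; buf_len and headroom stand in for the mbuf
 * fields used below.
 */
static unsigned int
count_rx_segments(unsigned int len, unsigned int buf_len,
		  unsigned int headroom)
{
	unsigned int nb_segs = 0;

	while (1) {
		unsigned int seg_tailroom = buf_len - headroom;

		++nb_segs;
		if (len <= seg_tailroom)
			break;          /* last segment holds the rest */
		len -= seg_tailroom;
		headroom = 0;           /* only the first segment has it */
	}
	return nb_segs;
}
/* e.g. len=3000, buf_len=2176, headroom=128 -> 2 segments (2048 + 952). */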
*/ + wcs_n = ibv_poll_cq(rxq->cq, 1, &wc); + if (unlikely(wcs_n == 0)) + break; + if (unlikely(wcs_n < 0)) { + DEBUG("rxq=%p, ibv_poll_cq() failed (wcs_n=%d)", + (void *)rxq, wcs_n); + break; + } + assert(wcs_n == 1); + if (unlikely(wc.status != IBV_WC_SUCCESS)) { + /* Whatever, just repost the offending WR. */ + DEBUG("rxq=%p, wr_id=%" PRIu64 ": bad work" + " completion status (%d): %s", + (void *)rxq, wc.wr_id, wc.status, + ibv_wc_status_str(wc.status)); +#ifdef MLX4_PMD_SOFT_COUNTERS + /* Increment dropped packets counter. */ + ++rxq->stats.idropped; +#endif + /* Link completed WRs together for repost. */ + *next = wr; + next = &wr->next; + goto repost; + } + ret = wc.byte_len; + } + if (ret == 0) + break; + len = ret; + pkt_buf_len = len; + /* Link completed WRs together for repost. */ + *next = wr; + next = &wr->next; + /* + * Replace spent segments with new ones, concatenate and + * return them as pkt_buf. + */ + while (1) { + struct ibv_sge *sge = &elt->sges[j]; + struct rte_mbuf *seg = elt->bufs[j]; + struct rte_mbuf *rep; + unsigned int seg_tailroom; + + /* + * Fetch initial bytes of packet descriptor into a + * cacheline while allocating rep. + */ + rte_prefetch0(seg); + rep = __rte_mbuf_raw_alloc(rxq->mp); + if (unlikely(rep == NULL)) { + /* + * Unable to allocate a replacement mbuf, + * repost WR. + */ + DEBUG("rxq=%p, wr_id=%" PRIu64 ":" + " can't allocate a new mbuf", + (void *)rxq, wr_id); + if (pkt_buf != NULL) { + *pkt_buf_next = NULL; + rte_pktmbuf_free(pkt_buf); + } + /* Increase out of memory counters. */ + ++rxq->stats.rx_nombuf; + ++rxq->priv->dev->data->rx_mbuf_alloc_failed; + goto repost; + } +#ifndef NDEBUG + /* Poison user-modifiable fields in rep. */ + NEXT(rep) = (void *)((uintptr_t)-1); + SET_DATA_OFF(rep, 0xdead); + DATA_LEN(rep) = 0xd00d; + PKT_LEN(rep) = 0xdeadd00d; + NB_SEGS(rep) = 0x2a; + PORT(rep) = 0x2a; + rep->ol_flags = -1; +#endif + assert(rep->buf_len == seg->buf_len); + assert(rep->buf_len == rxq->mb_len); + /* Reconfigure sge to use rep instead of seg. */ + assert(sge->lkey == rxq->mr->lkey); + sge->addr = ((uintptr_t)rep->buf_addr + seg_headroom); + elt->bufs[j] = rep; + ++j; + /* Update pkt_buf if it's the first segment, or link + * seg to the previous one and update pkt_buf_next. */ + *pkt_buf_next = seg; + pkt_buf_next = &NEXT(seg); + /* Update seg information. */ + seg_tailroom = (seg->buf_len - seg_headroom); + assert(sge->length == seg_tailroom); + SET_DATA_OFF(seg, seg_headroom); + if (likely(len <= seg_tailroom)) { + /* Last segment. */ + DATA_LEN(seg) = len; + PKT_LEN(seg) = len; + /* Sanity check. */ + assert(rte_pktmbuf_headroom(seg) == + seg_headroom); + assert(rte_pktmbuf_tailroom(seg) == + (seg_tailroom - len)); + break; + } + DATA_LEN(seg) = seg_tailroom; + PKT_LEN(seg) = seg_tailroom; + /* Sanity check. */ + assert(rte_pktmbuf_headroom(seg) == seg_headroom); + assert(rte_pktmbuf_tailroom(seg) == 0); + /* Fix len and clear headroom for next segments. */ + len -= seg_tailroom; + seg_headroom = 0; + } + /* Update head and tail segments. */ + *pkt_buf_next = NULL; + assert(pkt_buf != NULL); + assert(j != 0); + NB_SEGS(pkt_buf) = j; + PORT(pkt_buf) = rxq->port_id; + PKT_LEN(pkt_buf) = pkt_buf_len; + pkt_buf->packet_type = rxq_cq_to_pkt_type(flags); + pkt_buf->ol_flags = rxq_cq_to_ol_flags(rxq, flags); + + /* Return packet. */ + *(pkts++) = pkt_buf; + ++pkts_ret; +#ifdef MLX4_PMD_SOFT_COUNTERS + /* Increase bytes counter. 
*/ + rxq->stats.ibytes += pkt_buf_len; +#endif +repost: + if (++elts_head >= elts_n) + elts_head = 0; + continue; + } + if (unlikely(i == 0)) + return 0; + *next = NULL; + /* Repost WRs. */ +#ifdef DEBUG_RECV + DEBUG("%p: reposting %d WRs", (void *)rxq, i); +#endif + ret = ibv_post_recv(rxq->qp, head.next, &bad_wr); + if (unlikely(ret)) { + /* Inability to repost WRs is fatal. */ + DEBUG("%p: ibv_post_recv(): failed for WR %p: %s", + (void *)rxq->priv, + (void *)bad_wr, + strerror(ret)); + abort(); + } + rxq->elts_head = elts_head; +#ifdef MLX4_PMD_SOFT_COUNTERS + /* Increase packets counter. */ + rxq->stats.ipackets += pkts_ret; +#endif + return pkts_ret; +} + +/** + * DPDK callback for RX. + * + * The following function is the same as mlx4_rx_burst_sp(), except it doesn't + * manage scattered packets. Improves performance when MRU is lower than the + * size of the first segment. + * + * @param dpdk_rxq + * Generic pointer to RX queue structure. + * @param[out] pkts + * Array to store received packets. + * @param pkts_n + * Maximum number of packets in array. + * + * @return + * Number of packets successfully received (<= pkts_n). + */ +static uint16_t +mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +{ + struct rxq *rxq = (struct rxq *)dpdk_rxq; + struct rxq_elt (*elts)[rxq->elts_n] = rxq->elts.no_sp; + const unsigned int elts_n = rxq->elts_n; + unsigned int elts_head = rxq->elts_head; + struct ibv_sge sges[pkts_n]; + unsigned int i; + unsigned int pkts_ret = 0; + int ret; + + if (unlikely(rxq->sp)) + return mlx4_rx_burst_sp(dpdk_rxq, pkts, pkts_n); + for (i = 0; (i != pkts_n); ++i) { + struct rxq_elt *elt = &(*elts)[elts_head]; + struct ibv_recv_wr *wr = &elt->wr; + uint64_t wr_id = wr->wr_id; + unsigned int len; + struct rte_mbuf *seg = (void *)((uintptr_t)elt->sge.addr - + WR_ID(wr_id).offset); + struct rte_mbuf *rep; + uint32_t flags; + + /* Sanity checks. */ + assert(WR_ID(wr_id).id < rxq->elts_n); + assert(wr->sg_list == &elt->sge); + assert(wr->num_sge == 1); + assert(elts_head < rxq->elts_n); + assert(rxq->elts_head < rxq->elts_n); + /* + * Fetch initial bytes of packet descriptor into a + * cacheline while allocating rep. + */ + rte_prefetch0(seg); + rte_prefetch0(&seg->cacheline1); + ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL, + &flags); + if (unlikely(ret < 0)) { + struct ibv_wc wc; + int wcs_n; + + DEBUG("rxq=%p, poll_length() failed (ret=%d)", + (void *)rxq, ret); + /* ibv_poll_cq() must be used in case of failure. */ + wcs_n = ibv_poll_cq(rxq->cq, 1, &wc); + if (unlikely(wcs_n == 0)) + break; + if (unlikely(wcs_n < 0)) { + DEBUG("rxq=%p, ibv_poll_cq() failed (wcs_n=%d)", + (void *)rxq, wcs_n); + break; + } + assert(wcs_n == 1); + if (unlikely(wc.status != IBV_WC_SUCCESS)) { + /* Whatever, just repost the offending WR. */ + DEBUG("rxq=%p, wr_id=%" PRIu64 ": bad work" + " completion status (%d): %s", + (void *)rxq, wc.wr_id, wc.status, + ibv_wc_status_str(wc.status)); +#ifdef MLX4_PMD_SOFT_COUNTERS + /* Increment dropped packets counter. */ + ++rxq->stats.idropped; +#endif + /* Add SGE to array for repost. */ + sges[i] = elt->sge; + goto repost; + } + ret = wc.byte_len; + } + if (ret == 0) + break; + len = ret; + rep = __rte_mbuf_raw_alloc(rxq->mp); + if (unlikely(rep == NULL)) { + /* + * Unable to allocate a replacement mbuf, + * repost WR. + */ + DEBUG("rxq=%p, wr_id=%" PRIu32 ":" + " can't allocate a new mbuf", + (void *)rxq, WR_ID(wr_id).id); + /* Increase out of memory counters. 
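/*
 * The non-scattered path below avoids copying: the mbuf that just
 * completed is handed to the application and a freshly allocated
 * replacement takes its descriptor slot by re-pointing the SGE at the
 * replacement's data area. A sketch of that re-pointing step, assuming
 * <rte_mbuf.h> and an lkey from a memory region covering the mempool:
 */
#include <stdint.h>
#include <rte_mbuf.h>
#include <infiniband/verbs.h>

static void
sge_attach_mbuf(struct ibv_sge *sge, struct rte_mbuf *rep, uint32_t lkey)
{
	/* Point the SGE at the data area, preserving the headroom. */
	sge->addr = (uintptr_t)rep->buf_addr + RTE_PKTMBUF_HEADROOM;
	sge->length = rep->buf_len - RTE_PKTMBUF_HEADROOM;
	sge->lkey = lkey;
}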
*/ + ++rxq->stats.rx_nombuf; + ++rxq->priv->dev->data->rx_mbuf_alloc_failed; + goto repost; + } + + /* Reconfigure sge to use rep instead of seg. */ + elt->sge.addr = (uintptr_t)rep->buf_addr + RTE_PKTMBUF_HEADROOM; + assert(elt->sge.lkey == rxq->mr->lkey); + WR_ID(wr->wr_id).offset = + (((uintptr_t)rep->buf_addr + RTE_PKTMBUF_HEADROOM) - + (uintptr_t)rep); + assert(WR_ID(wr->wr_id).id == WR_ID(wr_id).id); + + /* Add SGE to array for repost. */ + sges[i] = elt->sge; + + /* Update seg information. */ + SET_DATA_OFF(seg, RTE_PKTMBUF_HEADROOM); + NB_SEGS(seg) = 1; + PORT(seg) = rxq->port_id; + NEXT(seg) = NULL; + PKT_LEN(seg) = len; + DATA_LEN(seg) = len; + seg->packet_type = rxq_cq_to_pkt_type(flags); + seg->ol_flags = rxq_cq_to_ol_flags(rxq, flags); + + /* Return packet. */ + *(pkts++) = seg; + ++pkts_ret; +#ifdef MLX4_PMD_SOFT_COUNTERS + /* Increase bytes counter. */ + rxq->stats.ibytes += len; +#endif +repost: + if (++elts_head >= elts_n) + elts_head = 0; + continue; + } + if (unlikely(i == 0)) + return 0; + /* Repost WRs. */ +#ifdef DEBUG_RECV + DEBUG("%p: reposting %u WRs", (void *)rxq, i); +#endif + ret = rxq->if_qp->recv_burst(rxq->qp, sges, i); + if (unlikely(ret)) { + /* Inability to repost WRs is fatal. */ + DEBUG("%p: recv_burst(): failed (ret=%d)", + (void *)rxq->priv, + ret); + abort(); + } + rxq->elts_head = elts_head; +#ifdef MLX4_PMD_SOFT_COUNTERS + /* Increase packets counter. */ + rxq->stats.ipackets += pkts_ret; +#endif + return pkts_ret; +} + +/** + * DPDK callback for RX in secondary processes. + * + * This function configures all queues from primary process information + * if necessary before reverting to the normal RX burst callback. + * + * @param dpdk_rxq + * Generic pointer to RX queue structure. + * @param[out] pkts + * Array to store received packets. + * @param pkts_n + * Maximum number of packets in array. + * + * @return + * Number of packets successfully received (<= pkts_n). + */ +static uint16_t +mlx4_rx_burst_secondary_setup(void *dpdk_rxq, struct rte_mbuf **pkts, + uint16_t pkts_n) +{ + struct rxq *rxq = dpdk_rxq; + struct priv *priv = mlx4_secondary_data_setup(rxq->priv); + struct priv *primary_priv; + unsigned int index; + + if (priv == NULL) + return 0; + primary_priv = + mlx4_secondary_data[priv->dev->data->port_id].primary_priv; + /* Look for queue index in both private structures. */ + for (index = 0; index != priv->rxqs_n; ++index) + if (((*primary_priv->rxqs)[index] == rxq) || + ((*priv->rxqs)[index] == rxq)) + break; + if (index == priv->rxqs_n) + return 0; + rxq = (*priv->rxqs)[index]; + return priv->dev->rx_pkt_burst(rxq, pkts, pkts_n); +} + +/** + * Allocate a Queue Pair. + * Optionally setup inline receive if supported. + * + * @param priv + * Pointer to private structure. + * @param cq + * Completion queue to associate with QP. + * @param desc + * Number of descriptors in QP (hint only). + * + * @return + * QP pointer or NULL in case of error. + */ +static struct ibv_qp * +rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc, + struct ibv_exp_res_domain *rd) +{ + struct ibv_exp_qp_init_attr attr = { + /* CQ to be associated with the send queue. */ + .send_cq = cq, + /* CQ to be associated with the receive queue. */ + .recv_cq = cq, + .cap = { + /* Max number of outstanding WRs. */ + .max_recv_wr = ((priv->device_attr.max_qp_wr < desc) ? + priv->device_attr.max_qp_wr : + desc), + /* Max number of scatter/gather elements in a WR. */ + .max_recv_sge = ((priv->device_attr.max_sge < + MLX4_PMD_SGE_WR_N) ? 
+ priv->device_attr.max_sge : + MLX4_PMD_SGE_WR_N), + }, + .qp_type = IBV_QPT_RAW_PACKET, + .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD | + IBV_EXP_QP_INIT_ATTR_RES_DOMAIN), + .pd = priv->pd, + .res_domain = rd, + }; + +#ifdef INLINE_RECV + attr.max_inl_recv = priv->inl_recv_size; + attr.comp_mask |= IBV_EXP_QP_INIT_ATTR_INL_RECV; +#endif + return ibv_exp_create_qp(priv->ctx, &attr); +} + +#ifdef RSS_SUPPORT + +/** + * Allocate a RSS Queue Pair. + * Optionally setup inline receive if supported. + * + * @param priv + * Pointer to private structure. + * @param cq + * Completion queue to associate with QP. + * @param desc + * Number of descriptors in QP (hint only). + * @param parent + * If nonzero, create a parent QP, otherwise a child. + * + * @return + * QP pointer or NULL in case of error. + */ +static struct ibv_qp * +rxq_setup_qp_rss(struct priv *priv, struct ibv_cq *cq, uint16_t desc, + int parent, struct ibv_exp_res_domain *rd) +{ + struct ibv_exp_qp_init_attr attr = { + /* CQ to be associated with the send queue. */ + .send_cq = cq, + /* CQ to be associated with the receive queue. */ + .recv_cq = cq, + .cap = { + /* Max number of outstanding WRs. */ + .max_recv_wr = ((priv->device_attr.max_qp_wr < desc) ? + priv->device_attr.max_qp_wr : + desc), + /* Max number of scatter/gather elements in a WR. */ + .max_recv_sge = ((priv->device_attr.max_sge < + MLX4_PMD_SGE_WR_N) ? + priv->device_attr.max_sge : + MLX4_PMD_SGE_WR_N), + }, + .qp_type = IBV_QPT_RAW_PACKET, + .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD | + IBV_EXP_QP_INIT_ATTR_RES_DOMAIN | + IBV_EXP_QP_INIT_ATTR_QPG), + .pd = priv->pd, + .res_domain = rd, + }; + +#ifdef INLINE_RECV + attr.max_inl_recv = priv->inl_recv_size, + attr.comp_mask |= IBV_EXP_QP_INIT_ATTR_INL_RECV; +#endif + if (parent) { + attr.qpg.qpg_type = IBV_EXP_QPG_PARENT; + /* TSS isn't necessary. */ + attr.qpg.parent_attrib.tss_child_count = 0; + attr.qpg.parent_attrib.rss_child_count = + rte_align32pow2(priv->rxqs_n + 1) >> 1; + DEBUG("initializing parent RSS queue"); + } else { + attr.qpg.qpg_type = IBV_EXP_QPG_CHILD_RX; + attr.qpg.qpg_parent = priv->rxq_parent.qp; + DEBUG("initializing child RSS queue"); + } + return ibv_exp_create_qp(priv->ctx, &attr); +} + +#endif /* RSS_SUPPORT */ + +/** + * Reconfigure a RX queue with new parameters. + * + * rxq_rehash() does not allocate mbufs, which, if not done from the right + * thread (such as a control thread), may corrupt the pool. + * In case of failure, the queue is left untouched. + * + * @param dev + * Pointer to Ethernet device structure. + * @param rxq + * RX queue pointer. + * + * @return + * 0 on success, errno value on failure. + */ +static int +rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq) +{ + struct priv *priv = rxq->priv; + struct rxq tmpl = *rxq; + unsigned int mbuf_n; + unsigned int desc_n; + struct rte_mbuf **pool; + unsigned int i, k; + struct ibv_exp_qp_attr mod; + struct ibv_recv_wr *bad_wr; + int err; + int parent = (rxq == &priv->rxq_parent); + + if (parent) { + ERROR("%p: cannot rehash parent queue %p", + (void *)dev, (void *)rxq); + return EINVAL; + } + DEBUG("%p: rehashing queue %p", (void *)dev, (void *)rxq); + /* Number of descriptors and mbufs currently allocated. */ + desc_n = (tmpl.elts_n * (tmpl.sp ? MLX4_PMD_SGE_WR_N : 1)); + mbuf_n = desc_n; + /* Toggle RX checksum offload if hardware supports it. 
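/*
 * The RSS parent QP above sizes its child group with
 * rte_align32pow2(rxqs_n + 1) >> 1, i.e. the number of configured RX
 * queues rounded down to a power of two: the RSS fan-out must be a power
 * of two, and queues beyond it are created but marked inactive in
 * mlx4_rx_queue_setup() further below. A sketch of the same arithmetic
 * with plain bit operations:
 */
#include <stdint.h>

static uint32_t
largest_pow2_not_above(uint32_t n)	/* n >= 1 */
{
	uint32_t p = 1;

	while ((p << 1) != 0 && (p << 1) <= n)
		p <<= 1;
	return p;
}
/* largest_pow2_not_above(6) == 4, matching rte_align32pow2(7) >> 1. */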
*/ + if (priv->hw_csum) { + tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum; + rxq->csum = tmpl.csum; + } + if (priv->hw_csum_l2tun) { + tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum; + rxq->csum_l2tun = tmpl.csum_l2tun; + } + /* Enable scattered packets support for this queue if necessary. */ + if ((dev->data->dev_conf.rxmode.jumbo_frame) && + (dev->data->dev_conf.rxmode.max_rx_pkt_len > + (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) { + tmpl.sp = 1; + desc_n /= MLX4_PMD_SGE_WR_N; + } else + tmpl.sp = 0; + DEBUG("%p: %s scattered packets support (%u WRs)", + (void *)dev, (tmpl.sp ? "enabling" : "disabling"), desc_n); + /* If scatter mode is the same as before, nothing to do. */ + if (tmpl.sp == rxq->sp) { + DEBUG("%p: nothing to do", (void *)dev); + return 0; + } + /* Remove attached flows if RSS is disabled (no parent queue). */ + if (!priv->rss) { + rxq_allmulticast_disable(&tmpl); + rxq_promiscuous_disable(&tmpl); + rxq_mac_addrs_del(&tmpl); + /* Update original queue in case of failure. */ + rxq->allmulti_flow = tmpl.allmulti_flow; + rxq->promisc_flow = tmpl.promisc_flow; + memcpy(rxq->mac_configured, tmpl.mac_configured, + sizeof(rxq->mac_configured)); + memcpy(rxq->mac_flow, tmpl.mac_flow, sizeof(rxq->mac_flow)); + } + /* From now on, any failure will render the queue unusable. + * Reinitialize QP. */ + mod = (struct ibv_exp_qp_attr){ .qp_state = IBV_QPS_RESET }; + err = ibv_exp_modify_qp(tmpl.qp, &mod, IBV_EXP_QP_STATE); + if (err) { + ERROR("%p: cannot reset QP: %s", (void *)dev, strerror(err)); + assert(err > 0); + return err; + } + err = ibv_resize_cq(tmpl.cq, desc_n); + if (err) { + ERROR("%p: cannot resize CQ: %s", (void *)dev, strerror(err)); + assert(err > 0); + return err; + } + mod = (struct ibv_exp_qp_attr){ + /* Move the QP to this state. */ + .qp_state = IBV_QPS_INIT, + /* Primary port number. */ + .port_num = priv->port + }; + err = ibv_exp_modify_qp(tmpl.qp, &mod, + (IBV_EXP_QP_STATE | +#ifdef RSS_SUPPORT + (parent ? IBV_EXP_QP_GROUP_RSS : 0) | +#endif /* RSS_SUPPORT */ + IBV_EXP_QP_PORT)); + if (err) { + ERROR("%p: QP state to IBV_QPS_INIT failed: %s", + (void *)dev, strerror(err)); + assert(err > 0); + return err; + }; + /* Reconfigure flows. Do not care for errors. */ + if (!priv->rss) { + rxq_mac_addrs_add(&tmpl); + if (priv->promisc) + rxq_promiscuous_enable(&tmpl); + if (priv->allmulti) + rxq_allmulticast_enable(&tmpl); + /* Update original queue in case of failure. */ + rxq->allmulti_flow = tmpl.allmulti_flow; + rxq->promisc_flow = tmpl.promisc_flow; + memcpy(rxq->mac_configured, tmpl.mac_configured, + sizeof(rxq->mac_configured)); + memcpy(rxq->mac_flow, tmpl.mac_flow, sizeof(rxq->mac_flow)); + } + /* Allocate pool. */ + pool = rte_malloc(__func__, (mbuf_n * sizeof(*pool)), 0); + if (pool == NULL) { + ERROR("%p: cannot allocate memory", (void *)dev); + return ENOBUFS; + } + /* Snatch mbufs from original queue. 
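/*
 * Scattered ("sp") mode above is enabled whenever the maximum RX frame no
 * longer fits in a single mbuf data area, and the descriptor count is then
 * divided by MLX4_PMD_SGE_WR_N since each element now consumes that many
 * SGEs. A sketch with typical values (2176-byte buffers, 128-byte
 * headroom, MLX4_PMD_SGE_WR_N assumed to be 4; the real constants come
 * from mlx4.h and the mempool configuration):
 */
static int
needs_scatter(unsigned int max_rx_pkt_len, unsigned int mb_len,
	      unsigned int headroom)
{
	return max_rx_pkt_len > (mb_len - headroom);
}
/* needs_scatter(9018, 2176, 128) != 0: a 9000-byte MTU requires sp mode,
 * so 256 configured descriptors become 256 / 4 = 64 multi-SGE elements. */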
*/ + k = 0; + if (rxq->sp) { + struct rxq_elt_sp (*elts)[rxq->elts_n] = rxq->elts.sp; + + for (i = 0; (i != elemof(*elts)); ++i) { + struct rxq_elt_sp *elt = &(*elts)[i]; + unsigned int j; + + for (j = 0; (j != elemof(elt->bufs)); ++j) { + assert(elt->bufs[j] != NULL); + pool[k++] = elt->bufs[j]; + } + } + } else { + struct rxq_elt (*elts)[rxq->elts_n] = rxq->elts.no_sp; + + for (i = 0; (i != elemof(*elts)); ++i) { + struct rxq_elt *elt = &(*elts)[i]; + struct rte_mbuf *buf = (void *) + ((uintptr_t)elt->sge.addr - + WR_ID(elt->wr.wr_id).offset); + + assert(WR_ID(elt->wr.wr_id).id == i); + pool[k++] = buf; + } + } + assert(k == mbuf_n); + tmpl.elts_n = 0; + tmpl.elts.sp = NULL; + assert((void *)&tmpl.elts.sp == (void *)&tmpl.elts.no_sp); + err = ((tmpl.sp) ? + rxq_alloc_elts_sp(&tmpl, desc_n, pool) : + rxq_alloc_elts(&tmpl, desc_n, pool)); + if (err) { + ERROR("%p: cannot reallocate WRs, aborting", (void *)dev); + rte_free(pool); + assert(err > 0); + return err; + } + assert(tmpl.elts_n == desc_n); + assert(tmpl.elts.sp != NULL); + rte_free(pool); + /* Clean up original data. */ + rxq->elts_n = 0; + rte_free(rxq->elts.sp); + rxq->elts.sp = NULL; + /* Post WRs. */ + err = ibv_post_recv(tmpl.qp, + (tmpl.sp ? + &(*tmpl.elts.sp)[0].wr : + &(*tmpl.elts.no_sp)[0].wr), + &bad_wr); + if (err) { + ERROR("%p: ibv_post_recv() failed for WR %p: %s", + (void *)dev, + (void *)bad_wr, + strerror(err)); + goto skip_rtr; + } + mod = (struct ibv_exp_qp_attr){ + .qp_state = IBV_QPS_RTR + }; + err = ibv_exp_modify_qp(tmpl.qp, &mod, IBV_EXP_QP_STATE); + if (err) + ERROR("%p: QP state to IBV_QPS_RTR failed: %s", + (void *)dev, strerror(err)); +skip_rtr: + *rxq = tmpl; + assert(err >= 0); + return err; +} + +/** + * Configure a RX queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param rxq + * Pointer to RX queue structure. + * @param desc + * Number of descriptors to configure in queue. + * @param socket + * NUMA socket on which memory must be allocated. + * @param inactive + * If true, the queue is disabled because its index is higher or + * equal to the real number of queues, which must be a power of 2. + * @param[in] conf + * Thresholds parameters. + * @param mp + * Memory pool for buffer allocations. + * + * @return + * 0 on success, errno value on failure. + */ +static int +rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc, + unsigned int socket, int inactive, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp) +{ + struct priv *priv = dev->data->dev_private; + struct rxq tmpl = { + .priv = priv, + .mp = mp, + .socket = socket + }; + struct ibv_exp_qp_attr mod; + union { + struct ibv_exp_query_intf_params params; + struct ibv_exp_cq_init_attr cq; + struct ibv_exp_res_domain_init_attr rd; + } attr; + enum ibv_exp_query_intf_status status; + struct ibv_recv_wr *bad_wr; + struct rte_mbuf *buf; + int ret = 0; + int parent = (rxq == &priv->rxq_parent); + + (void)conf; /* Thresholds configuration (ignored). */ + /* + * If this is a parent queue, hardware must support RSS and + * RSS must be enabled. + */ + assert((!parent) || ((priv->hw_rss) && (priv->rss))); + if (parent) { + /* Even if unused, ibv_create_cq() requires at least one + * descriptor. */ + desc = 1; + goto skip_mr; + } + if ((desc == 0) || (desc % MLX4_PMD_SGE_WR_N)) { + ERROR("%p: invalid number of RX descriptors (must be a" + " multiple of %d)", (void *)dev, MLX4_PMD_SGE_WR_N); + return EINVAL; + } + /* Get mbuf length. 
*/ + buf = rte_pktmbuf_alloc(mp); + if (buf == NULL) { + ERROR("%p: unable to allocate mbuf", (void *)dev); + return ENOMEM; + } + tmpl.mb_len = buf->buf_len; + assert((rte_pktmbuf_headroom(buf) + + rte_pktmbuf_tailroom(buf)) == tmpl.mb_len); + assert(rte_pktmbuf_headroom(buf) == RTE_PKTMBUF_HEADROOM); + rte_pktmbuf_free(buf); + /* Toggle RX checksum offload if hardware supports it. */ + if (priv->hw_csum) + tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum; + if (priv->hw_csum_l2tun) + tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum; + /* Enable scattered packets support for this queue if necessary. */ + if ((dev->data->dev_conf.rxmode.jumbo_frame) && + (dev->data->dev_conf.rxmode.max_rx_pkt_len > + (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) { + tmpl.sp = 1; + desc /= MLX4_PMD_SGE_WR_N; + } + DEBUG("%p: %s scattered packets support (%u WRs)", + (void *)dev, (tmpl.sp ? "enabling" : "disabling"), desc); + /* Use the entire RX mempool as the memory region. */ + tmpl.mr = mlx4_mp2mr(priv->pd, mp); + if (tmpl.mr == NULL) { + ret = EINVAL; + ERROR("%p: MR creation failure: %s", + (void *)dev, strerror(ret)); + goto error; + } +skip_mr: + attr.rd = (struct ibv_exp_res_domain_init_attr){ + .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL | + IBV_EXP_RES_DOMAIN_MSG_MODEL), + .thread_model = IBV_EXP_THREAD_SINGLE, + .msg_model = IBV_EXP_MSG_HIGH_BW, + }; + tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd); + if (tmpl.rd == NULL) { + ret = ENOMEM; + ERROR("%p: RD creation failure: %s", + (void *)dev, strerror(ret)); + goto error; + } + attr.cq = (struct ibv_exp_cq_init_attr){ + .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN, + .res_domain = tmpl.rd, + }; + tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq); + if (tmpl.cq == NULL) { + ret = ENOMEM; + ERROR("%p: CQ creation failure: %s", + (void *)dev, strerror(ret)); + goto error; + } + DEBUG("priv->device_attr.max_qp_wr is %d", + priv->device_attr.max_qp_wr); + DEBUG("priv->device_attr.max_sge is %d", + priv->device_attr.max_sge); +#ifdef RSS_SUPPORT + if (priv->rss && !inactive) + tmpl.qp = rxq_setup_qp_rss(priv, tmpl.cq, desc, parent, + tmpl.rd); + else +#endif /* RSS_SUPPORT */ + tmpl.qp = rxq_setup_qp(priv, tmpl.cq, desc, tmpl.rd); + if (tmpl.qp == NULL) { + ret = (errno ? errno : EINVAL); + ERROR("%p: QP creation failure: %s", + (void *)dev, strerror(ret)); + goto error; + } + mod = (struct ibv_exp_qp_attr){ + /* Move the QP to this state. */ + .qp_state = IBV_QPS_INIT, + /* Primary port number. */ + .port_num = priv->port + }; + ret = ibv_exp_modify_qp(tmpl.qp, &mod, + (IBV_EXP_QP_STATE | +#ifdef RSS_SUPPORT + (parent ? IBV_EXP_QP_GROUP_RSS : 0) | +#endif /* RSS_SUPPORT */ + IBV_EXP_QP_PORT)); + if (ret) { + ERROR("%p: QP state to IBV_QPS_INIT failed: %s", + (void *)dev, strerror(ret)); + goto error; + } + if ((parent) || (!priv->rss)) { + /* Configure MAC and broadcast addresses. */ + ret = rxq_mac_addrs_add(&tmpl); + if (ret) { + ERROR("%p: QP flow attachment failed: %s", + (void *)dev, strerror(ret)); + goto error; + } + } + /* Allocate descriptors for RX queues, except for the RSS parent. */ + if (parent) + goto skip_alloc; + if (tmpl.sp) + ret = rxq_alloc_elts_sp(&tmpl, desc, NULL); + else + ret = rxq_alloc_elts(&tmpl, desc, NULL); + if (ret) { + ERROR("%p: RXQ allocation failed: %s", + (void *)dev, strerror(ret)); + goto error; + } + ret = ibv_post_recv(tmpl.qp, + (tmpl.sp ? 
+ &(*tmpl.elts.sp)[0].wr : + &(*tmpl.elts.no_sp)[0].wr), + &bad_wr); + if (ret) { + ERROR("%p: ibv_post_recv() failed for WR %p: %s", + (void *)dev, + (void *)bad_wr, + strerror(ret)); + goto error; + } +skip_alloc: + mod = (struct ibv_exp_qp_attr){ + .qp_state = IBV_QPS_RTR + }; + ret = ibv_exp_modify_qp(tmpl.qp, &mod, IBV_EXP_QP_STATE); + if (ret) { + ERROR("%p: QP state to IBV_QPS_RTR failed: %s", + (void *)dev, strerror(ret)); + goto error; + } + /* Save port ID. */ + tmpl.port_id = dev->data->port_id; + DEBUG("%p: RTE port ID: %u", (void *)rxq, tmpl.port_id); + attr.params = (struct ibv_exp_query_intf_params){ + .intf_scope = IBV_EXP_INTF_GLOBAL, + .intf = IBV_EXP_INTF_CQ, + .obj = tmpl.cq, + }; + tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status); + if (tmpl.if_cq == NULL) { + ERROR("%p: CQ interface family query failed with status %d", + (void *)dev, status); + goto error; + } + attr.params = (struct ibv_exp_query_intf_params){ + .intf_scope = IBV_EXP_INTF_GLOBAL, + .intf = IBV_EXP_INTF_QP_BURST, + .obj = tmpl.qp, + }; + tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status); + if (tmpl.if_qp == NULL) { + ERROR("%p: QP interface family query failed with status %d", + (void *)dev, status); + goto error; + } + /* Clean up rxq in case we're reinitializing it. */ + DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq); + rxq_cleanup(rxq); + *rxq = tmpl; + DEBUG("%p: rxq updated with %p", (void *)rxq, (void *)&tmpl); + assert(ret == 0); + return 0; +error: + rxq_cleanup(&tmpl); + assert(ret > 0); + return ret; +} + +/** + * DPDK callback to configure a RX queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * RX queue index. + * @param desc + * Number of descriptors to configure in queue. + * @param socket + * NUMA socket on which memory must be allocated. + * @param[in] conf + * Thresholds parameters. + * @param mp + * Memory pool for buffer allocations. + * + * @return + * 0 on success, negative errno value on failure. + */ +static int +mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp) +{ + struct priv *priv = dev->data->dev_private; + struct rxq *rxq = (*priv->rxqs)[idx]; + int inactive = 0; + int ret; + + if (mlx4_is_secondary()) + return -E_RTE_SECONDARY; + priv_lock(priv); + DEBUG("%p: configuring queue %u for %u descriptors", + (void *)dev, idx, desc); + if (idx >= priv->rxqs_n) { + ERROR("%p: queue index out of range (%u >= %u)", + (void *)dev, idx, priv->rxqs_n); + priv_unlock(priv); + return -EOVERFLOW; + } + if (rxq != NULL) { + DEBUG("%p: reusing already allocated queue index %u (%p)", + (void *)dev, idx, (void *)rxq); + if (priv->started) { + priv_unlock(priv); + return -EEXIST; + } + (*priv->rxqs)[idx] = NULL; + rxq_cleanup(rxq); + } else { + rxq = rte_calloc_socket("RXQ", 1, sizeof(*rxq), 0, socket); + if (rxq == NULL) { + ERROR("%p: unable to allocate queue index %u", + (void *)dev, idx); + priv_unlock(priv); + return -ENOMEM; + } + } + if (idx >= rte_align32pow2(priv->rxqs_n + 1) >> 1) + inactive = 1; + ret = rxq_setup(dev, rxq, desc, socket, inactive, conf, mp); + if (ret) + rte_free(rxq); + else { + rxq->stats.idx = idx; + DEBUG("%p: adding RX queue %p to list", + (void *)dev, (void *)rxq); + (*priv->rxqs)[idx] = rxq; + /* Update receive callback. 
*/ + if (rxq->sp) + dev->rx_pkt_burst = mlx4_rx_burst_sp; + else + dev->rx_pkt_burst = mlx4_rx_burst; + } + priv_unlock(priv); + return -ret; +} + +/** + * DPDK callback to release a RX queue. + * + * @param dpdk_rxq + * Generic RX queue pointer. + */ +static void +mlx4_rx_queue_release(void *dpdk_rxq) +{ + struct rxq *rxq = (struct rxq *)dpdk_rxq; + struct priv *priv; + unsigned int i; + + if (mlx4_is_secondary()) + return; + if (rxq == NULL) + return; + priv = rxq->priv; + priv_lock(priv); + assert(rxq != &priv->rxq_parent); + for (i = 0; (i != priv->rxqs_n); ++i) + if ((*priv->rxqs)[i] == rxq) { + DEBUG("%p: removing RX queue %p from list", + (void *)priv->dev, (void *)rxq); + (*priv->rxqs)[i] = NULL; + break; + } + rxq_cleanup(rxq); + rte_free(rxq); + priv_unlock(priv); +} + +static void +priv_dev_interrupt_handler_install(struct priv *, struct rte_eth_dev *); + +/** + * DPDK callback to start the device. + * + * Simulate device start by attaching all configured flows. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative errno value on failure. + */ +static int +mlx4_dev_start(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + unsigned int i = 0; + unsigned int r; + struct rxq *rxq; + + if (mlx4_is_secondary()) + return -E_RTE_SECONDARY; + priv_lock(priv); + if (priv->started) { + priv_unlock(priv); + return 0; + } + DEBUG("%p: attaching configured flows to all RX queues", (void *)dev); + priv->started = 1; + if (priv->rss) { + rxq = &priv->rxq_parent; + r = 1; + } else { + rxq = (*priv->rxqs)[0]; + r = priv->rxqs_n; + } + /* Iterate only once when RSS is enabled. */ + do { + int ret; + + /* Ignore nonexistent RX queues. */ + if (rxq == NULL) + continue; + ret = rxq_mac_addrs_add(rxq); + if (!ret && priv->promisc) + ret = rxq_promiscuous_enable(rxq); + if (!ret && priv->allmulti) + ret = rxq_allmulticast_enable(rxq); + if (!ret) + continue; + WARN("%p: QP flow attachment failed: %s", + (void *)dev, strerror(ret)); + /* Rollback. */ + while (i != 0) { + rxq = (*priv->rxqs)[--i]; + if (rxq != NULL) { + rxq_allmulticast_disable(rxq); + rxq_promiscuous_disable(rxq); + rxq_mac_addrs_del(rxq); + } + } + priv->started = 0; + priv_unlock(priv); + return -ret; + } while ((--r) && ((rxq = (*priv->rxqs)[++i]), i)); + priv_dev_interrupt_handler_install(priv, dev); + priv_unlock(priv); + return 0; +} + +/** + * DPDK callback to stop the device. + * + * Simulate device stop by detaching all configured flows. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mlx4_dev_stop(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + unsigned int i = 0; + unsigned int r; + struct rxq *rxq; + + if (mlx4_is_secondary()) + return; + priv_lock(priv); + if (!priv->started) { + priv_unlock(priv); + return; + } + DEBUG("%p: detaching flows from all RX queues", (void *)dev); + priv->started = 0; + if (priv->rss) { + rxq = &priv->rxq_parent; + r = 1; + } else { + rxq = (*priv->rxqs)[0]; + r = priv->rxqs_n; + } + /* Iterate only once when RSS is enabled. */ + do { + /* Ignore nonexistent RX queues. */ + if (rxq == NULL) + continue; + rxq_allmulticast_disable(rxq); + rxq_promiscuous_disable(rxq); + rxq_mac_addrs_del(rxq); + } while ((--r) && ((rxq = (*priv->rxqs)[++i]), i)); + priv_unlock(priv); +} + +/** + * Dummy DPDK callback for TX. + * + * This function is used to temporarily replace the real callback during + * unsafe control operations on the queue, or in case of error. 
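/*
 * Internal helpers in this file (rxq_setup(), rxq_rehash(), ...) return
 * positive errno values, while the rte_eth_dev callbacks above negate
 * them at the boundary ("return -ret") to follow the DPDK convention of
 * negative errno on failure. A sketch of that wrapper pattern; do_setup()
 * is a hypothetical stand-in for an internal helper.
 */
#include <errno.h>

static int
do_setup(void)
{
	return ENOMEM;		/* positive errno, internal convention */
}

static int
dpdk_callback(void)
{
	int ret = do_setup();

	return -ret;		/* 0 on success, negative errno on failure */
}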
+ * + * @param dpdk_txq + * Generic pointer to TX queue structure. + * @param[in] pkts + * Packets to transmit. + * @param pkts_n + * Number of packets in array. + * + * @return + * Number of packets successfully transmitted (<= pkts_n). + */ +static uint16_t +removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) +{ + (void)dpdk_txq; + (void)pkts; + (void)pkts_n; + return 0; +} + +/** + * Dummy DPDK callback for RX. + * + * This function is used to temporarily replace the real callback during + * unsafe control operations on the queue, or in case of error. + * + * @param dpdk_rxq + * Generic pointer to RX queue structure. + * @param[out] pkts + * Array to store received packets. + * @param pkts_n + * Maximum number of packets in array. + * + * @return + * Number of packets successfully received (<= pkts_n). + */ +static uint16_t +removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +{ + (void)dpdk_rxq; + (void)pkts; + (void)pkts_n; + return 0; +} + +static void +priv_dev_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *); + +/** + * DPDK callback to close the device. + * + * Destroy all queues and objects, free memory. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mlx4_dev_close(struct rte_eth_dev *dev) +{ + struct priv *priv = mlx4_get_priv(dev); + void *tmp; + unsigned int i; + + if (priv == NULL) + return; + priv_lock(priv); + DEBUG("%p: closing device \"%s\"", + (void *)dev, + ((priv->ctx != NULL) ? priv->ctx->device->name : "")); + /* Prevent crashes when queues are still in use. This is unfortunately + * still required for DPDK 1.3 because some programs (such as testpmd) + * never release them before closing the device. */ + dev->rx_pkt_burst = removed_rx_burst; + dev->tx_pkt_burst = removed_tx_burst; + if (priv->rxqs != NULL) { + /* XXX race condition if mlx4_rx_burst() is still running. */ + usleep(1000); + for (i = 0; (i != priv->rxqs_n); ++i) { + tmp = (*priv->rxqs)[i]; + if (tmp == NULL) + continue; + (*priv->rxqs)[i] = NULL; + rxq_cleanup(tmp); + rte_free(tmp); + } + priv->rxqs_n = 0; + priv->rxqs = NULL; + } + if (priv->txqs != NULL) { + /* XXX race condition if mlx4_tx_burst() is still running. */ + usleep(1000); + for (i = 0; (i != priv->txqs_n); ++i) { + tmp = (*priv->txqs)[i]; + if (tmp == NULL) + continue; + (*priv->txqs)[i] = NULL; + txq_cleanup(tmp); + rte_free(tmp); + } + priv->txqs_n = 0; + priv->txqs = NULL; + } + if (priv->rss) + rxq_cleanup(&priv->rxq_parent); + if (priv->pd != NULL) { + assert(priv->ctx != NULL); + claim_zero(ibv_dealloc_pd(priv->pd)); + claim_zero(ibv_close_device(priv->ctx)); + } else + assert(priv->ctx == NULL); + priv_dev_interrupt_handler_uninstall(priv, dev); + priv_unlock(priv); + memset(priv, 0, sizeof(*priv)); +} + +/** + * DPDK callback to get information about the device. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] info + * Info structure output buffer. + */ +static void +mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) +{ + struct priv *priv = mlx4_get_priv(dev); + unsigned int max; + char ifname[IF_NAMESIZE]; + + if (priv == NULL) + return; + priv_lock(priv); + /* FIXME: we should ask the device for these values. */ + info->min_rx_bufsize = 32; + info->max_rx_pktlen = 65536; + /* + * Since we need one CQ per QP, the limit is the minimum number + * between the two values. + */ + max = ((priv->device_attr.max_cq > priv->device_attr.max_qp) ? 
+ priv->device_attr.max_qp : priv->device_attr.max_cq); + /* If max >= 65535 then max = 0, max_rx_queues is uint16_t. */ + if (max >= 65535) + max = 65535; + info->max_rx_queues = max; + info->max_tx_queues = max; + /* Last array entry is reserved for broadcast. */ + info->max_mac_addrs = (elemof(priv->mac) - 1); + info->rx_offload_capa = + (priv->hw_csum ? + (DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM) : + 0); + info->tx_offload_capa = + (priv->hw_csum ? + (DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM) : + 0); + if (priv_get_ifname(priv, &ifname) == 0) + info->if_index = if_nametoindex(ifname); + info->speed_capa = + ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_10G | + ETH_LINK_SPEED_20G | + ETH_LINK_SPEED_40G | + ETH_LINK_SPEED_56G; + priv_unlock(priv); +} + +static const uint32_t * +mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + /* refers to rxq_cq_to_pkt_type() */ + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_INNER_L3_IPV4, + RTE_PTYPE_INNER_L3_IPV6, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == mlx4_rx_burst || + dev->rx_pkt_burst == mlx4_rx_burst_sp) + return ptypes; + return NULL; +} + +/** + * DPDK callback to get device statistics. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] stats + * Stats structure output buffer. + */ +static void +mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct priv *priv = mlx4_get_priv(dev); + struct rte_eth_stats tmp = {0}; + unsigned int i; + unsigned int idx; + + if (priv == NULL) + return; + priv_lock(priv); + /* Add software counters. */ + for (i = 0; (i != priv->rxqs_n); ++i) { + struct rxq *rxq = (*priv->rxqs)[i]; + + if (rxq == NULL) + continue; + idx = rxq->stats.idx; + if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) { +#ifdef MLX4_PMD_SOFT_COUNTERS + tmp.q_ipackets[idx] += rxq->stats.ipackets; + tmp.q_ibytes[idx] += rxq->stats.ibytes; +#endif + tmp.q_errors[idx] += (rxq->stats.idropped + + rxq->stats.rx_nombuf); + } +#ifdef MLX4_PMD_SOFT_COUNTERS + tmp.ipackets += rxq->stats.ipackets; + tmp.ibytes += rxq->stats.ibytes; +#endif + tmp.ierrors += rxq->stats.idropped; + tmp.rx_nombuf += rxq->stats.rx_nombuf; + } + for (i = 0; (i != priv->txqs_n); ++i) { + struct txq *txq = (*priv->txqs)[i]; + + if (txq == NULL) + continue; + idx = txq->stats.idx; + if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) { +#ifdef MLX4_PMD_SOFT_COUNTERS + tmp.q_opackets[idx] += txq->stats.opackets; + tmp.q_obytes[idx] += txq->stats.obytes; +#endif + tmp.q_errors[idx] += txq->stats.odropped; + } +#ifdef MLX4_PMD_SOFT_COUNTERS + tmp.opackets += txq->stats.opackets; + tmp.obytes += txq->stats.obytes; +#endif + tmp.oerrors += txq->stats.odropped; + } +#ifndef MLX4_PMD_SOFT_COUNTERS + /* FIXME: retrieve and add hardware counters. */ +#endif + *stats = tmp; + priv_unlock(priv); +} + +/** + * DPDK callback to clear device statistics. + * + * @param dev + * Pointer to Ethernet device structure. 
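/*
 * mlx4_stats_get() above folds the per-queue software counters into a
 * single struct rte_eth_stats: port totals are always accumulated, while
 * the per-queue q_* arrays are only filled for queue indexes below
 * RTE_ETHDEV_QUEUE_STAT_CNTRS. A reduced sketch of the RX half, assuming
 * a simplified per-queue counter struct in place of mlx4_rxq_stats:
 */
#include <stdint.h>
#include <rte_ethdev.h>

struct sw_rx_stats {
	unsigned int idx;	/* queue index */
	uint64_t ipackets;
	uint64_t ibytes;
	uint64_t idropped;
	uint64_t rx_nombuf;
};

static void
accumulate_rx(struct rte_eth_stats *out, const struct sw_rx_stats *q)
{
	if (q->idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
		out->q_ipackets[q->idx] += q->ipackets;
		out->q_ibytes[q->idx] += q->ibytes;
		out->q_errors[q->idx] += q->idropped + q->rx_nombuf;
	}
	out->ipackets += q->ipackets;
	out->ibytes += q->ibytes;
	out->ierrors += q->idropped;
	out->rx_nombuf += q->rx_nombuf;
}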
+ */ +static void +mlx4_stats_reset(struct rte_eth_dev *dev) +{ + struct priv *priv = mlx4_get_priv(dev); + unsigned int i; + unsigned int idx; + + if (priv == NULL) + return; + priv_lock(priv); + for (i = 0; (i != priv->rxqs_n); ++i) { + if ((*priv->rxqs)[i] == NULL) + continue; + idx = (*priv->rxqs)[i]->stats.idx; + (*priv->rxqs)[i]->stats = + (struct mlx4_rxq_stats){ .idx = idx }; + } + for (i = 0; (i != priv->txqs_n); ++i) { + if ((*priv->txqs)[i] == NULL) + continue; + idx = (*priv->txqs)[i]->stats.idx; + (*priv->txqs)[i]->stats = + (struct mlx4_txq_stats){ .idx = idx }; + } +#ifndef MLX4_PMD_SOFT_COUNTERS + /* FIXME: reset hardware counters. */ +#endif + priv_unlock(priv); +} + +/** + * DPDK callback to remove a MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param index + * MAC address index. + */ +static void +mlx4_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + struct priv *priv = dev->data->dev_private; + + if (mlx4_is_secondary()) + return; + priv_lock(priv); + DEBUG("%p: removing MAC address from index %" PRIu32, + (void *)dev, index); + /* Last array entry is reserved for broadcast. */ + if (index >= (elemof(priv->mac) - 1)) + goto end; + priv_mac_addr_del(priv, index); +end: + priv_unlock(priv); +} + +/** + * DPDK callback to add a MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param mac_addr + * MAC address to register. + * @param index + * MAC address index. + * @param vmdq + * VMDq pool index to associate address with (ignored). + */ +static void +mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + uint32_t index, uint32_t vmdq) +{ + struct priv *priv = dev->data->dev_private; + + if (mlx4_is_secondary()) + return; + (void)vmdq; + priv_lock(priv); + DEBUG("%p: adding MAC address at index %" PRIu32, + (void *)dev, index); + /* Last array entry is reserved for broadcast. */ + if (index >= (elemof(priv->mac) - 1)) + goto end; + priv_mac_addr_add(priv, index, + (const uint8_t (*)[ETHER_ADDR_LEN]) + mac_addr->addr_bytes); +end: + priv_unlock(priv); +} + +/** + * DPDK callback to set the primary MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param mac_addr + * MAC address to register. + */ +static void +mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) +{ + DEBUG("%p: setting primary MAC address", (void *)dev); + mlx4_mac_addr_remove(dev, 0); + mlx4_mac_addr_add(dev, mac_addr, 0, 0); +} + +/** + * DPDK callback to enable promiscuous mode. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mlx4_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + unsigned int i; + int ret; + + if (mlx4_is_secondary()) + return; + priv_lock(priv); + if (priv->promisc) { + priv_unlock(priv); + return; + } + /* If device isn't started, this is all we need to do. */ + if (!priv->started) + goto end; + if (priv->rss) { + ret = rxq_promiscuous_enable(&priv->rxq_parent); + if (ret) { + priv_unlock(priv); + return; + } + goto end; + } + for (i = 0; (i != priv->rxqs_n); ++i) { + if ((*priv->rxqs)[i] == NULL) + continue; + ret = rxq_promiscuous_enable((*priv->rxqs)[i]); + if (!ret) + continue; + /* Failure, rollback. */ + while (i != 0) + if ((*priv->rxqs)[--i] != NULL) + rxq_promiscuous_disable((*priv->rxqs)[i]); + priv_unlock(priv); + return; + } +end: + priv->promisc = 1; + priv_unlock(priv); +} + +/** + * DPDK callback to disable promiscuous mode. 
+ * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mlx4_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + unsigned int i; + + if (mlx4_is_secondary()) + return; + priv_lock(priv); + if (!priv->promisc) { + priv_unlock(priv); + return; + } + if (priv->rss) { + rxq_promiscuous_disable(&priv->rxq_parent); + goto end; + } + for (i = 0; (i != priv->rxqs_n); ++i) + if ((*priv->rxqs)[i] != NULL) + rxq_promiscuous_disable((*priv->rxqs)[i]); +end: + priv->promisc = 0; + priv_unlock(priv); +} + +/** + * DPDK callback to enable allmulti mode. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mlx4_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + unsigned int i; + int ret; + + if (mlx4_is_secondary()) + return; + priv_lock(priv); + if (priv->allmulti) { + priv_unlock(priv); + return; + } + /* If device isn't started, this is all we need to do. */ + if (!priv->started) + goto end; + if (priv->rss) { + ret = rxq_allmulticast_enable(&priv->rxq_parent); + if (ret) { + priv_unlock(priv); + return; + } + goto end; + } + for (i = 0; (i != priv->rxqs_n); ++i) { + if ((*priv->rxqs)[i] == NULL) + continue; + ret = rxq_allmulticast_enable((*priv->rxqs)[i]); + if (!ret) + continue; + /* Failure, rollback. */ + while (i != 0) + if ((*priv->rxqs)[--i] != NULL) + rxq_allmulticast_disable((*priv->rxqs)[i]); + priv_unlock(priv); + return; + } +end: + priv->allmulti = 1; + priv_unlock(priv); +} + +/** + * DPDK callback to disable allmulti mode. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mlx4_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + unsigned int i; + + if (mlx4_is_secondary()) + return; + priv_lock(priv); + if (!priv->allmulti) { + priv_unlock(priv); + return; + } + if (priv->rss) { + rxq_allmulticast_disable(&priv->rxq_parent); + goto end; + } + for (i = 0; (i != priv->rxqs_n); ++i) + if ((*priv->rxqs)[i] != NULL) + rxq_allmulticast_disable((*priv->rxqs)[i]); +end: + priv->allmulti = 0; + priv_unlock(priv); +} + +/** + * DPDK callback to retrieve physical link information (unlocked version). + * + * @param dev + * Pointer to Ethernet device structure. + * @param wait_to_complete + * Wait for request completion (ignored). + */ +static int +mlx4_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete) +{ + struct priv *priv = mlx4_get_priv(dev); + struct ethtool_cmd edata = { + .cmd = ETHTOOL_GSET + }; + struct ifreq ifr; + struct rte_eth_link dev_link; + int link_speed = 0; + + if (priv == NULL) + return -EINVAL; + (void)wait_to_complete; + if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) { + WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno)); + return -1; + } + memset(&dev_link, 0, sizeof(dev_link)); + dev_link.link_status = ((ifr.ifr_flags & IFF_UP) && + (ifr.ifr_flags & IFF_RUNNING)); + ifr.ifr_data = &edata; + if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { + WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s", + strerror(errno)); + return -1; + } + link_speed = ethtool_cmd_speed(&edata); + if (link_speed == -1) + dev_link.link_speed = 0; + else + dev_link.link_speed = link_speed; + dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ? + ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX); + dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_FIXED); + if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) { + /* Link status changed. 
*/ + dev->data->dev_link = dev_link; + return 0; + } + /* Link status is still the same. */ + return -1; +} + +/** + * DPDK callback to retrieve physical link information. + * + * @param dev + * Pointer to Ethernet device structure. + * @param wait_to_complete + * Wait for request completion (ignored). + */ +static int +mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ + struct priv *priv = mlx4_get_priv(dev); + int ret; + + if (priv == NULL) + return -EINVAL; + priv_lock(priv); + ret = mlx4_link_update_unlocked(dev, wait_to_complete); + priv_unlock(priv); + return ret; +} + +/** + * DPDK callback to change the MTU. + * + * Setting the MTU affects hardware MRU (packets larger than the MTU cannot be + * received). Use this as a hint to enable/disable scattered packets support + * and improve performance when not needed. + * Since failure is not an option, reconfiguring queues on the fly is not + * recommended. + * + * @param dev + * Pointer to Ethernet device structure. + * @param in_mtu + * New MTU. + * + * @return + * 0 on success, negative errno value on failure. + */ +static int +mlx4_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct priv *priv = dev->data->dev_private; + int ret = 0; + unsigned int i; + uint16_t (*rx_func)(void *, struct rte_mbuf **, uint16_t) = + mlx4_rx_burst; + + if (mlx4_is_secondary()) + return -E_RTE_SECONDARY; + priv_lock(priv); + /* Set kernel interface MTU first. */ + if (priv_set_mtu(priv, mtu)) { + ret = errno; + WARN("cannot set port %u MTU to %u: %s", priv->port, mtu, + strerror(ret)); + goto out; + } else + DEBUG("adapter port %u MTU set to %u", priv->port, mtu); + priv->mtu = mtu; + /* Temporarily replace RX handler with a fake one, assuming it has not + * been copied elsewhere. */ + dev->rx_pkt_burst = removed_rx_burst; + /* Make sure everyone has left mlx4_rx_burst() and uses + * removed_rx_burst() instead. */ + rte_wmb(); + usleep(1000); + /* Reconfigure each RX queue. */ + for (i = 0; (i != priv->rxqs_n); ++i) { + struct rxq *rxq = (*priv->rxqs)[i]; + unsigned int max_frame_len; + int sp; + + if (rxq == NULL) + continue; + /* Calculate new maximum frame length according to MTU and + * toggle scattered support (sp) if necessary. */ + max_frame_len = (priv->mtu + ETHER_HDR_LEN + + (ETHER_MAX_VLAN_FRAME_LEN - ETHER_MAX_LEN)); + sp = (max_frame_len > (rxq->mb_len - RTE_PKTMBUF_HEADROOM)); + /* Provide new values to rxq_setup(). */ + dev->data->dev_conf.rxmode.jumbo_frame = sp; + dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len; + ret = rxq_rehash(dev, rxq); + if (ret) { + /* Force SP RX if that queue requires it and abort. */ + if (rxq->sp) + rx_func = mlx4_rx_burst_sp; + break; + } + /* Reenable non-RSS queue attributes. No need to check + * for errors at this stage. */ + if (!priv->rss) { + rxq_mac_addrs_add(rxq); + if (priv->promisc) + rxq_promiscuous_enable(rxq); + if (priv->allmulti) + rxq_allmulticast_enable(rxq); + } + /* Scattered burst function takes priority. */ + if (rxq->sp) + rx_func = mlx4_rx_burst_sp; + } + /* Burst functions can now be called again. */ + rte_wmb(); + dev->rx_pkt_burst = rx_func; +out: + priv_unlock(priv); + assert(ret >= 0); + return -ret; +} + +/** + * DPDK callback to get flow control status. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] fc_conf + * Flow control output buffer. + * + * @return + * 0 on success, negative errno value on failure. 
+ */
+static int
+mlx4_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct ifreq ifr;
+ struct ethtool_pauseparam ethpause = {
+ .cmd = ETHTOOL_GPAUSEPARAM
+ };
+ int ret;
+
+ if (mlx4_is_secondary())
+ return -E_RTE_SECONDARY;
+ ifr.ifr_data = &ethpause;
+ priv_lock(priv);
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
+ ret = errno;
+ WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)"
+ " failed: %s",
+ strerror(ret));
+ goto out;
+ }
+
+ fc_conf->autoneg = ethpause.autoneg;
+ if (ethpause.rx_pause && ethpause.tx_pause)
+ fc_conf->mode = RTE_FC_FULL;
+ else if (ethpause.rx_pause)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else if (ethpause.tx_pause)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+ ret = 0;
+
+out:
+ priv_unlock(priv);
+ assert(ret >= 0);
+ return -ret;
+}
+
+/**
+ * DPDK callback to modify flow control parameters.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[in] fc_conf
+ * Flow control parameters.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+static int
+mlx4_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct ifreq ifr;
+ struct ethtool_pauseparam ethpause = {
+ .cmd = ETHTOOL_SPAUSEPARAM
+ };
+ int ret;
+
+ if (mlx4_is_secondary())
+ return -E_RTE_SECONDARY;
+ ifr.ifr_data = &ethpause;
+ ethpause.autoneg = fc_conf->autoneg;
+ if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
+ (fc_conf->mode & RTE_FC_RX_PAUSE))
+ ethpause.rx_pause = 1;
+ else
+ ethpause.rx_pause = 0;
+
+ if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
+ (fc_conf->mode & RTE_FC_TX_PAUSE))
+ ethpause.tx_pause = 1;
+ else
+ ethpause.tx_pause = 0;
+
+ priv_lock(priv);
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
+ ret = errno;
+ WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
+ " failed: %s",
+ strerror(ret));
+ goto out;
+ }
+ ret = 0;
+
+out:
+ priv_unlock(priv);
+ assert(ret >= 0);
+ return -ret;
+}
+
+/**
+ * Configure a VLAN filter.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param vlan_id
+ * VLAN ID to filter.
+ * @param on
+ * Toggle filter.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static int
+vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int i;
+ unsigned int j = -1;
+
+ DEBUG("%p: %s VLAN filter ID %" PRIu16,
+ (void *)dev, (on ? "enable" : "disable"), vlan_id);
+ for (i = 0; (i != elemof(priv->vlan_filter)); ++i) {
+ if (!priv->vlan_filter[i].enabled) {
+ /* Unused index, remember it. */
+ j = i;
+ continue;
+ }
+ if (priv->vlan_filter[i].id != vlan_id)
+ continue;
+ /* This VLAN ID is already known, use its index. */
+ j = i;
+ break;
+ }
+ /* Check if there's room for another VLAN filter. */
+ if (j == (unsigned int)-1)
+ return ENOMEM;
+ /*
+ * VLAN filters apply to all configured MAC addresses, flow
+ * specifications must be reconfigured accordingly.
+ */
+ priv->vlan_filter[j].id = vlan_id;
+ if ((on) && (!priv->vlan_filter[j].enabled)) {
+ /*
+ * Filter is disabled, enable it.
+ * Rehashing flows in all RX queues is necessary.
+ */ + if (priv->rss) + rxq_mac_addrs_del(&priv->rxq_parent); + else + for (i = 0; (i != priv->rxqs_n); ++i) + if ((*priv->rxqs)[i] != NULL) + rxq_mac_addrs_del((*priv->rxqs)[i]); + priv->vlan_filter[j].enabled = 1; + if (priv->started) { + if (priv->rss) + rxq_mac_addrs_add(&priv->rxq_parent); + else + for (i = 0; (i != priv->rxqs_n); ++i) { + if ((*priv->rxqs)[i] == NULL) + continue; + rxq_mac_addrs_add((*priv->rxqs)[i]); + } + } + } else if ((!on) && (priv->vlan_filter[j].enabled)) { + /* + * Filter is enabled, disable it. + * Rehashing flows in all RX queues is necessary. + */ + if (priv->rss) + rxq_mac_addrs_del(&priv->rxq_parent); + else + for (i = 0; (i != priv->rxqs_n); ++i) + if ((*priv->rxqs)[i] != NULL) + rxq_mac_addrs_del((*priv->rxqs)[i]); + priv->vlan_filter[j].enabled = 0; + if (priv->started) { + if (priv->rss) + rxq_mac_addrs_add(&priv->rxq_parent); + else + for (i = 0; (i != priv->rxqs_n); ++i) { + if ((*priv->rxqs)[i] == NULL) + continue; + rxq_mac_addrs_add((*priv->rxqs)[i]); + } + } + } + return 0; +} + +/** + * DPDK callback to configure a VLAN filter. + * + * @param dev + * Pointer to Ethernet device structure. + * @param vlan_id + * VLAN ID to filter. + * @param on + * Toggle filter. + * + * @return + * 0 on success, negative errno value on failure. + */ +static int +mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct priv *priv = dev->data->dev_private; + int ret; + + if (mlx4_is_secondary()) + return -E_RTE_SECONDARY; + priv_lock(priv); + ret = vlan_filter_set(dev, vlan_id, on); + priv_unlock(priv); + assert(ret >= 0); + return -ret; +} + +static const struct eth_dev_ops mlx4_dev_ops = { + .dev_configure = mlx4_dev_configure, + .dev_start = mlx4_dev_start, + .dev_stop = mlx4_dev_stop, + .dev_close = mlx4_dev_close, + .promiscuous_enable = mlx4_promiscuous_enable, + .promiscuous_disable = mlx4_promiscuous_disable, + .allmulticast_enable = mlx4_allmulticast_enable, + .allmulticast_disable = mlx4_allmulticast_disable, + .link_update = mlx4_link_update, + .stats_get = mlx4_stats_get, + .stats_reset = mlx4_stats_reset, + .queue_stats_mapping_set = NULL, + .dev_infos_get = mlx4_dev_infos_get, + .dev_supported_ptypes_get = mlx4_dev_supported_ptypes_get, + .vlan_filter_set = mlx4_vlan_filter_set, + .vlan_tpid_set = NULL, + .vlan_strip_queue_set = NULL, + .vlan_offload_set = NULL, + .rx_queue_setup = mlx4_rx_queue_setup, + .tx_queue_setup = mlx4_tx_queue_setup, + .rx_queue_release = mlx4_rx_queue_release, + .tx_queue_release = mlx4_tx_queue_release, + .dev_led_on = NULL, + .dev_led_off = NULL, + .flow_ctrl_get = mlx4_dev_get_flow_ctrl, + .flow_ctrl_set = mlx4_dev_set_flow_ctrl, + .priority_flow_ctrl_set = NULL, + .mac_addr_remove = mlx4_mac_addr_remove, + .mac_addr_add = mlx4_mac_addr_add, + .mac_addr_set = mlx4_mac_addr_set, + .mtu_set = mlx4_dev_set_mtu, +}; + +/** + * Get PCI information from struct ibv_device. + * + * @param device + * Pointer to Ethernet device structure. + * @param[out] pci_addr + * PCI bus address output buffer. + * + * @return + * 0 on success, -1 on failure and errno is set. + */ +static int +mlx4_ibv_device_to_pci_addr(const struct ibv_device *device, + struct rte_pci_addr *pci_addr) +{ + FILE *file; + char line[32]; + MKSTR(path, "%s/device/uevent", device->ibdev_path); + + file = fopen(path, "rb"); + if (file == NULL) + return -1; + while (fgets(line, sizeof(line), file) == line) { + size_t len = strlen(line); + int ret; + + /* Truncate long lines. 
*/ + if (len == (sizeof(line) - 1)) + while (line[(len - 1)] != '\n') { + ret = fgetc(file); + if (ret == EOF) + break; + line[(len - 1)] = ret; + } + /* Extract information. */ + if (sscanf(line, + "PCI_SLOT_NAME=" + "%" SCNx16 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n", + &pci_addr->domain, + &pci_addr->bus, + &pci_addr->devid, + &pci_addr->function) == 4) { + ret = 0; + break; + } + } + fclose(file); + return 0; +} + +/** + * Get MAC address by querying netdevice. + * + * @param[in] priv + * struct priv for the requested device. + * @param[out] mac + * MAC address output buffer. + * + * @return + * 0 on success, -1 on failure and errno is set. + */ +static int +priv_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]) +{ + struct ifreq request; + + if (priv_ifreq(priv, SIOCGIFHWADDR, &request)) + return -1; + memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN); + return 0; +} + +/* Support up to 32 adapters. */ +static struct { + struct rte_pci_addr pci_addr; /* associated PCI address */ + uint32_t ports; /* physical ports bitfield. */ +} mlx4_dev[32]; + +/** + * Get device index in mlx4_dev[] from PCI bus address. + * + * @param[in] pci_addr + * PCI bus address to look for. + * + * @return + * mlx4_dev[] index on success, -1 on failure. + */ +static int +mlx4_dev_idx(struct rte_pci_addr *pci_addr) +{ + unsigned int i; + int ret = -1; + + assert(pci_addr != NULL); + for (i = 0; (i != elemof(mlx4_dev)); ++i) { + if ((mlx4_dev[i].pci_addr.domain == pci_addr->domain) && + (mlx4_dev[i].pci_addr.bus == pci_addr->bus) && + (mlx4_dev[i].pci_addr.devid == pci_addr->devid) && + (mlx4_dev[i].pci_addr.function == pci_addr->function)) + return i; + if ((mlx4_dev[i].ports == 0) && (ret == -1)) + ret = i; + } + return ret; +} + +/** + * Retrieve integer value from environment variable. + * + * @param[in] name + * Environment variable name. + * + * @return + * Integer value, 0 if the variable is not set. + */ +static int +mlx4_getenv_int(const char *name) +{ + const char *val = getenv(name); + + if (val == NULL) + return 0; + return atoi(val); +} + +static void +mlx4_dev_link_status_handler(void *); +static void +mlx4_dev_interrupt_handler(struct rte_intr_handle *, void *); + +/** + * Link status handler. + * + * @param priv + * Pointer to private structure. + * @param dev + * Pointer to the rte_eth_dev structure. + * + * @return + * Nonzero if the callback process can be called immediately. + */ +static int +priv_dev_link_status_handler(struct priv *priv, struct rte_eth_dev *dev) +{ + struct ibv_async_event event; + int port_change = 0; + int ret = 0; + + /* Read all message and acknowledge them. */ + for (;;) { + if (ibv_get_async_event(priv->ctx, &event)) + break; + + if (event.event_type == IBV_EVENT_PORT_ACTIVE || + event.event_type == IBV_EVENT_PORT_ERR) + port_change = 1; + else + DEBUG("event type %d on port %d not handled", + event.event_type, event.element.port_num); + ibv_ack_async_event(&event); + } + + if (port_change ^ priv->pending_alarm) { + struct rte_eth_link *link = &dev->data->dev_link; + + priv->pending_alarm = 0; + mlx4_link_update_unlocked(dev, 0); + if (((link->link_speed == 0) && link->link_status) || + ((link->link_speed != 0) && !link->link_status)) { + /* Inconsistent status, check again later. */ + priv->pending_alarm = 1; + rte_eal_alarm_set(MLX4_ALARM_TIMEOUT_US, + mlx4_dev_link_status_handler, + dev); + } else + ret = 1; + } + return ret; +} + +/** + * Handle delayed link status event. + * + * @param arg + * Registered argument. 
+ */ +static void +mlx4_dev_link_status_handler(void *arg) +{ + struct rte_eth_dev *dev = arg; + struct priv *priv = dev->data->dev_private; + int ret; + + priv_lock(priv); + assert(priv->pending_alarm == 1); + ret = priv_dev_link_status_handler(priv, dev); + priv_unlock(priv); + if (ret) + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); +} + +/** + * Handle interrupts from the NIC. + * + * @param[in] intr_handle + * Interrupt handler. + * @param cb_arg + * Callback argument. + */ +static void +mlx4_dev_interrupt_handler(struct rte_intr_handle *intr_handle, void *cb_arg) +{ + struct rte_eth_dev *dev = cb_arg; + struct priv *priv = dev->data->dev_private; + int ret; + + (void)intr_handle; + priv_lock(priv); + ret = priv_dev_link_status_handler(priv, dev); + priv_unlock(priv); + if (ret) + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); +} + +/** + * Uninstall interrupt handler. + * + * @param priv + * Pointer to private structure. + * @param dev + * Pointer to the rte_eth_dev structure. + */ +static void +priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev) +{ + if (!dev->data->dev_conf.intr_conf.lsc) + return; + rte_intr_callback_unregister(&priv->intr_handle, + mlx4_dev_interrupt_handler, + dev); + if (priv->pending_alarm) + rte_eal_alarm_cancel(mlx4_dev_link_status_handler, dev); + priv->pending_alarm = 0; + priv->intr_handle.fd = 0; + priv->intr_handle.type = 0; +} + +/** + * Install interrupt handler. + * + * @param priv + * Pointer to private structure. + * @param dev + * Pointer to the rte_eth_dev structure. + */ +static void +priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev) +{ + int rc, flags; + + if (!dev->data->dev_conf.intr_conf.lsc) + return; + assert(priv->ctx->async_fd > 0); + flags = fcntl(priv->ctx->async_fd, F_GETFL); + rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK); + if (rc < 0) { + INFO("failed to change file descriptor async event queue"); + dev->data->dev_conf.intr_conf.lsc = 0; + } else { + priv->intr_handle.fd = priv->ctx->async_fd; + priv->intr_handle.type = RTE_INTR_HANDLE_EXT; + rte_intr_callback_register(&priv->intr_handle, + mlx4_dev_interrupt_handler, + dev); + } +} + +static struct eth_driver mlx4_driver; + +/** + * DPDK callback to register a PCI device. + * + * This function creates an Ethernet device for each port of a given + * PCI device. + * + * @param[in] pci_drv + * PCI driver structure (mlx4_driver). + * @param[in] pci_dev + * PCI device information. + * + * @return + * 0 on success, negative errno value on failure. + */ +static int +mlx4_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) +{ + struct ibv_device **list; + struct ibv_device *ibv_dev; + int err = 0; + struct ibv_context *attr_ctx = NULL; + struct ibv_device_attr device_attr; + unsigned int vf; + int idx; + int i; + + (void)pci_drv; + assert(pci_drv == &mlx4_driver.pci_drv); + /* Get mlx4_dev[] index. */ + idx = mlx4_dev_idx(&pci_dev->addr); + if (idx == -1) { + ERROR("this driver cannot support any more adapters"); + return -ENOMEM; + } + DEBUG("using driver device index %d", idx); + + /* Save PCI address. */ + mlx4_dev[idx].pci_addr = pci_dev->addr; + list = ibv_get_device_list(&i); + if (list == NULL) { + assert(errno); + if (errno == ENOSYS) { + WARN("cannot list devices, is ib_uverbs loaded?"); + return 0; + } + return -errno; + } + assert(i >= 0); + /* + * For each listed device, check related sysfs entry against + * the provided PCI ID. 
+ */ + while (i != 0) { + struct rte_pci_addr pci_addr; + + --i; + DEBUG("checking device \"%s\"", list[i]->name); + if (mlx4_ibv_device_to_pci_addr(list[i], &pci_addr)) + continue; + if ((pci_dev->addr.domain != pci_addr.domain) || + (pci_dev->addr.bus != pci_addr.bus) || + (pci_dev->addr.devid != pci_addr.devid) || + (pci_dev->addr.function != pci_addr.function)) + continue; + vf = (pci_dev->id.device_id == + PCI_DEVICE_ID_MELLANOX_CONNECTX3VF); + INFO("PCI information matches, using device \"%s\" (VF: %s)", + list[i]->name, (vf ? "true" : "false")); + attr_ctx = ibv_open_device(list[i]); + err = errno; + break; + } + if (attr_ctx == NULL) { + ibv_free_device_list(list); + switch (err) { + case 0: + WARN("cannot access device, is mlx4_ib loaded?"); + return 0; + case EINVAL: + WARN("cannot use device, are drivers up to date?"); + return 0; + } + assert(err > 0); + return -err; + } + ibv_dev = list[i]; + + DEBUG("device opened"); + if (ibv_query_device(attr_ctx, &device_attr)) + goto error; + INFO("%u port(s) detected", device_attr.phys_port_cnt); + + for (i = 0; i < device_attr.phys_port_cnt; i++) { + uint32_t port = i + 1; /* ports are indexed from one */ + uint32_t test = (1 << i); + struct ibv_context *ctx = NULL; + struct ibv_port_attr port_attr; + struct ibv_pd *pd = NULL; + struct priv *priv = NULL; + struct rte_eth_dev *eth_dev = NULL; +#ifdef HAVE_EXP_QUERY_DEVICE + struct ibv_exp_device_attr exp_device_attr; +#endif /* HAVE_EXP_QUERY_DEVICE */ + struct ether_addr mac; + +#ifdef HAVE_EXP_QUERY_DEVICE + exp_device_attr.comp_mask = IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS; +#ifdef RSS_SUPPORT + exp_device_attr.comp_mask |= IBV_EXP_DEVICE_ATTR_RSS_TBL_SZ; +#endif /* RSS_SUPPORT */ +#endif /* HAVE_EXP_QUERY_DEVICE */ + + DEBUG("using port %u (%08" PRIx32 ")", port, test); + + ctx = ibv_open_device(ibv_dev); + if (ctx == NULL) + goto port_error; + + /* Check port status. */ + err = ibv_query_port(ctx, port, &port_attr); + if (err) { + ERROR("port query failed: %s", strerror(err)); + goto port_error; + } + + if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) { + ERROR("port %d is not configured in Ethernet mode", + port); + goto port_error; + } + + if (port_attr.state != IBV_PORT_ACTIVE) + DEBUG("port %d is not active: \"%s\" (%d)", + port, ibv_port_state_str(port_attr.state), + port_attr.state); + + /* Allocate protection domain. 
*/ + pd = ibv_alloc_pd(ctx); + if (pd == NULL) { + ERROR("PD allocation failure"); + err = ENOMEM; + goto port_error; + } + + mlx4_dev[idx].ports |= test; + + /* from rte_ethdev.c */ + priv = rte_zmalloc("ethdev private structure", + sizeof(*priv), + RTE_CACHE_LINE_SIZE); + if (priv == NULL) { + ERROR("priv allocation failure"); + err = ENOMEM; + goto port_error; + } + + priv->ctx = ctx; + priv->device_attr = device_attr; + priv->port = port; + priv->pd = pd; + priv->mtu = ETHER_MTU; +#ifdef HAVE_EXP_QUERY_DEVICE + if (ibv_exp_query_device(ctx, &exp_device_attr)) { + ERROR("ibv_exp_query_device() failed"); + goto port_error; + } +#ifdef RSS_SUPPORT + if ((exp_device_attr.exp_device_cap_flags & + IBV_EXP_DEVICE_QPG) && + (exp_device_attr.exp_device_cap_flags & + IBV_EXP_DEVICE_UD_RSS) && + (exp_device_attr.comp_mask & + IBV_EXP_DEVICE_ATTR_RSS_TBL_SZ) && + (exp_device_attr.max_rss_tbl_sz > 0)) { + priv->hw_qpg = 1; + priv->hw_rss = 1; + priv->max_rss_tbl_sz = exp_device_attr.max_rss_tbl_sz; + } else { + priv->hw_qpg = 0; + priv->hw_rss = 0; + priv->max_rss_tbl_sz = 0; + } + priv->hw_tss = !!(exp_device_attr.exp_device_cap_flags & + IBV_EXP_DEVICE_UD_TSS); + DEBUG("device flags: %s%s%s", + (priv->hw_qpg ? "IBV_DEVICE_QPG " : ""), + (priv->hw_tss ? "IBV_DEVICE_TSS " : ""), + (priv->hw_rss ? "IBV_DEVICE_RSS " : "")); + if (priv->hw_rss) + DEBUG("maximum RSS indirection table size: %u", + exp_device_attr.max_rss_tbl_sz); +#endif /* RSS_SUPPORT */ + + priv->hw_csum = + ((exp_device_attr.exp_device_cap_flags & + IBV_EXP_DEVICE_RX_CSUM_TCP_UDP_PKT) && + (exp_device_attr.exp_device_cap_flags & + IBV_EXP_DEVICE_RX_CSUM_IP_PKT)); + DEBUG("checksum offloading is %ssupported", + (priv->hw_csum ? "" : "not ")); + + priv->hw_csum_l2tun = !!(exp_device_attr.exp_device_cap_flags & + IBV_EXP_DEVICE_VXLAN_SUPPORT); + DEBUG("L2 tunnel checksum offloads are %ssupported", + (priv->hw_csum_l2tun ? "" : "not ")); + +#ifdef INLINE_RECV + priv->inl_recv_size = mlx4_getenv_int("MLX4_INLINE_RECV_SIZE"); + + if (priv->inl_recv_size) { + exp_device_attr.comp_mask = + IBV_EXP_DEVICE_ATTR_INLINE_RECV_SZ; + if (ibv_exp_query_device(ctx, &exp_device_attr)) { + INFO("Couldn't query device for inline-receive" + " capabilities."); + priv->inl_recv_size = 0; + } else { + if ((unsigned)exp_device_attr.inline_recv_sz < + priv->inl_recv_size) { + INFO("Max inline-receive (%d) <" + " requested inline-receive (%u)", + exp_device_attr.inline_recv_sz, + priv->inl_recv_size); + priv->inl_recv_size = + exp_device_attr.inline_recv_sz; + } + } + INFO("Set inline receive size to %u", + priv->inl_recv_size); + } +#endif /* INLINE_RECV */ +#endif /* HAVE_EXP_QUERY_DEVICE */ + + (void)mlx4_getenv_int; + priv->vf = vf; + /* Configure the first MAC address by default. */ + if (priv_get_mac(priv, &mac.addr_bytes)) { + ERROR("cannot get MAC address, is mlx4_en loaded?" + " (errno: %s)", strerror(errno)); + goto port_error; + } + INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", + priv->port, + mac.addr_bytes[0], mac.addr_bytes[1], + mac.addr_bytes[2], mac.addr_bytes[3], + mac.addr_bytes[4], mac.addr_bytes[5]); + /* Register MAC and broadcast addresses. 
*/
+ claim_zero(priv_mac_addr_add(priv, 0,
+ (const uint8_t (*)[ETHER_ADDR_LEN])
+ mac.addr_bytes));
+ claim_zero(priv_mac_addr_add(priv, (elemof(priv->mac) - 1),
+ &(const uint8_t [ETHER_ADDR_LEN])
+ { "\xff\xff\xff\xff\xff\xff" }));
+#ifndef NDEBUG
+ {
+ char ifname[IF_NAMESIZE];
+
+ if (priv_get_ifname(priv, &ifname) == 0)
+ DEBUG("port %u ifname is \"%s\"",
+ priv->port, ifname);
+ else
+ DEBUG("port %u ifname is unknown", priv->port);
+ }
+#endif
+ /* Get actual MTU if possible. */
+ priv_get_mtu(priv, &priv->mtu);
+ DEBUG("port %u MTU is %u", priv->port, priv->mtu);
+
+ /* from rte_ethdev.c */
+ {
+ char name[RTE_ETH_NAME_MAX_LEN];
+
+ snprintf(name, sizeof(name), "%s port %u",
+ ibv_get_device_name(ibv_dev), port);
+ eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_PCI);
+ }
+ if (eth_dev == NULL) {
+ ERROR("can not allocate rte ethdev");
+ err = ENOMEM;
+ goto port_error;
+ }
+
+ /* Secondary processes have to use local storage for their
+ * private data as well as a copy of eth_dev->data, but this
+ * pointer must not be modified before burst functions are
+ * actually called. */
+ if (mlx4_is_secondary()) {
+ struct mlx4_secondary_data *sd =
+ &mlx4_secondary_data[eth_dev->data->port_id];
+
+ sd->primary_priv = eth_dev->data->dev_private;
+ if (sd->primary_priv == NULL) {
+ ERROR("no private data for port %u",
+ eth_dev->data->port_id);
+ err = EINVAL;
+ goto port_error;
+ }
+ sd->shared_dev_data = eth_dev->data;
+ rte_spinlock_init(&sd->lock);
+ memcpy(sd->data.name, sd->shared_dev_data->name,
+ sizeof(sd->data.name));
+ sd->data.dev_private = priv;
+ sd->data.rx_mbuf_alloc_failed = 0;
+ sd->data.mtu = ETHER_MTU;
+ sd->data.port_id = sd->shared_dev_data->port_id;
+ sd->data.mac_addrs = priv->mac;
+ eth_dev->tx_pkt_burst = mlx4_tx_burst_secondary_setup;
+ eth_dev->rx_pkt_burst = mlx4_rx_burst_secondary_setup;
+ } else {
+ eth_dev->data->dev_private = priv;
+ eth_dev->data->rx_mbuf_alloc_failed = 0;
+ eth_dev->data->mtu = ETHER_MTU;
+ eth_dev->data->mac_addrs = priv->mac;
+ }
+ eth_dev->pci_dev = pci_dev;
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ eth_dev->driver = &mlx4_driver;
+
+ priv->dev = eth_dev;
+ eth_dev->dev_ops = &mlx4_dev_ops;
+ TAILQ_INIT(&eth_dev->link_intr_cbs);
+
+ /* Bring Ethernet device up. */
+ DEBUG("forcing Ethernet interface up");
+ priv_set_flags(priv, ~IFF_UP, IFF_UP);
+ continue;
+
+port_error:
+ rte_free(priv);
+ if (pd)
+ claim_zero(ibv_dealloc_pd(pd));
+ if (ctx)
+ claim_zero(ibv_close_device(ctx));
+ if (eth_dev)
+ rte_eth_dev_release_port(eth_dev);
+ break;
+ }
+
+ /*
+ * XXX if something went wrong in the loop above, there is a resource
+ * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
+ * long as the dpdk does not provide a way to deallocate a ethdev and a
+ * way to enumerate the registered ethdevs to free the previous ones.
+ */ + + /* no port found, complain */ + if (!mlx4_dev[idx].ports) { + err = ENODEV; + goto error; + } + +error: + if (attr_ctx) + claim_zero(ibv_close_device(attr_ctx)); + if (list) + ibv_free_device_list(list); + assert(err >= 0); + return -err; +} + +static const struct rte_pci_id mlx4_pci_id_map[] = { + { + .vendor_id = PCI_VENDOR_ID_MELLANOX, + .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX3, + .subsystem_vendor_id = PCI_ANY_ID, + .subsystem_device_id = PCI_ANY_ID + }, + { + .vendor_id = PCI_VENDOR_ID_MELLANOX, + .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO, + .subsystem_vendor_id = PCI_ANY_ID, + .subsystem_device_id = PCI_ANY_ID + }, + { + .vendor_id = PCI_VENDOR_ID_MELLANOX, + .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX3VF, + .subsystem_vendor_id = PCI_ANY_ID, + .subsystem_device_id = PCI_ANY_ID + }, + { + .vendor_id = 0 + } +}; + +static struct eth_driver mlx4_driver = { + .pci_drv = { + .name = MLX4_DRIVER_NAME, + .id_table = mlx4_pci_id_map, + .devinit = mlx4_pci_devinit, + .drv_flags = RTE_PCI_DRV_INTR_LSC, + }, + .dev_private_size = sizeof(struct priv) +}; + +/** + * Driver initialization routine. + */ +static int +rte_mlx4_pmd_init(const char *name, const char *args) +{ + (void)name; + (void)args; + + RTE_BUILD_BUG_ON(sizeof(wr_id_t) != sizeof(uint64_t)); + /* + * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use + * huge pages. Calling ibv_fork_init() during init allows + * applications to use fork() safely for purposes other than + * using this PMD, which is not supported in forked processes. + */ + setenv("RDMAV_HUGEPAGES_SAFE", "1", 1); + ibv_fork_init(); + rte_eal_pci_register(&mlx4_driver.pci_drv); + return 0; +} + +static struct rte_driver rte_mlx4_driver = { + .type = PMD_PDEV, + .name = MLX4_DRIVER_NAME, + .init = rte_mlx4_pmd_init, +}; + +PMD_REGISTER_DRIVER(rte_mlx4_driver) diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h new file mode 100644 index 00000000..d0c7bc29 --- /dev/null +++ b/drivers/net/mlx4/mlx4.h @@ -0,0 +1,163 @@ +/*- + * BSD LICENSE + * + * Copyright 2012-2015 6WIND S.A. + * Copyright 2012 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RTE_PMD_MLX4_H_ +#define RTE_PMD_MLX4_H_ + +#include <stddef.h> +#include <stdint.h> +#include <limits.h> + +/* + * Maximum number of simultaneous MAC addresses supported. + * + * According to ConnectX's Programmer Reference Manual: + * The L2 Address Match is implemented by comparing a MAC/VLAN combination + * of 128 MAC addresses and 127 VLAN values, comprising 128x127 possible + * L2 addresses. + */ +#define MLX4_MAX_MAC_ADDRESSES 128 + +/* Maximum number of simultaneous VLAN filters supported. See above. */ +#define MLX4_MAX_VLAN_IDS 127 + +/* Request send completion once in every 64 sends, might be less. */ +#define MLX4_PMD_TX_PER_COMP_REQ 64 + +/* Maximum number of Scatter/Gather Elements per Work Request. */ +#ifndef MLX4_PMD_SGE_WR_N +#define MLX4_PMD_SGE_WR_N 4 +#endif + +/* Maximum size for inline data. */ +#ifndef MLX4_PMD_MAX_INLINE +#define MLX4_PMD_MAX_INLINE 0 +#endif + +/* + * Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP + * from which buffers are to be transmitted will have to be mapped by this + * driver to their own Memory Region (MR). This is a slow operation. + * + * This value is always 1 for RX queues. + */ +#ifndef MLX4_PMD_TX_MP_CACHE +#define MLX4_PMD_TX_MP_CACHE 8 +#endif + +/* + * If defined, only use software counters. The PMD will never ask the hardware + * for these, and many of them won't be available. + */ +#ifndef MLX4_PMD_SOFT_COUNTERS +#define MLX4_PMD_SOFT_COUNTERS 1 +#endif + +/* Alarm timeout. */ +#define MLX4_ALARM_TIMEOUT_US 100000 + +enum { + PCI_VENDOR_ID_MELLANOX = 0x15b3, +}; + +enum { + PCI_DEVICE_ID_MELLANOX_CONNECTX3 = 0x1003, + PCI_DEVICE_ID_MELLANOX_CONNECTX3VF = 0x1004, + PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO = 0x1007, +}; + +#define MLX4_DRIVER_NAME "librte_pmd_mlx4" + +/* Bit-field manipulation. */ +#define BITFIELD_DECLARE(bf, type, size) \ + type bf[(((size_t)(size) / (sizeof(type) * CHAR_BIT)) + \ + !!((size_t)(size) % (sizeof(type) * CHAR_BIT)))] +#define BITFIELD_DEFINE(bf, type, size) \ + BITFIELD_DECLARE((bf), type, (size)) = { 0 } +#define BITFIELD_SET(bf, b) \ + (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \ + (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] |= \ + ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))) +#define BITFIELD_RESET(bf, b) \ + (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \ + (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] &= \ + ~((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))) +#define BITFIELD_ISSET(bf, b) \ + (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \ + !!(((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] & \ + ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))) + +/* Number of elements in array. */ +#define elemof(a) (sizeof(a) / sizeof((a)[0])) + +/* Cast pointer p to structure member m to its parent structure of type t. */ +#define containerof(p, t, m) ((t *)((uint8_t *)(p) - offsetof(t, m))) + +/* Branch prediction helpers. 
*/ +#ifndef likely +#define likely(c) __builtin_expect(!!(c), 1) +#endif +#ifndef unlikely +#define unlikely(c) __builtin_expect(!!(c), 0) +#endif + +/* Debugging */ +#ifndef NDEBUG +#include <stdio.h> +#define DEBUG__(m, ...) \ + (fprintf(stderr, "%s:%d: %s(): " m "%c", \ + __FILE__, __LINE__, __func__, __VA_ARGS__), \ + fflush(stderr), \ + (void)0) +/* + * Save/restore errno around DEBUG__(). + * XXX somewhat undefined behavior, but works. + */ +#define DEBUG_(...) \ + (errno = ((int []){ \ + *(volatile int *)&errno, \ + (DEBUG__(__VA_ARGS__), 0) \ + })[0]) +#define DEBUG(...) DEBUG_(__VA_ARGS__, '\n') +#define claim_zero(...) assert((__VA_ARGS__) == 0) +#define claim_nonzero(...) assert((__VA_ARGS__) != 0) +#define claim_positive(...) assert((__VA_ARGS__) >= 0) +#else /* NDEBUG */ +/* No-ops. */ +#define DEBUG(...) (void)0 +#define claim_zero(...) (__VA_ARGS__) +#define claim_nonzero(...) (__VA_ARGS__) +#define claim_positive(...) (__VA_ARGS__) +#endif /* NDEBUG */ + +#endif /* RTE_PMD_MLX4_H_ */ diff --git a/drivers/net/mlx4/rte_pmd_mlx4_version.map b/drivers/net/mlx4/rte_pmd_mlx4_version.map new file mode 100644 index 00000000..ef353984 --- /dev/null +++ b/drivers/net/mlx4/rte_pmd_mlx4_version.map @@ -0,0 +1,4 @@ +DPDK_2.0 { + + local: *; +}; diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile new file mode 100644 index 00000000..92bfa070 --- /dev/null +++ b/drivers/net/mlx5/Makefile @@ -0,0 +1,155 @@ +# BSD LICENSE +# +# Copyright 2015 6WIND S.A. +# Copyright 2015 Mellanox. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of 6WIND S.A. nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +include $(RTE_SDK)/mk/rte.vars.mk + +# Library name. +LIB = librte_pmd_mlx5.a + +# Sources. 
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxq.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_txq.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_trigger.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mac.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxmode.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_vlan.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_stats.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_fdir.c + +# Dependencies. +DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_eal +DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_mempool + +# Basic CFLAGS. +CFLAGS += -O3 +CFLAGS += -std=gnu99 -Wall -Wextra +CFLAGS += -g +CFLAGS += -I. +CFLAGS += -D_XOPEN_SOURCE=600 +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -Wno-strict-prototypes +LDLIBS += -libverbs + +# A few warnings cannot be avoided in external headers. +CFLAGS += -Wno-error=cast-qual + +EXPORT_MAP := rte_pmd_mlx5_version.map +LIBABIVER := 1 + +# DEBUG which is usually provided on the command-line may enable +# CONFIG_RTE_LIBRTE_MLX5_DEBUG. +ifeq ($(DEBUG),1) +CONFIG_RTE_LIBRTE_MLX5_DEBUG := y +endif + +# User-defined CFLAGS. +ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DEBUG),y) +CFLAGS += -pedantic -UNDEBUG -DPEDANTIC +else +CFLAGS += -DNDEBUG -UPEDANTIC +endif + +ifdef CONFIG_RTE_LIBRTE_MLX5_SGE_WR_N +CFLAGS += -DMLX5_PMD_SGE_WR_N=$(CONFIG_RTE_LIBRTE_MLX5_SGE_WR_N) +endif + +ifdef CONFIG_RTE_LIBRTE_MLX5_MAX_INLINE +CFLAGS += -DMLX5_PMD_MAX_INLINE=$(CONFIG_RTE_LIBRTE_MLX5_MAX_INLINE) +endif + +ifdef CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE +CFLAGS += -DMLX5_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE) +endif + +include $(RTE_SDK)/mk/rte.lib.mk + +# Generate and clean-up mlx5_autoconf.h. 
+ +export CC CFLAGS CPPFLAGS EXTRA_CFLAGS EXTRA_CPPFLAGS +export AUTO_CONFIG_CFLAGS = -Wno-error + +ifndef V +AUTOCONF_OUTPUT := >/dev/null +endif + +mlx5_autoconf.h: $(RTE_SDK)/scripts/auto-config-h.sh + $Q $(RM) -f -- '$@' + $Q sh -- '$<' '$@' \ + HAVE_EXP_QUERY_DEVICE \ + infiniband/verbs.h \ + type 'struct ibv_exp_device_attr' $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_FLOW_SPEC_IPV6 \ + infiniband/verbs.h \ + type 'struct ibv_exp_flow_spec_ipv6' $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR \ + infiniband/verbs.h \ + enum IBV_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS \ + infiniband/verbs.h \ + enum IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_EXP_CQ_RX_TCP_PACKET \ + infiniband/verbs.h \ + enum IBV_EXP_CQ_RX_TCP_PACKET \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_VERBS_FCS \ + infiniband/verbs.h \ + enum IBV_EXP_CREATE_WQ_FLAG_SCATTER_FCS \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_VERBS_RX_END_PADDING \ + infiniband/verbs.h \ + enum IBV_EXP_CREATE_WQ_FLAG_RX_END_PADDING \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_VERBS_VLAN_INSERTION \ + infiniband/verbs.h \ + enum IBV_EXP_RECEIVE_WQ_CVLAN_INSERTION \ + $(AUTOCONF_OUTPUT) + +$(SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD):.c=.o): mlx5_autoconf.h + +clean_mlx5: FORCE + $Q rm -f -- mlx5_autoconf.h + +clean: clean_mlx5 diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c new file mode 100644 index 00000000..041cfc33 --- /dev/null +++ b/drivers/net/mlx5/mlx5.c @@ -0,0 +1,675 @@ +/*- + * BSD LICENSE + * + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <stddef.h> +#include <unistd.h> +#include <string.h> +#include <assert.h> +#include <stdint.h> +#include <stdlib.h> +#include <net/if.h> + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. 
*/ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <infiniband/verbs.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +/* DPDK headers don't like -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <rte_malloc.h> +#include <rte_ethdev.h> +#include <rte_pci.h> +#include <rte_common.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +#include "mlx5.h" +#include "mlx5_utils.h" +#include "mlx5_rxtx.h" +#include "mlx5_autoconf.h" +#include "mlx5_defs.h" + +/** + * Retrieve integer value from environment variable. + * + * @param[in] name + * Environment variable name. + * + * @return + * Integer value, 0 if the variable is not set. + */ +int +mlx5_getenv_int(const char *name) +{ + const char *val = getenv(name); + + if (val == NULL) + return 0; + return atoi(val); +} + +/** + * DPDK callback to close the device. + * + * Destroy all queues and objects, free memory. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mlx5_dev_close(struct rte_eth_dev *dev) +{ + struct priv *priv = mlx5_get_priv(dev); + void *tmp; + unsigned int i; + + priv_lock(priv); + DEBUG("%p: closing device \"%s\"", + (void *)dev, + ((priv->ctx != NULL) ? priv->ctx->device->name : "")); + /* In case mlx5_dev_stop() has not been called. */ + priv_dev_interrupt_handler_uninstall(priv, dev); + priv_special_flow_disable_all(priv); + priv_mac_addrs_disable(priv); + priv_destroy_hash_rxqs(priv); + + /* Remove flow director elements. */ + priv_fdir_disable(priv); + priv_fdir_delete_filters_list(priv); + + /* Prevent crashes when queues are still in use. */ + dev->rx_pkt_burst = removed_rx_burst; + dev->tx_pkt_burst = removed_tx_burst; + if (priv->rxqs != NULL) { + /* XXX race condition if mlx5_rx_burst() is still running. */ + usleep(1000); + for (i = 0; (i != priv->rxqs_n); ++i) { + tmp = (*priv->rxqs)[i]; + if (tmp == NULL) + continue; + (*priv->rxqs)[i] = NULL; + rxq_cleanup(tmp); + rte_free(tmp); + } + priv->rxqs_n = 0; + priv->rxqs = NULL; + } + if (priv->txqs != NULL) { + /* XXX race condition if mlx5_tx_burst() is still running. 
*/ + usleep(1000); + for (i = 0; (i != priv->txqs_n); ++i) { + tmp = (*priv->txqs)[i]; + if (tmp == NULL) + continue; + (*priv->txqs)[i] = NULL; + txq_cleanup(tmp); + rte_free(tmp); + } + priv->txqs_n = 0; + priv->txqs = NULL; + } + if (priv->pd != NULL) { + assert(priv->ctx != NULL); + claim_zero(ibv_dealloc_pd(priv->pd)); + claim_zero(ibv_close_device(priv->ctx)); + } else + assert(priv->ctx == NULL); + if (priv->rss_conf != NULL) { + for (i = 0; (i != hash_rxq_init_n); ++i) + rte_free((*priv->rss_conf)[i]); + rte_free(priv->rss_conf); + } + if (priv->reta_idx != NULL) + rte_free(priv->reta_idx); + priv_unlock(priv); + memset(priv, 0, sizeof(*priv)); +} + +static const struct eth_dev_ops mlx5_dev_ops = { + .dev_configure = mlx5_dev_configure, + .dev_start = mlx5_dev_start, + .dev_stop = mlx5_dev_stop, + .dev_set_link_down = mlx5_set_link_down, + .dev_set_link_up = mlx5_set_link_up, + .dev_close = mlx5_dev_close, + .promiscuous_enable = mlx5_promiscuous_enable, + .promiscuous_disable = mlx5_promiscuous_disable, + .allmulticast_enable = mlx5_allmulticast_enable, + .allmulticast_disable = mlx5_allmulticast_disable, + .link_update = mlx5_link_update, + .stats_get = mlx5_stats_get, + .stats_reset = mlx5_stats_reset, + .dev_infos_get = mlx5_dev_infos_get, + .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get, + .vlan_filter_set = mlx5_vlan_filter_set, + .rx_queue_setup = mlx5_rx_queue_setup, + .tx_queue_setup = mlx5_tx_queue_setup, + .rx_queue_release = mlx5_rx_queue_release, + .tx_queue_release = mlx5_tx_queue_release, + .flow_ctrl_get = mlx5_dev_get_flow_ctrl, + .flow_ctrl_set = mlx5_dev_set_flow_ctrl, + .mac_addr_remove = mlx5_mac_addr_remove, + .mac_addr_add = mlx5_mac_addr_add, + .mac_addr_set = mlx5_mac_addr_set, + .mtu_set = mlx5_dev_set_mtu, +#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS + .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, + .vlan_offload_set = mlx5_vlan_offload_set, +#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ + .reta_update = mlx5_dev_rss_reta_update, + .reta_query = mlx5_dev_rss_reta_query, + .rss_hash_update = mlx5_rss_hash_update, + .rss_hash_conf_get = mlx5_rss_hash_conf_get, +#ifdef MLX5_FDIR_SUPPORT + .filter_ctrl = mlx5_dev_filter_ctrl, +#endif /* MLX5_FDIR_SUPPORT */ +}; + +static struct { + struct rte_pci_addr pci_addr; /* associated PCI address */ + uint32_t ports; /* physical ports bitfield. */ +} mlx5_dev[32]; + +/** + * Get device index in mlx5_dev[] from PCI bus address. + * + * @param[in] pci_addr + * PCI bus address to look for. + * + * @return + * mlx5_dev[] index on success, -1 on failure. + */ +static int +mlx5_dev_idx(struct rte_pci_addr *pci_addr) +{ + unsigned int i; + int ret = -1; + + assert(pci_addr != NULL); + for (i = 0; (i != RTE_DIM(mlx5_dev)); ++i) { + if ((mlx5_dev[i].pci_addr.domain == pci_addr->domain) && + (mlx5_dev[i].pci_addr.bus == pci_addr->bus) && + (mlx5_dev[i].pci_addr.devid == pci_addr->devid) && + (mlx5_dev[i].pci_addr.function == pci_addr->function)) + return i; + if ((mlx5_dev[i].ports == 0) && (ret == -1)) + ret = i; + } + return ret; +} + +static struct eth_driver mlx5_driver; + +/** + * DPDK callback to register a PCI device. + * + * This function creates an Ethernet device for each port of a given + * PCI device. + * + * @param[in] pci_drv + * PCI driver structure (mlx5_driver). + * @param[in] pci_dev + * PCI device information. + * + * @return + * 0 on success, negative errno value on failure. 
+ */ +static int +mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) +{ + struct ibv_device **list; + struct ibv_device *ibv_dev; + int err = 0; + struct ibv_context *attr_ctx = NULL; + struct ibv_device_attr device_attr; + unsigned int vf; + unsigned int mps; + int idx; + int i; + + (void)pci_drv; + assert(pci_drv == &mlx5_driver.pci_drv); + /* Get mlx5_dev[] index. */ + idx = mlx5_dev_idx(&pci_dev->addr); + if (idx == -1) { + ERROR("this driver cannot support any more adapters"); + return -ENOMEM; + } + DEBUG("using driver device index %d", idx); + + /* Save PCI address. */ + mlx5_dev[idx].pci_addr = pci_dev->addr; + list = ibv_get_device_list(&i); + if (list == NULL) { + assert(errno); + if (errno == ENOSYS) { + WARN("cannot list devices, is ib_uverbs loaded?"); + return 0; + } + return -errno; + } + assert(i >= 0); + /* + * For each listed device, check related sysfs entry against + * the provided PCI ID. + */ + while (i != 0) { + struct rte_pci_addr pci_addr; + + --i; + DEBUG("checking device \"%s\"", list[i]->name); + if (mlx5_ibv_device_to_pci_addr(list[i], &pci_addr)) + continue; + if ((pci_dev->addr.domain != pci_addr.domain) || + (pci_dev->addr.bus != pci_addr.bus) || + (pci_dev->addr.devid != pci_addr.devid) || + (pci_dev->addr.function != pci_addr.function)) + continue; + vf = ((pci_dev->id.device_id == + PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) || + (pci_dev->id.device_id == + PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)); + /* Multi-packet send is only supported by ConnectX-4 Lx PF. */ + mps = (pci_dev->id.device_id == + PCI_DEVICE_ID_MELLANOX_CONNECTX4LX); + INFO("PCI information matches, using device \"%s\" (VF: %s," + " MPS: %s)", + list[i]->name, + vf ? "true" : "false", + mps ? "true" : "false"); + attr_ctx = ibv_open_device(list[i]); + err = errno; + break; + } + if (attr_ctx == NULL) { + ibv_free_device_list(list); + switch (err) { + case 0: + WARN("cannot access device, is mlx5_ib loaded?"); + return 0; + case EINVAL: + WARN("cannot use device, are drivers up to date?"); + return 0; + } + assert(err > 0); + return -err; + } + ibv_dev = list[i]; + + DEBUG("device opened"); + if (ibv_query_device(attr_ctx, &device_attr)) + goto error; + INFO("%u port(s) detected", device_attr.phys_port_cnt); + + for (i = 0; i < device_attr.phys_port_cnt; i++) { + uint32_t port = i + 1; /* ports are indexed from one */ + uint32_t test = (1 << i); + struct ibv_context *ctx = NULL; + struct ibv_port_attr port_attr; + struct ibv_pd *pd = NULL; + struct priv *priv = NULL; + struct rte_eth_dev *eth_dev; +#ifdef HAVE_EXP_QUERY_DEVICE + struct ibv_exp_device_attr exp_device_attr; +#endif /* HAVE_EXP_QUERY_DEVICE */ + struct ether_addr mac; + +#ifdef HAVE_EXP_QUERY_DEVICE + exp_device_attr.comp_mask = + IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS | + IBV_EXP_DEVICE_ATTR_RX_HASH | +#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS + IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS | +#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ +#ifdef HAVE_VERBS_RX_END_PADDING + IBV_EXP_DEVICE_ATTR_RX_PAD_END_ALIGN | +#endif /* HAVE_VERBS_RX_END_PADDING */ + 0; +#endif /* HAVE_EXP_QUERY_DEVICE */ + + DEBUG("using port %u (%08" PRIx32 ")", port, test); + + ctx = ibv_open_device(ibv_dev); + if (ctx == NULL) + goto port_error; + + /* Check port status. 
*/ + err = ibv_query_port(ctx, port, &port_attr); + if (err) { + ERROR("port query failed: %s", strerror(err)); + goto port_error; + } + + if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) { + ERROR("port %d is not configured in Ethernet mode", + port); + goto port_error; + } + + if (port_attr.state != IBV_PORT_ACTIVE) + DEBUG("port %d is not active: \"%s\" (%d)", + port, ibv_port_state_str(port_attr.state), + port_attr.state); + + /* Allocate protection domain. */ + pd = ibv_alloc_pd(ctx); + if (pd == NULL) { + ERROR("PD allocation failure"); + err = ENOMEM; + goto port_error; + } + + mlx5_dev[idx].ports |= test; + + /* from rte_ethdev.c */ + priv = rte_zmalloc("ethdev private structure", + sizeof(*priv), + RTE_CACHE_LINE_SIZE); + if (priv == NULL) { + ERROR("priv allocation failure"); + err = ENOMEM; + goto port_error; + } + + priv->ctx = ctx; + priv->device_attr = device_attr; + priv->port = port; + priv->pd = pd; + priv->mtu = ETHER_MTU; +#ifdef HAVE_EXP_QUERY_DEVICE + if (ibv_exp_query_device(ctx, &exp_device_attr)) { + ERROR("ibv_exp_query_device() failed"); + goto port_error; + } + + priv->hw_csum = + ((exp_device_attr.exp_device_cap_flags & + IBV_EXP_DEVICE_RX_CSUM_TCP_UDP_PKT) && + (exp_device_attr.exp_device_cap_flags & + IBV_EXP_DEVICE_RX_CSUM_IP_PKT)); + DEBUG("checksum offloading is %ssupported", + (priv->hw_csum ? "" : "not ")); + + priv->hw_csum_l2tun = !!(exp_device_attr.exp_device_cap_flags & + IBV_EXP_DEVICE_VXLAN_SUPPORT); + DEBUG("L2 tunnel checksum offloads are %ssupported", + (priv->hw_csum_l2tun ? "" : "not ")); + + priv->ind_table_max_size = exp_device_attr.rx_hash_caps.max_rwq_indirection_table_size; + /* Remove this check once DPDK supports larger/variable + * indirection tables. */ + if (priv->ind_table_max_size > (unsigned int)RSS_INDIRECTION_TABLE_SIZE) + priv->ind_table_max_size = RSS_INDIRECTION_TABLE_SIZE; + DEBUG("maximum RX indirection table size is %u", + priv->ind_table_max_size); +#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS + priv->hw_vlan_strip = !!(exp_device_attr.wq_vlan_offloads_cap & + IBV_EXP_RECEIVE_WQ_CVLAN_STRIP); +#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ + DEBUG("VLAN stripping is %ssupported", + (priv->hw_vlan_strip ? "" : "not ")); + +#ifdef HAVE_VERBS_FCS + priv->hw_fcs_strip = !!(exp_device_attr.exp_device_cap_flags & + IBV_EXP_DEVICE_SCATTER_FCS); +#endif /* HAVE_VERBS_FCS */ + DEBUG("FCS stripping configuration is %ssupported", + (priv->hw_fcs_strip ? "" : "not ")); + +#ifdef HAVE_VERBS_RX_END_PADDING + priv->hw_padding = !!exp_device_attr.rx_pad_end_addr_align; +#endif /* HAVE_VERBS_RX_END_PADDING */ + DEBUG("hardware RX end alignment padding is %ssupported", + (priv->hw_padding ? "" : "not ")); + +#else /* HAVE_EXP_QUERY_DEVICE */ + priv->ind_table_max_size = RSS_INDIRECTION_TABLE_SIZE; +#endif /* HAVE_EXP_QUERY_DEVICE */ + + priv->vf = vf; + priv->mps = mps; + /* Allocate and register default RSS hash keys. */ + priv->rss_conf = rte_calloc(__func__, hash_rxq_init_n, + sizeof((*priv->rss_conf)[0]), 0); + if (priv->rss_conf == NULL) { + err = ENOMEM; + goto port_error; + } + err = rss_hash_rss_conf_new_key(priv, + rss_hash_default_key, + rss_hash_default_key_len, + ETH_RSS_PROTO_MASK); + if (err) + goto port_error; + /* Configure the first MAC address by default. */ + if (priv_get_mac(priv, &mac.addr_bytes)) { + ERROR("cannot get MAC address, is mlx5_en loaded?" 
+ " (errno: %s)", strerror(errno)); + goto port_error; + } + INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", + priv->port, + mac.addr_bytes[0], mac.addr_bytes[1], + mac.addr_bytes[2], mac.addr_bytes[3], + mac.addr_bytes[4], mac.addr_bytes[5]); + /* Register MAC address. */ + claim_zero(priv_mac_addr_add(priv, 0, + (const uint8_t (*)[ETHER_ADDR_LEN]) + mac.addr_bytes)); + /* Initialize FD filters list. */ + err = fdir_init_filters_list(priv); + if (err) + goto port_error; +#ifndef NDEBUG + { + char ifname[IF_NAMESIZE]; + + if (priv_get_ifname(priv, &ifname) == 0) + DEBUG("port %u ifname is \"%s\"", + priv->port, ifname); + else + DEBUG("port %u ifname is unknown", priv->port); + } +#endif + /* Get actual MTU if possible. */ + priv_get_mtu(priv, &priv->mtu); + DEBUG("port %u MTU is %u", priv->port, priv->mtu); + + /* from rte_ethdev.c */ + { + char name[RTE_ETH_NAME_MAX_LEN]; + + snprintf(name, sizeof(name), "%s port %u", + ibv_get_device_name(ibv_dev), port); + eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_PCI); + } + if (eth_dev == NULL) { + ERROR("can not allocate rte ethdev"); + err = ENOMEM; + goto port_error; + } + + /* Secondary processes have to use local storage for their + * private data as well as a copy of eth_dev->data, but this + * pointer must not be modified before burst functions are + * actually called. */ + if (mlx5_is_secondary()) { + struct mlx5_secondary_data *sd = + &mlx5_secondary_data[eth_dev->data->port_id]; + sd->primary_priv = eth_dev->data->dev_private; + if (sd->primary_priv == NULL) { + ERROR("no private data for port %u", + eth_dev->data->port_id); + err = EINVAL; + goto port_error; + } + sd->shared_dev_data = eth_dev->data; + rte_spinlock_init(&sd->lock); + memcpy(sd->data.name, sd->shared_dev_data->name, + sizeof(sd->data.name)); + sd->data.dev_private = priv; + sd->data.rx_mbuf_alloc_failed = 0; + sd->data.mtu = ETHER_MTU; + sd->data.port_id = sd->shared_dev_data->port_id; + sd->data.mac_addrs = priv->mac; + eth_dev->tx_pkt_burst = mlx5_tx_burst_secondary_setup; + eth_dev->rx_pkt_burst = mlx5_rx_burst_secondary_setup; + } else { + eth_dev->data->dev_private = priv; + eth_dev->data->rx_mbuf_alloc_failed = 0; + eth_dev->data->mtu = ETHER_MTU; + eth_dev->data->mac_addrs = priv->mac; + } + + eth_dev->pci_dev = pci_dev; + rte_eth_copy_pci_info(eth_dev, pci_dev); + eth_dev->driver = &mlx5_driver; + priv->dev = eth_dev; + eth_dev->dev_ops = &mlx5_dev_ops; + + TAILQ_INIT(ð_dev->link_intr_cbs); + + /* Bring Ethernet device up. */ + DEBUG("forcing Ethernet interface up"); + priv_set_flags(priv, ~IFF_UP, IFF_UP); + continue; + +port_error: + if (priv) { + rte_free(priv->rss_conf); + rte_free(priv); + } + if (pd) + claim_zero(ibv_dealloc_pd(pd)); + if (ctx) + claim_zero(ibv_close_device(ctx)); + break; + } + + /* + * XXX if something went wrong in the loop above, there is a resource + * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as + * long as the dpdk does not provide a way to deallocate a ethdev and a + * way to enumerate the registered ethdevs to free the previous ones. 
+ */ + + /* no port found, complain */ + if (!mlx5_dev[idx].ports) { + err = ENODEV; + goto error; + } + +error: + if (attr_ctx) + claim_zero(ibv_close_device(attr_ctx)); + if (list) + ibv_free_device_list(list); + assert(err >= 0); + return -err; +} + +static const struct rte_pci_id mlx5_pci_id_map[] = { + { + .vendor_id = PCI_VENDOR_ID_MELLANOX, + .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4, + .subsystem_vendor_id = PCI_ANY_ID, + .subsystem_device_id = PCI_ANY_ID + }, + { + .vendor_id = PCI_VENDOR_ID_MELLANOX, + .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4VF, + .subsystem_vendor_id = PCI_ANY_ID, + .subsystem_device_id = PCI_ANY_ID + }, + { + .vendor_id = PCI_VENDOR_ID_MELLANOX, + .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4LX, + .subsystem_vendor_id = PCI_ANY_ID, + .subsystem_device_id = PCI_ANY_ID + }, + { + .vendor_id = PCI_VENDOR_ID_MELLANOX, + .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF, + .subsystem_vendor_id = PCI_ANY_ID, + .subsystem_device_id = PCI_ANY_ID + }, + { + .vendor_id = 0 + } +}; + +static struct eth_driver mlx5_driver = { + .pci_drv = { + .name = MLX5_DRIVER_NAME, + .id_table = mlx5_pci_id_map, + .devinit = mlx5_pci_devinit, + .drv_flags = RTE_PCI_DRV_INTR_LSC, + }, + .dev_private_size = sizeof(struct priv) +}; + +/** + * Driver initialization routine. + */ +static int +rte_mlx5_pmd_init(const char *name, const char *args) +{ + (void)name; + (void)args; + /* + * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use + * huge pages. Calling ibv_fork_init() during init allows + * applications to use fork() safely for purposes other than + * using this PMD, which is not supported in forked processes. + */ + setenv("RDMAV_HUGEPAGES_SAFE", "1", 1); + ibv_fork_init(); + rte_eal_pci_register(&mlx5_driver.pci_drv); + return 0; +} + +static struct rte_driver rte_mlx5_driver = { + .type = PMD_PDEV, + .name = MLX5_DRIVER_NAME, + .init = rte_mlx5_pmd_init, +}; + +PMD_REGISTER_DRIVER(rte_mlx5_driver) diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h new file mode 100644 index 00000000..24876625 --- /dev/null +++ b/drivers/net/mlx5/mlx5.h @@ -0,0 +1,257 @@ +/*- + * BSD LICENSE + * + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RTE_PMD_MLX5_H_ +#define RTE_PMD_MLX5_H_ + +#include <stddef.h> +#include <stdint.h> +#include <limits.h> +#include <net/if.h> +#include <netinet/in.h> +#include <linux/if.h> + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <infiniband/verbs.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +/* DPDK headers don't like -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_spinlock.h> +#include <rte_interrupts.h> +#include <rte_errno.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +#include "mlx5_utils.h" +#include "mlx5_rxtx.h" +#include "mlx5_autoconf.h" +#include "mlx5_defs.h" + +enum { + PCI_VENDOR_ID_MELLANOX = 0x15b3, +}; + +enum { + PCI_DEVICE_ID_MELLANOX_CONNECTX4 = 0x1013, + PCI_DEVICE_ID_MELLANOX_CONNECTX4VF = 0x1014, + PCI_DEVICE_ID_MELLANOX_CONNECTX4LX = 0x1015, + PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF = 0x1016, +}; + +struct priv { + struct rte_eth_dev *dev; /* Ethernet device. */ + struct ibv_context *ctx; /* Verbs context. */ + struct ibv_device_attr device_attr; /* Device properties. */ + struct ibv_pd *pd; /* Protection Domain. */ + /* + * MAC addresses array and configuration bit-field. + * An extra entry that cannot be modified by the DPDK is reserved + * for broadcast frames (destination MAC address ff:ff:ff:ff:ff:ff). + */ + struct ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; + BITFIELD_DECLARE(mac_configured, uint32_t, MLX5_MAX_MAC_ADDRESSES); + uint16_t vlan_filter[MLX5_MAX_VLAN_IDS]; /* VLAN filters table. */ + unsigned int vlan_filter_n; /* Number of configured VLAN filters. */ + /* Device properties. */ + uint16_t mtu; /* Configured MTU. */ + uint8_t port; /* Physical port number. */ + unsigned int started:1; /* Device started, flows enabled. */ + unsigned int promisc_req:1; /* Promiscuous mode requested. */ + unsigned int allmulti_req:1; /* All multicast mode requested. */ + unsigned int hw_csum:1; /* Checksum offload is supported. */ + unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */ + unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */ + unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */ + unsigned int hw_padding:1; /* End alignment padding is supported. */ + unsigned int vf:1; /* This is a VF device. */ + unsigned int mps:1; /* Whether multi-packet send is supported. */ + unsigned int pending_alarm:1; /* An alarm is pending. */ + /* RX/TX queues. */ + unsigned int rxqs_n; /* RX queues array size. */ + unsigned int txqs_n; /* TX queues array size. */ + struct rxq *(*rxqs)[]; /* RX queues. */ + struct txq *(*txqs)[]; /* TX queues. */ + /* Indirection tables referencing all RX WQs. */ + struct ibv_exp_rwq_ind_table *(*ind_tables)[]; + unsigned int ind_tables_n; /* Number of indirection tables. 
*/ + unsigned int ind_table_max_size; /* Maximum indirection table size. */ + /* Hash RX QPs feeding the indirection table. */ + struct hash_rxq (*hash_rxqs)[]; + unsigned int hash_rxqs_n; /* Hash RX QPs array size. */ + /* RSS configuration array indexed by hash RX queue type. */ + struct rte_eth_rss_conf *(*rss_conf)[]; + uint64_t rss_hf; /* RSS DPDK bit field of active RSS. */ + struct rte_intr_handle intr_handle; /* Interrupt handler. */ + unsigned int (*reta_idx)[]; /* RETA index table. */ + unsigned int reta_idx_n; /* RETA index size. */ + struct fdir_filter_list *fdir_filter_list; /* Flow director rules. */ + rte_spinlock_t lock; /* Lock for control functions. */ +}; + +/* Local storage for secondary process data. */ +struct mlx5_secondary_data { + struct rte_eth_dev_data data; /* Local device data. */ + struct priv *primary_priv; /* Private structure from primary. */ + struct rte_eth_dev_data *shared_dev_data; /* Shared device data. */ + rte_spinlock_t lock; /* Port configuration lock. */ +} mlx5_secondary_data[RTE_MAX_ETHPORTS]; + +/** + * Lock private structure to protect it from concurrent access in the + * control path. + * + * @param priv + * Pointer to private structure. + */ +static inline void +priv_lock(struct priv *priv) +{ + rte_spinlock_lock(&priv->lock); +} + +/** + * Unlock private structure. + * + * @param priv + * Pointer to private structure. + */ +static inline void +priv_unlock(struct priv *priv) +{ + rte_spinlock_unlock(&priv->lock); +} + +/* mlx5.c */ + +int mlx5_getenv_int(const char *); + +/* mlx5_ethdev.c */ + +struct priv *mlx5_get_priv(struct rte_eth_dev *dev); +int mlx5_is_secondary(void); +int priv_get_ifname(const struct priv *, char (*)[IF_NAMESIZE]); +int priv_ifreq(const struct priv *, int req, struct ifreq *); +int priv_get_mtu(struct priv *, uint16_t *); +int priv_set_flags(struct priv *, unsigned int, unsigned int); +int mlx5_dev_configure(struct rte_eth_dev *); +void mlx5_dev_infos_get(struct rte_eth_dev *, struct rte_eth_dev_info *); +const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev); +int mlx5_link_update(struct rte_eth_dev *, int); +int mlx5_dev_set_mtu(struct rte_eth_dev *, uint16_t); +int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *); +int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *); +int mlx5_ibv_device_to_pci_addr(const struct ibv_device *, + struct rte_pci_addr *); +void mlx5_dev_link_status_handler(void *); +void mlx5_dev_interrupt_handler(struct rte_intr_handle *, void *); +void priv_dev_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *); +void priv_dev_interrupt_handler_install(struct priv *, struct rte_eth_dev *); +int mlx5_set_link_down(struct rte_eth_dev *dev); +int mlx5_set_link_up(struct rte_eth_dev *dev); +struct priv *mlx5_secondary_data_setup(struct priv *priv); + +/* mlx5_mac.c */ + +int priv_get_mac(struct priv *, uint8_t (*)[ETHER_ADDR_LEN]); +void hash_rxq_mac_addrs_del(struct hash_rxq *); +void priv_mac_addrs_disable(struct priv *); +void mlx5_mac_addr_remove(struct rte_eth_dev *, uint32_t); +int hash_rxq_mac_addrs_add(struct hash_rxq *); +int priv_mac_addr_add(struct priv *, unsigned int, + const uint8_t (*)[ETHER_ADDR_LEN]); +int priv_mac_addrs_enable(struct priv *); +void mlx5_mac_addr_add(struct rte_eth_dev *, struct ether_addr *, uint32_t, + uint32_t); +void mlx5_mac_addr_set(struct rte_eth_dev *, struct ether_addr *); + +/* mlx5_rss.c */ + +int rss_hash_rss_conf_new_key(struct priv *, const uint8_t *, unsigned int, + uint64_t); 
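The priv_lock()/priv_unlock() helpers defined above suggest the pattern the control-path entry points declared in this header are expected to follow. The sketch below is illustrative only; example_ctrl_op() and do_work() are hypothetical names that do not exist in this driver:

	static int
	example_ctrl_op(struct rte_eth_dev *dev)
	{
		struct priv *priv = dev->data->dev_private;
		int ret;

		priv_lock(priv);
		ret = do_work(priv);	/* hypothetical operation returning an errno value */
		priv_unlock(priv);
		return -ret;		/* DPDK callbacks report a negative errno value */
	}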
+int mlx5_rss_hash_update(struct rte_eth_dev *, struct rte_eth_rss_conf *); +int mlx5_rss_hash_conf_get(struct rte_eth_dev *, struct rte_eth_rss_conf *); +int priv_rss_reta_index_resize(struct priv *, unsigned int); +int mlx5_dev_rss_reta_query(struct rte_eth_dev *, + struct rte_eth_rss_reta_entry64 *, uint16_t); +int mlx5_dev_rss_reta_update(struct rte_eth_dev *, + struct rte_eth_rss_reta_entry64 *, uint16_t); + +/* mlx5_rxmode.c */ + +int priv_special_flow_enable(struct priv *, enum hash_rxq_flow_type); +void priv_special_flow_disable(struct priv *, enum hash_rxq_flow_type); +int priv_special_flow_enable_all(struct priv *); +void priv_special_flow_disable_all(struct priv *); +void mlx5_promiscuous_enable(struct rte_eth_dev *); +void mlx5_promiscuous_disable(struct rte_eth_dev *); +void mlx5_allmulticast_enable(struct rte_eth_dev *); +void mlx5_allmulticast_disable(struct rte_eth_dev *); + +/* mlx5_stats.c */ + +void mlx5_stats_get(struct rte_eth_dev *, struct rte_eth_stats *); +void mlx5_stats_reset(struct rte_eth_dev *); + +/* mlx5_vlan.c */ + +int mlx5_vlan_filter_set(struct rte_eth_dev *, uint16_t, int); +void mlx5_vlan_offload_set(struct rte_eth_dev *, int); +void mlx5_vlan_strip_queue_set(struct rte_eth_dev *, uint16_t, int); + +/* mlx5_trigger.c */ + +int mlx5_dev_start(struct rte_eth_dev *); +void mlx5_dev_stop(struct rte_eth_dev *); + +/* mlx5_fdir.c */ + +int fdir_init_filters_list(struct priv *); +void priv_fdir_delete_filters_list(struct priv *); +void priv_fdir_disable(struct priv *); +void priv_fdir_enable(struct priv *); +int mlx5_dev_filter_ctrl(struct rte_eth_dev *, enum rte_filter_type, + enum rte_filter_op, void *); + +#endif /* RTE_PMD_MLX5_H_ */ diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h new file mode 100644 index 00000000..09207d9c --- /dev/null +++ b/drivers/net/mlx5/mlx5_defs.h @@ -0,0 +1,98 @@ +/*- + * BSD LICENSE + * + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef RTE_PMD_MLX5_DEFS_H_ +#define RTE_PMD_MLX5_DEFS_H_ + +#include "mlx5_autoconf.h" + +/* Reported driver name. */ +#define MLX5_DRIVER_NAME "librte_pmd_mlx5" + +/* Maximum number of simultaneous MAC addresses. */ +#define MLX5_MAX_MAC_ADDRESSES 128 + +/* Maximum number of simultaneous VLAN filters. */ +#define MLX5_MAX_VLAN_IDS 128 + +/* Maximum number of special flows. */ +#define MLX5_MAX_SPECIAL_FLOWS 4 + +/* Request send completion once in every 64 sends, might be less. */ +#define MLX5_PMD_TX_PER_COMP_REQ 64 + +/* RSS Indirection table size. */ +#define RSS_INDIRECTION_TABLE_SIZE 256 + +/* Maximum number of Scatter/Gather Elements per Work Request. */ +#ifndef MLX5_PMD_SGE_WR_N +#define MLX5_PMD_SGE_WR_N 4 +#endif + +/* Maximum size for inline data. */ +#ifndef MLX5_PMD_MAX_INLINE +#define MLX5_PMD_MAX_INLINE 0 +#endif + +/* + * Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP + * from which buffers are to be transmitted will have to be mapped by this + * driver to their own Memory Region (MR). This is a slow operation. + * + * This value is always 1 for RX queues. + */ +#ifndef MLX5_PMD_TX_MP_CACHE +#define MLX5_PMD_TX_MP_CACHE 8 +#endif + +/* + * If defined, only use software counters. The PMD will never ask the hardware + * for these, and many of them won't be available. + */ +#ifndef MLX5_PMD_SOFT_COUNTERS +#define MLX5_PMD_SOFT_COUNTERS 1 +#endif + +/* Alarm timeout. */ +#define MLX5_ALARM_TIMEOUT_US 100000 + +/* + * Extended flow priorities necessary to support flow director are available + * since MLNX_OFED 3.2. Considering this version adds support for VLAN + * offloads as well, their availability means flow director can be used. + */ +#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS +#define MLX5_FDIR_SUPPORT 1 +#endif + +#endif /* RTE_PMD_MLX5_DEFS_H_ */ diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c new file mode 100644 index 00000000..36b369e7 --- /dev/null +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -0,0 +1,1282 @@ +/*- + * BSD LICENSE + * + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <stddef.h> +#include <assert.h> +#include <unistd.h> +#include <stdint.h> +#include <stdio.h> +#include <string.h> +#include <stdlib.h> +#include <errno.h> +#include <dirent.h> +#include <net/if.h> +#include <sys/ioctl.h> +#include <sys/socket.h> +#include <netinet/in.h> +#include <linux/if.h> +#include <linux/ethtool.h> +#include <linux/sockios.h> +#include <fcntl.h> + +/* DPDK headers don't like -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <rte_atomic.h> +#include <rte_ethdev.h> +#include <rte_mbuf.h> +#include <rte_common.h> +#include <rte_interrupts.h> +#include <rte_alarm.h> +#include <rte_malloc.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +#include "mlx5.h" +#include "mlx5_rxtx.h" +#include "mlx5_utils.h" + +/** + * Return private structure associated with an Ethernet device. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * Pointer to private structure. + */ +struct priv * +mlx5_get_priv(struct rte_eth_dev *dev) +{ + struct mlx5_secondary_data *sd; + + if (!mlx5_is_secondary()) + return dev->data->dev_private; + sd = &mlx5_secondary_data[dev->data->port_id]; + return sd->data.dev_private; +} + +/** + * Check if running as a secondary process. + * + * @return + * Nonzero if running as a secondary process. + */ +inline int +mlx5_is_secondary(void) +{ + return rte_eal_process_type() != RTE_PROC_PRIMARY; +} + +/** + * Get interface name from private structure. + * + * @param[in] priv + * Pointer to private structure. + * @param[out] ifname + * Interface name output buffer. + * + * @return + * 0 on success, -1 on failure and errno is set. + */ +int +priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE]) +{ + DIR *dir; + struct dirent *dent; + unsigned int dev_type = 0; + unsigned int dev_port_prev = ~0u; + char match[IF_NAMESIZE] = ""; + + { + MKSTR(path, "%s/device/net", priv->ctx->device->ibdev_path); + + dir = opendir(path); + if (dir == NULL) + return -1; + } + while ((dent = readdir(dir)) != NULL) { + char *name = dent->d_name; + FILE *file; + unsigned int dev_port; + int r; + + if ((name[0] == '.') && + ((name[1] == '\0') || + ((name[1] == '.') && (name[2] == '\0')))) + continue; + + MKSTR(path, "%s/device/net/%s/%s", + priv->ctx->device->ibdev_path, name, + (dev_type ? "dev_id" : "dev_port")); + + file = fopen(path, "rb"); + if (file == NULL) { + if (errno != ENOENT) + continue; + /* + * Switch to dev_id when dev_port does not exist as + * is the case with Linux kernel versions < 3.15. + */ +try_dev_id: + match[0] = '\0'; + if (dev_type) + break; + dev_type = 1; + dev_port_prev = ~0u; + rewinddir(dir); + continue; + } + r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port); + fclose(file); + if (r != 1) + continue; + /* + * Switch to dev_id when dev_port returns the same value for + * all ports. May happen when using a MOFED release older than + * 3.0 with a Linux kernel >= 3.15. 
+ */ + if (dev_port == dev_port_prev) + goto try_dev_id; + dev_port_prev = dev_port; + if (dev_port == (priv->port - 1u)) + snprintf(match, sizeof(match), "%s", name); + } + closedir(dir); + if (match[0] == '\0') + return -1; + strncpy(*ifname, match, sizeof(*ifname)); + return 0; +} + +/** + * Read from sysfs entry. + * + * @param[in] priv + * Pointer to private structure. + * @param[in] entry + * Entry name relative to sysfs path. + * @param[out] buf + * Data output buffer. + * @param size + * Buffer size. + * + * @return + * 0 on success, -1 on failure and errno is set. + */ +static int +priv_sysfs_read(const struct priv *priv, const char *entry, + char *buf, size_t size) +{ + char ifname[IF_NAMESIZE]; + FILE *file; + int ret; + int err; + + if (priv_get_ifname(priv, &ifname)) + return -1; + + MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path, + ifname, entry); + + file = fopen(path, "rb"); + if (file == NULL) + return -1; + ret = fread(buf, 1, size, file); + err = errno; + if (((size_t)ret < size) && (ferror(file))) + ret = -1; + else + ret = size; + fclose(file); + errno = err; + return ret; +} + +/** + * Write to sysfs entry. + * + * @param[in] priv + * Pointer to private structure. + * @param[in] entry + * Entry name relative to sysfs path. + * @param[in] buf + * Data buffer. + * @param size + * Buffer size. + * + * @return + * 0 on success, -1 on failure and errno is set. + */ +static int +priv_sysfs_write(const struct priv *priv, const char *entry, + char *buf, size_t size) +{ + char ifname[IF_NAMESIZE]; + FILE *file; + int ret; + int err; + + if (priv_get_ifname(priv, &ifname)) + return -1; + + MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path, + ifname, entry); + + file = fopen(path, "wb"); + if (file == NULL) + return -1; + ret = fwrite(buf, 1, size, file); + err = errno; + if (((size_t)ret < size) || (ferror(file))) + ret = -1; + else + ret = size; + fclose(file); + errno = err; + return ret; +} + +/** + * Get unsigned long sysfs property. + * + * @param priv + * Pointer to private structure. + * @param[in] name + * Entry name relative to sysfs path. + * @param[out] value + * Value output buffer. + * + * @return + * 0 on success, -1 on failure and errno is set. + */ +static int +priv_get_sysfs_ulong(struct priv *priv, const char *name, unsigned long *value) +{ + int ret; + unsigned long value_ret; + char value_str[32]; + + ret = priv_sysfs_read(priv, name, value_str, (sizeof(value_str) - 1)); + if (ret == -1) { + DEBUG("cannot read %s value from sysfs: %s", + name, strerror(errno)); + return -1; + } + value_str[ret] = '\0'; + errno = 0; + value_ret = strtoul(value_str, NULL, 0); + if (errno) { + DEBUG("invalid %s value `%s': %s", name, value_str, + strerror(errno)); + return -1; + } + *value = value_ret; + return 0; +} + +/** + * Set unsigned long sysfs property. + * + * @param priv + * Pointer to private structure. + * @param[in] name + * Entry name relative to sysfs path. + * @param value + * Value to set. + * + * @return + * 0 on success, -1 on failure and errno is set. + */ +static int +priv_set_sysfs_ulong(struct priv *priv, const char *name, unsigned long value) +{ + int ret; + MKSTR(value_str, "%lu", value); + + ret = priv_sysfs_write(priv, name, value_str, (sizeof(value_str) - 1)); + if (ret == -1) { + DEBUG("cannot write %s `%s' (%lu) to sysfs: %s", + name, value_str, value, strerror(errno)); + return -1; + } + return 0; +} + +/** + * Perform ifreq ioctl() on associated Ethernet device. 
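 *
 * Minimal usage sketch (the "up" variable is hypothetical and used only for
 * illustration); callers below pass standard requests such as SIOCGIFFLAGS
 * or SIOCETHTOOL through this helper:
 *
 *	struct ifreq ifr;
 *	int up = 0;
 *
 *	if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr) == 0)
 *		up = !!(ifr.ifr_flags & IFF_UP);
 *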
+ * + * @param[in] priv + * Pointer to private structure. + * @param req + * Request number to pass to ioctl(). + * @param[out] ifr + * Interface request structure output buffer. + * + * @return + * 0 on success, -1 on failure and errno is set. + */ +int +priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr) +{ + int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); + int ret = -1; + + if (sock == -1) + return ret; + if (priv_get_ifname(priv, &ifr->ifr_name) == 0) + ret = ioctl(sock, req, ifr); + close(sock); + return ret; +} + +/** + * Get device MTU. + * + * @param priv + * Pointer to private structure. + * @param[out] mtu + * MTU value output buffer. + * + * @return + * 0 on success, -1 on failure and errno is set. + */ +int +priv_get_mtu(struct priv *priv, uint16_t *mtu) +{ + unsigned long ulong_mtu; + + if (priv_get_sysfs_ulong(priv, "mtu", &ulong_mtu) == -1) + return -1; + *mtu = ulong_mtu; + return 0; +} + +/** + * Set device MTU. + * + * @param priv + * Pointer to private structure. + * @param mtu + * MTU value to set. + * + * @return + * 0 on success, -1 on failure and errno is set. + */ +static int +priv_set_mtu(struct priv *priv, uint16_t mtu) +{ + return priv_set_sysfs_ulong(priv, "mtu", mtu); +} + +/** + * Set device flags. + * + * @param priv + * Pointer to private structure. + * @param keep + * Bitmask for flags that must remain untouched. + * @param flags + * Bitmask for flags to modify. + * + * @return + * 0 on success, -1 on failure and errno is set. + */ +int +priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags) +{ + unsigned long tmp; + + if (priv_get_sysfs_ulong(priv, "flags", &tmp) == -1) + return -1; + tmp &= keep; + tmp |= flags; + return priv_set_sysfs_ulong(priv, "flags", tmp); +} + +/** + * Ethernet device configuration. + * + * Prepare the driver for a given number of TX and RX queues. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, errno value on failure. + */ +static int +dev_configure(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + unsigned int rxqs_n = dev->data->nb_rx_queues; + unsigned int txqs_n = dev->data->nb_tx_queues; + unsigned int i; + unsigned int j; + unsigned int reta_idx_n; + + priv->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf; + priv->rxqs = (void *)dev->data->rx_queues; + priv->txqs = (void *)dev->data->tx_queues; + if (txqs_n != priv->txqs_n) { + INFO("%p: TX queues number update: %u -> %u", + (void *)dev, priv->txqs_n, txqs_n); + priv->txqs_n = txqs_n; + } + if (rxqs_n > priv->ind_table_max_size) { + ERROR("cannot handle this many RX queues (%u)", rxqs_n); + return EINVAL; + } + if (rxqs_n == priv->rxqs_n) + return 0; + INFO("%p: RX queues number update: %u -> %u", + (void *)dev, priv->rxqs_n, rxqs_n); + priv->rxqs_n = rxqs_n; + /* If the requested number of RX queues is not a power of two, use the + * maximum indirection table size for better balancing. + * The result is always rounded to the next power of two. */ + reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ? + priv->ind_table_max_size : + rxqs_n)); + if (priv_rss_reta_index_resize(priv, reta_idx_n)) + return ENOMEM; + /* When the number of RX queues is not a power of two, the remaining + * table entries are padded with reused WQs and hashes are not spread + * uniformly. */ + for (i = 0, j = 0; (i != reta_idx_n); ++i) { + (*priv->reta_idx)[i] = j; + if (++j == rxqs_n) + j = 0; + } + return 0; +} + +/** + * DPDK callback for Ethernet device configuration. 
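 *
 * Worked example for the RETA sizing in dev_configure() above (numbers are
 * hypothetical): with ind_table_max_size = 512, requesting 8 RX queues keeps
 * a RETA of 8 entries (already a power of two), while requesting 6 RX queues
 * expands the RETA to 512 entries filled as 0,1,2,3,4,5,0,1,... so the
 * remaining entries reuse existing WQs.
 *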
+ * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative errno value on failure. + */ +int +mlx5_dev_configure(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + int ret; + + if (mlx5_is_secondary()) + return -E_RTE_SECONDARY; + + priv_lock(priv); + ret = dev_configure(dev); + assert(ret >= 0); + priv_unlock(priv); + return -ret; +} + +/** + * DPDK callback to get information about the device. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] info + * Info structure output buffer. + */ +void +mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) +{ + struct priv *priv = mlx5_get_priv(dev); + unsigned int max; + char ifname[IF_NAMESIZE]; + + priv_lock(priv); + /* FIXME: we should ask the device for these values. */ + info->min_rx_bufsize = 32; + info->max_rx_pktlen = 65536; + /* + * Since we need one CQ per QP, the limit is the minimum number + * between the two values. + */ + max = ((priv->device_attr.max_cq > priv->device_attr.max_qp) ? + priv->device_attr.max_qp : priv->device_attr.max_cq); + /* If max >= 65535 then max = 0, max_rx_queues is uint16_t. */ + if (max >= 65535) + max = 65535; + info->max_rx_queues = max; + info->max_tx_queues = max; + info->max_mac_addrs = RTE_DIM(priv->mac); + info->rx_offload_capa = + (priv->hw_csum ? + (DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM) : + 0); + info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT; + if (priv->hw_csum) + info->tx_offload_capa |= + (DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM); + if (priv_get_ifname(priv, &ifname) == 0) + info->if_index = if_nametoindex(ifname); + /* FIXME: RETA update/query API expects the callee to know the size of + * the indirection table, for this PMD the size varies depending on + * the number of RX queues, it becomes impossible to find the correct + * size if it is not fixed. + * The API should be updated to solve this problem. */ + info->reta_size = priv->ind_table_max_size; + info->speed_capa = + ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_10G | + ETH_LINK_SPEED_20G | + ETH_LINK_SPEED_25G | + ETH_LINK_SPEED_40G | + ETH_LINK_SPEED_50G | + ETH_LINK_SPEED_56G | + ETH_LINK_SPEED_100G; + priv_unlock(priv); +} + +const uint32_t * +mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + /* refers to rxq_cq_to_pkt_type() */ + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_INNER_L3_IPV4, + RTE_PTYPE_INNER_L3_IPV6, + RTE_PTYPE_UNKNOWN + + }; + + if (dev->rx_pkt_burst == mlx5_rx_burst || + dev->rx_pkt_burst == mlx5_rx_burst_sp) + return ptypes; + return NULL; +} + +/** + * DPDK callback to retrieve physical link information (unlocked version). + * + * @param dev + * Pointer to Ethernet device structure. + * @param wait_to_complete + * Wait for request completion (ignored). 
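 *
 * Note on units (values illustrative): the ETHTOOL_GSET request used below
 * reports the link speed in Mb/s, e.g. 40000 for a 40G port, and the same
 * unit is stored in dev_link.link_speed; an unknown speed (-1) is normalized
 * to 0.
 *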
+ */ +static int +mlx5_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete) +{ + struct priv *priv = mlx5_get_priv(dev); + struct ethtool_cmd edata = { + .cmd = ETHTOOL_GSET + }; + struct ifreq ifr; + struct rte_eth_link dev_link; + int link_speed = 0; + + (void)wait_to_complete; + if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) { + WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno)); + return -1; + } + memset(&dev_link, 0, sizeof(dev_link)); + dev_link.link_status = ((ifr.ifr_flags & IFF_UP) && + (ifr.ifr_flags & IFF_RUNNING)); + ifr.ifr_data = &edata; + if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { + WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s", + strerror(errno)); + return -1; + } + link_speed = ethtool_cmd_speed(&edata); + if (link_speed == -1) + dev_link.link_speed = 0; + else + dev_link.link_speed = link_speed; + dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ? + ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX); + dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_FIXED); + if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) { + /* Link status changed. */ + dev->data->dev_link = dev_link; + return 0; + } + /* Link status is still the same. */ + return -1; +} + +/** + * DPDK callback to retrieve physical link information. + * + * @param dev + * Pointer to Ethernet device structure. + * @param wait_to_complete + * Wait for request completion (ignored). + */ +int +mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ + struct priv *priv = mlx5_get_priv(dev); + int ret; + + priv_lock(priv); + ret = mlx5_link_update_unlocked(dev, wait_to_complete); + priv_unlock(priv); + return ret; +} + +/** + * DPDK callback to change the MTU. + * + * Setting the MTU affects hardware MRU (packets larger than the MTU cannot be + * received). Use this as a hint to enable/disable scattered packets support + * and improve performance when not needed. + * Since failure is not an option, reconfiguring queues on the fly is not + * recommended. + * + * @param dev + * Pointer to Ethernet device structure. + * @param in_mtu + * New MTU. + * + * @return + * 0 on success, negative errno value on failure. + */ +int +mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct priv *priv = dev->data->dev_private; + int ret = 0; + unsigned int i; + uint16_t (*rx_func)(void *, struct rte_mbuf **, uint16_t) = + mlx5_rx_burst; + + if (mlx5_is_secondary()) + return -E_RTE_SECONDARY; + + priv_lock(priv); + /* Set kernel interface MTU first. */ + if (priv_set_mtu(priv, mtu)) { + ret = errno; + WARN("cannot set port %u MTU to %u: %s", priv->port, mtu, + strerror(ret)); + goto out; + } else + DEBUG("adapter port %u MTU set to %u", priv->port, mtu); + priv->mtu = mtu; + /* Temporarily replace RX handler with a fake one, assuming it has not + * been copied elsewhere. */ + dev->rx_pkt_burst = removed_rx_burst; + /* Make sure everyone has left mlx5_rx_burst() and uses + * removed_rx_burst() instead. */ + rte_wmb(); + usleep(1000); + /* Reconfigure each RX queue. */ + for (i = 0; (i != priv->rxqs_n); ++i) { + struct rxq *rxq = (*priv->rxqs)[i]; + unsigned int max_frame_len; + int sp; + + if (rxq == NULL) + continue; + /* Calculate new maximum frame length according to MTU and + * toggle scattered support (sp) if necessary. */ + max_frame_len = (priv->mtu + ETHER_HDR_LEN + + (ETHER_MAX_VLAN_FRAME_LEN - ETHER_MAX_LEN)); + sp = (max_frame_len > (rxq->mb_len - RTE_PKTMBUF_HEADROOM)); + /* Provide new values to rxq_setup(). 
*/ + dev->data->dev_conf.rxmode.jumbo_frame = sp; + dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len; + ret = rxq_rehash(dev, rxq); + if (ret) { + /* Force SP RX if that queue requires it and abort. */ + if (rxq->sp) + rx_func = mlx5_rx_burst_sp; + break; + } + /* Scattered burst function takes priority. */ + if (rxq->sp) + rx_func = mlx5_rx_burst_sp; + } + /* Burst functions can now be called again. */ + rte_wmb(); + dev->rx_pkt_burst = rx_func; +out: + priv_unlock(priv); + assert(ret >= 0); + return -ret; +} + +/** + * DPDK callback to get flow control status. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] fc_conf + * Flow control output buffer. + * + * @return + * 0 on success, negative errno value on failure. + */ +int +mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct priv *priv = dev->data->dev_private; + struct ifreq ifr; + struct ethtool_pauseparam ethpause = { + .cmd = ETHTOOL_GPAUSEPARAM + }; + int ret; + + if (mlx5_is_secondary()) + return -E_RTE_SECONDARY; + + ifr.ifr_data = ðpause; + priv_lock(priv); + if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { + ret = errno; + WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)" + " failed: %s", + strerror(ret)); + goto out; + } + + fc_conf->autoneg = ethpause.autoneg; + if (ethpause.rx_pause && ethpause.tx_pause) + fc_conf->mode = RTE_FC_FULL; + else if (ethpause.rx_pause) + fc_conf->mode = RTE_FC_RX_PAUSE; + else if (ethpause.tx_pause) + fc_conf->mode = RTE_FC_TX_PAUSE; + else + fc_conf->mode = RTE_FC_NONE; + ret = 0; + +out: + priv_unlock(priv); + assert(ret >= 0); + return -ret; +} + +/** + * DPDK callback to modify flow control parameters. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[in] fc_conf + * Flow control parameters. + * + * @return + * 0 on success, negative errno value on failure. + */ +int +mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct priv *priv = dev->data->dev_private; + struct ifreq ifr; + struct ethtool_pauseparam ethpause = { + .cmd = ETHTOOL_SPAUSEPARAM + }; + int ret; + + if (mlx5_is_secondary()) + return -E_RTE_SECONDARY; + + ifr.ifr_data = ðpause; + ethpause.autoneg = fc_conf->autoneg; + if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) || + (fc_conf->mode & RTE_FC_RX_PAUSE)) + ethpause.rx_pause = 1; + else + ethpause.rx_pause = 0; + + if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) || + (fc_conf->mode & RTE_FC_TX_PAUSE)) + ethpause.tx_pause = 1; + else + ethpause.tx_pause = 0; + + priv_lock(priv); + if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { + ret = errno; + WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)" + " failed: %s", + strerror(ret)); + goto out; + } + ret = 0; + +out: + priv_unlock(priv); + assert(ret >= 0); + return -ret; +} + +/** + * Get PCI information from struct ibv_device. + * + * @param device + * Pointer to Ethernet device structure. + * @param[out] pci_addr + * PCI bus address output buffer. + * + * @return + * 0 on success, -1 on failure and errno is set. + */ +int +mlx5_ibv_device_to_pci_addr(const struct ibv_device *device, + struct rte_pci_addr *pci_addr) +{ + FILE *file; + char line[32]; + MKSTR(path, "%s/device/uevent", device->ibdev_path); + + file = fopen(path, "rb"); + if (file == NULL) + return -1; + while (fgets(line, sizeof(line), file) == line) { + size_t len = strlen(line); + int ret; + + /* Truncate long lines. 
*/ + if (len == (sizeof(line) - 1)) + while (line[(len - 1)] != '\n') { + ret = fgetc(file); + if (ret == EOF) + break; + line[(len - 1)] = ret; + } + /* Extract information. */ + if (sscanf(line, + "PCI_SLOT_NAME=" + "%" SCNx16 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n", + &pci_addr->domain, + &pci_addr->bus, + &pci_addr->devid, + &pci_addr->function) == 4) { + ret = 0; + break; + } + } + fclose(file); + return 0; +} + +/** + * Link status handler. + * + * @param priv + * Pointer to private structure. + * @param dev + * Pointer to the rte_eth_dev structure. + * + * @return + * Nonzero if the callback process can be called immediately. + */ +static int +priv_dev_link_status_handler(struct priv *priv, struct rte_eth_dev *dev) +{ + struct ibv_async_event event; + int port_change = 0; + int ret = 0; + + /* Read all message and acknowledge them. */ + for (;;) { + if (ibv_get_async_event(priv->ctx, &event)) + break; + + if (event.event_type == IBV_EVENT_PORT_ACTIVE || + event.event_type == IBV_EVENT_PORT_ERR) + port_change = 1; + else + DEBUG("event type %d on port %d not handled", + event.event_type, event.element.port_num); + ibv_ack_async_event(&event); + } + + if (port_change ^ priv->pending_alarm) { + struct rte_eth_link *link = &dev->data->dev_link; + + priv->pending_alarm = 0; + mlx5_link_update_unlocked(dev, 0); + if (((link->link_speed == 0) && link->link_status) || + ((link->link_speed != 0) && !link->link_status)) { + /* Inconsistent status, check again later. */ + priv->pending_alarm = 1; + rte_eal_alarm_set(MLX5_ALARM_TIMEOUT_US, + mlx5_dev_link_status_handler, + dev); + } else + ret = 1; + } + return ret; +} + +/** + * Handle delayed link status event. + * + * @param arg + * Registered argument. + */ +void +mlx5_dev_link_status_handler(void *arg) +{ + struct rte_eth_dev *dev = arg; + struct priv *priv = dev->data->dev_private; + int ret; + + priv_lock(priv); + assert(priv->pending_alarm == 1); + ret = priv_dev_link_status_handler(priv, dev); + priv_unlock(priv); + if (ret) + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); +} + +/** + * Handle interrupts from the NIC. + * + * @param[in] intr_handle + * Interrupt handler. + * @param cb_arg + * Callback argument. + */ +void +mlx5_dev_interrupt_handler(struct rte_intr_handle *intr_handle, void *cb_arg) +{ + struct rte_eth_dev *dev = cb_arg; + struct priv *priv = dev->data->dev_private; + int ret; + + (void)intr_handle; + priv_lock(priv); + ret = priv_dev_link_status_handler(priv, dev); + priv_unlock(priv); + if (ret) + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); +} + +/** + * Uninstall interrupt handler. + * + * @param priv + * Pointer to private structure. + * @param dev + * Pointer to the rte_eth_dev structure. + */ +void +priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev) +{ + if (!dev->data->dev_conf.intr_conf.lsc) + return; + rte_intr_callback_unregister(&priv->intr_handle, + mlx5_dev_interrupt_handler, + dev); + if (priv->pending_alarm) + rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev); + priv->pending_alarm = 0; + priv->intr_handle.fd = 0; + priv->intr_handle.type = 0; +} + +/** + * Install interrupt handler. + * + * @param priv + * Pointer to private structure. + * @param dev + * Pointer to the rte_eth_dev structure. 
+ */ +void +priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev) +{ + int rc, flags; + + if (!dev->data->dev_conf.intr_conf.lsc) + return; + assert(priv->ctx->async_fd > 0); + flags = fcntl(priv->ctx->async_fd, F_GETFL); + rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK); + if (rc < 0) { + INFO("failed to change file descriptor async event queue"); + dev->data->dev_conf.intr_conf.lsc = 0; + } else { + priv->intr_handle.fd = priv->ctx->async_fd; + priv->intr_handle.type = RTE_INTR_HANDLE_EXT; + rte_intr_callback_register(&priv->intr_handle, + mlx5_dev_interrupt_handler, + dev); + } +} + +/** + * Change the link state (UP / DOWN). + * + * @param dev + * Pointer to Ethernet device structure. + * @param up + * Nonzero for link up, otherwise link down. + * + * @return + * 0 on success, errno value on failure. + */ +static int +priv_set_link(struct priv *priv, int up) +{ + struct rte_eth_dev *dev = priv->dev; + int err; + unsigned int i; + + if (up) { + err = priv_set_flags(priv, ~IFF_UP, IFF_UP); + if (err) + return err; + for (i = 0; i < priv->rxqs_n; i++) + if ((*priv->rxqs)[i]->sp) + break; + /* Check if an sp queue exists. + * Note: Some old frames might be received. + */ + if (i == priv->rxqs_n) + dev->rx_pkt_burst = mlx5_rx_burst; + else + dev->rx_pkt_burst = mlx5_rx_burst_sp; + dev->tx_pkt_burst = mlx5_tx_burst; + } else { + err = priv_set_flags(priv, ~IFF_UP, ~IFF_UP); + if (err) + return err; + dev->rx_pkt_burst = removed_rx_burst; + dev->tx_pkt_burst = removed_tx_burst; + } + return 0; +} + +/** + * DPDK callback to bring the link DOWN. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, errno value on failure. + */ +int +mlx5_set_link_down(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + int err; + + priv_lock(priv); + err = priv_set_link(priv, 0); + priv_unlock(priv); + return err; +} + +/** + * DPDK callback to bring the link UP. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, errno value on failure. + */ +int +mlx5_set_link_up(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + int err; + + priv_lock(priv); + err = priv_set_link(priv, 1); + priv_unlock(priv); + return err; +} + +/** + * Configure secondary process queues from a private data pointer (primary + * or secondary) and update burst callbacks. Can take place only once. + * + * All queues must have been previously created by the primary process to + * avoid undefined behavior. + * + * @param priv + * Private data pointer from either primary or secondary process. + * + * @return + * Private data pointer from secondary process, NULL in case of error. + */ +struct priv * +mlx5_secondary_data_setup(struct priv *priv) +{ + unsigned int port_id = 0; + struct mlx5_secondary_data *sd; + void **tx_queues; + void **rx_queues; + unsigned int nb_tx_queues; + unsigned int nb_rx_queues; + unsigned int i; + + /* priv must be valid at this point. */ + assert(priv != NULL); + /* priv->dev must also be valid but may point to local memory from + * another process, possibly with the same address and must not + * be dereferenced yet. */ + assert(priv->dev != NULL); + /* Determine port ID by finding out where priv comes from. */ + while (1) { + sd = &mlx5_secondary_data[port_id]; + rte_spinlock_lock(&sd->lock); + /* Primary process? */ + if (sd->primary_priv == priv) + break; + /* Secondary process? 
*/ + if (sd->data.dev_private == priv) + break; + rte_spinlock_unlock(&sd->lock); + if (++port_id == RTE_DIM(mlx5_secondary_data)) + port_id = 0; + } + /* Switch to secondary private structure. If private data has already + * been updated by another thread, there is nothing else to do. */ + priv = sd->data.dev_private; + if (priv->dev->data == &sd->data) + goto end; + /* Sanity checks. Secondary private structure is supposed to point + * to local eth_dev, itself still pointing to the shared device data + * structure allocated by the primary process. */ + assert(sd->shared_dev_data != &sd->data); + assert(sd->data.nb_tx_queues == 0); + assert(sd->data.tx_queues == NULL); + assert(sd->data.nb_rx_queues == 0); + assert(sd->data.rx_queues == NULL); + assert(priv != sd->primary_priv); + assert(priv->dev->data == sd->shared_dev_data); + assert(priv->txqs_n == 0); + assert(priv->txqs == NULL); + assert(priv->rxqs_n == 0); + assert(priv->rxqs == NULL); + nb_tx_queues = sd->shared_dev_data->nb_tx_queues; + nb_rx_queues = sd->shared_dev_data->nb_rx_queues; + /* Allocate local storage for queues. */ + tx_queues = rte_zmalloc("secondary ethdev->tx_queues", + sizeof(sd->data.tx_queues[0]) * nb_tx_queues, + RTE_CACHE_LINE_SIZE); + rx_queues = rte_zmalloc("secondary ethdev->rx_queues", + sizeof(sd->data.rx_queues[0]) * nb_rx_queues, + RTE_CACHE_LINE_SIZE); + if (tx_queues == NULL || rx_queues == NULL) + goto error; + /* Lock to prevent control operations during setup. */ + priv_lock(priv); + /* TX queues. */ + for (i = 0; i != nb_tx_queues; ++i) { + struct txq *primary_txq = (*sd->primary_priv->txqs)[i]; + struct txq *txq; + + if (primary_txq == NULL) + continue; + txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0, + primary_txq->socket); + if (txq != NULL) { + if (txq_setup(priv->dev, + txq, + primary_txq->elts_n * MLX5_PMD_SGE_WR_N, + primary_txq->socket, + NULL) == 0) { + txq->stats.idx = primary_txq->stats.idx; + tx_queues[i] = txq; + continue; + } + rte_free(txq); + } + while (i) { + txq = tx_queues[--i]; + txq_cleanup(txq); + rte_free(txq); + } + goto error; + } + /* RX queues. */ + for (i = 0; i != nb_rx_queues; ++i) { + struct rxq *primary_rxq = (*sd->primary_priv->rxqs)[i]; + + if (primary_rxq == NULL) + continue; + /* Not supported yet. */ + rx_queues[i] = NULL; + } + /* Update everything. */ + priv->txqs = (void *)tx_queues; + priv->txqs_n = nb_tx_queues; + priv->rxqs = (void *)rx_queues; + priv->rxqs_n = nb_rx_queues; + sd->data.rx_queues = rx_queues; + sd->data.tx_queues = tx_queues; + sd->data.nb_rx_queues = nb_rx_queues; + sd->data.nb_tx_queues = nb_tx_queues; + sd->data.dev_link = sd->shared_dev_data->dev_link; + sd->data.mtu = sd->shared_dev_data->mtu; + memcpy(sd->data.rx_queue_state, sd->shared_dev_data->rx_queue_state, + sizeof(sd->data.rx_queue_state)); + memcpy(sd->data.tx_queue_state, sd->shared_dev_data->tx_queue_state, + sizeof(sd->data.tx_queue_state)); + sd->data.dev_flags = sd->shared_dev_data->dev_flags; + /* Use local data from now on. */ + rte_mb(); + priv->dev->data = &sd->data; + rte_mb(); + priv->dev->tx_pkt_burst = mlx5_tx_burst; + priv->dev->rx_pkt_burst = removed_rx_burst; + priv_unlock(priv); +end: + /* More sanity checks. 
*/ + assert(priv->dev->tx_pkt_burst == mlx5_tx_burst); + assert(priv->dev->rx_pkt_burst == removed_rx_burst); + assert(priv->dev->data == &sd->data); + rte_spinlock_unlock(&sd->lock); + return priv; +error: + priv_unlock(priv); + rte_free(tx_queues); + rte_free(rx_queues); + rte_spinlock_unlock(&sd->lock); + return NULL; +} diff --git a/drivers/net/mlx5/mlx5_fdir.c b/drivers/net/mlx5/mlx5_fdir.c new file mode 100644 index 00000000..63e43ad9 --- /dev/null +++ b/drivers/net/mlx5/mlx5_fdir.c @@ -0,0 +1,980 @@ +/*- + * BSD LICENSE + * + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <stddef.h> +#include <assert.h> +#include <stdint.h> +#include <string.h> +#include <errno.h> + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <infiniband/verbs.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +/* DPDK headers don't like -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <rte_ether.h> +#include <rte_malloc.h> +#include <rte_ethdev.h> +#include <rte_common.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +#include "mlx5.h" +#include "mlx5_rxtx.h" + +struct fdir_flow_desc { + uint16_t dst_port; + uint16_t src_port; + uint32_t src_ip[4]; + uint32_t dst_ip[4]; + uint8_t mac[6]; + uint16_t vlan_tag; + enum hash_rxq_type type; +}; + +struct mlx5_fdir_filter { + LIST_ENTRY(mlx5_fdir_filter) next; + uint16_t queue; /* Queue assigned to if FDIR match. */ + struct fdir_flow_desc desc; + struct ibv_exp_flow *flow; +}; + +LIST_HEAD(fdir_filter_list, mlx5_fdir_filter); + +/** + * Convert struct rte_eth_fdir_filter to mlx5 filter descriptor. + * + * @param[in] fdir_filter + * DPDK filter structure to convert. + * @param[out] desc + * Resulting mlx5 filter descriptor. + * @param mode + * Flow director mode. 
+ */ +static void +fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter, + struct fdir_flow_desc *desc, enum rte_fdir_mode mode) +{ + /* Initialize descriptor. */ + memset(desc, 0, sizeof(*desc)); + + /* Set VLAN ID. */ + desc->vlan_tag = fdir_filter->input.flow_ext.vlan_tci; + + /* Set MAC address. */ + if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { + rte_memcpy(desc->mac, + fdir_filter->input.flow.mac_vlan_flow.mac_addr. + addr_bytes, + sizeof(desc->mac)); + desc->type = HASH_RXQ_ETH; + return; + } + + /* Set mode */ + switch (fdir_filter->input.flow_type) { + case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: + desc->type = HASH_RXQ_UDPV4; + break; + case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: + desc->type = HASH_RXQ_TCPV4; + break; + case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: + desc->type = HASH_RXQ_IPV4; + break; +#ifdef HAVE_FLOW_SPEC_IPV6 + case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: + desc->type = HASH_RXQ_UDPV6; + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: + desc->type = HASH_RXQ_TCPV6; + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: + desc->type = HASH_RXQ_IPV6; + break; +#endif /* HAVE_FLOW_SPEC_IPV6 */ + default: + break; + } + + /* Set flow values */ + switch (fdir_filter->input.flow_type) { + case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: + case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: + desc->src_port = fdir_filter->input.flow.udp4_flow.src_port; + desc->dst_port = fdir_filter->input.flow.udp4_flow.dst_port; + case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: + desc->src_ip[0] = fdir_filter->input.flow.ip4_flow.src_ip; + desc->dst_ip[0] = fdir_filter->input.flow.ip4_flow.dst_ip; + break; +#ifdef HAVE_FLOW_SPEC_IPV6 + case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: + case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: + desc->src_port = fdir_filter->input.flow.udp6_flow.src_port; + desc->dst_port = fdir_filter->input.flow.udp6_flow.dst_port; + /* Fall through. */ + case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: + rte_memcpy(desc->src_ip, + fdir_filter->input.flow.ipv6_flow.src_ip, + sizeof(desc->src_ip)); + rte_memcpy(desc->dst_ip, + fdir_filter->input.flow.ipv6_flow.dst_ip, + sizeof(desc->dst_ip)); + break; +#endif /* HAVE_FLOW_SPEC_IPV6 */ + default: + break; + } +} + +/** + * Check if two flow descriptors overlap according to configured mask. + * + * @param priv + * Private structure that provides flow director mask. + * @param desc1 + * First flow descriptor to compare. + * @param desc2 + * Second flow descriptor to compare. + * + * @return + * Nonzero if descriptors overlap. + */ +static int +priv_fdir_overlap(const struct priv *priv, + const struct fdir_flow_desc *desc1, + const struct fdir_flow_desc *desc2) +{ + const struct rte_eth_fdir_masks *mask = + &priv->dev->data->dev_conf.fdir_conf.mask; + unsigned int i; + + if (desc1->type != desc2->type) + return 0; + /* Ignore non masked bits. 
*/ + for (i = 0; i != RTE_DIM(desc1->mac); ++i) + if ((desc1->mac[i] & mask->mac_addr_byte_mask) != + (desc2->mac[i] & mask->mac_addr_byte_mask)) + return 0; + if (((desc1->src_port & mask->src_port_mask) != + (desc2->src_port & mask->src_port_mask)) || + ((desc1->dst_port & mask->dst_port_mask) != + (desc2->dst_port & mask->dst_port_mask))) + return 0; + switch (desc1->type) { + case HASH_RXQ_IPV4: + case HASH_RXQ_UDPV4: + case HASH_RXQ_TCPV4: + if (((desc1->src_ip[0] & mask->ipv4_mask.src_ip) != + (desc2->src_ip[0] & mask->ipv4_mask.src_ip)) || + ((desc1->dst_ip[0] & mask->ipv4_mask.dst_ip) != + (desc2->dst_ip[0] & mask->ipv4_mask.dst_ip))) + return 0; + break; +#ifdef HAVE_FLOW_SPEC_IPV6 + case HASH_RXQ_IPV6: + case HASH_RXQ_UDPV6: + case HASH_RXQ_TCPV6: + for (i = 0; i != RTE_DIM(desc1->src_ip); ++i) + if (((desc1->src_ip[i] & mask->ipv6_mask.src_ip[i]) != + (desc2->src_ip[i] & mask->ipv6_mask.src_ip[i])) || + ((desc1->dst_ip[i] & mask->ipv6_mask.dst_ip[i]) != + (desc2->dst_ip[i] & mask->ipv6_mask.dst_ip[i]))) + return 0; + break; +#endif /* HAVE_FLOW_SPEC_IPV6 */ + default: + break; + } + return 1; +} + +/** + * Create flow director steering rule for a specific filter. + * + * @param priv + * Private structure. + * @param mlx5_fdir_filter + * Filter to create a steering rule for. + * @param fdir_queue + * Flow director queue for matching packets. + * + * @return + * 0 on success, errno value on failure. + */ +static int +priv_fdir_flow_add(struct priv *priv, + struct mlx5_fdir_filter *mlx5_fdir_filter, + struct fdir_queue *fdir_queue) +{ + struct ibv_exp_flow *flow; + struct fdir_flow_desc *desc = &mlx5_fdir_filter->desc; + enum rte_fdir_mode fdir_mode = + priv->dev->data->dev_conf.fdir_conf.mode; + struct rte_eth_fdir_masks *mask = + &priv->dev->data->dev_conf.fdir_conf.mask; + FLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, desc->type)); + struct ibv_exp_flow_attr *attr = &data->attr; + uintptr_t spec_offset = (uintptr_t)&data->spec; + struct ibv_exp_flow_spec_eth *spec_eth; + struct ibv_exp_flow_spec_ipv4 *spec_ipv4; +#ifdef HAVE_FLOW_SPEC_IPV6 + struct ibv_exp_flow_spec_ipv6 *spec_ipv6; +#endif /* HAVE_FLOW_SPEC_IPV6 */ + struct ibv_exp_flow_spec_tcp_udp *spec_tcp_udp; + struct mlx5_fdir_filter *iter_fdir_filter; + unsigned int i; + + /* Abort if an existing flow overlaps this one to avoid packet + * duplication, even if it targets another queue. */ + LIST_FOREACH(iter_fdir_filter, priv->fdir_filter_list, next) + if ((iter_fdir_filter != mlx5_fdir_filter) && + (iter_fdir_filter->flow != NULL) && + (priv_fdir_overlap(priv, + &mlx5_fdir_filter->desc, + &iter_fdir_filter->desc))) + return EEXIST; + + /* + * No padding must be inserted by the compiler between attr and spec. + * This layout is expected by libibverbs. + */ + assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec_offset); + priv_flow_attr(priv, attr, sizeof(data), desc->type); + + /* Set Ethernet spec */ + spec_eth = (struct ibv_exp_flow_spec_eth *)spec_offset; + + /* The first specification must be Ethernet. 
*/ + assert(spec_eth->type == IBV_EXP_FLOW_SPEC_ETH); + assert(spec_eth->size == sizeof(*spec_eth)); + + /* VLAN ID */ + spec_eth->val.vlan_tag = desc->vlan_tag & mask->vlan_tci_mask; + spec_eth->mask.vlan_tag = mask->vlan_tci_mask; + + /* Update priority */ + attr->priority = 2; + + if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { + /* MAC Address */ + for (i = 0; i != RTE_DIM(spec_eth->mask.dst_mac); ++i) { + spec_eth->val.dst_mac[i] = + desc->mac[i] & mask->mac_addr_byte_mask; + spec_eth->mask.dst_mac[i] = mask->mac_addr_byte_mask; + } + goto create_flow; + } + + switch (desc->type) { + case HASH_RXQ_IPV4: + case HASH_RXQ_UDPV4: + case HASH_RXQ_TCPV4: + spec_offset += spec_eth->size; + + /* Set IP spec */ + spec_ipv4 = (struct ibv_exp_flow_spec_ipv4 *)spec_offset; + + /* The second specification must be IP. */ + assert(spec_ipv4->type == IBV_EXP_FLOW_SPEC_IPV4); + assert(spec_ipv4->size == sizeof(*spec_ipv4)); + + spec_ipv4->val.src_ip = + desc->src_ip[0] & mask->ipv4_mask.src_ip; + spec_ipv4->val.dst_ip = + desc->dst_ip[0] & mask->ipv4_mask.dst_ip; + spec_ipv4->mask.src_ip = mask->ipv4_mask.src_ip; + spec_ipv4->mask.dst_ip = mask->ipv4_mask.dst_ip; + + /* Update priority */ + attr->priority = 1; + + if (desc->type == HASH_RXQ_IPV4) + goto create_flow; + + spec_offset += spec_ipv4->size; + break; +#ifdef HAVE_FLOW_SPEC_IPV6 + case HASH_RXQ_IPV6: + case HASH_RXQ_UDPV6: + case HASH_RXQ_TCPV6: + spec_offset += spec_eth->size; + + /* Set IP spec */ + spec_ipv6 = (struct ibv_exp_flow_spec_ipv6 *)spec_offset; + + /* The second specification must be IP. */ + assert(spec_ipv6->type == IBV_EXP_FLOW_SPEC_IPV6); + assert(spec_ipv6->size == sizeof(*spec_ipv6)); + + for (i = 0; i != RTE_DIM(desc->src_ip); ++i) { + ((uint32_t *)spec_ipv6->val.src_ip)[i] = + desc->src_ip[i] & mask->ipv6_mask.src_ip[i]; + ((uint32_t *)spec_ipv6->val.dst_ip)[i] = + desc->dst_ip[i] & mask->ipv6_mask.dst_ip[i]; + } + rte_memcpy(spec_ipv6->mask.src_ip, + mask->ipv6_mask.src_ip, + sizeof(spec_ipv6->mask.src_ip)); + rte_memcpy(spec_ipv6->mask.dst_ip, + mask->ipv6_mask.dst_ip, + sizeof(spec_ipv6->mask.dst_ip)); + + /* Update priority */ + attr->priority = 1; + + if (desc->type == HASH_RXQ_IPV6) + goto create_flow; + + spec_offset += spec_ipv6->size; + break; +#endif /* HAVE_FLOW_SPEC_IPV6 */ + default: + ERROR("invalid flow attribute type"); + return EINVAL; + } + + /* Set TCP/UDP flow specification. */ + spec_tcp_udp = (struct ibv_exp_flow_spec_tcp_udp *)spec_offset; + + /* The third specification must be TCP/UDP. */ + assert(spec_tcp_udp->type == IBV_EXP_FLOW_SPEC_TCP || + spec_tcp_udp->type == IBV_EXP_FLOW_SPEC_UDP); + assert(spec_tcp_udp->size == sizeof(*spec_tcp_udp)); + + spec_tcp_udp->val.src_port = desc->src_port & mask->src_port_mask; + spec_tcp_udp->val.dst_port = desc->dst_port & mask->dst_port_mask; + spec_tcp_udp->mask.src_port = mask->src_port_mask; + spec_tcp_udp->mask.dst_port = mask->dst_port_mask; + + /* Update priority */ + attr->priority = 0; + +create_flow: + + errno = 0; + flow = ibv_exp_create_flow(fdir_queue->qp, attr); + if (flow == NULL) { + /* It's not clear whether errno is always set in this case. */ + ERROR("%p: flow director configuration failed, errno=%d: %s", + (void *)priv, errno, + (errno ? 
strerror(errno) : "Unknown error")); + if (errno) + return errno; + return EINVAL; + } + + DEBUG("%p: added flow director rule (%p)", (void *)priv, (void *)flow); + mlx5_fdir_filter->flow = flow; + return 0; +} + +/** + * Get flow director queue for a specific RX queue, create it in case + * it does not exist. + * + * @param priv + * Private structure. + * @param idx + * RX queue index. + * + * @return + * Related flow director queue on success, NULL otherwise. + */ +static struct fdir_queue * +priv_get_fdir_queue(struct priv *priv, uint16_t idx) +{ + struct fdir_queue *fdir_queue = &(*priv->rxqs)[idx]->fdir_queue; + struct ibv_exp_rwq_ind_table *ind_table = NULL; + struct ibv_qp *qp = NULL; + struct ibv_exp_rwq_ind_table_init_attr ind_init_attr; + struct ibv_exp_rx_hash_conf hash_conf; + struct ibv_exp_qp_init_attr qp_init_attr; + int err = 0; + + /* Return immediately if it has already been created. */ + if (fdir_queue->qp != NULL) + return fdir_queue; + + ind_init_attr = (struct ibv_exp_rwq_ind_table_init_attr){ + .pd = priv->pd, + .log_ind_tbl_size = 0, + .ind_tbl = &((*priv->rxqs)[idx]->wq), + .comp_mask = 0, + }; + + errno = 0; + ind_table = ibv_exp_create_rwq_ind_table(priv->ctx, + &ind_init_attr); + if (ind_table == NULL) { + /* Not clear whether errno is set. */ + err = (errno ? errno : EINVAL); + ERROR("RX indirection table creation failed with error %d: %s", + err, strerror(err)); + goto error; + } + + /* Create fdir_queue qp. */ + hash_conf = (struct ibv_exp_rx_hash_conf){ + .rx_hash_function = IBV_EXP_RX_HASH_FUNC_TOEPLITZ, + .rx_hash_key_len = rss_hash_default_key_len, + .rx_hash_key = rss_hash_default_key, + .rx_hash_fields_mask = 0, + .rwq_ind_tbl = ind_table, + }; + qp_init_attr = (struct ibv_exp_qp_init_attr){ + .max_inl_recv = 0, /* Currently not supported. */ + .qp_type = IBV_QPT_RAW_PACKET, + .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD | + IBV_EXP_QP_INIT_ATTR_RX_HASH), + .pd = priv->pd, + .rx_hash_conf = &hash_conf, + .port_num = priv->port, + }; + + qp = ibv_exp_create_qp(priv->ctx, &qp_init_attr); + if (qp == NULL) { + err = (errno ? errno : EINVAL); + ERROR("hash RX QP creation failure: %s", strerror(err)); + goto error; + } + + fdir_queue->ind_table = ind_table; + fdir_queue->qp = qp; + + return fdir_queue; + +error: + if (qp != NULL) + claim_zero(ibv_destroy_qp(qp)); + + if (ind_table != NULL) + claim_zero(ibv_exp_destroy_rwq_ind_table(ind_table)); + + return NULL; +} + +/** + * Enable flow director filter and create steering rules. + * + * @param priv + * Private structure. + * @param mlx5_fdir_filter + * Filter to create steering rule for. + * + * @return + * 0 on success, errno value on failure. + */ +static int +priv_fdir_filter_enable(struct priv *priv, + struct mlx5_fdir_filter *mlx5_fdir_filter) +{ + struct fdir_queue *fdir_queue; + + /* Check if flow already exists. */ + if (mlx5_fdir_filter->flow != NULL) + return 0; + + /* Get fdir_queue for specific queue. */ + fdir_queue = priv_get_fdir_queue(priv, mlx5_fdir_filter->queue); + + if (fdir_queue == NULL) { + ERROR("failed to create flow director rxq for queue %d", + mlx5_fdir_filter->queue); + return EINVAL; + } + + /* Create flow */ + return priv_fdir_flow_add(priv, mlx5_fdir_filter, fdir_queue); +} + +/** + * Initialize flow director filters list. + * + * @param priv + * Private structure. + * + * @return + * 0 on success, errno value on failure. + */ +int +fdir_init_filters_list(struct priv *priv) +{ + /* Filter list initialization should be done only once. 
*/ + if (priv->fdir_filter_list) + return 0; + + /* Create filters list. */ + priv->fdir_filter_list = + rte_calloc(__func__, 1, sizeof(*priv->fdir_filter_list), 0); + + if (priv->fdir_filter_list == NULL) { + int err = ENOMEM; + + ERROR("cannot allocate flow director filter list: %s", + strerror(err)); + return err; + } + + LIST_INIT(priv->fdir_filter_list); + + return 0; +} + +/** + * Flush all filters. + * + * @param priv + * Private structure. + */ +static void +priv_fdir_filter_flush(struct priv *priv) +{ + struct mlx5_fdir_filter *mlx5_fdir_filter; + + while ((mlx5_fdir_filter = LIST_FIRST(priv->fdir_filter_list))) { + struct ibv_exp_flow *flow = mlx5_fdir_filter->flow; + + DEBUG("%p: flushing flow director filter %p", + (void *)priv, (void *)mlx5_fdir_filter); + LIST_REMOVE(mlx5_fdir_filter, next); + if (flow != NULL) + claim_zero(ibv_exp_destroy_flow(flow)); + rte_free(mlx5_fdir_filter); + } +} + +/** + * Remove all flow director filters and delete list. + * + * @param priv + * Private structure. + */ +void +priv_fdir_delete_filters_list(struct priv *priv) +{ + priv_fdir_filter_flush(priv); + rte_free(priv->fdir_filter_list); + priv->fdir_filter_list = NULL; +} + +/** + * Disable flow director, remove all steering rules. + * + * @param priv + * Private structure. + */ +void +priv_fdir_disable(struct priv *priv) +{ + unsigned int i; + struct mlx5_fdir_filter *mlx5_fdir_filter; + struct fdir_queue *fdir_queue; + + /* Run on every flow director filter and destroy flow handle. */ + LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) { + struct ibv_exp_flow *flow; + + /* Only valid elements should be in the list */ + assert(mlx5_fdir_filter != NULL); + flow = mlx5_fdir_filter->flow; + + /* Destroy flow handle */ + if (flow != NULL) { + claim_zero(ibv_exp_destroy_flow(flow)); + mlx5_fdir_filter->flow = NULL; + } + } + + /* Run on every RX queue to destroy related flow director QP and + * indirection table. */ + for (i = 0; (i != priv->rxqs_n); i++) { + fdir_queue = &(*priv->rxqs)[i]->fdir_queue; + + if (fdir_queue->qp != NULL) { + claim_zero(ibv_destroy_qp(fdir_queue->qp)); + fdir_queue->qp = NULL; + } + + if (fdir_queue->ind_table != NULL) { + claim_zero(ibv_exp_destroy_rwq_ind_table + (fdir_queue->ind_table)); + fdir_queue->ind_table = NULL; + } + } +} + +/** + * Enable flow director, create steering rules. + * + * @param priv + * Private structure. + */ +void +priv_fdir_enable(struct priv *priv) +{ + struct mlx5_fdir_filter *mlx5_fdir_filter; + + /* Run on every fdir filter and create flow handle */ + LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) { + /* Only valid elements should be in the list */ + assert(mlx5_fdir_filter != NULL); + + priv_fdir_filter_enable(priv, mlx5_fdir_filter); + } +} + +/** + * Find specific filter in list. + * + * @param priv + * Private structure. + * @param fdir_filter + * Flow director filter to find. + * + * @return + * Filter element if found, otherwise NULL. + */ +static struct mlx5_fdir_filter * +priv_find_filter_in_list(struct priv *priv, + const struct rte_eth_fdir_filter *fdir_filter) +{ + struct fdir_flow_desc desc; + struct mlx5_fdir_filter *mlx5_fdir_filter; + enum rte_fdir_mode fdir_mode = priv->dev->data->dev_conf.fdir_conf.mode; + + /* Get flow director filter to look for. */ + fdir_filter_to_flow_desc(fdir_filter, &desc, fdir_mode); + + /* Look for the requested element. */ + LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) { + /* Only valid elements should be in the list. 
*/ + assert(mlx5_fdir_filter != NULL); + + /* Return matching filter. */ + if (!memcmp(&desc, &mlx5_fdir_filter->desc, sizeof(desc))) + return mlx5_fdir_filter; + } + + /* Filter not found */ + return NULL; +} + +/** + * Add new flow director filter and store it in list. + * + * @param priv + * Private structure. + * @param fdir_filter + * Flow director filter to add. + * + * @return + * 0 on success, errno value on failure. + */ +static int +priv_fdir_filter_add(struct priv *priv, + const struct rte_eth_fdir_filter *fdir_filter) +{ + struct mlx5_fdir_filter *mlx5_fdir_filter; + enum rte_fdir_mode fdir_mode = priv->dev->data->dev_conf.fdir_conf.mode; + int err = 0; + + /* Validate queue number. */ + if (fdir_filter->action.rx_queue >= priv->rxqs_n) { + ERROR("invalid queue number %d", fdir_filter->action.rx_queue); + return EINVAL; + } + + /* Duplicate filters are currently unsupported. */ + mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter); + if (mlx5_fdir_filter != NULL) { + ERROR("filter already exists"); + return EINVAL; + } + + /* Create new flow director filter. */ + mlx5_fdir_filter = + rte_calloc(__func__, 1, sizeof(*mlx5_fdir_filter), 0); + if (mlx5_fdir_filter == NULL) { + err = ENOMEM; + ERROR("cannot allocate flow director filter: %s", + strerror(err)); + return err; + } + + /* Set queue. */ + mlx5_fdir_filter->queue = fdir_filter->action.rx_queue; + + /* Convert to mlx5 filter descriptor. */ + fdir_filter_to_flow_desc(fdir_filter, + &mlx5_fdir_filter->desc, fdir_mode); + + /* Insert new filter into list. */ + LIST_INSERT_HEAD(priv->fdir_filter_list, mlx5_fdir_filter, next); + + DEBUG("%p: flow director filter %p added", + (void *)priv, (void *)mlx5_fdir_filter); + + /* Enable filter immediately if device is started. */ + if (priv->started) + err = priv_fdir_filter_enable(priv, mlx5_fdir_filter); + + return err; +} + +/** + * Update queue for specific filter. + * + * @param priv + * Private structure. + * @param fdir_filter + * Filter to be updated. + * + * @return + * 0 on success, errno value on failure. + */ +static int +priv_fdir_filter_update(struct priv *priv, + const struct rte_eth_fdir_filter *fdir_filter) +{ + struct mlx5_fdir_filter *mlx5_fdir_filter; + + /* Validate queue number. */ + if (fdir_filter->action.rx_queue >= priv->rxqs_n) { + ERROR("invalid queue number %d", fdir_filter->action.rx_queue); + return EINVAL; + } + + mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter); + if (mlx5_fdir_filter != NULL) { + struct ibv_exp_flow *flow = mlx5_fdir_filter->flow; + int err = 0; + + /* Update queue number. */ + mlx5_fdir_filter->queue = fdir_filter->action.rx_queue; + + /* Destroy flow handle. */ + if (flow != NULL) { + claim_zero(ibv_exp_destroy_flow(flow)); + mlx5_fdir_filter->flow = NULL; + } + DEBUG("%p: flow director filter %p updated", + (void *)priv, (void *)mlx5_fdir_filter); + + /* Enable filter if device is started. */ + if (priv->started) + err = priv_fdir_filter_enable(priv, mlx5_fdir_filter); + + return err; + } + + /* Filter not found, create it. */ + DEBUG("%p: filter not found for update, creating new filter", + (void *)priv); + return priv_fdir_filter_add(priv, fdir_filter); +} + +/** + * Delete specific filter. + * + * @param priv + * Private structure. + * @param fdir_filter + * Filter to be deleted. + * + * @return + * 0 on success, errno value on failure. 
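+ *
+ * At the ethdev level this path corresponds to the RTE_ETH_FILTER_DELETE
+ * operation of the generic filter API; a hypothetical caller passes the
+ * same match fields it used when adding the rule (port_id and
+ * fdir_filter are illustrative):
+ * @code
+ * int ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
+ *				     RTE_ETH_FILTER_DELETE, &fdir_filter);
+ * @endcode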
+ */ +static int +priv_fdir_filter_delete(struct priv *priv, + const struct rte_eth_fdir_filter *fdir_filter) +{ + struct mlx5_fdir_filter *mlx5_fdir_filter; + + mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter); + if (mlx5_fdir_filter != NULL) { + struct ibv_exp_flow *flow = mlx5_fdir_filter->flow; + + /* Remove element from list. */ + LIST_REMOVE(mlx5_fdir_filter, next); + + /* Destroy flow handle. */ + if (flow != NULL) { + claim_zero(ibv_exp_destroy_flow(flow)); + mlx5_fdir_filter->flow = NULL; + } + + DEBUG("%p: flow director filter %p deleted", + (void *)priv, (void *)mlx5_fdir_filter); + + /* Delete filter. */ + rte_free(mlx5_fdir_filter); + + return 0; + } + + ERROR("%p: flow director delete failed, cannot find filter", + (void *)priv); + return EINVAL; +} + +/** + * Get flow director information. + * + * @param priv + * Private structure. + * @param[out] fdir_info + * Resulting flow director information. + */ +static void +priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info) +{ + struct rte_eth_fdir_masks *mask = + &priv->dev->data->dev_conf.fdir_conf.mask; + + fdir_info->mode = priv->dev->data->dev_conf.fdir_conf.mode; + fdir_info->guarant_spc = 0; + + rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask)); + + fdir_info->max_flexpayload = 0; + fdir_info->flow_types_mask[0] = 0; + + fdir_info->flex_payload_unit = 0; + fdir_info->max_flex_payload_segment_num = 0; + fdir_info->flex_payload_limit = 0; + memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf)); +} + +/** + * Deal with flow director operations. + * + * @param priv + * Pointer to private structure. + * @param filter_op + * Operation to perform. + * @param arg + * Pointer to operation-specific structure. + * + * @return + * 0 on success, errno value on failure. + */ +static int +priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg) +{ + enum rte_fdir_mode fdir_mode = + priv->dev->data->dev_conf.fdir_conf.mode; + int ret = 0; + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (fdir_mode != RTE_FDIR_MODE_PERFECT && + fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { + ERROR("%p: flow director mode %d not supported", + (void *)priv, fdir_mode); + return EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = priv_fdir_filter_add(priv, arg); + break; + case RTE_ETH_FILTER_UPDATE: + ret = priv_fdir_filter_update(priv, arg); + break; + case RTE_ETH_FILTER_DELETE: + ret = priv_fdir_filter_delete(priv, arg); + break; + case RTE_ETH_FILTER_FLUSH: + priv_fdir_filter_flush(priv); + break; + case RTE_ETH_FILTER_INFO: + priv_fdir_info_get(priv, arg); + break; + default: + DEBUG("%p: unknown operation %u", (void *)priv, filter_op); + ret = EINVAL; + break; + } + return ret; +} + +/** + * Manage filter operations. + * + * @param dev + * Pointer to Ethernet device structure. + * @param filter_type + * Filter type. + * @param filter_op + * Operation to perform. + * @param arg + * Pointer to operation-specific structure. + * + * @return + * 0 on success, negative errno value on failure. 
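+ *
+ * Only RTE_ETH_FILTER_FDIR is supported here. Minimal application-level
+ * sketch steering one UDPv4 flow to RX queue 1 (port_id, addresses and
+ * ports are hypothetical; field names are from the generic
+ * rte_eth_fdir_filter API):
+ * @code
+ * struct rte_eth_fdir_filter f = {
+ *	.input = {
+ *		.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
+ *		.flow.udp4_flow = {
+ *			.ip = {
+ *				.src_ip = rte_cpu_to_be_32(IPv4(192, 168, 0, 1)),
+ *				.dst_ip = rte_cpu_to_be_32(IPv4(192, 168, 0, 2)),
+ *			},
+ *			.src_port = rte_cpu_to_be_16(1024),
+ *			.dst_port = rte_cpu_to_be_16(4096),
+ *		},
+ *	},
+ *	.action = {
+ *		.rx_queue = 1,
+ *		.behavior = RTE_ETH_FDIR_ACCEPT,
+ *	},
+ * };
+ * int ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
+ *				     RTE_ETH_FILTER_ADD, &f);
+ * @endcode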
+ */ +int +mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + int ret = -EINVAL; + struct priv *priv = dev->data->dev_private; + + switch (filter_type) { + case RTE_ETH_FILTER_FDIR: + priv_lock(priv); + ret = priv_fdir_ctrl_func(priv, filter_op, arg); + priv_unlock(priv); + break; + default: + ERROR("%p: filter type (%d) not supported", + (void *)dev, filter_type); + break; + } + + return ret; +} diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c new file mode 100644 index 00000000..c9cea485 --- /dev/null +++ b/drivers/net/mlx5/mlx5_mac.c @@ -0,0 +1,510 @@ +/*- + * BSD LICENSE + * + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <stddef.h> +#include <assert.h> +#include <stdint.h> +#include <string.h> +#include <inttypes.h> +#include <errno.h> +#include <netinet/in.h> +#include <linux/if.h> +#include <sys/ioctl.h> +#include <arpa/inet.h> + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <infiniband/verbs.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +/* DPDK headers don't like -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_common.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +#include "mlx5.h" +#include "mlx5_utils.h" +#include "mlx5_rxtx.h" +#include "mlx5_defs.h" + +/** + * Get MAC address by querying netdevice. + * + * @param[in] priv + * struct priv for the requested device. + * @param[out] mac + * MAC address output buffer. + * + * @return + * 0 on success, -1 on failure and errno is set. 
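+ *
+ * priv_ifreq() wraps roughly the following standard SIOCGIFHWADDR
+ * sequence (standalone illustration only, interface name is
+ * hypothetical):
+ * @code
+ * int fd = socket(AF_INET, SOCK_DGRAM, 0);
+ * struct ifreq ifr;
+ *
+ * if (fd >= 0) {
+ *	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name));
+ *	if (ioctl(fd, SIOCGIFHWADDR, &ifr) == 0)
+ *		memcpy(mac, ifr.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
+ *	close(fd);
+ * }
+ * @endcode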
+ */ +int +priv_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]) +{ + struct ifreq request; + + if (priv_ifreq(priv, SIOCGIFHWADDR, &request)) + return -1; + memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN); + return 0; +} + +/** + * Delete MAC flow steering rule. + * + * @param hash_rxq + * Pointer to hash RX queue structure. + * @param mac_index + * MAC address index. + * @param vlan_index + * VLAN index to use. + */ +static void +hash_rxq_del_mac_flow(struct hash_rxq *hash_rxq, unsigned int mac_index, + unsigned int vlan_index) +{ +#ifndef NDEBUG + const uint8_t (*mac)[ETHER_ADDR_LEN] = + (const uint8_t (*)[ETHER_ADDR_LEN]) + hash_rxq->priv->mac[mac_index].addr_bytes; +#endif + + assert(mac_index < RTE_DIM(hash_rxq->mac_flow)); + assert(vlan_index < RTE_DIM(hash_rxq->mac_flow[mac_index])); + if (hash_rxq->mac_flow[mac_index][vlan_index] == NULL) + return; + DEBUG("%p: removing MAC address %02x:%02x:%02x:%02x:%02x:%02x index %u" + " VLAN index %u", + (void *)hash_rxq, + (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5], + mac_index, + vlan_index); + claim_zero(ibv_exp_destroy_flow(hash_rxq->mac_flow + [mac_index][vlan_index])); + hash_rxq->mac_flow[mac_index][vlan_index] = NULL; +} + +/** + * Unregister a MAC address from a hash RX queue. + * + * @param hash_rxq + * Pointer to hash RX queue structure. + * @param mac_index + * MAC address index. + */ +static void +hash_rxq_mac_addr_del(struct hash_rxq *hash_rxq, unsigned int mac_index) +{ + unsigned int i; + + assert(mac_index < RTE_DIM(hash_rxq->mac_flow)); + for (i = 0; (i != RTE_DIM(hash_rxq->mac_flow[mac_index])); ++i) + hash_rxq_del_mac_flow(hash_rxq, mac_index, i); +} + +/** + * Unregister all MAC addresses from a hash RX queue. + * + * @param hash_rxq + * Pointer to hash RX queue structure. + */ +void +hash_rxq_mac_addrs_del(struct hash_rxq *hash_rxq) +{ + unsigned int i; + + for (i = 0; (i != RTE_DIM(hash_rxq->mac_flow)); ++i) + hash_rxq_mac_addr_del(hash_rxq, i); +} + +/** + * Unregister a MAC address. + * + * This is done for each hash RX queue. + * + * @param priv + * Pointer to private structure. + * @param mac_index + * MAC address index. + */ +static void +priv_mac_addr_del(struct priv *priv, unsigned int mac_index) +{ + unsigned int i; + + assert(mac_index < RTE_DIM(priv->mac)); + if (!BITFIELD_ISSET(priv->mac_configured, mac_index)) + return; + for (i = 0; (i != priv->hash_rxqs_n); ++i) + hash_rxq_mac_addr_del(&(*priv->hash_rxqs)[i], mac_index); + BITFIELD_RESET(priv->mac_configured, mac_index); +} + +/** + * Unregister all MAC addresses from all hash RX queues. + * + * @param priv + * Pointer to private structure. + */ +void +priv_mac_addrs_disable(struct priv *priv) +{ + unsigned int i; + + for (i = 0; (i != priv->hash_rxqs_n); ++i) + hash_rxq_mac_addrs_del(&(*priv->hash_rxqs)[i]); +} + +/** + * DPDK callback to remove a MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param index + * MAC address index. + */ +void +mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + struct priv *priv = dev->data->dev_private; + + if (mlx5_is_secondary()) + return; + + priv_lock(priv); + DEBUG("%p: removing MAC address from index %" PRIu32, + (void *)dev, index); + if (index >= RTE_DIM(priv->mac)) + goto end; + priv_mac_addr_del(priv, index); +end: + priv_unlock(priv); +} + +/** + * Add MAC flow steering rule. + * + * @param hash_rxq + * Pointer to hash RX queue structure. + * @param mac_index + * MAC address index to register. 
+ * @param vlan_index + * VLAN index to use. + * + * @return + * 0 on success, errno value on failure. + */ +static int +hash_rxq_add_mac_flow(struct hash_rxq *hash_rxq, unsigned int mac_index, + unsigned int vlan_index) +{ + struct ibv_exp_flow *flow; + struct priv *priv = hash_rxq->priv; + const uint8_t (*mac)[ETHER_ADDR_LEN] = + (const uint8_t (*)[ETHER_ADDR_LEN]) + priv->mac[mac_index].addr_bytes; + FLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, hash_rxq->type)); + struct ibv_exp_flow_attr *attr = &data->attr; + struct ibv_exp_flow_spec_eth *spec = &data->spec; + unsigned int vlan_enabled = !!priv->vlan_filter_n; + unsigned int vlan_id = priv->vlan_filter[vlan_index]; + + assert(mac_index < RTE_DIM(hash_rxq->mac_flow)); + assert(vlan_index < RTE_DIM(hash_rxq->mac_flow[mac_index])); + if (hash_rxq->mac_flow[mac_index][vlan_index] != NULL) + return 0; + /* + * No padding must be inserted by the compiler between attr and spec. + * This layout is expected by libibverbs. + */ + assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec); + priv_flow_attr(priv, attr, sizeof(data), hash_rxq->type); + /* The first specification must be Ethernet. */ + assert(spec->type == IBV_EXP_FLOW_SPEC_ETH); + assert(spec->size == sizeof(*spec)); + *spec = (struct ibv_exp_flow_spec_eth){ + .type = IBV_EXP_FLOW_SPEC_ETH, + .size = sizeof(*spec), + .val = { + .dst_mac = { + (*mac)[0], (*mac)[1], (*mac)[2], + (*mac)[3], (*mac)[4], (*mac)[5] + }, + .vlan_tag = (vlan_enabled ? htons(vlan_id) : 0), + }, + .mask = { + .dst_mac = "\xff\xff\xff\xff\xff\xff", + .vlan_tag = (vlan_enabled ? htons(0xfff) : 0), + }, + }; + DEBUG("%p: adding MAC address %02x:%02x:%02x:%02x:%02x:%02x index %u" + " VLAN index %u filtering %s, ID %u", + (void *)hash_rxq, + (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5], + mac_index, + vlan_index, + (vlan_enabled ? "enabled" : "disabled"), + vlan_id); + /* Create related flow. */ + errno = 0; + flow = ibv_exp_create_flow(hash_rxq->qp, attr); + if (flow == NULL) { + /* It's not clear whether errno is always set in this case. */ + ERROR("%p: flow configuration failed, errno=%d: %s", + (void *)hash_rxq, errno, + (errno ? strerror(errno) : "Unknown error")); + if (errno) + return errno; + return EINVAL; + } + hash_rxq->mac_flow[mac_index][vlan_index] = flow; + return 0; +} + +/** + * Register a MAC address in a hash RX queue. + * + * @param hash_rxq + * Pointer to hash RX queue structure. + * @param mac_index + * MAC address index to register. + * + * @return + * 0 on success, errno value on failure. + */ +static int +hash_rxq_mac_addr_add(struct hash_rxq *hash_rxq, unsigned int mac_index) +{ + struct priv *priv = hash_rxq->priv; + unsigned int i = 0; + int ret; + + assert(mac_index < RTE_DIM(hash_rxq->mac_flow)); + assert(RTE_DIM(hash_rxq->mac_flow[mac_index]) == + RTE_DIM(priv->vlan_filter)); + /* Add a MAC address for each VLAN filter, or at least once. */ + do { + ret = hash_rxq_add_mac_flow(hash_rxq, mac_index, i); + if (ret) { + /* Failure, rollback. */ + while (i != 0) + hash_rxq_del_mac_flow(hash_rxq, mac_index, + --i); + return ret; + } + } while (++i < priv->vlan_filter_n); + return 0; +} + +/** + * Register all MAC addresses in a hash RX queue. + * + * @param hash_rxq + * Pointer to hash RX queue structure. + * + * @return + * 0 on success, errno value on failure. 
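+ *
+ * These per-queue registrations are driven by the ethdev MAC API; a
+ * hypothetical application-level call (port id and address are
+ * illustrative, the pool index is unused by this PMD):
+ * @code
+ * struct ether_addr addr = {
+ *	.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ * };
+ *
+ * int ret = rte_eth_dev_mac_addr_add(port_id, &addr, 0);
+ * @endcode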
+ */ +int +hash_rxq_mac_addrs_add(struct hash_rxq *hash_rxq) +{ + struct priv *priv = hash_rxq->priv; + unsigned int i; + int ret; + + assert(RTE_DIM(priv->mac) == RTE_DIM(hash_rxq->mac_flow)); + for (i = 0; (i != RTE_DIM(priv->mac)); ++i) { + if (!BITFIELD_ISSET(priv->mac_configured, i)) + continue; + ret = hash_rxq_mac_addr_add(hash_rxq, i); + if (!ret) + continue; + /* Failure, rollback. */ + while (i != 0) + hash_rxq_mac_addr_del(hash_rxq, --i); + assert(ret > 0); + return ret; + } + return 0; +} + +/** + * Register a MAC address. + * + * This is done for each hash RX queue. + * + * @param priv + * Pointer to private structure. + * @param mac_index + * MAC address index to use. + * @param mac + * MAC address to register. + * + * @return + * 0 on success, errno value on failure. + */ +int +priv_mac_addr_add(struct priv *priv, unsigned int mac_index, + const uint8_t (*mac)[ETHER_ADDR_LEN]) +{ + unsigned int i; + int ret; + + assert(mac_index < RTE_DIM(priv->mac)); + /* First, make sure this address isn't already configured. */ + for (i = 0; (i != RTE_DIM(priv->mac)); ++i) { + /* Skip this index, it's going to be reconfigured. */ + if (i == mac_index) + continue; + if (!BITFIELD_ISSET(priv->mac_configured, i)) + continue; + if (memcmp(priv->mac[i].addr_bytes, *mac, sizeof(*mac))) + continue; + /* Address already configured elsewhere, return with error. */ + return EADDRINUSE; + } + if (BITFIELD_ISSET(priv->mac_configured, mac_index)) + priv_mac_addr_del(priv, mac_index); + priv->mac[mac_index] = (struct ether_addr){ + { + (*mac)[0], (*mac)[1], (*mac)[2], + (*mac)[3], (*mac)[4], (*mac)[5] + } + }; + if (!priv_allow_flow_type(priv, HASH_RXQ_FLOW_TYPE_MAC)) + goto end; + for (i = 0; (i != priv->hash_rxqs_n); ++i) { + ret = hash_rxq_mac_addr_add(&(*priv->hash_rxqs)[i], mac_index); + if (!ret) + continue; + /* Failure, rollback. */ + while (i != 0) + hash_rxq_mac_addr_del(&(*priv->hash_rxqs)[--i], + mac_index); + return ret; + } +end: + BITFIELD_SET(priv->mac_configured, mac_index); + return 0; +} + +/** + * Register all MAC addresses in all hash RX queues. + * + * @param priv + * Pointer to private structure. + * + * @return + * 0 on success, errno value on failure. + */ +int +priv_mac_addrs_enable(struct priv *priv) +{ + unsigned int i; + int ret; + + if (!priv_allow_flow_type(priv, HASH_RXQ_FLOW_TYPE_MAC)) + return 0; + for (i = 0; (i != priv->hash_rxqs_n); ++i) { + ret = hash_rxq_mac_addrs_add(&(*priv->hash_rxqs)[i]); + if (!ret) + continue; + /* Failure, rollback. */ + while (i != 0) + hash_rxq_mac_addrs_del(&(*priv->hash_rxqs)[--i]); + assert(ret > 0); + return ret; + } + return 0; +} + +/** + * DPDK callback to add a MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param mac_addr + * MAC address to register. + * @param index + * MAC address index. + * @param vmdq + * VMDq pool index to associate address with (ignored). + */ +void +mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + uint32_t index, uint32_t vmdq) +{ + struct priv *priv = dev->data->dev_private; + + if (mlx5_is_secondary()) + return; + + (void)vmdq; + priv_lock(priv); + DEBUG("%p: adding MAC address at index %" PRIu32, + (void *)dev, index); + if (index >= RTE_DIM(priv->mac)) + goto end; + priv_mac_addr_add(priv, index, + (const uint8_t (*)[ETHER_ADDR_LEN]) + mac_addr->addr_bytes); +end: + priv_unlock(priv); +} + +/** + * DPDK callback to set primary MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param mac_addr + * MAC address to register. 
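+ *
+ * Reached through the ethdev API, e.g. (port id and address are
+ * hypothetical):
+ * @code
+ * int ret = rte_eth_dev_default_mac_addr_set(port_id, &new_mac);
+ * @endcode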
+ */ +void +mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) +{ + DEBUG("%p: setting primary MAC address", (void *)dev); + mlx5_mac_addr_remove(dev, 0); + mlx5_mac_addr_add(dev, mac_addr, 0, 0); +} diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c new file mode 100644 index 00000000..639e935b --- /dev/null +++ b/drivers/net/mlx5/mlx5_rss.c @@ -0,0 +1,367 @@ +/*- + * BSD LICENSE + * + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <stddef.h> +#include <stdint.h> +#include <errno.h> +#include <string.h> +#include <assert.h> + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <infiniband/verbs.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +/* DPDK headers don't like -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <rte_malloc.h> +#include <rte_ethdev.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +#include "mlx5.h" +#include "mlx5_rxtx.h" + +/** + * Get a RSS configuration hash key. + * + * @param priv + * Pointer to private structure. + * @param rss_hf + * RSS hash functions configuration must be retrieved for. + * + * @return + * Pointer to a RSS configuration structure or NULL if rss_hf cannot + * be matched. + */ +static struct rte_eth_rss_conf * +rss_hash_get(struct priv *priv, uint64_t rss_hf) +{ + unsigned int i; + + for (i = 0; (i != hash_rxq_init_n); ++i) { + uint64_t dpdk_rss_hf = hash_rxq_init[i].dpdk_rss_hf; + + if (!(dpdk_rss_hf & rss_hf)) + continue; + return (*priv->rss_conf)[i]; + } + return NULL; +} + +/** + * Register a RSS key. + * + * @param priv + * Pointer to private structure. + * @param key + * Hash key to register. + * @param key_len + * Hash key length in bytes. + * @param rss_hf + * RSS hash functions the provided key applies to. + * + * @return + * 0 on success, errno value on failure. 
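+ *
+ * Reached through mlx5_rss_hash_update(); minimal application-level
+ * sketch (port id is hypothetical, key[] must be filled by the
+ * application, the length shown matches the 40-byte default key used
+ * by this PMD):
+ * @code
+ * uint8_t key[40] = { 0 };
+ * struct rte_eth_rss_conf conf = {
+ *	.rss_key = key,
+ *	.rss_key_len = sizeof(key),
+ *	.rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
+ * };
+ *
+ * int ret = rte_eth_dev_rss_hash_update(port_id, &conf);
+ * @endcode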
+ */ +int +rss_hash_rss_conf_new_key(struct priv *priv, const uint8_t *key, + unsigned int key_len, uint64_t rss_hf) +{ + unsigned int i; + + for (i = 0; (i != hash_rxq_init_n); ++i) { + struct rte_eth_rss_conf *rss_conf; + uint64_t dpdk_rss_hf = hash_rxq_init[i].dpdk_rss_hf; + + if (!(dpdk_rss_hf & rss_hf)) + continue; + rss_conf = rte_realloc((*priv->rss_conf)[i], + (sizeof(*rss_conf) + key_len), + 0); + if (!rss_conf) + return ENOMEM; + rss_conf->rss_key = (void *)(rss_conf + 1); + rss_conf->rss_key_len = key_len; + rss_conf->rss_hf = dpdk_rss_hf; + memcpy(rss_conf->rss_key, key, key_len); + (*priv->rss_conf)[i] = rss_conf; + } + return 0; +} + +/** + * DPDK callback to update the RSS hash configuration. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[in] rss_conf + * RSS configuration data. + * + * @return + * 0 on success, negative errno value on failure. + */ +int +mlx5_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct priv *priv = dev->data->dev_private; + int err = 0; + + priv_lock(priv); + + assert(priv->rss_conf != NULL); + + /* Apply configuration. */ + if (rss_conf->rss_key) + err = rss_hash_rss_conf_new_key(priv, + rss_conf->rss_key, + rss_conf->rss_key_len, + rss_conf->rss_hf); + /* Store protocols for which RSS is enabled. */ + priv->rss_hf = rss_conf->rss_hf; + priv_unlock(priv); + assert(err >= 0); + return -err; +} + +/** + * DPDK callback to get the RSS hash configuration. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[in, out] rss_conf + * RSS configuration data. + * + * @return + * 0 on success, negative errno value on failure. + */ +int +mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct priv *priv = dev->data->dev_private; + struct rte_eth_rss_conf *priv_rss_conf; + + priv_lock(priv); + + assert(priv->rss_conf != NULL); + + priv_rss_conf = rss_hash_get(priv, rss_conf->rss_hf); + if (!priv_rss_conf) { + rss_conf->rss_hf = 0; + priv_unlock(priv); + return -EINVAL; + } + if (rss_conf->rss_key && + rss_conf->rss_key_len >= priv_rss_conf->rss_key_len) + memcpy(rss_conf->rss_key, + priv_rss_conf->rss_key, + priv_rss_conf->rss_key_len); + rss_conf->rss_key_len = priv_rss_conf->rss_key_len; + rss_conf->rss_hf = priv_rss_conf->rss_hf; + + priv_unlock(priv); + return 0; +} + +/** + * Allocate/reallocate RETA index table. + * + * @param priv + * Pointer to private structure. + * @praram reta_size + * The size of the array to allocate. + * + * @return + * 0 on success, errno value on failure. + */ +int +priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size) +{ + void *mem; + unsigned int old_size = priv->reta_idx_n; + + if (priv->reta_idx_n == reta_size) + return 0; + + mem = rte_realloc(priv->reta_idx, + reta_size * sizeof((*priv->reta_idx)[0]), 0); + if (!mem) + return ENOMEM; + priv->reta_idx = mem; + priv->reta_idx_n = reta_size; + + if (old_size < reta_size) + memset(&(*priv->reta_idx)[old_size], 0, + (reta_size - old_size) * + sizeof((*priv->reta_idx)[0])); + return 0; +} + +/** + * Query RETA table. + * + * @param priv + * Pointer to private structure. + * @param[in, out] reta_conf + * Pointer to the first RETA configuration structure. + * @param reta_size + * Number of entries. + * + * @return + * 0 on success, errno value on failure. 
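+ *
+ * RETA entry i maps to reta_conf[i / RTE_RETA_GROUP_SIZE]
+ * .reta[i % RTE_RETA_GROUP_SIZE]. Hypothetical application-level query
+ * (port id and table size are illustrative):
+ * @code
+ * struct rte_eth_rss_reta_entry64 reta_conf[4];
+ * unsigned int i;
+ *
+ * memset(reta_conf, 0, sizeof(reta_conf));
+ * for (i = 0; i != RTE_DIM(reta_conf); ++i)
+ *	reta_conf[i].mask = ~0ULL;
+ * int ret = rte_eth_dev_rss_reta_query(port_id, reta_conf,
+ *					4 * RTE_RETA_GROUP_SIZE);
+ * @endcode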
+ */ +static int +priv_dev_rss_reta_query(struct priv *priv, + struct rte_eth_rss_reta_entry64 *reta_conf, + unsigned int reta_size) +{ + unsigned int idx; + unsigned int i; + int ret; + + /* See RETA comment in mlx5_dev_infos_get(). */ + ret = priv_rss_reta_index_resize(priv, priv->ind_table_max_size); + if (ret) + return ret; + + /* Fill each entry of the table even if its bit is not set. */ + for (idx = 0, i = 0; (i != reta_size); ++i) { + idx = i / RTE_RETA_GROUP_SIZE; + reta_conf[idx].reta[i % RTE_RETA_GROUP_SIZE] = + (*priv->reta_idx)[i]; + } + return 0; +} + +/** + * Update RETA table. + * + * @param priv + * Pointer to private structure. + * @param[in] reta_conf + * Pointer to the first RETA configuration structure. + * @param reta_size + * Number of entries. + * + * @return + * 0 on success, errno value on failure. + */ +static int +priv_dev_rss_reta_update(struct priv *priv, + struct rte_eth_rss_reta_entry64 *reta_conf, + unsigned int reta_size) +{ + unsigned int idx; + unsigned int i; + unsigned int pos; + int ret; + + /* See RETA comment in mlx5_dev_infos_get(). */ + ret = priv_rss_reta_index_resize(priv, priv->ind_table_max_size); + if (ret) + return ret; + + for (idx = 0, i = 0; (i != reta_size); ++i) { + idx = i / RTE_RETA_GROUP_SIZE; + pos = i % RTE_RETA_GROUP_SIZE; + if (((reta_conf[idx].mask >> i) & 0x1) == 0) + continue; + assert(reta_conf[idx].reta[pos] < priv->rxqs_n); + (*priv->reta_idx)[i] = reta_conf[idx].reta[pos]; + } + return 0; +} + +/** + * DPDK callback to get the RETA indirection table. + * + * @param dev + * Pointer to Ethernet device structure. + * @param reta_conf + * Pointer to RETA configuration structure array. + * @param reta_size + * Size of the RETA table. + * + * @return + * 0 on success, negative errno value on failure. + */ +int +mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + int ret; + struct priv *priv = dev->data->dev_private; + + priv_lock(priv); + ret = priv_dev_rss_reta_query(priv, reta_conf, reta_size); + priv_unlock(priv); + return -ret; +} + +/** + * DPDK callback to update the RETA indirection table. + * + * @param dev + * Pointer to Ethernet device structure. + * @param reta_conf + * Pointer to RETA configuration structure array. + * @param reta_size + * Size of the RETA table. + * + * @return + * 0 on success, negative errno value on failure. + */ +int +mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + int ret; + struct priv *priv = dev->data->dev_private; + + priv_lock(priv); + ret = priv_dev_rss_reta_update(priv, reta_conf, reta_size); + priv_unlock(priv); + return -ret; +} diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c new file mode 100644 index 00000000..3a55f633 --- /dev/null +++ b/drivers/net/mlx5/mlx5_rxmode.c @@ -0,0 +1,481 @@ +/*- + * BSD LICENSE + * + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <stddef.h> +#include <errno.h> +#include <string.h> + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <infiniband/verbs.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +/* DPDK headers don't like -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <rte_ethdev.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +#include "mlx5.h" +#include "mlx5_rxtx.h" +#include "mlx5_utils.h" + +/* Initialization data for special flows. */ +static const struct special_flow_init special_flow_init[] = { + [HASH_RXQ_FLOW_TYPE_PROMISC] = { + .dst_mac_val = "\x00\x00\x00\x00\x00\x00", + .dst_mac_mask = "\x00\x00\x00\x00\x00\x00", + .hash_types = + 1 << HASH_RXQ_TCPV4 | + 1 << HASH_RXQ_UDPV4 | + 1 << HASH_RXQ_IPV4 | +#ifdef HAVE_FLOW_SPEC_IPV6 + 1 << HASH_RXQ_TCPV6 | + 1 << HASH_RXQ_UDPV6 | + 1 << HASH_RXQ_IPV6 | +#endif /* HAVE_FLOW_SPEC_IPV6 */ + 1 << HASH_RXQ_ETH | + 0, + .per_vlan = 0, + }, + [HASH_RXQ_FLOW_TYPE_ALLMULTI] = { + .dst_mac_val = "\x01\x00\x00\x00\x00\x00", + .dst_mac_mask = "\x01\x00\x00\x00\x00\x00", + .hash_types = + 1 << HASH_RXQ_UDPV4 | + 1 << HASH_RXQ_IPV4 | +#ifdef HAVE_FLOW_SPEC_IPV6 + 1 << HASH_RXQ_UDPV6 | + 1 << HASH_RXQ_IPV6 | +#endif /* HAVE_FLOW_SPEC_IPV6 */ + 1 << HASH_RXQ_ETH | + 0, + .per_vlan = 0, + }, + [HASH_RXQ_FLOW_TYPE_BROADCAST] = { + .dst_mac_val = "\xff\xff\xff\xff\xff\xff", + .dst_mac_mask = "\xff\xff\xff\xff\xff\xff", + .hash_types = + 1 << HASH_RXQ_UDPV4 | + 1 << HASH_RXQ_IPV4 | +#ifdef HAVE_FLOW_SPEC_IPV6 + 1 << HASH_RXQ_UDPV6 | + 1 << HASH_RXQ_IPV6 | +#endif /* HAVE_FLOW_SPEC_IPV6 */ + 1 << HASH_RXQ_ETH | + 0, + .per_vlan = 1, + }, +#ifdef HAVE_FLOW_SPEC_IPV6 + [HASH_RXQ_FLOW_TYPE_IPV6MULTI] = { + .dst_mac_val = "\x33\x33\x00\x00\x00\x00", + .dst_mac_mask = "\xff\xff\x00\x00\x00\x00", + .hash_types = + 1 << HASH_RXQ_UDPV6 | + 1 << HASH_RXQ_IPV6 | + 1 << HASH_RXQ_ETH | + 0, + .per_vlan = 1, + }, +#endif /* HAVE_FLOW_SPEC_IPV6 */ +}; + +/** + * Enable a special flow in a hash RX queue for a given VLAN index. + * + * @param hash_rxq + * Pointer to hash RX queue structure. + * @param flow_type + * Special flow type. + * @param vlan_index + * VLAN index to use. + * + * @return + * 0 on success, errno value on failure. 
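+ *
+ * The special_flow_init[] value/mask pairs implement a masked match on
+ * the destination MAC; e.g. the IPV6MULTI entry (value
+ * 33:33:00:00:00:00, mask ff:ff:00:00:00:00) matches any address whose
+ * first two bytes are 0x33. Equivalent host-side check, for
+ * illustration only (the actual matching is performed by the device):
+ * @code
+ * static int
+ * dst_mac_matches(const uint8_t *dst, const uint8_t *val,
+ *		   const uint8_t *mask)
+ * {
+ *	unsigned int i;
+ *
+ *	for (i = 0; i != ETHER_ADDR_LEN; ++i)
+ *		if ((dst[i] & mask[i]) != (val[i] & mask[i]))
+ *			return 0;
+ *	return 1;
+ * }
+ * @endcode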
+ */ +static int +hash_rxq_special_flow_enable_vlan(struct hash_rxq *hash_rxq, + enum hash_rxq_flow_type flow_type, + unsigned int vlan_index) +{ + struct priv *priv = hash_rxq->priv; + struct ibv_exp_flow *flow; + FLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, hash_rxq->type)); + struct ibv_exp_flow_attr *attr = &data->attr; + struct ibv_exp_flow_spec_eth *spec = &data->spec; + const uint8_t *mac; + const uint8_t *mask; + unsigned int vlan_enabled = (priv->vlan_filter_n && + special_flow_init[flow_type].per_vlan); + unsigned int vlan_id = priv->vlan_filter[vlan_index]; + + /* Check if flow is relevant for this hash_rxq. */ + if (!(special_flow_init[flow_type].hash_types & (1 << hash_rxq->type))) + return 0; + /* Check if flow already exists. */ + if (hash_rxq->special_flow[flow_type][vlan_index] != NULL) + return 0; + + /* + * No padding must be inserted by the compiler between attr and spec. + * This layout is expected by libibverbs. + */ + assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec); + priv_flow_attr(priv, attr, sizeof(data), hash_rxq->type); + /* The first specification must be Ethernet. */ + assert(spec->type == IBV_EXP_FLOW_SPEC_ETH); + assert(spec->size == sizeof(*spec)); + + mac = special_flow_init[flow_type].dst_mac_val; + mask = special_flow_init[flow_type].dst_mac_mask; + *spec = (struct ibv_exp_flow_spec_eth){ + .type = IBV_EXP_FLOW_SPEC_ETH, + .size = sizeof(*spec), + .val = { + .dst_mac = { + mac[0], mac[1], mac[2], + mac[3], mac[4], mac[5], + }, + .vlan_tag = (vlan_enabled ? htons(vlan_id) : 0), + }, + .mask = { + .dst_mac = { + mask[0], mask[1], mask[2], + mask[3], mask[4], mask[5], + }, + .vlan_tag = (vlan_enabled ? htons(0xfff) : 0), + }, + }; + + errno = 0; + flow = ibv_exp_create_flow(hash_rxq->qp, attr); + if (flow == NULL) { + /* It's not clear whether errno is always set in this case. */ + ERROR("%p: flow configuration failed, errno=%d: %s", + (void *)hash_rxq, errno, + (errno ? strerror(errno) : "Unknown error")); + if (errno) + return errno; + return EINVAL; + } + hash_rxq->special_flow[flow_type][vlan_index] = flow; + DEBUG("%p: special flow %s (index %d) VLAN %u (index %u) enabled", + (void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type, + vlan_id, vlan_index); + return 0; +} + +/** + * Disable a special flow in a hash RX queue for a given VLAN index. + * + * @param hash_rxq + * Pointer to hash RX queue structure. + * @param flow_type + * Special flow type. + * @param vlan_index + * VLAN index to use. + */ +static void +hash_rxq_special_flow_disable_vlan(struct hash_rxq *hash_rxq, + enum hash_rxq_flow_type flow_type, + unsigned int vlan_index) +{ + struct ibv_exp_flow *flow = + hash_rxq->special_flow[flow_type][vlan_index]; + + if (flow == NULL) + return; + claim_zero(ibv_exp_destroy_flow(flow)); + hash_rxq->special_flow[flow_type][vlan_index] = NULL; + DEBUG("%p: special flow %s (index %d) VLAN %u (index %u) disabled", + (void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type, + hash_rxq->priv->vlan_filter[vlan_index], vlan_index); +} + +/** + * Enable a special flow in a hash RX queue. + * + * @param hash_rxq + * Pointer to hash RX queue structure. + * @param flow_type + * Special flow type. + * @param vlan_index + * VLAN index to use. + * + * @return + * 0 on success, errno value on failure. 
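+ *
+ * The do/while loop below always installs the flow at least once (VLAN
+ * index 0), even when no VLAN filter is configured; extra per-VLAN
+ * copies are only created for flow types marked per_vlan in
+ * special_flow_init[]. On failure, flows created so far are rolled
+ * back before returning.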
+ */ +static int +hash_rxq_special_flow_enable(struct hash_rxq *hash_rxq, + enum hash_rxq_flow_type flow_type) +{ + struct priv *priv = hash_rxq->priv; + unsigned int i = 0; + int ret; + + assert((unsigned int)flow_type < RTE_DIM(hash_rxq->special_flow)); + assert(RTE_DIM(hash_rxq->special_flow[flow_type]) == + RTE_DIM(priv->vlan_filter)); + /* Add a special flow for each VLAN filter when relevant. */ + do { + ret = hash_rxq_special_flow_enable_vlan(hash_rxq, flow_type, i); + if (ret) { + /* Failure, rollback. */ + while (i != 0) + hash_rxq_special_flow_disable_vlan(hash_rxq, + flow_type, + --i); + return ret; + } + } while (special_flow_init[flow_type].per_vlan && + ++i < priv->vlan_filter_n); + return 0; +} + +/** + * Disable a special flow in a hash RX queue. + * + * @param hash_rxq + * Pointer to hash RX queue structure. + * @param flow_type + * Special flow type. + */ +static void +hash_rxq_special_flow_disable(struct hash_rxq *hash_rxq, + enum hash_rxq_flow_type flow_type) +{ + unsigned int i; + + assert((unsigned int)flow_type < RTE_DIM(hash_rxq->special_flow)); + for (i = 0; (i != RTE_DIM(hash_rxq->special_flow[flow_type])); ++i) + hash_rxq_special_flow_disable_vlan(hash_rxq, flow_type, i); +} + +/** + * Enable a special flow in all hash RX queues. + * + * @param priv + * Private structure. + * @param flow_type + * Special flow type. + * + * @return + * 0 on success, errno value on failure. + */ +int +priv_special_flow_enable(struct priv *priv, enum hash_rxq_flow_type flow_type) +{ + unsigned int i; + + if (!priv_allow_flow_type(priv, flow_type)) + return 0; + for (i = 0; (i != priv->hash_rxqs_n); ++i) { + struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i]; + int ret; + + ret = hash_rxq_special_flow_enable(hash_rxq, flow_type); + if (!ret) + continue; + /* Failure, rollback. */ + while (i != 0) { + hash_rxq = &(*priv->hash_rxqs)[--i]; + hash_rxq_special_flow_disable(hash_rxq, flow_type); + } + return ret; + } + return 0; +} + +/** + * Disable a special flow in all hash RX queues. + * + * @param priv + * Private structure. + * @param flow_type + * Special flow type. + */ +void +priv_special_flow_disable(struct priv *priv, enum hash_rxq_flow_type flow_type) +{ + unsigned int i; + + for (i = 0; (i != priv->hash_rxqs_n); ++i) { + struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i]; + + hash_rxq_special_flow_disable(hash_rxq, flow_type); + } +} + +/** + * Enable all special flows in all hash RX queues. + * + * @param priv + * Private structure. + */ +int +priv_special_flow_enable_all(struct priv *priv) +{ + enum hash_rxq_flow_type flow_type; + + for (flow_type = 0; flow_type != HASH_RXQ_FLOW_TYPE_MAC; ++flow_type) { + int ret; + + ret = priv_special_flow_enable(priv, flow_type); + if (!ret) + continue; + /* Failure, rollback. */ + while (flow_type) + priv_special_flow_disable(priv, --flow_type); + return ret; + } + return 0; +} + +/** + * Disable all special flows in all hash RX queues. + * + * @param priv + * Private structure. + */ +void +priv_special_flow_disable_all(struct priv *priv) +{ + enum hash_rxq_flow_type flow_type; + + for (flow_type = 0; flow_type != HASH_RXQ_FLOW_TYPE_MAC; ++flow_type) + priv_special_flow_disable(priv, flow_type); +} + +/** + * DPDK callback to enable promiscuous mode. + * + * @param dev + * Pointer to Ethernet device structure. 
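+ *
+ * The request flag is recorded and priv_rehash_flows() translates it
+ * into the corresponding special flows. Typical application usage
+ * (hypothetical port id):
+ * @code
+ * rte_eth_promiscuous_enable(port_id);
+ * @endcode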
+ */ +void +mlx5_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + int ret; + + if (mlx5_is_secondary()) + return; + + priv_lock(priv); + priv->promisc_req = 1; + ret = priv_rehash_flows(priv); + if (ret) + ERROR("error while enabling promiscuous mode: %s", + strerror(ret)); + priv_unlock(priv); +} + +/** + * DPDK callback to disable promiscuous mode. + * + * @param dev + * Pointer to Ethernet device structure. + */ +void +mlx5_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + int ret; + + if (mlx5_is_secondary()) + return; + + priv_lock(priv); + priv->promisc_req = 0; + ret = priv_rehash_flows(priv); + if (ret) + ERROR("error while disabling promiscuous mode: %s", + strerror(ret)); + priv_unlock(priv); +} + +/** + * DPDK callback to enable allmulti mode. + * + * @param dev + * Pointer to Ethernet device structure. + */ +void +mlx5_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + int ret; + + if (mlx5_is_secondary()) + return; + + priv_lock(priv); + priv->allmulti_req = 1; + ret = priv_rehash_flows(priv); + if (ret) + ERROR("error while enabling allmulticast mode: %s", + strerror(ret)); + priv_unlock(priv); +} + +/** + * DPDK callback to disable allmulti mode. + * + * @param dev + * Pointer to Ethernet device structure. + */ +void +mlx5_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + int ret; + + if (mlx5_is_secondary()) + return; + + priv_lock(priv); + priv->allmulti_req = 0; + ret = priv_rehash_flows(priv); + if (ret) + ERROR("error while disabling allmulticast mode: %s", + strerror(ret)); + priv_unlock(priv); +} diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c new file mode 100644 index 00000000..cbb017bb --- /dev/null +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -0,0 +1,1555 @@ +/*- + * BSD LICENSE + * + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <stddef.h> +#include <assert.h> +#include <errno.h> +#include <string.h> +#include <stdint.h> + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <infiniband/verbs.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +/* DPDK headers don't like -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <rte_mbuf.h> +#include <rte_malloc.h> +#include <rte_ethdev.h> +#include <rte_common.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +#include "mlx5.h" +#include "mlx5_rxtx.h" +#include "mlx5_utils.h" +#include "mlx5_autoconf.h" +#include "mlx5_defs.h" + +/* Initialization data for hash RX queues. */ +const struct hash_rxq_init hash_rxq_init[] = { + [HASH_RXQ_TCPV4] = { + .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 | + IBV_EXP_RX_HASH_DST_IPV4 | + IBV_EXP_RX_HASH_SRC_PORT_TCP | + IBV_EXP_RX_HASH_DST_PORT_TCP), + .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP, + .flow_priority = 0, + .flow_spec.tcp_udp = { + .type = IBV_EXP_FLOW_SPEC_TCP, + .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp), + }, + .underlayer = &hash_rxq_init[HASH_RXQ_IPV4], + }, + [HASH_RXQ_UDPV4] = { + .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 | + IBV_EXP_RX_HASH_DST_IPV4 | + IBV_EXP_RX_HASH_SRC_PORT_UDP | + IBV_EXP_RX_HASH_DST_PORT_UDP), + .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP, + .flow_priority = 0, + .flow_spec.tcp_udp = { + .type = IBV_EXP_FLOW_SPEC_UDP, + .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp), + }, + .underlayer = &hash_rxq_init[HASH_RXQ_IPV4], + }, + [HASH_RXQ_IPV4] = { + .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 | + IBV_EXP_RX_HASH_DST_IPV4), + .dpdk_rss_hf = (ETH_RSS_IPV4 | + ETH_RSS_FRAG_IPV4), + .flow_priority = 1, + .flow_spec.ipv4 = { + .type = IBV_EXP_FLOW_SPEC_IPV4, + .size = sizeof(hash_rxq_init[0].flow_spec.ipv4), + }, + .underlayer = &hash_rxq_init[HASH_RXQ_ETH], + }, +#ifdef HAVE_FLOW_SPEC_IPV6 + [HASH_RXQ_TCPV6] = { + .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 | + IBV_EXP_RX_HASH_DST_IPV6 | + IBV_EXP_RX_HASH_SRC_PORT_TCP | + IBV_EXP_RX_HASH_DST_PORT_TCP), + .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP, + .flow_priority = 0, + .flow_spec.tcp_udp = { + .type = IBV_EXP_FLOW_SPEC_TCP, + .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp), + }, + .underlayer = &hash_rxq_init[HASH_RXQ_IPV6], + }, + [HASH_RXQ_UDPV6] = { + .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 | + IBV_EXP_RX_HASH_DST_IPV6 | + IBV_EXP_RX_HASH_SRC_PORT_UDP | + IBV_EXP_RX_HASH_DST_PORT_UDP), + .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP, + .flow_priority = 0, + .flow_spec.tcp_udp = { + .type = IBV_EXP_FLOW_SPEC_UDP, + .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp), + }, + .underlayer = &hash_rxq_init[HASH_RXQ_IPV6], + }, + [HASH_RXQ_IPV6] = { + .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 | + IBV_EXP_RX_HASH_DST_IPV6), + .dpdk_rss_hf = (ETH_RSS_IPV6 | + ETH_RSS_FRAG_IPV6), + .flow_priority = 1, + .flow_spec.ipv6 = { + .type = IBV_EXP_FLOW_SPEC_IPV6, + .size = sizeof(hash_rxq_init[0].flow_spec.ipv6), + }, + .underlayer = &hash_rxq_init[HASH_RXQ_ETH], + }, +#endif /* HAVE_FLOW_SPEC_IPV6 */ + [HASH_RXQ_ETH] = { + .hash_fields = 0, + .dpdk_rss_hf = 0, + .flow_priority = 2, + .flow_spec.eth = { + .type = IBV_EXP_FLOW_SPEC_ETH, + .size = sizeof(hash_rxq_init[0].flow_spec.eth), + }, + .underlayer = NULL, + }, +}; + +/* Number of entries in hash_rxq_init[]. 
*/ +const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init); + +/* Initialization data for hash RX queue indirection tables. */ +static const struct ind_table_init ind_table_init[] = { + { + .max_size = -1u, /* Superseded by HW limitations. */ + .hash_types = + 1 << HASH_RXQ_TCPV4 | + 1 << HASH_RXQ_UDPV4 | + 1 << HASH_RXQ_IPV4 | +#ifdef HAVE_FLOW_SPEC_IPV6 + 1 << HASH_RXQ_TCPV6 | + 1 << HASH_RXQ_UDPV6 | + 1 << HASH_RXQ_IPV6 | +#endif /* HAVE_FLOW_SPEC_IPV6 */ + 0, +#ifdef HAVE_FLOW_SPEC_IPV6 + .hash_types_n = 6, +#else /* HAVE_FLOW_SPEC_IPV6 */ + .hash_types_n = 3, +#endif /* HAVE_FLOW_SPEC_IPV6 */ + }, + { + .max_size = 1, + .hash_types = 1 << HASH_RXQ_ETH, + .hash_types_n = 1, + }, +}; + +#define IND_TABLE_INIT_N RTE_DIM(ind_table_init) + +/* Default RSS hash key also used for ConnectX-3. */ +uint8_t rss_hash_default_key[] = { + 0x2c, 0xc6, 0x81, 0xd1, + 0x5b, 0xdb, 0xf4, 0xf7, + 0xfc, 0xa2, 0x83, 0x19, + 0xdb, 0x1a, 0x3e, 0x94, + 0x6b, 0x9e, 0x38, 0xd9, + 0x2c, 0x9c, 0x03, 0xd1, + 0xad, 0x99, 0x44, 0xa7, + 0xd9, 0x56, 0x3d, 0x59, + 0x06, 0x3c, 0x25, 0xf3, + 0xfc, 0x1f, 0xdc, 0x2a, +}; + +/* Length of the default RSS hash key. */ +const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key); + +/** + * Populate flow steering rule for a given hash RX queue type using + * information from hash_rxq_init[]. Nothing is written to flow_attr when + * flow_attr_size is not large enough, but the required size is still returned. + * + * @param priv + * Pointer to private structure. + * @param[out] flow_attr + * Pointer to flow attribute structure to fill. Note that the allocated + * area must be larger and large enough to hold all flow specifications. + * @param flow_attr_size + * Entire size of flow_attr and trailing room for flow specifications. + * @param type + * Hash RX queue type to use for flow steering rule. + * + * @return + * Total size of the flow attribute buffer. No errors are defined. + */ +size_t +priv_flow_attr(struct priv *priv, struct ibv_exp_flow_attr *flow_attr, + size_t flow_attr_size, enum hash_rxq_type type) +{ + size_t offset = sizeof(*flow_attr); + const struct hash_rxq_init *init = &hash_rxq_init[type]; + + assert(priv != NULL); + assert((size_t)type < RTE_DIM(hash_rxq_init)); + do { + offset += init->flow_spec.hdr.size; + init = init->underlayer; + } while (init != NULL); + if (offset > flow_attr_size) + return offset; + flow_attr_size = offset; + init = &hash_rxq_init[type]; + *flow_attr = (struct ibv_exp_flow_attr){ + .type = IBV_EXP_FLOW_ATTR_NORMAL, +#ifdef MLX5_FDIR_SUPPORT + /* Priorities < 3 are reserved for flow director. */ + .priority = init->flow_priority + 3, +#else /* MLX5_FDIR_SUPPORT */ + .priority = init->flow_priority, +#endif /* MLX5_FDIR_SUPPORT */ + .num_of_specs = 0, + .port = priv->port, + .flags = 0, + }; + do { + offset -= init->flow_spec.hdr.size; + memcpy((void *)((uintptr_t)flow_attr + offset), + &init->flow_spec, + init->flow_spec.hdr.size); + ++flow_attr->num_of_specs; + init = init->underlayer; + } while (init != NULL); + return flow_attr_size; +} + +/** + * Convert hash type position in indirection table initializer to + * hash RX queue type. + * + * @param table + * Indirection table initializer. + * @param pos + * Hash type position. + * + * @return + * Hash RX queue type. 
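+ *
+ * In other words, the result is the type corresponding to the pos-th
+ * set bit of table->hash_types (assuming enum hash_rxq_type follows
+ * the order of hash_rxq_init[]); e.g. with the TCPV4, UDPV4 and IPV4
+ * bits set, pos 0 yields HASH_RXQ_TCPV4, pos 1 HASH_RXQ_UDPV4 and
+ * pos 2 HASH_RXQ_IPV4.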
+ */ +static enum hash_rxq_type +hash_rxq_type_from_pos(const struct ind_table_init *table, unsigned int pos) +{ + enum hash_rxq_type type = 0; + + assert(pos < table->hash_types_n); + do { + if ((table->hash_types & (1 << type)) && (pos-- == 0)) + break; + ++type; + } while (1); + return type; +} + +/** + * Filter out disabled hash RX queue types from ind_table_init[]. + * + * @param priv + * Pointer to private structure. + * @param[out] table + * Output table. + * + * @return + * Number of table entries. + */ +static unsigned int +priv_make_ind_table_init(struct priv *priv, + struct ind_table_init (*table)[IND_TABLE_INIT_N]) +{ + uint64_t rss_hf; + unsigned int i; + unsigned int j; + unsigned int table_n = 0; + /* Mandatory to receive frames not handled by normal hash RX queues. */ + unsigned int hash_types_sup = 1 << HASH_RXQ_ETH; + + rss_hf = priv->rss_hf; + /* Process other protocols only if more than one queue. */ + if (priv->rxqs_n > 1) + for (i = 0; (i != hash_rxq_init_n); ++i) + if (rss_hf & hash_rxq_init[i].dpdk_rss_hf) + hash_types_sup |= (1 << i); + + /* Filter out entries whose protocols are not in the set. */ + for (i = 0, j = 0; (i != IND_TABLE_INIT_N); ++i) { + unsigned int nb; + unsigned int h; + + /* j is increased only if the table has valid protocols. */ + assert(j <= i); + (*table)[j] = ind_table_init[i]; + (*table)[j].hash_types &= hash_types_sup; + for (h = 0, nb = 0; (h != hash_rxq_init_n); ++h) + if (((*table)[j].hash_types >> h) & 0x1) + ++nb; + (*table)[i].hash_types_n = nb; + if (nb) { + ++table_n; + ++j; + } + } + return table_n; +} + +/** + * Initialize hash RX queues and indirection table. + * + * @param priv + * Pointer to private structure. + * + * @return + * 0 on success, errno value on failure. + */ +int +priv_create_hash_rxqs(struct priv *priv) +{ + struct ibv_exp_wq *wqs[priv->reta_idx_n]; + struct ind_table_init ind_table_init[IND_TABLE_INIT_N]; + unsigned int ind_tables_n = + priv_make_ind_table_init(priv, &ind_table_init); + unsigned int hash_rxqs_n = 0; + struct hash_rxq (*hash_rxqs)[] = NULL; + struct ibv_exp_rwq_ind_table *(*ind_tables)[] = NULL; + unsigned int i; + unsigned int j; + unsigned int k; + int err = 0; + + assert(priv->ind_tables == NULL); + assert(priv->ind_tables_n == 0); + assert(priv->hash_rxqs == NULL); + assert(priv->hash_rxqs_n == 0); + assert(priv->pd != NULL); + assert(priv->ctx != NULL); + if (priv->rxqs_n == 0) + return EINVAL; + assert(priv->rxqs != NULL); + if (ind_tables_n == 0) { + ERROR("all hash RX queue types have been filtered out," + " indirection table cannot be created"); + return EINVAL; + } + if (priv->rxqs_n & (priv->rxqs_n - 1)) { + INFO("%u RX queues are configured, consider rounding this" + " number to the next power of two for better balancing", + priv->rxqs_n); + DEBUG("indirection table extended to assume %u WQs", + priv->reta_idx_n); + } + for (i = 0; (i != priv->reta_idx_n); ++i) + wqs[i] = (*priv->rxqs)[(*priv->reta_idx)[i]]->wq; + /* Get number of hash RX queues to configure. */ + for (i = 0, hash_rxqs_n = 0; (i != ind_tables_n); ++i) + hash_rxqs_n += ind_table_init[i].hash_types_n; + DEBUG("allocating %u hash RX queues for %u WQs, %u indirection tables", + hash_rxqs_n, priv->rxqs_n, ind_tables_n); + /* Create indirection tables. 
*/ + ind_tables = rte_calloc(__func__, ind_tables_n, + sizeof((*ind_tables)[0]), 0); + if (ind_tables == NULL) { + err = ENOMEM; + ERROR("cannot allocate indirection tables container: %s", + strerror(err)); + goto error; + } + for (i = 0; (i != ind_tables_n); ++i) { + struct ibv_exp_rwq_ind_table_init_attr ind_init_attr = { + .pd = priv->pd, + .log_ind_tbl_size = 0, /* Set below. */ + .ind_tbl = wqs, + .comp_mask = 0, + }; + unsigned int ind_tbl_size = ind_table_init[i].max_size; + struct ibv_exp_rwq_ind_table *ind_table; + + if (priv->reta_idx_n < ind_tbl_size) + ind_tbl_size = priv->reta_idx_n; + ind_init_attr.log_ind_tbl_size = log2above(ind_tbl_size); + errno = 0; + ind_table = ibv_exp_create_rwq_ind_table(priv->ctx, + &ind_init_attr); + if (ind_table != NULL) { + (*ind_tables)[i] = ind_table; + continue; + } + /* Not clear whether errno is set. */ + err = (errno ? errno : EINVAL); + ERROR("RX indirection table creation failed with error %d: %s", + err, strerror(err)); + goto error; + } + /* Allocate array that holds hash RX queues and related data. */ + hash_rxqs = rte_calloc(__func__, hash_rxqs_n, + sizeof((*hash_rxqs)[0]), 0); + if (hash_rxqs == NULL) { + err = ENOMEM; + ERROR("cannot allocate hash RX queues container: %s", + strerror(err)); + goto error; + } + for (i = 0, j = 0, k = 0; + ((i != hash_rxqs_n) && (j != ind_tables_n)); + ++i) { + struct hash_rxq *hash_rxq = &(*hash_rxqs)[i]; + enum hash_rxq_type type = + hash_rxq_type_from_pos(&ind_table_init[j], k); + struct rte_eth_rss_conf *priv_rss_conf = + (*priv->rss_conf)[type]; + struct ibv_exp_rx_hash_conf hash_conf = { + .rx_hash_function = IBV_EXP_RX_HASH_FUNC_TOEPLITZ, + .rx_hash_key_len = (priv_rss_conf ? + priv_rss_conf->rss_key_len : + rss_hash_default_key_len), + .rx_hash_key = (priv_rss_conf ? + priv_rss_conf->rss_key : + rss_hash_default_key), + .rx_hash_fields_mask = hash_rxq_init[type].hash_fields, + .rwq_ind_tbl = (*ind_tables)[j], + }; + struct ibv_exp_qp_init_attr qp_init_attr = { + .max_inl_recv = 0, /* Currently not supported. */ + .qp_type = IBV_QPT_RAW_PACKET, + .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD | + IBV_EXP_QP_INIT_ATTR_RX_HASH), + .pd = priv->pd, + .rx_hash_conf = &hash_conf, + .port_num = priv->port, + }; + + DEBUG("using indirection table %u for hash RX queue %u type %d", + j, i, type); + *hash_rxq = (struct hash_rxq){ + .priv = priv, + .qp = ibv_exp_create_qp(priv->ctx, &qp_init_attr), + .type = type, + }; + if (hash_rxq->qp == NULL) { + err = (errno ? errno : EINVAL); + ERROR("Hash RX QP creation failure: %s", + strerror(err)); + goto error; + } + if (++k < ind_table_init[j].hash_types_n) + continue; + /* Switch to the next indirection table and reset hash RX + * queue type array index. */ + ++j; + k = 0; + } + priv->ind_tables = ind_tables; + priv->ind_tables_n = ind_tables_n; + priv->hash_rxqs = hash_rxqs; + priv->hash_rxqs_n = hash_rxqs_n; + assert(err == 0); + return 0; +error: + if (hash_rxqs != NULL) { + for (i = 0; (i != hash_rxqs_n); ++i) { + struct ibv_qp *qp = (*hash_rxqs)[i].qp; + + if (qp == NULL) + continue; + claim_zero(ibv_destroy_qp(qp)); + } + rte_free(hash_rxqs); + } + if (ind_tables != NULL) { + for (j = 0; (j != ind_tables_n); ++j) { + struct ibv_exp_rwq_ind_table *ind_table = + (*ind_tables)[j]; + + if (ind_table == NULL) + continue; + claim_zero(ibv_exp_destroy_rwq_ind_table(ind_table)); + } + rte_free(ind_tables); + } + return err; +} + +/** + * Clean up hash RX queues and indirection table. + * + * @param priv + * Pointer to private structure. 
+ */ +void +priv_destroy_hash_rxqs(struct priv *priv) +{ + unsigned int i; + + DEBUG("destroying %u hash RX queues", priv->hash_rxqs_n); + if (priv->hash_rxqs_n == 0) { + assert(priv->hash_rxqs == NULL); + assert(priv->ind_tables == NULL); + return; + } + for (i = 0; (i != priv->hash_rxqs_n); ++i) { + struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i]; + unsigned int j, k; + + assert(hash_rxq->priv == priv); + assert(hash_rxq->qp != NULL); + /* Also check that there are no remaining flows. */ + for (j = 0; (j != RTE_DIM(hash_rxq->special_flow)); ++j) + for (k = 0; + (k != RTE_DIM(hash_rxq->special_flow[j])); + ++k) + assert(hash_rxq->special_flow[j][k] == NULL); + for (j = 0; (j != RTE_DIM(hash_rxq->mac_flow)); ++j) + for (k = 0; (k != RTE_DIM(hash_rxq->mac_flow[j])); ++k) + assert(hash_rxq->mac_flow[j][k] == NULL); + claim_zero(ibv_destroy_qp(hash_rxq->qp)); + } + priv->hash_rxqs_n = 0; + rte_free(priv->hash_rxqs); + priv->hash_rxqs = NULL; + for (i = 0; (i != priv->ind_tables_n); ++i) { + struct ibv_exp_rwq_ind_table *ind_table = + (*priv->ind_tables)[i]; + + assert(ind_table != NULL); + claim_zero(ibv_exp_destroy_rwq_ind_table(ind_table)); + } + priv->ind_tables_n = 0; + rte_free(priv->ind_tables); + priv->ind_tables = NULL; +} + +/** + * Check whether a given flow type is allowed. + * + * @param priv + * Pointer to private structure. + * @param type + * Flow type to check. + * + * @return + * Nonzero if the given flow type is allowed. + */ +int +priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type) +{ + /* Only FLOW_TYPE_PROMISC is allowed when promiscuous mode + * has been requested. */ + if (priv->promisc_req) + return type == HASH_RXQ_FLOW_TYPE_PROMISC; + switch (type) { + case HASH_RXQ_FLOW_TYPE_PROMISC: + return !!priv->promisc_req; + case HASH_RXQ_FLOW_TYPE_ALLMULTI: + return !!priv->allmulti_req; + case HASH_RXQ_FLOW_TYPE_BROADCAST: +#ifdef HAVE_FLOW_SPEC_IPV6 + case HASH_RXQ_FLOW_TYPE_IPV6MULTI: +#endif /* HAVE_FLOW_SPEC_IPV6 */ + /* If allmulti is enabled, broadcast and ipv6multi + * are unnecessary. */ + return !priv->allmulti_req; + case HASH_RXQ_FLOW_TYPE_MAC: + return 1; + default: + /* Unsupported flow type is not allowed. */ + return 0; + } + return 0; +} + +/** + * Automatically enable/disable flows according to configuration. + * + * @param priv + * Private structure. + * + * @return + * 0 on success, errno value on failure. + */ +int +priv_rehash_flows(struct priv *priv) +{ + unsigned int i; + + for (i = 0; (i != RTE_DIM((*priv->hash_rxqs)[0].special_flow)); ++i) + if (!priv_allow_flow_type(priv, i)) { + priv_special_flow_disable(priv, i); + } else { + int ret = priv_special_flow_enable(priv, i); + + if (ret) + return ret; + } + if (priv_allow_flow_type(priv, HASH_RXQ_FLOW_TYPE_MAC)) + return priv_mac_addrs_enable(priv); + priv_mac_addrs_disable(priv); + return 0; +} + +/** + * Allocate RX queue elements with scattered packets support. + * + * @param rxq + * Pointer to RX queue structure. + * @param elts_n + * Number of elements to allocate. + * @param[in] pool + * If not NULL, fetch buffers from this array instead of allocating them + * with rte_pktmbuf_alloc(). + * + * @return + * 0 on success, errno value on failure. 
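+ * + * Each element carries RTE_DIM(elt->sges) segments, so when a pool is + * provided (as rxq_rehash() does) it is expected to hold + * elts_n * MLX5_PMD_SGE_WR_N mbufs; a minimal sizing sketch: + * + * unsigned int mbufs_needed = elts_n * MLX5_PMD_SGE_WR_N; + * struct rte_mbuf **pool = + * rte_malloc(__func__, mbufs_needed * sizeof(*pool), 0);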
+ */ +static int +rxq_alloc_elts_sp(struct rxq *rxq, unsigned int elts_n, + struct rte_mbuf **pool) +{ + unsigned int i; + struct rxq_elt_sp (*elts)[elts_n] = + rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0, + rxq->socket); + int ret = 0; + + if (elts == NULL) { + ERROR("%p: can't allocate packets array", (void *)rxq); + ret = ENOMEM; + goto error; + } + /* For each WR (packet). */ + for (i = 0; (i != elts_n); ++i) { + unsigned int j; + struct rxq_elt_sp *elt = &(*elts)[i]; + struct ibv_sge (*sges)[RTE_DIM(elt->sges)] = &elt->sges; + + /* These two arrays must have the same size. */ + assert(RTE_DIM(elt->sges) == RTE_DIM(elt->bufs)); + /* For each SGE (segment). */ + for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) { + struct ibv_sge *sge = &(*sges)[j]; + struct rte_mbuf *buf; + + if (pool != NULL) { + buf = *(pool++); + assert(buf != NULL); + rte_pktmbuf_reset(buf); + } else + buf = rte_pktmbuf_alloc(rxq->mp); + if (buf == NULL) { + assert(pool == NULL); + ERROR("%p: empty mbuf pool", (void *)rxq); + ret = ENOMEM; + goto error; + } + elt->bufs[j] = buf; + /* Headroom is reserved by rte_pktmbuf_alloc(). */ + assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM); + /* Buffer is supposed to be empty. */ + assert(rte_pktmbuf_data_len(buf) == 0); + assert(rte_pktmbuf_pkt_len(buf) == 0); + /* sge->addr must be able to store a pointer. */ + assert(sizeof(sge->addr) >= sizeof(uintptr_t)); + if (j == 0) { + /* The first SGE keeps its headroom. */ + sge->addr = rte_pktmbuf_mtod(buf, uintptr_t); + sge->length = (buf->buf_len - + RTE_PKTMBUF_HEADROOM); + } else { + /* Subsequent SGEs lose theirs. */ + assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM); + SET_DATA_OFF(buf, 0); + sge->addr = (uintptr_t)buf->buf_addr; + sge->length = buf->buf_len; + } + sge->lkey = rxq->mr->lkey; + /* Redundant check for tailroom. */ + assert(sge->length == rte_pktmbuf_tailroom(buf)); + } + } + DEBUG("%p: allocated and configured %u WRs (%zu segments)", + (void *)rxq, elts_n, (elts_n * RTE_DIM((*elts)[0].sges))); + rxq->elts_n = elts_n; + rxq->elts_head = 0; + rxq->elts.sp = elts; + assert(ret == 0); + return 0; +error: + if (elts != NULL) { + assert(pool == NULL); + for (i = 0; (i != RTE_DIM(*elts)); ++i) { + unsigned int j; + struct rxq_elt_sp *elt = &(*elts)[i]; + + for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) { + struct rte_mbuf *buf = elt->bufs[j]; + + if (buf != NULL) + rte_pktmbuf_free_seg(buf); + } + } + rte_free(elts); + } + DEBUG("%p: failed, freed everything", (void *)rxq); + assert(ret > 0); + return ret; +} + +/** + * Free RX queue elements with scattered packets support. + * + * @param rxq + * Pointer to RX queue structure. + */ +static void +rxq_free_elts_sp(struct rxq *rxq) +{ + unsigned int i; + unsigned int elts_n = rxq->elts_n; + struct rxq_elt_sp (*elts)[elts_n] = rxq->elts.sp; + + DEBUG("%p: freeing WRs", (void *)rxq); + rxq->elts_n = 0; + rxq->elts.sp = NULL; + if (elts == NULL) + return; + for (i = 0; (i != RTE_DIM(*elts)); ++i) { + unsigned int j; + struct rxq_elt_sp *elt = &(*elts)[i]; + + for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) { + struct rte_mbuf *buf = elt->bufs[j]; + + if (buf != NULL) + rte_pktmbuf_free_seg(buf); + } + } + rte_free(elts); +} + +/** + * Allocate RX queue elements. + * + * @param rxq + * Pointer to RX queue structure. + * @param elts_n + * Number of elements to allocate. + * @param[in] pool + * If not NULL, fetch buffers from this array instead of allocating them + * with rte_pktmbuf_alloc(). + * + * @return + * 0 on success, errno value on failure. 
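+ * + * Unlike the scattered variant above, each element holds a single mbuf whose + * SGE starts right after the RTE_PKTMBUF_HEADROOM bytes, so received data + * lands where rte_pktmbuf_mtod() expects it; a supplied pool therefore needs + * exactly elts_n mbufs.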
+ */ +static int +rxq_alloc_elts(struct rxq *rxq, unsigned int elts_n, struct rte_mbuf **pool) +{ + unsigned int i; + struct rxq_elt (*elts)[elts_n] = + rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0, + rxq->socket); + int ret = 0; + + if (elts == NULL) { + ERROR("%p: can't allocate packets array", (void *)rxq); + ret = ENOMEM; + goto error; + } + /* For each WR (packet). */ + for (i = 0; (i != elts_n); ++i) { + struct rxq_elt *elt = &(*elts)[i]; + struct ibv_sge *sge = &(*elts)[i].sge; + struct rte_mbuf *buf; + + if (pool != NULL) { + buf = *(pool++); + assert(buf != NULL); + rte_pktmbuf_reset(buf); + } else + buf = rte_pktmbuf_alloc(rxq->mp); + if (buf == NULL) { + assert(pool == NULL); + ERROR("%p: empty mbuf pool", (void *)rxq); + ret = ENOMEM; + goto error; + } + elt->buf = buf; + /* Headroom is reserved by rte_pktmbuf_alloc(). */ + assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM); + /* Buffer is supposed to be empty. */ + assert(rte_pktmbuf_data_len(buf) == 0); + assert(rte_pktmbuf_pkt_len(buf) == 0); + /* sge->addr must be able to store a pointer. */ + assert(sizeof(sge->addr) >= sizeof(uintptr_t)); + /* SGE keeps its headroom. */ + sge->addr = (uintptr_t) + ((uint8_t *)buf->buf_addr + RTE_PKTMBUF_HEADROOM); + sge->length = (buf->buf_len - RTE_PKTMBUF_HEADROOM); + sge->lkey = rxq->mr->lkey; + /* Redundant check for tailroom. */ + assert(sge->length == rte_pktmbuf_tailroom(buf)); + } + DEBUG("%p: allocated and configured %u single-segment WRs", + (void *)rxq, elts_n); + rxq->elts_n = elts_n; + rxq->elts_head = 0; + rxq->elts.no_sp = elts; + assert(ret == 0); + return 0; +error: + if (elts != NULL) { + assert(pool == NULL); + for (i = 0; (i != RTE_DIM(*elts)); ++i) { + struct rxq_elt *elt = &(*elts)[i]; + struct rte_mbuf *buf = elt->buf; + + if (buf != NULL) + rte_pktmbuf_free_seg(buf); + } + rte_free(elts); + } + DEBUG("%p: failed, freed everything", (void *)rxq); + assert(ret > 0); + return ret; +} + +/** + * Free RX queue elements. + * + * @param rxq + * Pointer to RX queue structure. + */ +static void +rxq_free_elts(struct rxq *rxq) +{ + unsigned int i; + unsigned int elts_n = rxq->elts_n; + struct rxq_elt (*elts)[elts_n] = rxq->elts.no_sp; + + DEBUG("%p: freeing WRs", (void *)rxq); + rxq->elts_n = 0; + rxq->elts.no_sp = NULL; + if (elts == NULL) + return; + for (i = 0; (i != RTE_DIM(*elts)); ++i) { + struct rxq_elt *elt = &(*elts)[i]; + struct rte_mbuf *buf = elt->buf; + + if (buf != NULL) + rte_pktmbuf_free_seg(buf); + } + rte_free(elts); +} + +/** + * Clean up a RX queue. + * + * Destroy objects, free allocated memory and reset the structure for reuse. + * + * @param rxq + * Pointer to RX queue structure. 
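+ * + * Resources are released in reverse order of creation: the queue interfaces + * first, then the WQ and CQ, the resource domain and finally the memory + * region, before the whole structure is zeroed for reuse.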
*/ +void +rxq_cleanup(struct rxq *rxq) +{ + struct ibv_exp_release_intf_params params; + + DEBUG("cleaning up %p", (void *)rxq); + if (rxq->sp) + rxq_free_elts_sp(rxq); + else + rxq_free_elts(rxq); + rxq->poll = NULL; + rxq->recv = NULL; + if (rxq->if_wq != NULL) { + assert(rxq->priv != NULL); + assert(rxq->priv->ctx != NULL); + assert(rxq->wq != NULL); + params = (struct ibv_exp_release_intf_params){ + .comp_mask = 0, + }; + claim_zero(ibv_exp_release_intf(rxq->priv->ctx, + rxq->if_wq, + &params)); + } + if (rxq->if_cq != NULL) { + assert(rxq->priv != NULL); + assert(rxq->priv->ctx != NULL); + assert(rxq->cq != NULL); + params = (struct ibv_exp_release_intf_params){ + .comp_mask = 0, + }; + claim_zero(ibv_exp_release_intf(rxq->priv->ctx, + rxq->if_cq, + &params)); + } + if (rxq->wq != NULL) + claim_zero(ibv_exp_destroy_wq(rxq->wq)); + if (rxq->cq != NULL) + claim_zero(ibv_destroy_cq(rxq->cq)); + if (rxq->rd != NULL) { + struct ibv_exp_destroy_res_domain_attr attr = { + .comp_mask = 0, + }; + + assert(rxq->priv != NULL); + assert(rxq->priv->ctx != NULL); + claim_zero(ibv_exp_destroy_res_domain(rxq->priv->ctx, + rxq->rd, + &attr)); + } + if (rxq->mr != NULL) + claim_zero(ibv_dereg_mr(rxq->mr)); + memset(rxq, 0, sizeof(*rxq)); +} + +/** + * Reconfigure a RX queue with new parameters. + * + * rxq_rehash() does not allocate mbufs, which, if not done from the right + * thread (such as a control thread), may corrupt the pool. + * In case of failure, the queue is left untouched. + * + * @param dev + * Pointer to Ethernet device structure. + * @param rxq + * RX queue pointer. + * + * @return + * 0 on success, errno value on failure. + */ +int +rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq) +{ + struct priv *priv = rxq->priv; + struct rxq tmpl = *rxq; + unsigned int mbuf_n; + unsigned int desc_n; + struct rte_mbuf **pool; + unsigned int i, k; + struct ibv_exp_wq_attr mod; + int err; + + DEBUG("%p: rehashing queue %p", (void *)dev, (void *)rxq); + /* Number of descriptors and mbufs currently allocated. */ + desc_n = (tmpl.elts_n * (tmpl.sp ? MLX5_PMD_SGE_WR_N : 1)); + mbuf_n = desc_n; + /* Toggle RX checksum offload if hardware supports it. */ + if (priv->hw_csum) { + tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum; + rxq->csum = tmpl.csum; + } + if (priv->hw_csum_l2tun) { + tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum; + rxq->csum_l2tun = tmpl.csum_l2tun; + } + /* Enable scattered packets support for this queue if necessary. */ + if ((dev->data->dev_conf.rxmode.jumbo_frame) && + (dev->data->dev_conf.rxmode.max_rx_pkt_len > + (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) { + tmpl.sp = 1; + desc_n /= MLX5_PMD_SGE_WR_N; + } else + tmpl.sp = 0; + DEBUG("%p: %s scattered packets support (%u WRs)", + (void *)dev, (tmpl.sp ? "enabling" : "disabling"), desc_n); + /* If scatter mode is the same as before, nothing to do. */ + if (tmpl.sp == rxq->sp) { + DEBUG("%p: nothing to do", (void *)dev); + return 0; + } + /* From now on, any failure will render the queue unusable. + * Reinitialize WQ. */ + mod = (struct ibv_exp_wq_attr){ + .attr_mask = IBV_EXP_WQ_ATTR_STATE, + .wq_state = IBV_EXP_WQS_RESET, + }; + err = ibv_exp_modify_wq(tmpl.wq, &mod); + if (err) { + ERROR("%p: cannot reset WQ: %s", (void *)dev, strerror(err)); + assert(err > 0); + return err; + } + /* Allocate pool. */ + pool = rte_malloc(__func__, (mbuf_n * sizeof(*pool)), 0); + if (pool == NULL) { + ERROR("%p: cannot allocate memory", (void *)dev); + return ENOBUFS; + } + /* Snatch mbufs from original queue.
*/ + k = 0; + if (rxq->sp) { + struct rxq_elt_sp (*elts)[rxq->elts_n] = rxq->elts.sp; + + for (i = 0; (i != RTE_DIM(*elts)); ++i) { + struct rxq_elt_sp *elt = &(*elts)[i]; + unsigned int j; + + for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) { + assert(elt->bufs[j] != NULL); + pool[k++] = elt->bufs[j]; + } + } + } else { + struct rxq_elt (*elts)[rxq->elts_n] = rxq->elts.no_sp; + + for (i = 0; (i != RTE_DIM(*elts)); ++i) { + struct rxq_elt *elt = &(*elts)[i]; + struct rte_mbuf *buf = elt->buf; + + pool[k++] = buf; + } + } + assert(k == mbuf_n); + tmpl.elts_n = 0; + tmpl.elts.sp = NULL; + assert((void *)&tmpl.elts.sp == (void *)&tmpl.elts.no_sp); + err = ((tmpl.sp) ? + rxq_alloc_elts_sp(&tmpl, desc_n, pool) : + rxq_alloc_elts(&tmpl, desc_n, pool)); + if (err) { + ERROR("%p: cannot reallocate WRs, aborting", (void *)dev); + rte_free(pool); + assert(err > 0); + return err; + } + assert(tmpl.elts_n == desc_n); + assert(tmpl.elts.sp != NULL); + rte_free(pool); + /* Clean up original data. */ + rxq->elts_n = 0; + rte_free(rxq->elts.sp); + rxq->elts.sp = NULL; + /* Change queue state to ready. */ + mod = (struct ibv_exp_wq_attr){ + .attr_mask = IBV_EXP_WQ_ATTR_STATE, + .wq_state = IBV_EXP_WQS_RDY, + }; + err = ibv_exp_modify_wq(tmpl.wq, &mod); + if (err) { + ERROR("%p: WQ state to IBV_EXP_WQS_RDY failed: %s", + (void *)dev, strerror(err)); + goto error; + } + /* Post SGEs. */ + assert(tmpl.if_wq != NULL); + if (tmpl.sp) { + struct rxq_elt_sp (*elts)[tmpl.elts_n] = tmpl.elts.sp; + + for (i = 0; (i != RTE_DIM(*elts)); ++i) { + err = tmpl.if_wq->recv_sg_list + (tmpl.wq, + (*elts)[i].sges, + RTE_DIM((*elts)[i].sges)); + if (err) + break; + } + } else { + struct rxq_elt (*elts)[tmpl.elts_n] = tmpl.elts.no_sp; + + for (i = 0; (i != RTE_DIM(*elts)); ++i) { + err = tmpl.if_wq->recv_burst( + tmpl.wq, + &(*elts)[i].sge, + 1); + if (err) + break; + } + } + if (err) { + ERROR("%p: failed to post SGEs with error %d", + (void *)dev, err); + /* Set err because it does not contain a valid errno value. */ + err = EIO; + goto error; + } + if (tmpl.sp) + tmpl.recv = tmpl.if_wq->recv_sg_list; + else + tmpl.recv = tmpl.if_wq->recv_burst; +error: + *rxq = tmpl; + assert(err >= 0); + return err; +} + +/** + * Configure a RX queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param rxq + * Pointer to RX queue structure. + * @param desc + * Number of descriptors to configure in queue. + * @param socket + * NUMA socket on which memory must be allocated. + * @param[in] conf + * Thresholds parameters. + * @param mp + * Memory pool for buffer allocations. + * + * @return + * 0 on success, errno value on failure. + */ +int +rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc, + unsigned int socket, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp) +{ + struct priv *priv = dev->data->dev_private; + struct rxq tmpl = { + .priv = priv, + .mp = mp, + .socket = socket + }; + struct ibv_exp_wq_attr mod; + union { + struct ibv_exp_query_intf_params params; + struct ibv_exp_cq_init_attr cq; + struct ibv_exp_res_domain_init_attr rd; + struct ibv_exp_wq_init_attr wq; + } attr; + enum ibv_exp_query_intf_status status; + struct rte_mbuf *buf; + int ret = 0; + unsigned int i; + unsigned int cq_size = desc; + + (void)conf; /* Thresholds configuration (ignored). */ + if ((desc == 0) || (desc % MLX5_PMD_SGE_WR_N)) { + ERROR("%p: invalid number of RX descriptors (must be a" + " multiple of %d)", (void *)dev, MLX5_PMD_SGE_WR_N); + return EINVAL; + } + /* Get mbuf length. 
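+ * A throw-away mbuf is allocated from the pool purely to learn buf_len. As + * a rough worked example (assuming the common 2176-byte default buffer + * length and 128-byte RTE_PKTMBUF_HEADROOM), max_rx_pkt_len values above + * 2048 bytes switch this queue to the scattered path below.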
*/ + buf = rte_pktmbuf_alloc(mp); + if (buf == NULL) { + ERROR("%p: unable to allocate mbuf", (void *)dev); + return ENOMEM; + } + tmpl.mb_len = buf->buf_len; + assert((rte_pktmbuf_headroom(buf) + + rte_pktmbuf_tailroom(buf)) == tmpl.mb_len); + assert(rte_pktmbuf_headroom(buf) == RTE_PKTMBUF_HEADROOM); + rte_pktmbuf_free(buf); + /* Toggle RX checksum offload if hardware supports it. */ + if (priv->hw_csum) + tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum; + if (priv->hw_csum_l2tun) + tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum; + /* Enable scattered packets support for this queue if necessary. */ + if ((dev->data->dev_conf.rxmode.jumbo_frame) && + (dev->data->dev_conf.rxmode.max_rx_pkt_len > + (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) { + tmpl.sp = 1; + desc /= MLX5_PMD_SGE_WR_N; + } + DEBUG("%p: %s scattered packets support (%u WRs)", + (void *)dev, (tmpl.sp ? "enabling" : "disabling"), desc); + /* Use the entire RX mempool as the memory region. */ + tmpl.mr = mlx5_mp2mr(priv->pd, mp); + if (tmpl.mr == NULL) { + ret = EINVAL; + ERROR("%p: MR creation failure: %s", + (void *)dev, strerror(ret)); + goto error; + } + attr.rd = (struct ibv_exp_res_domain_init_attr){ + .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL | + IBV_EXP_RES_DOMAIN_MSG_MODEL), + .thread_model = IBV_EXP_THREAD_SINGLE, + .msg_model = IBV_EXP_MSG_HIGH_BW, + }; + tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd); + if (tmpl.rd == NULL) { + ret = ENOMEM; + ERROR("%p: RD creation failure: %s", + (void *)dev, strerror(ret)); + goto error; + } + attr.cq = (struct ibv_exp_cq_init_attr){ + .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN, + .res_domain = tmpl.rd, + }; + tmpl.cq = ibv_exp_create_cq(priv->ctx, cq_size, NULL, NULL, 0, + &attr.cq); + if (tmpl.cq == NULL) { + ret = ENOMEM; + ERROR("%p: CQ creation failure: %s", + (void *)dev, strerror(ret)); + goto error; + } + DEBUG("priv->device_attr.max_qp_wr is %d", + priv->device_attr.max_qp_wr); + DEBUG("priv->device_attr.max_sge is %d", + priv->device_attr.max_sge); + /* Configure VLAN stripping. */ + tmpl.vlan_strip = dev->data->dev_conf.rxmode.hw_vlan_strip; + attr.wq = (struct ibv_exp_wq_init_attr){ + .wq_context = NULL, /* Could be useful in the future. */ + .wq_type = IBV_EXP_WQT_RQ, + /* Max number of outstanding WRs. */ + .max_recv_wr = ((priv->device_attr.max_qp_wr < (int)cq_size) ? + priv->device_attr.max_qp_wr : + (int)cq_size), + /* Max number of scatter/gather elements in a WR. */ + .max_recv_sge = ((priv->device_attr.max_sge < + MLX5_PMD_SGE_WR_N) ? + priv->device_attr.max_sge : + MLX5_PMD_SGE_WR_N), + .pd = priv->pd, + .cq = tmpl.cq, + .comp_mask = + IBV_EXP_CREATE_WQ_RES_DOMAIN | +#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS + IBV_EXP_CREATE_WQ_VLAN_OFFLOADS | +#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ + 0, + .res_domain = tmpl.rd, +#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS + .vlan_offloads = (tmpl.vlan_strip ? + IBV_EXP_RECEIVE_WQ_CVLAN_STRIP : + 0), +#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ + }; + +#ifdef HAVE_VERBS_FCS + /* By default, FCS (CRC) is stripped by hardware. */ + if (dev->data->dev_conf.rxmode.hw_strip_crc) { + tmpl.crc_present = 0; + } else if (priv->hw_fcs_strip) { + /* Ask HW/Verbs to leave CRC in place when supported. 
*/ + attr.wq.flags |= IBV_EXP_CREATE_WQ_FLAG_SCATTER_FCS; + attr.wq.comp_mask |= IBV_EXP_CREATE_WQ_FLAGS; + tmpl.crc_present = 1; + } else { + WARN("%p: CRC stripping has been disabled but will still" + " be performed by hardware, make sure MLNX_OFED and" + " firmware are up to date", + (void *)dev); + tmpl.crc_present = 0; + } + DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from" + " incoming frames to hide it", + (void *)dev, + tmpl.crc_present ? "disabled" : "enabled", + tmpl.crc_present << 2); +#endif /* HAVE_VERBS_FCS */ + +#ifdef HAVE_VERBS_RX_END_PADDING + if (!mlx5_getenv_int("MLX5_PMD_ENABLE_PADDING")) + ; /* Nothing else to do. */ + else if (priv->hw_padding) { + INFO("%p: enabling packet padding on queue %p", + (void *)dev, (void *)rxq); + attr.wq.flags |= IBV_EXP_CREATE_WQ_FLAG_RX_END_PADDING; + attr.wq.comp_mask |= IBV_EXP_CREATE_WQ_FLAGS; + } else + WARN("%p: packet padding has been requested but is not" + " supported, make sure MLNX_OFED and firmware are" + " up to date", + (void *)dev); +#endif /* HAVE_VERBS_RX_END_PADDING */ + + tmpl.wq = ibv_exp_create_wq(priv->ctx, &attr.wq); + if (tmpl.wq == NULL) { + ret = (errno ? errno : EINVAL); + ERROR("%p: WQ creation failure: %s", + (void *)dev, strerror(ret)); + goto error; + } + if (tmpl.sp) + ret = rxq_alloc_elts_sp(&tmpl, desc, NULL); + else + ret = rxq_alloc_elts(&tmpl, desc, NULL); + if (ret) { + ERROR("%p: RXQ allocation failed: %s", + (void *)dev, strerror(ret)); + goto error; + } + /* Save port ID. */ + tmpl.port_id = dev->data->port_id; + DEBUG("%p: RTE port ID: %u", (void *)rxq, tmpl.port_id); + attr.params = (struct ibv_exp_query_intf_params){ + .intf_scope = IBV_EXP_INTF_GLOBAL, +#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS + .intf_version = 1, +#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ + .intf = IBV_EXP_INTF_CQ, + .obj = tmpl.cq, + }; + tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status); + if (tmpl.if_cq == NULL) { + ERROR("%p: CQ interface family query failed with status %d", + (void *)dev, status); + goto error; + } + attr.params = (struct ibv_exp_query_intf_params){ + .intf_scope = IBV_EXP_INTF_GLOBAL, + .intf = IBV_EXP_INTF_WQ, + .obj = tmpl.wq, + }; + tmpl.if_wq = ibv_exp_query_intf(priv->ctx, &attr.params, &status); + if (tmpl.if_wq == NULL) { + ERROR("%p: WQ interface family query failed with status %d", + (void *)dev, status); + goto error; + } + /* Change queue state to ready. */ + mod = (struct ibv_exp_wq_attr){ + .attr_mask = IBV_EXP_WQ_ATTR_STATE, + .wq_state = IBV_EXP_WQS_RDY, + }; + ret = ibv_exp_modify_wq(tmpl.wq, &mod); + if (ret) { + ERROR("%p: WQ state to IBV_EXP_WQS_RDY failed: %s", + (void *)dev, strerror(ret)); + goto error; + } + /* Post SGEs. */ + if (tmpl.sp) { + struct rxq_elt_sp (*elts)[tmpl.elts_n] = tmpl.elts.sp; + + for (i = 0; (i != RTE_DIM(*elts)); ++i) { + ret = tmpl.if_wq->recv_sg_list + (tmpl.wq, + (*elts)[i].sges, + RTE_DIM((*elts)[i].sges)); + if (ret) + break; + } + } else { + struct rxq_elt (*elts)[tmpl.elts_n] = tmpl.elts.no_sp; + + for (i = 0; (i != RTE_DIM(*elts)); ++i) { + ret = tmpl.if_wq->recv_burst( + tmpl.wq, + &(*elts)[i].sge, + 1); + if (ret) + break; + } + } + if (ret) { + ERROR("%p: failed to post SGEs with error %d", + (void *)dev, ret); + /* Set ret because it does not contain a valid errno value. */ + ret = EIO; + goto error; + } + /* Clean up rxq in case we're reinitializing it. 
*/ + DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq); + rxq_cleanup(rxq); + *rxq = tmpl; + DEBUG("%p: rxq updated with %p", (void *)rxq, (void *)&tmpl); + assert(ret == 0); + /* Assign function in queue. */ +#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS + rxq->poll = rxq->if_cq->poll_length_flags_cvlan; +#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ + rxq->poll = rxq->if_cq->poll_length_flags; +#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ + if (rxq->sp) + rxq->recv = rxq->if_wq->recv_sg_list; + else + rxq->recv = rxq->if_wq->recv_burst; + return 0; +error: + rxq_cleanup(&tmpl); + assert(ret > 0); + return ret; +} + +/** + * DPDK callback to configure a RX queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * RX queue index. + * @param desc + * Number of descriptors to configure in queue. + * @param socket + * NUMA socket on which memory must be allocated. + * @param[in] conf + * Thresholds parameters. + * @param mp + * Memory pool for buffer allocations. + * + * @return + * 0 on success, negative errno value on failure. + */ +int +mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp) +{ + struct priv *priv = dev->data->dev_private; + struct rxq *rxq = (*priv->rxqs)[idx]; + int ret; + + if (mlx5_is_secondary()) + return -E_RTE_SECONDARY; + + priv_lock(priv); + DEBUG("%p: configuring queue %u for %u descriptors", + (void *)dev, idx, desc); + if (idx >= priv->rxqs_n) { + ERROR("%p: queue index out of range (%u >= %u)", + (void *)dev, idx, priv->rxqs_n); + priv_unlock(priv); + return -EOVERFLOW; + } + if (rxq != NULL) { + DEBUG("%p: reusing already allocated queue index %u (%p)", + (void *)dev, idx, (void *)rxq); + if (priv->started) { + priv_unlock(priv); + return -EEXIST; + } + (*priv->rxqs)[idx] = NULL; + rxq_cleanup(rxq); + } else { + rxq = rte_calloc_socket("RXQ", 1, sizeof(*rxq), 0, socket); + if (rxq == NULL) { + ERROR("%p: unable to allocate queue index %u", + (void *)dev, idx); + priv_unlock(priv); + return -ENOMEM; + } + } + ret = rxq_setup(dev, rxq, desc, socket, conf, mp); + if (ret) + rte_free(rxq); + else { + rxq->stats.idx = idx; + DEBUG("%p: adding RX queue %p to list", + (void *)dev, (void *)rxq); + (*priv->rxqs)[idx] = rxq; + /* Update receive callback. */ + if (rxq->sp) + dev->rx_pkt_burst = mlx5_rx_burst_sp; + else + dev->rx_pkt_burst = mlx5_rx_burst; + } + priv_unlock(priv); + return -ret; +} + +/** + * DPDK callback to release a RX queue. + * + * @param dpdk_rxq + * Generic RX queue pointer. + */ +void +mlx5_rx_queue_release(void *dpdk_rxq) +{ + struct rxq *rxq = (struct rxq *)dpdk_rxq; + struct priv *priv; + unsigned int i; + + if (mlx5_is_secondary()) + return; + + if (rxq == NULL) + return; + priv = rxq->priv; + priv_lock(priv); + for (i = 0; (i != priv->rxqs_n); ++i) + if ((*priv->rxqs)[i] == rxq) { + DEBUG("%p: removing RX queue %p from list", + (void *)priv->dev, (void *)rxq); + (*priv->rxqs)[i] = NULL; + break; + } + rxq_cleanup(rxq); + rte_free(rxq); + priv_unlock(priv); +} + +/** + * DPDK callback for RX in secondary processes. + * + * This function configures all queues from primary process information + * if necessary before reverting to the normal RX burst callback. + * + * @param dpdk_rxq + * Generic pointer to RX queue structure. + * @param[out] pkts + * Array to store received packets. + * @param pkts_n + * Maximum number of packets in array. 
+ * + * @return + * Number of packets successfully received (<= pkts_n). + */ +uint16_t +mlx5_rx_burst_secondary_setup(void *dpdk_rxq, struct rte_mbuf **pkts, + uint16_t pkts_n) +{ + struct rxq *rxq = dpdk_rxq; + struct priv *priv = mlx5_secondary_data_setup(rxq->priv); + struct priv *primary_priv; + unsigned int index; + + if (priv == NULL) + return 0; + primary_priv = + mlx5_secondary_data[priv->dev->data->port_id].primary_priv; + /* Look for queue index in both private structures. */ + for (index = 0; index != priv->rxqs_n; ++index) + if (((*primary_priv->rxqs)[index] == rxq) || + ((*priv->rxqs)[index] == rxq)) + break; + if (index == priv->rxqs_n) + return 0; + rxq = (*priv->rxqs)[index]; + return priv->dev->rx_pkt_burst(rxq, pkts, pkts_n); +} diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c new file mode 100644 index 00000000..9d1380a0 --- /dev/null +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -0,0 +1,1249 @@ +/*- + * BSD LICENSE + * + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <assert.h> +#include <stdint.h> +#include <string.h> +#include <stdlib.h> + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <infiniband/verbs.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +/* DPDK headers don't like -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <rte_mbuf.h> +#include <rte_mempool.h> +#include <rte_prefetch.h> +#include <rte_common.h> +#include <rte_branch_prediction.h> +#include <rte_memory.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +#include "mlx5.h" +#include "mlx5_utils.h" +#include "mlx5_rxtx.h" +#include "mlx5_autoconf.h" +#include "mlx5_defs.h" + +/** + * Manage TX completions. + * + * When sending a burst, mlx5_tx_burst() posts several WRs. 
+ * To improve performance, a completion event is only required once every + * MLX5_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information + * for other WRs, but this information would not be used anyway. + * + * @param txq + * Pointer to TX queue structure. + * + * @return + * 0 on success, -1 on failure. + */ +static int +txq_complete(struct txq *txq) +{ + unsigned int elts_comp = txq->elts_comp; + unsigned int elts_tail = txq->elts_tail; + unsigned int elts_free = txq->elts_tail; + const unsigned int elts_n = txq->elts_n; + int wcs_n; + + if (unlikely(elts_comp == 0)) + return 0; +#ifdef DEBUG_SEND + DEBUG("%p: processing %u work requests completions", + (void *)txq, elts_comp); +#endif + wcs_n = txq->poll_cnt(txq->cq, elts_comp); + if (unlikely(wcs_n == 0)) + return 0; + if (unlikely(wcs_n < 0)) { + DEBUG("%p: ibv_poll_cq() failed (wcs_n=%d)", + (void *)txq, wcs_n); + return -1; + } + elts_comp -= wcs_n; + assert(elts_comp <= txq->elts_comp); + /* + * Assume WC status is successful as nothing can be done about it + * anyway. + */ + elts_tail += wcs_n * txq->elts_comp_cd_init; + if (elts_tail >= elts_n) + elts_tail -= elts_n; + + while (elts_free != elts_tail) { + struct txq_elt *elt = &(*txq->elts)[elts_free]; + unsigned int elts_free_next = + (((elts_free + 1) == elts_n) ? 0 : elts_free + 1); + struct rte_mbuf *tmp = elt->buf; + struct txq_elt *elt_next = &(*txq->elts)[elts_free_next]; + +#ifndef NDEBUG + /* Poisoning. */ + memset(elt, 0x66, sizeof(*elt)); +#endif + RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf); + /* Faster than rte_pktmbuf_free(). */ + do { + struct rte_mbuf *next = NEXT(tmp); + + rte_pktmbuf_free_seg(tmp); + tmp = next; + } while (tmp != NULL); + elts_free = elts_free_next; + } + + txq->elts_tail = elts_tail; + txq->elts_comp = elts_comp; + return 0; +} + +/* For best performance, this function should not be inlined. */ +struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, const struct rte_mempool *) + __attribute__((noinline)); + +/** + * Register mempool as a memory region. + * + * @param pd + * Pointer to protection domain. + * @param mp + * Pointer to memory pool. + * + * @return + * Memory region pointer, NULL in case of error. + */ +struct ibv_mr * +mlx5_mp2mr(struct ibv_pd *pd, const struct rte_mempool *mp) +{ + const struct rte_memseg *ms = rte_eal_get_physmem_layout(); + uintptr_t start = mp->elt_va_start; + uintptr_t end = mp->elt_va_end; + unsigned int i; + + DEBUG("mempool %p area start=%p end=%p size=%zu", + (const void *)mp, (void *)start, (void *)end, + (size_t)(end - start)); + /* Round start and end to page boundary if found in memory segments. */ + for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) { + uintptr_t addr = (uintptr_t)ms[i].addr; + size_t len = ms[i].len; + unsigned int align = ms[i].hugepage_sz; + + if ((start > addr) && (start < addr + len)) + start = RTE_ALIGN_FLOOR(start, align); + if ((end > addr) && (end < addr + len)) + end = RTE_ALIGN_CEIL(end, align); + } + DEBUG("mempool %p using start=%p end=%p size=%zu for MR", + (const void *)mp, (void *)start, (void *)end, + (size_t)(end - start)); + return ibv_reg_mr(pd, + (void *)start, + end - start, + IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE); +} + +/** + * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which + * the cloned mbuf is allocated is returned instead. + * + * @param buf + * Pointer to mbuf. + * + * @return + * Memory pool where data is located for given mbuf. 
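+ * + * For an indirect mbuf the data actually lives in the direct mbuf it was + * cloned from, hence the rte_mbuf_from_indirect() lookup; using buf->pool + * directly would name the clone's own pool and yield a mismatching lkey in + * txq_mp2mr().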
+ */ +static struct rte_mempool * +txq_mb2mp(struct rte_mbuf *buf) +{ + if (unlikely(RTE_MBUF_INDIRECT(buf))) + return rte_mbuf_from_indirect(buf)->pool; + return buf->pool; +} + +/** + * Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[]. + * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full, + * remove an entry first. + * + * @param txq + * Pointer to TX queue structure. + * @param[in] mp + * Memory Pool for which a Memory Region lkey must be returned. + * + * @return + * mr->lkey on success, (uint32_t)-1 on failure. + */ +static uint32_t +txq_mp2mr(struct txq *txq, const struct rte_mempool *mp) +{ + unsigned int i; + struct ibv_mr *mr; + + for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) { + if (unlikely(txq->mp2mr[i].mp == NULL)) { + /* Unknown MP, add a new MR for it. */ + break; + } + if (txq->mp2mr[i].mp == mp) { + assert(txq->mp2mr[i].lkey != (uint32_t)-1); + assert(txq->mp2mr[i].mr->lkey == txq->mp2mr[i].lkey); + return txq->mp2mr[i].lkey; + } + } + /* Add a new entry, register MR first. */ + DEBUG("%p: discovered new memory pool \"%s\" (%p)", + (void *)txq, mp->name, (const void *)mp); + mr = mlx5_mp2mr(txq->priv->pd, mp); + if (unlikely(mr == NULL)) { + DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.", + (void *)txq); + return (uint32_t)-1; + } + if (unlikely(i == RTE_DIM(txq->mp2mr))) { + /* Table is full, remove oldest entry. */ + DEBUG("%p: MR <-> MP table full, dropping oldest entry.", + (void *)txq); + --i; + claim_zero(ibv_dereg_mr(txq->mp2mr[0].mr)); + memmove(&txq->mp2mr[0], &txq->mp2mr[1], + (sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0]))); + } + /* Store the new entry. */ + txq->mp2mr[i].mp = mp; + txq->mp2mr[i].mr = mr; + txq->mp2mr[i].lkey = mr->lkey; + DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32, + (void *)txq, mp->name, (const void *)mp, txq->mp2mr[i].lkey); + return txq->mp2mr[i].lkey; +} + +struct txq_mp2mr_mbuf_check_data { + const struct rte_mempool *mp; + int ret; +}; + +/** + * Callback function for rte_mempool_obj_iter() to check whether a given + * mempool object looks like a mbuf. + * + * @param[in, out] arg + * Context data (struct txq_mp2mr_mbuf_check_data). Contains mempool pointer + * and return value. + * @param[in] start + * Object start address. + * @param[in] end + * Object end address. + * @param index + * Unused. + * + * @return + * Nonzero value when object is not a mbuf. + */ +static void +txq_mp2mr_mbuf_check(void *arg, void *start, void *end, + uint32_t index __rte_unused) +{ + struct txq_mp2mr_mbuf_check_data *data = arg; + struct rte_mbuf *buf = + (void *)((uintptr_t)start + data->mp->header_size); + + (void)index; + /* Check whether mbuf structure fits element size and whether mempool + * pointer is valid. */ + if (((uintptr_t)end >= (uintptr_t)(buf + 1)) && + (buf->pool == data->mp)) + data->ret = 0; + else + data->ret = -1; +} + +/** + * Iterator function for rte_mempool_walk() to register existing mempools and + * fill the MP to MR cache of a TX queue. + * + * @param[in] mp + * Memory Pool to register. + * @param *arg + * Pointer to TX queue structure. + */ +void +txq_mp2mr_iter(const struct rte_mempool *mp, void *arg) +{ + struct txq *txq = arg; + struct txq_mp2mr_mbuf_check_data data = { + .mp = mp, + .ret = -1, + }; + + /* Discard empty mempools. */ + if (mp->size == 0) + return; + /* Register mempool only if the first element looks like a mbuf. 
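+ * The check is a heuristic: txq_mp2mr_mbuf_check() only verifies that the + * first object is large enough to hold a struct rte_mbuf past + * mp->header_size and that its pool field points back to mp, which is + * sufficient to skip non-mbuf mempools.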
*/ + rte_mempool_obj_iter((void *)mp->elt_va_start, + 1, + mp->header_size + mp->elt_size + mp->trailer_size, + 1, + mp->elt_pa, + mp->pg_num, + mp->pg_shift, + txq_mp2mr_mbuf_check, + &data); + if (data.ret) + return; + txq_mp2mr(txq, mp); +} + +/** + * Insert VLAN using mbuf headroom space. + * + * @param buf + * Buffer for VLAN insertion. + * + * @return + * 0 on success, errno value on failure. + */ +static inline int +insert_vlan_sw(struct rte_mbuf *buf) +{ + uintptr_t addr; + uint32_t vlan; + uint16_t head_room_len = rte_pktmbuf_headroom(buf); + + if (head_room_len < 4) + return EINVAL; + + addr = rte_pktmbuf_mtod(buf, uintptr_t); + vlan = htonl(0x81000000 | buf->vlan_tci); + memmove((void *)(addr - 4), (void *)addr, 12); + memcpy((void *)(addr + 8), &vlan, sizeof(vlan)); + + SET_DATA_OFF(buf, head_room_len - 4); + DATA_LEN(buf) += 4; + + return 0; +} + +#if MLX5_PMD_SGE_WR_N > 1 + +/** + * Copy scattered mbuf contents to a single linear buffer. + * + * @param[out] linear + * Linear output buffer. + * @param[in] buf + * Scattered input buffer. + * + * @return + * Number of bytes copied to the output buffer or 0 if not large enough. + */ +static unsigned int +linearize_mbuf(linear_t *linear, struct rte_mbuf *buf) +{ + unsigned int size = 0; + unsigned int offset; + + do { + unsigned int len = DATA_LEN(buf); + + offset = size; + size += len; + if (unlikely(size > sizeof(*linear))) + return 0; + memcpy(&(*linear)[offset], + rte_pktmbuf_mtod(buf, uint8_t *), + len); + buf = NEXT(buf); + } while (buf != NULL); + return size; +} + +/** + * Handle scattered buffers for mlx5_tx_burst(). + * + * @param txq + * TX queue structure. + * @param segs + * Number of segments in buf. + * @param elt + * TX queue element to fill. + * @param[in] buf + * Buffer to process. + * @param elts_head + * Index of the linear buffer to use if necessary (normally txq->elts_head). + * @param[out] sges + * Array filled with SGEs on success. + * + * @return + * A structure containing the processed packet size in bytes and the + * number of SGEs. Both fields are set to (unsigned int)-1 in case of + * failure. + */ +static struct tx_burst_sg_ret { + unsigned int length; + unsigned int num; +} +tx_burst_sg(struct txq *txq, unsigned int segs, struct txq_elt *elt, + struct rte_mbuf *buf, unsigned int elts_head, + struct ibv_sge (*sges)[MLX5_PMD_SGE_WR_N]) +{ + unsigned int sent_size = 0; + unsigned int j; + int linearize = 0; + + /* When there are too many segments, extra segments are + * linearized in the last SGE. */ + if (unlikely(segs > RTE_DIM(*sges))) { + segs = (RTE_DIM(*sges) - 1); + linearize = 1; + } + /* Update element. */ + elt->buf = buf; + /* Register segments as SGEs. */ + for (j = 0; (j != segs); ++j) { + struct ibv_sge *sge = &(*sges)[j]; + uint32_t lkey; + + /* Retrieve Memory Region key for this memory pool. */ + lkey = txq_mp2mr(txq, txq_mb2mp(buf)); + if (unlikely(lkey == (uint32_t)-1)) { + /* MR does not exist. */ + DEBUG("%p: unable to get MP <-> MR association", + (void *)txq); + /* Clean up TX element. */ + elt->buf = NULL; + goto stop; + } + /* Update SGE. */ + sge->addr = rte_pktmbuf_mtod(buf, uintptr_t); + if (txq->priv->vf) + rte_prefetch0((volatile void *) + (uintptr_t)sge->addr); + sge->length = DATA_LEN(buf); + sge->lkey = lkey; + sent_size += sge->length; + buf = NEXT(buf); + } + /* If buf is not NULL here and is not going to be linearized, + * nb_segs is not valid. */ + assert(j == segs); + assert((buf == NULL) || (linearize)); + /* Linearize extra segments. 
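+ * Segments that do not fit in the SGE array are copied into the + * per-descriptor linear_t buffer (*txq->elts_linear)[elts_head], which is + * registered as txq->mr_linear, so the packet is only accepted when the + * remaining bytes fit in sizeof(linear_t).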
*/ + if (linearize) { + struct ibv_sge *sge = &(*sges)[segs]; + linear_t *linear = &(*txq->elts_linear)[elts_head]; + unsigned int size = linearize_mbuf(linear, buf); + + assert(segs == (RTE_DIM(*sges) - 1)); + if (size == 0) { + /* Invalid packet. */ + DEBUG("%p: packet too large to be linearized.", + (void *)txq); + /* Clean up TX element. */ + elt->buf = NULL; + goto stop; + } + /* If MLX5_PMD_SGE_WR_N is 1, free mbuf immediately. */ + if (RTE_DIM(*sges) == 1) { + do { + struct rte_mbuf *next = NEXT(buf); + + rte_pktmbuf_free_seg(buf); + buf = next; + } while (buf != NULL); + elt->buf = NULL; + } + /* Update SGE. */ + sge->addr = (uintptr_t)&(*linear)[0]; + sge->length = size; + sge->lkey = txq->mr_linear->lkey; + sent_size += size; + /* Include last segment. */ + segs++; + } + return (struct tx_burst_sg_ret){ + .length = sent_size, + .num = segs, + }; +stop: + return (struct tx_burst_sg_ret){ + .length = -1, + .num = -1, + }; +} + +#endif /* MLX5_PMD_SGE_WR_N > 1 */ + +/** + * DPDK callback for TX. + * + * @param dpdk_txq + * Generic pointer to TX queue structure. + * @param[in] pkts + * Packets to transmit. + * @param pkts_n + * Number of packets in array. + * + * @return + * Number of packets successfully transmitted (<= pkts_n). + */ +uint16_t +mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) +{ + struct txq *txq = (struct txq *)dpdk_txq; + unsigned int elts_head = txq->elts_head; + const unsigned int elts_n = txq->elts_n; + unsigned int elts_comp_cd = txq->elts_comp_cd; + unsigned int elts_comp = 0; + unsigned int i; + unsigned int max; + int err; + struct rte_mbuf *buf = pkts[0]; + + assert(elts_comp_cd != 0); + /* Prefetch first packet cacheline. */ + rte_prefetch0(buf); + txq_complete(txq); + max = (elts_n - (elts_head - txq->elts_tail)); + if (max > elts_n) + max -= elts_n; + assert(max >= 1); + assert(max <= elts_n); + /* Always leave one free entry in the ring. */ + --max; + if (max == 0) + return 0; + if (max > pkts_n) + max = pkts_n; + for (i = 0; (i != max); ++i) { + struct rte_mbuf *buf_next = pkts[i + 1]; + unsigned int elts_head_next = + (((elts_head + 1) == elts_n) ? 0 : elts_head + 1); + struct txq_elt *elt = &(*txq->elts)[elts_head]; + unsigned int segs = NB_SEGS(buf); +#ifdef MLX5_PMD_SOFT_COUNTERS + unsigned int sent_size = 0; +#endif + uint32_t send_flags = 0; +#ifdef HAVE_VERBS_VLAN_INSERTION + int insert_vlan = 0; +#endif /* HAVE_VERBS_VLAN_INSERTION */ + + if (i + 1 < max) + rte_prefetch0(buf_next); + /* Request TX completion. */ + if (unlikely(--elts_comp_cd == 0)) { + elts_comp_cd = txq->elts_comp_cd_init; + ++elts_comp; + send_flags |= IBV_EXP_QP_BURST_SIGNALED; + } + /* Should we enable HW CKSUM offload */ + if (buf->ol_flags & + (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) { + send_flags |= IBV_EXP_QP_BURST_IP_CSUM; + /* HW does not support checksum offloads at arbitrary + * offsets but automatically recognizes the packet + * type. For inner L3/L4 checksums, only VXLAN (UDP) + * tunnels are currently supported. */ + if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type)) + send_flags |= IBV_EXP_QP_BURST_TUNNEL; + } + if (buf->ol_flags & PKT_TX_VLAN_PKT) { +#ifdef HAVE_VERBS_VLAN_INSERTION + if (!txq->priv->mps) + insert_vlan = 1; + else +#endif /* HAVE_VERBS_VLAN_INSERTION */ + { + err = insert_vlan_sw(buf); + if (unlikely(err)) + goto stop; + } + } + if (likely(segs == 1)) { + uintptr_t addr; + uint32_t length; + uint32_t lkey; + uintptr_t buf_next_addr; + + /* Retrieve buffer information. 
*/ + addr = rte_pktmbuf_mtod(buf, uintptr_t); + length = DATA_LEN(buf); + /* Update element. */ + elt->buf = buf; + if (txq->priv->vf) + rte_prefetch0((volatile void *) + (uintptr_t)addr); + /* Prefetch next buffer data. */ + if (i + 1 < max) { + buf_next_addr = + rte_pktmbuf_mtod(buf_next, uintptr_t); + rte_prefetch0((volatile void *) + (uintptr_t)buf_next_addr); + } + /* Put packet into send queue. */ +#if MLX5_PMD_MAX_INLINE > 0 + if (length <= txq->max_inline) { +#ifdef HAVE_VERBS_VLAN_INSERTION + if (insert_vlan) + err = txq->send_pending_inline_vlan + (txq->qp, + (void *)addr, + length, + send_flags, + &buf->vlan_tci); + else +#endif /* HAVE_VERBS_VLAN_INSERTION */ + err = txq->send_pending_inline + (txq->qp, + (void *)addr, + length, + send_flags); + } else +#endif + { + /* Retrieve Memory Region key for this + * memory pool. */ + lkey = txq_mp2mr(txq, txq_mb2mp(buf)); + if (unlikely(lkey == (uint32_t)-1)) { + /* MR does not exist. */ + DEBUG("%p: unable to get MP <-> MR" + " association", (void *)txq); + /* Clean up TX element. */ + elt->buf = NULL; + goto stop; + } +#ifdef HAVE_VERBS_VLAN_INSERTION + if (insert_vlan) + err = txq->send_pending_vlan + (txq->qp, + addr, + length, + lkey, + send_flags, + &buf->vlan_tci); + else +#endif /* HAVE_VERBS_VLAN_INSERTION */ + err = txq->send_pending + (txq->qp, + addr, + length, + lkey, + send_flags); + } + if (unlikely(err)) + goto stop; +#ifdef MLX5_PMD_SOFT_COUNTERS + sent_size += length; +#endif + } else { +#if MLX5_PMD_SGE_WR_N > 1 + struct ibv_sge sges[MLX5_PMD_SGE_WR_N]; + struct tx_burst_sg_ret ret; + + ret = tx_burst_sg(txq, segs, elt, buf, elts_head, + &sges); + if (ret.length == (unsigned int)-1) + goto stop; + /* Put SG list into send queue. */ +#ifdef HAVE_VERBS_VLAN_INSERTION + if (insert_vlan) + err = txq->send_pending_sg_list_vlan + (txq->qp, + sges, + ret.num, + send_flags, + &buf->vlan_tci); + else +#endif /* HAVE_VERBS_VLAN_INSERTION */ + err = txq->send_pending_sg_list + (txq->qp, + sges, + ret.num, + send_flags); + if (unlikely(err)) + goto stop; +#ifdef MLX5_PMD_SOFT_COUNTERS + sent_size += ret.length; +#endif +#else /* MLX5_PMD_SGE_WR_N > 1 */ + DEBUG("%p: TX scattered buffers support not" + " compiled in", (void *)txq); + goto stop; +#endif /* MLX5_PMD_SGE_WR_N > 1 */ + } + elts_head = elts_head_next; + buf = buf_next; +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Increment sent bytes counter. */ + txq->stats.obytes += sent_size; +#endif + } +stop: + /* Take a shortcut if nothing must be sent. */ + if (unlikely(i == 0)) + return 0; +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Increment sent packets counter. */ + txq->stats.opackets += i; +#endif + /* Ring QP doorbell. */ + err = txq->send_flush(txq->qp); + if (unlikely(err)) { + /* A nonzero value is not supposed to be returned. + * Nothing can be done about it. */ + DEBUG("%p: send_flush() failed with error %d", + (void *)txq, err); + } + txq->elts_head = elts_head; + txq->elts_comp += elts_comp; + txq->elts_comp_cd = elts_comp_cd; + return i; +} + +/** + * Translate RX completion flags to packet type. + * + * @param flags + * RX completion flags returned by poll_length_flags(). + * + * @note: fix mlx5_dev_supported_ptypes_get() if any change here. + * + * @return + * Packet type for struct rte_mbuf. 
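+ * + * For illustration: a completion flagged with IBV_EXP_CQ_RX_TUNNEL_PACKET, + * IBV_EXP_CQ_RX_OUTER_IPV4_PACKET and IBV_EXP_CQ_RX_IPV4_PACKET (IPv4 + * carried in an IPv4 VXLAN tunnel, for instance) is reported as + * RTE_PTYPE_L3_IPV4 | RTE_PTYPE_INNER_L3_IPV4.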
+ */ +static inline uint32_t +rxq_cq_to_pkt_type(uint32_t flags) +{ + uint32_t pkt_type; + + if (flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) + pkt_type = + TRANSPOSE(flags, + IBV_EXP_CQ_RX_OUTER_IPV4_PACKET, + RTE_PTYPE_L3_IPV4) | + TRANSPOSE(flags, + IBV_EXP_CQ_RX_OUTER_IPV6_PACKET, + RTE_PTYPE_L3_IPV6) | + TRANSPOSE(flags, + IBV_EXP_CQ_RX_IPV4_PACKET, + RTE_PTYPE_INNER_L3_IPV4) | + TRANSPOSE(flags, + IBV_EXP_CQ_RX_IPV6_PACKET, + RTE_PTYPE_INNER_L3_IPV6); + else + pkt_type = + TRANSPOSE(flags, + IBV_EXP_CQ_RX_IPV4_PACKET, + RTE_PTYPE_L3_IPV4) | + TRANSPOSE(flags, + IBV_EXP_CQ_RX_IPV6_PACKET, + RTE_PTYPE_L3_IPV6); + return pkt_type; +} + +/** + * Translate RX completion flags to offload flags. + * + * @param[in] rxq + * Pointer to RX queue structure. + * @param flags + * RX completion flags returned by poll_length_flags(). + * + * @return + * Offload flags (ol_flags) for struct rte_mbuf. + */ +static inline uint32_t +rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags) +{ + uint32_t ol_flags = 0; + + if (rxq->csum) { + /* Set IP checksum flag only for IPv4/IPv6 packets. */ + if (flags & + (IBV_EXP_CQ_RX_IPV4_PACKET | IBV_EXP_CQ_RX_IPV6_PACKET)) + ol_flags |= + TRANSPOSE(~flags, + IBV_EXP_CQ_RX_IP_CSUM_OK, + PKT_RX_IP_CKSUM_BAD); +#ifdef HAVE_EXP_CQ_RX_TCP_PACKET + /* Set L4 checksum flag only for TCP/UDP packets. */ + if (flags & + (IBV_EXP_CQ_RX_TCP_PACKET | IBV_EXP_CQ_RX_UDP_PACKET)) +#endif /* HAVE_EXP_CQ_RX_TCP_PACKET */ + ol_flags |= + TRANSPOSE(~flags, + IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK, + PKT_RX_L4_CKSUM_BAD); + } + /* + * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place + * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional + * (its value is 0). + */ + if ((flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun)) + ol_flags |= + TRANSPOSE(~flags, + IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK, + PKT_RX_IP_CKSUM_BAD) | + TRANSPOSE(~flags, + IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK, + PKT_RX_L4_CKSUM_BAD); + return ol_flags; +} + +/** + * DPDK callback for RX with scattered packets support. + * + * @param dpdk_rxq + * Generic pointer to RX queue structure. + * @param[out] pkts + * Array to store received packets. + * @param pkts_n + * Maximum number of packets in array. + * + * @return + * Number of packets successfully received (<= pkts_n). + */ +uint16_t +mlx5_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +{ + struct rxq *rxq = (struct rxq *)dpdk_rxq; + struct rxq_elt_sp (*elts)[rxq->elts_n] = rxq->elts.sp; + const unsigned int elts_n = rxq->elts_n; + unsigned int elts_head = rxq->elts_head; + unsigned int i; + unsigned int pkts_ret = 0; + int ret; + + if (unlikely(!rxq->sp)) + return mlx5_rx_burst(dpdk_rxq, pkts, pkts_n); + if (unlikely(elts == NULL)) /* See RTE_DEV_CMD_SET_MTU. */ + return 0; + for (i = 0; (i != pkts_n); ++i) { + struct rxq_elt_sp *elt = &(*elts)[elts_head]; + unsigned int len; + unsigned int pkt_buf_len; + struct rte_mbuf *pkt_buf = NULL; /* Buffer returned in pkts. */ + struct rte_mbuf **pkt_buf_next = &pkt_buf; + unsigned int seg_headroom = RTE_PKTMBUF_HEADROOM; + unsigned int j = 0; + uint32_t flags; + uint16_t vlan_tci; + + /* Sanity checks. */ + assert(elts_head < rxq->elts_n); + assert(rxq->elts_head < rxq->elts_n); + ret = rxq->poll(rxq->cq, NULL, NULL, &flags, &vlan_tci); + if (unlikely(ret < 0)) { + struct ibv_wc wc; + int wcs_n; + + DEBUG("rxq=%p, poll_length() failed (ret=%d)", + (void *)rxq, ret); + /* ibv_poll_cq() must be used in case of failure. 
*/ + wcs_n = ibv_poll_cq(rxq->cq, 1, &wc); + if (unlikely(wcs_n == 0)) + break; + if (unlikely(wcs_n < 0)) { + DEBUG("rxq=%p, ibv_poll_cq() failed (wcs_n=%d)", + (void *)rxq, wcs_n); + break; + } + assert(wcs_n == 1); + if (unlikely(wc.status != IBV_WC_SUCCESS)) { + /* Whatever, just repost the offending WR. */ + DEBUG("rxq=%p, wr_id=%" PRIu64 ": bad work" + " completion status (%d): %s", + (void *)rxq, wc.wr_id, wc.status, + ibv_wc_status_str(wc.status)); +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Increment dropped packets counter. */ + ++rxq->stats.idropped; +#endif + goto repost; + } + ret = wc.byte_len; + } + if (ret == 0) + break; + assert(ret >= (rxq->crc_present << 2)); + len = ret - (rxq->crc_present << 2); + pkt_buf_len = len; + /* + * Replace spent segments with new ones, concatenate and + * return them as pkt_buf. + */ + while (1) { + struct ibv_sge *sge = &elt->sges[j]; + struct rte_mbuf *seg = elt->bufs[j]; + struct rte_mbuf *rep; + unsigned int seg_tailroom; + + assert(seg != NULL); + /* + * Fetch initial bytes of packet descriptor into a + * cacheline while allocating rep. + */ + rte_prefetch0(seg); + rep = __rte_mbuf_raw_alloc(rxq->mp); + if (unlikely(rep == NULL)) { + /* + * Unable to allocate a replacement mbuf, + * repost WR. + */ + DEBUG("rxq=%p: can't allocate a new mbuf", + (void *)rxq); + if (pkt_buf != NULL) { + *pkt_buf_next = NULL; + rte_pktmbuf_free(pkt_buf); + } + /* Increment out of memory counters. */ + ++rxq->stats.rx_nombuf; + ++rxq->priv->dev->data->rx_mbuf_alloc_failed; + goto repost; + } +#ifndef NDEBUG + /* Poison user-modifiable fields in rep. */ + NEXT(rep) = (void *)((uintptr_t)-1); + SET_DATA_OFF(rep, 0xdead); + DATA_LEN(rep) = 0xd00d; + PKT_LEN(rep) = 0xdeadd00d; + NB_SEGS(rep) = 0x2a; + PORT(rep) = 0x2a; + rep->ol_flags = -1; +#endif + assert(rep->buf_len == seg->buf_len); + assert(rep->buf_len == rxq->mb_len); + /* Reconfigure sge to use rep instead of seg. */ + assert(sge->lkey == rxq->mr->lkey); + sge->addr = ((uintptr_t)rep->buf_addr + seg_headroom); + elt->bufs[j] = rep; + ++j; + /* Update pkt_buf if it's the first segment, or link + * seg to the previous one and update pkt_buf_next. */ + *pkt_buf_next = seg; + pkt_buf_next = &NEXT(seg); + /* Update seg information. */ + seg_tailroom = (seg->buf_len - seg_headroom); + assert(sge->length == seg_tailroom); + SET_DATA_OFF(seg, seg_headroom); + if (likely(len <= seg_tailroom)) { + /* Last segment. */ + DATA_LEN(seg) = len; + PKT_LEN(seg) = len; + /* Sanity check. */ + assert(rte_pktmbuf_headroom(seg) == + seg_headroom); + assert(rte_pktmbuf_tailroom(seg) == + (seg_tailroom - len)); + break; + } + DATA_LEN(seg) = seg_tailroom; + PKT_LEN(seg) = seg_tailroom; + /* Sanity check. */ + assert(rte_pktmbuf_headroom(seg) == seg_headroom); + assert(rte_pktmbuf_tailroom(seg) == 0); + /* Fix len and clear headroom for next segments. */ + len -= seg_tailroom; + seg_headroom = 0; + } + /* Update head and tail segments. */ + *pkt_buf_next = NULL; + assert(pkt_buf != NULL); + assert(j != 0); + NB_SEGS(pkt_buf) = j; + PORT(pkt_buf) = rxq->port_id; + PKT_LEN(pkt_buf) = pkt_buf_len; + if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip) { + pkt_buf->packet_type = rxq_cq_to_pkt_type(flags); + pkt_buf->ol_flags = rxq_cq_to_ol_flags(rxq, flags); +#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS + if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) { + pkt_buf->ol_flags |= PKT_RX_VLAN_PKT; + pkt_buf->vlan_tci = vlan_tci; + } +#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ + } + + /* Return packet. 
*/ + *(pkts++) = pkt_buf; + ++pkts_ret; +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Increment bytes counter. */ + rxq->stats.ibytes += pkt_buf_len; +#endif +repost: + ret = rxq->recv(rxq->wq, elt->sges, RTE_DIM(elt->sges)); + if (unlikely(ret)) { + /* Inability to repost WRs is fatal. */ + DEBUG("%p: recv_sg_list(): failed (ret=%d)", + (void *)rxq->priv, + ret); + abort(); + } + if (++elts_head >= elts_n) + elts_head = 0; + continue; + } + if (unlikely(i == 0)) + return 0; + rxq->elts_head = elts_head; +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Increment packets counter. */ + rxq->stats.ipackets += pkts_ret; +#endif + return pkts_ret; +} + +/** + * DPDK callback for RX. + * + * The following function is the same as mlx5_rx_burst_sp(), except it doesn't + * manage scattered packets. Improves performance when MRU is lower than the + * size of the first segment. + * + * @param dpdk_rxq + * Generic pointer to RX queue structure. + * @param[out] pkts + * Array to store received packets. + * @param pkts_n + * Maximum number of packets in array. + * + * @return + * Number of packets successfully received (<= pkts_n). + */ +uint16_t +mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +{ + struct rxq *rxq = (struct rxq *)dpdk_rxq; + struct rxq_elt (*elts)[rxq->elts_n] = rxq->elts.no_sp; + const unsigned int elts_n = rxq->elts_n; + unsigned int elts_head = rxq->elts_head; + struct ibv_sge sges[pkts_n]; + unsigned int i; + unsigned int pkts_ret = 0; + int ret; + + if (unlikely(rxq->sp)) + return mlx5_rx_burst_sp(dpdk_rxq, pkts, pkts_n); + for (i = 0; (i != pkts_n); ++i) { + struct rxq_elt *elt = &(*elts)[elts_head]; + unsigned int len; + struct rte_mbuf *seg = elt->buf; + struct rte_mbuf *rep; + uint32_t flags; + uint16_t vlan_tci; + + /* Sanity checks. */ + assert(seg != NULL); + assert(elts_head < rxq->elts_n); + assert(rxq->elts_head < rxq->elts_n); + /* + * Fetch initial bytes of packet descriptor into a + * cacheline while allocating rep. + */ + rte_prefetch0(seg); + rte_prefetch0(&seg->cacheline1); + ret = rxq->poll(rxq->cq, NULL, NULL, &flags, &vlan_tci); + if (unlikely(ret < 0)) { + struct ibv_wc wc; + int wcs_n; + + DEBUG("rxq=%p, poll_length() failed (ret=%d)", + (void *)rxq, ret); + /* ibv_poll_cq() must be used in case of failure. */ + wcs_n = ibv_poll_cq(rxq->cq, 1, &wc); + if (unlikely(wcs_n == 0)) + break; + if (unlikely(wcs_n < 0)) { + DEBUG("rxq=%p, ibv_poll_cq() failed (wcs_n=%d)", + (void *)rxq, wcs_n); + break; + } + assert(wcs_n == 1); + if (unlikely(wc.status != IBV_WC_SUCCESS)) { + /* Whatever, just repost the offending WR. */ + DEBUG("rxq=%p, wr_id=%" PRIu64 ": bad work" + " completion status (%d): %s", + (void *)rxq, wc.wr_id, wc.status, + ibv_wc_status_str(wc.status)); +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Increment dropped packets counter. */ + ++rxq->stats.idropped; +#endif + /* Add SGE to array for repost. */ + sges[i] = elt->sge; + goto repost; + } + ret = wc.byte_len; + } + if (ret == 0) + break; + assert(ret >= (rxq->crc_present << 2)); + len = ret - (rxq->crc_present << 2); + rep = __rte_mbuf_raw_alloc(rxq->mp); + if (unlikely(rep == NULL)) { + /* + * Unable to allocate a replacement mbuf, + * repost WR. + */ + DEBUG("rxq=%p: can't allocate a new mbuf", + (void *)rxq); + /* Increment out of memory counters. */ + ++rxq->stats.rx_nombuf; + ++rxq->priv->dev->data->rx_mbuf_alloc_failed; + goto repost; + } + + /* Reconfigure sge to use rep instead of seg. 
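In both RX burst functions, crc_present is a single-bit field, so (rxq->crc_present << 2) evaluates to either 0 or 4 and quietly removes the 4-byte Ethernet FCS from the reported length when the adapter delivers it. A tiny check of that expression:

#include <stdio.h>

int main(void)
{
    unsigned int crc_present;

    for (crc_present = 0; crc_present <= 1; ++crc_present) {
        unsigned int byte_len = 64;                       /* from the completion */
        unsigned int len = byte_len - (crc_present << 2); /* drop FCS if present */

        printf("crc_present=%u -> packet length %u\n", crc_present, len);
    }
    return 0;
}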
*/ + elt->sge.addr = (uintptr_t)rep->buf_addr + RTE_PKTMBUF_HEADROOM; + assert(elt->sge.lkey == rxq->mr->lkey); + elt->buf = rep; + + /* Add SGE to array for repost. */ + sges[i] = elt->sge; + + /* Update seg information. */ + SET_DATA_OFF(seg, RTE_PKTMBUF_HEADROOM); + NB_SEGS(seg) = 1; + PORT(seg) = rxq->port_id; + NEXT(seg) = NULL; + PKT_LEN(seg) = len; + DATA_LEN(seg) = len; + if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip) { + seg->packet_type = rxq_cq_to_pkt_type(flags); + seg->ol_flags = rxq_cq_to_ol_flags(rxq, flags); +#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS + if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) { + seg->ol_flags |= PKT_RX_VLAN_PKT; + seg->vlan_tci = vlan_tci; + } +#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ + } + /* Return packet. */ + *(pkts++) = seg; + ++pkts_ret; +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Increment bytes counter. */ + rxq->stats.ibytes += len; +#endif +repost: + if (++elts_head >= elts_n) + elts_head = 0; + continue; + } + if (unlikely(i == 0)) + return 0; + /* Repost WRs. */ +#ifdef DEBUG_RECV + DEBUG("%p: reposting %u WRs", (void *)rxq, i); +#endif + ret = rxq->recv(rxq->wq, sges, i); + if (unlikely(ret)) { + /* Inability to repost WRs is fatal. */ + DEBUG("%p: recv_burst(): failed (ret=%d)", + (void *)rxq->priv, + ret); + abort(); + } + rxq->elts_head = elts_head; +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Increment packets counter. */ + rxq->stats.ipackets += pkts_ret; +#endif + return pkts_ret; +} + +/** + * Dummy DPDK callback for TX. + * + * This function is used to temporarily replace the real callback during + * unsafe control operations on the queue, or in case of error. + * + * @param dpdk_txq + * Generic pointer to TX queue structure. + * @param[in] pkts + * Packets to transmit. + * @param pkts_n + * Number of packets in array. + * + * @return + * Number of packets successfully transmitted (<= pkts_n). + */ +uint16_t +removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) +{ + (void)dpdk_txq; + (void)pkts; + (void)pkts_n; + return 0; +} + +/** + * Dummy DPDK callback for RX. + * + * This function is used to temporarily replace the real callback during + * unsafe control operations on the queue, or in case of error. + * + * @param dpdk_rxq + * Generic pointer to RX queue structure. + * @param[out] pkts + * Array to store received packets. + * @param pkts_n + * Maximum number of packets in array. + * + * @return + * Number of packets successfully received (<= pkts_n). + */ +uint16_t +removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +{ + (void)dpdk_rxq; + (void)pkts; + (void)pkts_n; + return 0; +} diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h new file mode 100644 index 00000000..0e2b607d --- /dev/null +++ b/drivers/net/mlx5/mlx5_rxtx.h @@ -0,0 +1,352 @@ +/*- + * BSD LICENSE + * + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. 
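The removed_tx_burst()/removed_rx_burst() stubs defined just below exist so the data path can be parked while a queue is reconfigured: the driver points the device's rx_pkt_burst/tx_pkt_burst handlers at a function that simply returns 0, performs the control operation, then restores the real handler. A schematic sketch of that pattern; the struct and handler names here are illustrative stand-ins, not the rte_ethdev definitions.

#include <stdint.h>
#include <stdio.h>

struct pkt; /* opaque for the sketch */

typedef uint16_t (*burst_fn)(void *queue, struct pkt **pkts, uint16_t n);

static uint16_t real_rx_burst(void *q, struct pkt **p, uint16_t n)
{ (void)q; (void)p; return n; }          /* pretend n packets were received */

static uint16_t parked_rx_burst(void *q, struct pkt **p, uint16_t n)
{ (void)q; (void)p; (void)n; return 0; } /* data path parked */

struct fake_dev { burst_fn rx_pkt_burst; };

int main(void)
{
    struct fake_dev dev = { .rx_pkt_burst = real_rx_burst };

    dev.rx_pkt_burst = parked_rx_burst;  /* before touching the queue */
    /* ... destroy/recreate queue resources here ... */
    dev.rx_pkt_burst = real_rx_burst;    /* restore once it is safe */
    printf("rx after restore: %u\n", dev.rx_pkt_burst(NULL, NULL, 4));
    return 0;
}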
nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RTE_PMD_MLX5_RXTX_H_ +#define RTE_PMD_MLX5_RXTX_H_ + +#include <stddef.h> +#include <stdint.h> + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <infiniband/verbs.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +/* DPDK headers don't like -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <rte_mbuf.h> +#include <rte_mempool.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +#include "mlx5_utils.h" +#include "mlx5.h" +#include "mlx5_autoconf.h" +#include "mlx5_defs.h" + +struct mlx5_rxq_stats { + unsigned int idx; /**< Mapping index. */ +#ifdef MLX5_PMD_SOFT_COUNTERS + uint64_t ipackets; /**< Total of successfully received packets. */ + uint64_t ibytes; /**< Total of successfully received bytes. */ +#endif + uint64_t idropped; /**< Total of packets dropped when RX ring full. */ + uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */ +}; + +struct mlx5_txq_stats { + unsigned int idx; /**< Mapping index. */ +#ifdef MLX5_PMD_SOFT_COUNTERS + uint64_t opackets; /**< Total of successfully sent packets. */ + uint64_t obytes; /**< Total of successfully sent bytes. */ +#endif + uint64_t odropped; /**< Total of packets not sent when TX ring full. */ +}; + +/* RX element (scattered packets). */ +struct rxq_elt_sp { + struct ibv_sge sges[MLX5_PMD_SGE_WR_N]; /* Scatter/Gather Elements. */ + struct rte_mbuf *bufs[MLX5_PMD_SGE_WR_N]; /* SGEs buffers. */ +}; + +/* RX element. */ +struct rxq_elt { + struct ibv_sge sge; /* Scatter/Gather Element. */ + struct rte_mbuf *buf; /* SGE buffer. */ +}; + +/* Flow director queue structure. */ +struct fdir_queue { + struct ibv_qp *qp; /* Associated RX QP. */ + struct ibv_exp_rwq_ind_table *ind_table; /* Indirection table. */ +}; + +struct priv; + +/* RX queue descriptor. */ +struct rxq { + struct priv *priv; /* Back pointer to private data. */ + struct rte_mempool *mp; /* Memory Pool for allocations. */ + struct ibv_cq *cq; /* Completion Queue. */ + struct ibv_exp_wq *wq; /* Work Queue. */ + int32_t (*poll)(); /* Verbs poll function. */ + int32_t (*recv)(); /* Verbs receive function. */ + unsigned int port_id; /* Port ID for incoming packets. */ + unsigned int elts_n; /* (*elts)[] length. */ + unsigned int elts_head; /* Current index in (*elts)[]. */ + unsigned int sp:1; /* Use scattered RX elements. */ + unsigned int csum:1; /* Enable checksum offloading. 
*/ + unsigned int csum_l2tun:1; /* Same for L2 tunnels. */ + unsigned int vlan_strip:1; /* Enable VLAN stripping. */ + unsigned int crc_present:1; /* CRC must be subtracted. */ + union { + struct rxq_elt_sp (*sp)[]; /* Scattered RX elements. */ + struct rxq_elt (*no_sp)[]; /* RX elements. */ + } elts; + uint32_t mb_len; /* Length of a mp-issued mbuf. */ + unsigned int socket; /* CPU socket ID for allocations. */ + struct mlx5_rxq_stats stats; /* RX queue counters. */ + struct ibv_exp_res_domain *rd; /* Resource Domain. */ + struct fdir_queue fdir_queue; /* Flow director queue. */ + struct ibv_mr *mr; /* Memory Region (for mp). */ + struct ibv_exp_wq_family *if_wq; /* WQ burst interface. */ +#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS + struct ibv_exp_cq_family_v1 *if_cq; /* CQ interface. */ +#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ + struct ibv_exp_cq_family *if_cq; /* CQ interface. */ +#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ +}; + +/* Hash RX queue types. */ +enum hash_rxq_type { + HASH_RXQ_TCPV4, + HASH_RXQ_UDPV4, + HASH_RXQ_IPV4, +#ifdef HAVE_FLOW_SPEC_IPV6 + HASH_RXQ_TCPV6, + HASH_RXQ_UDPV6, + HASH_RXQ_IPV6, +#endif /* HAVE_FLOW_SPEC_IPV6 */ + HASH_RXQ_ETH, +}; + +/* Flow structure with Ethernet specification. It is packed to prevent padding + * between attr and spec as this layout is expected by libibverbs. */ +struct flow_attr_spec_eth { + struct ibv_exp_flow_attr attr; + struct ibv_exp_flow_spec_eth spec; +} __attribute__((packed)); + +/* Define a struct flow_attr_spec_eth object as an array of at least + * "size" bytes. Room after the first index is normally used to store + * extra flow specifications. */ +#define FLOW_ATTR_SPEC_ETH(name, size) \ + struct flow_attr_spec_eth name \ + [((size) / sizeof(struct flow_attr_spec_eth)) + \ + !!((size) % sizeof(struct flow_attr_spec_eth))] + +/* Initialization data for hash RX queue. */ +struct hash_rxq_init { + uint64_t hash_fields; /* Fields that participate in the hash. */ + uint64_t dpdk_rss_hf; /* Matching DPDK RSS hash fields. */ + unsigned int flow_priority; /* Flow priority to use. */ + union { + struct { + enum ibv_exp_flow_spec_type type; + uint16_t size; + } hdr; + struct ibv_exp_flow_spec_tcp_udp tcp_udp; + struct ibv_exp_flow_spec_ipv4 ipv4; +#ifdef HAVE_FLOW_SPEC_IPV6 + struct ibv_exp_flow_spec_ipv6 ipv6; +#endif /* HAVE_FLOW_SPEC_IPV6 */ + struct ibv_exp_flow_spec_eth eth; + } flow_spec; /* Flow specification template. */ + const struct hash_rxq_init *underlayer; /* Pointer to underlayer. */ +}; + +/* Initialization data for indirection table. */ +struct ind_table_init { + unsigned int max_size; /* Maximum number of WQs. */ + /* Hash RX queues using this table. */ + unsigned int hash_types; + unsigned int hash_types_n; +}; + +/* Initialization data for special flows. 
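The rxq->elts union above uses pointers to whole arrays, e.g. struct rxq_elt (*no_sp)[], and the burst functions re-declare them as variably sized array pointers (struct rxq_elt (*elts)[rxq->elts_n]) so elements are reached as (*elts)[i] and sizeof(*elts) covers the whole ring. A self-contained sketch of the idiom with a trivial element type:

#include <stdio.h>
#include <stdlib.h>

struct elt { int value; };

int main(void)
{
    unsigned int elts_n = 4;
    /* Pointer to an array of elts_n elements, as used for rxq->elts. */
    struct elt (*elts)[elts_n] = calloc(1, sizeof(*elts));
    unsigned int i;

    if (elts == NULL)
        return 1;
    for (i = 0; i != elts_n; ++i)
        (*elts)[i].value = (int)i;
    printf("sizeof(*elts) = %zu, last value = %d\n",
           sizeof(*elts), (*elts)[elts_n - 1].value);
    free(elts);
    return 0;
}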
*/ +struct special_flow_init { + uint8_t dst_mac_val[6]; + uint8_t dst_mac_mask[6]; + unsigned int hash_types; + unsigned int per_vlan:1; +}; + +enum hash_rxq_flow_type { + HASH_RXQ_FLOW_TYPE_PROMISC, + HASH_RXQ_FLOW_TYPE_ALLMULTI, + HASH_RXQ_FLOW_TYPE_BROADCAST, + HASH_RXQ_FLOW_TYPE_IPV6MULTI, + HASH_RXQ_FLOW_TYPE_MAC, +}; + +#ifndef NDEBUG +static inline const char * +hash_rxq_flow_type_str(enum hash_rxq_flow_type flow_type) +{ + switch (flow_type) { + case HASH_RXQ_FLOW_TYPE_PROMISC: + return "promiscuous"; + case HASH_RXQ_FLOW_TYPE_ALLMULTI: + return "allmulticast"; + case HASH_RXQ_FLOW_TYPE_BROADCAST: + return "broadcast"; + case HASH_RXQ_FLOW_TYPE_IPV6MULTI: + return "IPv6 multicast"; + case HASH_RXQ_FLOW_TYPE_MAC: + return "MAC"; + } + return NULL; +} +#endif /* NDEBUG */ + +struct hash_rxq { + struct priv *priv; /* Back pointer to private data. */ + struct ibv_qp *qp; /* Hash RX QP. */ + enum hash_rxq_type type; /* Hash RX queue type. */ + /* MAC flow steering rules, one per VLAN ID. */ + struct ibv_exp_flow *mac_flow[MLX5_MAX_MAC_ADDRESSES][MLX5_MAX_VLAN_IDS]; + struct ibv_exp_flow *special_flow + [MLX5_MAX_SPECIAL_FLOWS][MLX5_MAX_VLAN_IDS]; +}; + +/* TX element. */ +struct txq_elt { + struct rte_mbuf *buf; +}; + +/* Linear buffer type. It is used when transmitting buffers with too many + * segments that do not fit the hardware queue (see max_send_sge). + * Extra segments are copied (linearized) in such buffers, replacing the + * last SGE during TX. + * The size is arbitrary but large enough to hold a jumbo frame with + * 8 segments considering mbuf.buf_len is about 2048 bytes. */ +typedef uint8_t linear_t[16384]; + +/* TX queue descriptor. */ +struct txq { + struct priv *priv; /* Back pointer to private data. */ + int32_t (*poll_cnt)(struct ibv_cq *cq, uint32_t max); + int (*send_pending)(); +#ifdef HAVE_VERBS_VLAN_INSERTION + int (*send_pending_vlan)(); +#endif +#if MLX5_PMD_MAX_INLINE > 0 + int (*send_pending_inline)(); +#ifdef HAVE_VERBS_VLAN_INSERTION + int (*send_pending_inline_vlan)(); +#endif +#endif +#if MLX5_PMD_SGE_WR_N > 1 + int (*send_pending_sg_list)(); +#ifdef HAVE_VERBS_VLAN_INSERTION + int (*send_pending_sg_list_vlan)(); +#endif +#endif + int (*send_flush)(struct ibv_qp *qp); + struct ibv_cq *cq; /* Completion Queue. */ + struct ibv_qp *qp; /* Queue Pair. */ + struct txq_elt (*elts)[]; /* TX elements. */ +#if MLX5_PMD_MAX_INLINE > 0 + uint32_t max_inline; /* Max inline send size <= MLX5_PMD_MAX_INLINE. */ +#endif + unsigned int elts_n; /* (*elts)[] length. */ + unsigned int elts_head; /* Current index in (*elts)[]. */ + unsigned int elts_tail; /* First element awaiting completion. */ + unsigned int elts_comp; /* Number of completion requests. */ + unsigned int elts_comp_cd; /* Countdown for next completion request. */ + unsigned int elts_comp_cd_init; /* Initial value for countdown. */ + struct { + const struct rte_mempool *mp; /* Cached Memory Pool. */ + struct ibv_mr *mr; /* Memory Region (for mp). */ + uint32_t lkey; /* mr->lkey */ + } mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MP to MR translation table. */ + struct mlx5_txq_stats stats; /* TX queue counters. */ + /* Elements used only for init part are here. */ + linear_t (*elts_linear)[]; /* Linearized buffers. */ + struct ibv_mr *mr_linear; /* Memory Region for linearized buffers. */ +#ifdef HAVE_VERBS_VLAN_INSERTION + struct ibv_exp_qp_burst_family_v1 *if_qp; /* QP burst interface. */ +#else + struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */ +#endif + struct ibv_exp_cq_family *if_cq; /* CQ interface. 
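The linear_t comment below describes the fallback for over-fragmented packets: once a chain has more segments than the QP accepts scatter/gather entries (max_send_sge), the trailing segments are copied back-to-back into one flat buffer that replaces the last SGE. A simplified sketch of that copy step; plain C strings stand in for mbuf segments and the 16 KB linear_t slot.

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* Trailing segments that no longer fit the SGE list. */
    const char *segs[] = { "seg-a|", "seg-b|", "seg-c" };
    char linear[64]; /* stand-in for one linear_t slot */
    size_t off = 0;
    size_t i;

    for (i = 0; i < sizeof(segs) / sizeof(segs[0]); ++i) {
        size_t len = strlen(segs[i]);

        if (off + len > sizeof(linear))
            return 1; /* would not fit this sketch's buffer */
        memcpy(linear + off, segs[i], len);
        off += len;
    }
    linear[off] = '\0';
    printf("linearized %zu bytes: %s\n", off, linear);
    return 0;
}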
*/ + struct ibv_exp_res_domain *rd; /* Resource Domain. */ + unsigned int socket; /* CPU socket ID for allocations. */ +}; + +/* mlx5_rxq.c */ + +extern const struct hash_rxq_init hash_rxq_init[]; +extern const unsigned int hash_rxq_init_n; + +extern uint8_t rss_hash_default_key[]; +extern const size_t rss_hash_default_key_len; + +size_t priv_flow_attr(struct priv *, struct ibv_exp_flow_attr *, + size_t, enum hash_rxq_type); +int priv_create_hash_rxqs(struct priv *); +void priv_destroy_hash_rxqs(struct priv *); +int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type); +int priv_rehash_flows(struct priv *); +void rxq_cleanup(struct rxq *); +int rxq_rehash(struct rte_eth_dev *, struct rxq *); +int rxq_setup(struct rte_eth_dev *, struct rxq *, uint16_t, unsigned int, + const struct rte_eth_rxconf *, struct rte_mempool *); +int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, + const struct rte_eth_rxconf *, struct rte_mempool *); +void mlx5_rx_queue_release(void *); +uint16_t mlx5_rx_burst_secondary_setup(void *dpdk_rxq, struct rte_mbuf **pkts, + uint16_t pkts_n); + + +/* mlx5_txq.c */ + +void txq_cleanup(struct txq *); +int txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc, + unsigned int socket, const struct rte_eth_txconf *conf); + +int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, + const struct rte_eth_txconf *); +void mlx5_tx_queue_release(void *); +uint16_t mlx5_tx_burst_secondary_setup(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); + +/* mlx5_rxtx.c */ + +struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, const struct rte_mempool *); +void txq_mp2mr_iter(const struct rte_mempool *, void *); +uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t); +uint16_t mlx5_rx_burst_sp(void *, struct rte_mbuf **, uint16_t); +uint16_t mlx5_rx_burst(void *, struct rte_mbuf **, uint16_t); +uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t); +uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t); + +#endif /* RTE_PMD_MLX5_RXTX_H_ */ diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c new file mode 100644 index 00000000..2d3cb519 --- /dev/null +++ b/drivers/net/mlx5/mlx5_stats.c @@ -0,0 +1,144 @@ +/*- + * BSD LICENSE + * + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* DPDK headers don't like -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <rte_ethdev.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +#include "mlx5.h" +#include "mlx5_rxtx.h" +#include "mlx5_defs.h" + +/** + * DPDK callback to get device statistics. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[out] stats + * Stats structure output buffer. + */ +void +mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct priv *priv = mlx5_get_priv(dev); + struct rte_eth_stats tmp = {0}; + unsigned int i; + unsigned int idx; + + priv_lock(priv); + /* Add software counters. */ + for (i = 0; (i != priv->rxqs_n); ++i) { + struct rxq *rxq = (*priv->rxqs)[i]; + + if (rxq == NULL) + continue; + idx = rxq->stats.idx; + if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) { +#ifdef MLX5_PMD_SOFT_COUNTERS + tmp.q_ipackets[idx] += rxq->stats.ipackets; + tmp.q_ibytes[idx] += rxq->stats.ibytes; +#endif + tmp.q_errors[idx] += (rxq->stats.idropped + + rxq->stats.rx_nombuf); + } +#ifdef MLX5_PMD_SOFT_COUNTERS + tmp.ipackets += rxq->stats.ipackets; + tmp.ibytes += rxq->stats.ibytes; +#endif + tmp.ierrors += rxq->stats.idropped; + tmp.rx_nombuf += rxq->stats.rx_nombuf; + } + for (i = 0; (i != priv->txqs_n); ++i) { + struct txq *txq = (*priv->txqs)[i]; + + if (txq == NULL) + continue; + idx = txq->stats.idx; + if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) { +#ifdef MLX5_PMD_SOFT_COUNTERS + tmp.q_opackets[idx] += txq->stats.opackets; + tmp.q_obytes[idx] += txq->stats.obytes; +#endif + tmp.q_errors[idx] += txq->stats.odropped; + } +#ifdef MLX5_PMD_SOFT_COUNTERS + tmp.opackets += txq->stats.opackets; + tmp.obytes += txq->stats.obytes; +#endif + tmp.oerrors += txq->stats.odropped; + } +#ifndef MLX5_PMD_SOFT_COUNTERS + /* FIXME: retrieve and add hardware counters. */ +#endif + *stats = tmp; + priv_unlock(priv); +} + +/** + * DPDK callback to clear device statistics. + * + * @param dev + * Pointer to Ethernet device structure. + */ +void +mlx5_stats_reset(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + unsigned int i; + unsigned int idx; + + priv_lock(priv); + for (i = 0; (i != priv->rxqs_n); ++i) { + if ((*priv->rxqs)[i] == NULL) + continue; + idx = (*priv->rxqs)[i]->stats.idx; + (*priv->rxqs)[i]->stats = + (struct mlx5_rxq_stats){ .idx = idx }; + } + for (i = 0; (i != priv->txqs_n); ++i) { + if ((*priv->txqs)[i] == NULL) + continue; + idx = (*priv->txqs)[i]->stats.idx; + (*priv->txqs)[i]->stats = + (struct mlx5_txq_stats){ .idx = idx }; + } +#ifndef MLX5_PMD_SOFT_COUNTERS + /* FIXME: reset hardware counters. */ +#endif + priv_unlock(priv); +} diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c new file mode 100644 index 00000000..e9b9a293 --- /dev/null +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -0,0 +1,126 @@ +/*- + * BSD LICENSE + * + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox. 
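mlx5_stats_get() above folds the per-queue software counters into one rte_eth_stats structure, and fills the per-queue q_* arrays only for indices below RTE_ETHDEV_QUEUE_STAT_CNTRS, since DPDK exposes a fixed number of per-queue slots; the device-wide totals always include every queue. A reduced sketch of that aggregation with simplified stats structures (not the rte_ethdev ones):

#include <stdint.h>
#include <stdio.h>

#define QUEUE_STAT_CNTRS 2 /* stand-in for RTE_ETHDEV_QUEUE_STAT_CNTRS */

struct q_stats { unsigned int idx; uint64_t ipackets; };

int main(void)
{
    struct q_stats rxqs[] = { {0, 100}, {1, 200}, {5, 300} };
    uint64_t q_ipackets[QUEUE_STAT_CNTRS] = {0};
    uint64_t ipackets = 0;
    size_t i;

    for (i = 0; i < sizeof(rxqs) / sizeof(rxqs[0]); ++i) {
        if (rxqs[i].idx < QUEUE_STAT_CNTRS)
            q_ipackets[rxqs[i].idx] += rxqs[i].ipackets;
        /* Totals always include every queue. */
        ipackets += rxqs[i].ipackets;
    }
    printf("total=%llu, q[0]=%llu, q[1]=%llu\n",
           (unsigned long long)ipackets,
           (unsigned long long)q_ipackets[0],
           (unsigned long long)q_ipackets[1]);
    return 0;
}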
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* DPDK headers don't like -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_interrupts.h> +#include <rte_alarm.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +#include "mlx5.h" +#include "mlx5_rxtx.h" +#include "mlx5_utils.h" + +/** + * DPDK callback to start the device. + * + * Simulate device start by attaching all configured flows. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative errno value on failure. + */ +int +mlx5_dev_start(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + int err; + + if (mlx5_is_secondary()) + return -E_RTE_SECONDARY; + + priv_lock(priv); + if (priv->started) { + priv_unlock(priv); + return 0; + } + DEBUG("%p: allocating and configuring hash RX queues", (void *)dev); + err = priv_create_hash_rxqs(priv); + if (!err) + err = priv_rehash_flows(priv); + if (!err) + priv->started = 1; + else { + ERROR("%p: an error occurred while configuring hash RX queues:" + " %s", + (void *)priv, strerror(err)); + /* Rollback. */ + priv_special_flow_disable_all(priv); + priv_mac_addrs_disable(priv); + priv_destroy_hash_rxqs(priv); + } + if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) + priv_fdir_enable(priv); + priv_dev_interrupt_handler_install(priv, dev); + priv_unlock(priv); + return -err; +} + +/** + * DPDK callback to stop the device. + * + * Simulate device stop by detaching all configured flows. + * + * @param dev + * Pointer to Ethernet device structure. 
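Note the sign convention visible throughout these files: internal helpers such as priv_create_hash_rxqs() or txq_setup() return positive errno values, and only the outermost DPDK callbacks negate them, hence the trailing return -err in mlx5_dev_start() and return -ret in the queue-setup callbacks. A minimal illustration of the convention; the helper names below are made up for the sketch.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Internal helper: 0 on success, positive errno value on failure. */
static int helper_configure(int fail)
{
    return fail ? EINVAL : 0;
}

/* DPDK-style callback: 0 on success, negative errno value on failure. */
static int dev_callback(int fail)
{
    int err = helper_configure(fail);

    if (err)
        fprintf(stderr, "configure failed: %s\n", strerror(err));
    return -err; /* 0 stays 0, EINVAL becomes -EINVAL */
}

int main(void)
{
    printf("ok=%d, error=%d\n", dev_callback(0), dev_callback(1));
    return 0;
}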
+ */ +void +mlx5_dev_stop(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + + if (mlx5_is_secondary()) + return; + + priv_lock(priv); + if (!priv->started) { + priv_unlock(priv); + return; + } + DEBUG("%p: cleaning up and destroying hash RX queues", (void *)dev); + priv_special_flow_disable_all(priv); + priv_mac_addrs_disable(priv); + priv_destroy_hash_rxqs(priv); + priv_fdir_disable(priv); + priv_dev_interrupt_handler_uninstall(priv, dev); + priv->started = 0; + priv_unlock(priv); +} diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c new file mode 100644 index 00000000..31ce53ad --- /dev/null +++ b/drivers/net/mlx5/mlx5_txq.c @@ -0,0 +1,605 @@ +/*- + * BSD LICENSE + * + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <stddef.h> +#include <assert.h> +#include <errno.h> +#include <string.h> +#include <stdint.h> + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <infiniband/verbs.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +/* DPDK headers don't like -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <rte_mbuf.h> +#include <rte_malloc.h> +#include <rte_ethdev.h> +#include <rte_common.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +#include "mlx5_utils.h" +#include "mlx5.h" +#include "mlx5_rxtx.h" +#include "mlx5_autoconf.h" +#include "mlx5_defs.h" + +/** + * Allocate TX queue elements. + * + * @param txq + * Pointer to TX queue structure. + * @param elts_n + * Number of elements to allocate. + * + * @return + * 0 on success, errno value on failure. 
+ */ +static int +txq_alloc_elts(struct txq *txq, unsigned int elts_n) +{ + unsigned int i; + struct txq_elt (*elts)[elts_n] = + rte_calloc_socket("TXQ", 1, sizeof(*elts), 0, txq->socket); + linear_t (*elts_linear)[elts_n] = + rte_calloc_socket("TXQ", 1, sizeof(*elts_linear), 0, + txq->socket); + struct ibv_mr *mr_linear = NULL; + int ret = 0; + + if ((elts == NULL) || (elts_linear == NULL)) { + ERROR("%p: can't allocate packets array", (void *)txq); + ret = ENOMEM; + goto error; + } + mr_linear = + ibv_reg_mr(txq->priv->pd, elts_linear, sizeof(*elts_linear), + (IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE)); + if (mr_linear == NULL) { + ERROR("%p: unable to configure MR, ibv_reg_mr() failed", + (void *)txq); + ret = EINVAL; + goto error; + } + for (i = 0; (i != elts_n); ++i) { + struct txq_elt *elt = &(*elts)[i]; + + elt->buf = NULL; + } + DEBUG("%p: allocated and configured %u WRs", (void *)txq, elts_n); + txq->elts_n = elts_n; + txq->elts = elts; + txq->elts_head = 0; + txq->elts_tail = 0; + txq->elts_comp = 0; + /* Request send completion every MLX5_PMD_TX_PER_COMP_REQ packets or + * at least 4 times per ring. */ + txq->elts_comp_cd_init = + ((MLX5_PMD_TX_PER_COMP_REQ < (elts_n / 4)) ? + MLX5_PMD_TX_PER_COMP_REQ : (elts_n / 4)); + txq->elts_comp_cd = txq->elts_comp_cd_init; + txq->elts_linear = elts_linear; + txq->mr_linear = mr_linear; + assert(ret == 0); + return 0; +error: + if (mr_linear != NULL) + claim_zero(ibv_dereg_mr(mr_linear)); + + rte_free(elts_linear); + rte_free(elts); + + DEBUG("%p: failed, freed everything", (void *)txq); + assert(ret > 0); + return ret; +} + +/** + * Free TX queue elements. + * + * @param txq + * Pointer to TX queue structure. + */ +static void +txq_free_elts(struct txq *txq) +{ + unsigned int elts_n = txq->elts_n; + unsigned int elts_head = txq->elts_head; + unsigned int elts_tail = txq->elts_tail; + struct txq_elt (*elts)[elts_n] = txq->elts; + linear_t (*elts_linear)[elts_n] = txq->elts_linear; + struct ibv_mr *mr_linear = txq->mr_linear; + + DEBUG("%p: freeing WRs", (void *)txq); + txq->elts_n = 0; + txq->elts_head = 0; + txq->elts_tail = 0; + txq->elts_comp = 0; + txq->elts_comp_cd = 0; + txq->elts_comp_cd_init = 0; + txq->elts = NULL; + txq->elts_linear = NULL; + txq->mr_linear = NULL; + if (mr_linear != NULL) + claim_zero(ibv_dereg_mr(mr_linear)); + + rte_free(elts_linear); + if (elts == NULL) + return; + while (elts_tail != elts_head) { + struct txq_elt *elt = &(*elts)[elts_tail]; + + assert(elt->buf != NULL); + rte_pktmbuf_free(elt->buf); +#ifndef NDEBUG + /* Poisoning. */ + memset(elt, 0x77, sizeof(*elt)); +#endif + if (++elts_tail == elts_n) + elts_tail = 0; + } + rte_free(elts); +} + +/** + * Clean up a TX queue. + * + * Destroy objects, free allocated memory and reset the structure for reuse. + * + * @param txq + * Pointer to TX queue structure. 
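The elts_comp_cd_init computation in txq_alloc_elts() below is simply a min(): a completion is requested every MLX5_PMD_TX_PER_COMP_REQ packets, but never less than four times per ring, so small rings are still drained regularly. A quick check of the expression; the constant used here is an arbitrary example value, the driver's real one is MLX5_PMD_TX_PER_COMP_REQ from its defs header.

#include <stdio.h>

#define TX_PER_COMP_REQ 64 /* example value only */

int main(void)
{
    unsigned int sizes[] = { 64, 256, 1024 };
    size_t i;

    for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); ++i) {
        unsigned int elts_n = sizes[i];
        unsigned int cd_init = (TX_PER_COMP_REQ < (elts_n / 4)) ?
                               TX_PER_COMP_REQ : (elts_n / 4);

        printf("elts_n=%u -> completion requested every %u packets\n",
               elts_n, cd_init);
    }
    return 0;
}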
+ */ +void +txq_cleanup(struct txq *txq) +{ + struct ibv_exp_release_intf_params params; + size_t i; + + DEBUG("cleaning up %p", (void *)txq); + txq_free_elts(txq); + txq->poll_cnt = NULL; +#if MLX5_PMD_MAX_INLINE > 0 + txq->send_pending_inline = NULL; +#endif + txq->send_flush = NULL; + if (txq->if_qp != NULL) { + assert(txq->priv != NULL); + assert(txq->priv->ctx != NULL); + assert(txq->qp != NULL); + params = (struct ibv_exp_release_intf_params){ + .comp_mask = 0, + }; + claim_zero(ibv_exp_release_intf(txq->priv->ctx, + txq->if_qp, + ¶ms)); + } + if (txq->if_cq != NULL) { + assert(txq->priv != NULL); + assert(txq->priv->ctx != NULL); + assert(txq->cq != NULL); + params = (struct ibv_exp_release_intf_params){ + .comp_mask = 0, + }; + claim_zero(ibv_exp_release_intf(txq->priv->ctx, + txq->if_cq, + ¶ms)); + } + if (txq->qp != NULL) + claim_zero(ibv_destroy_qp(txq->qp)); + if (txq->cq != NULL) + claim_zero(ibv_destroy_cq(txq->cq)); + if (txq->rd != NULL) { + struct ibv_exp_destroy_res_domain_attr attr = { + .comp_mask = 0, + }; + + assert(txq->priv != NULL); + assert(txq->priv->ctx != NULL); + claim_zero(ibv_exp_destroy_res_domain(txq->priv->ctx, + txq->rd, + &attr)); + } + for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) { + if (txq->mp2mr[i].mp == NULL) + break; + assert(txq->mp2mr[i].mr != NULL); + claim_zero(ibv_dereg_mr(txq->mp2mr[i].mr)); + } + memset(txq, 0, sizeof(*txq)); +} + +/** + * Configure a TX queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param txq + * Pointer to TX queue structure. + * @param desc + * Number of descriptors to configure in queue. + * @param socket + * NUMA socket on which memory must be allocated. + * @param[in] conf + * Thresholds parameters. + * + * @return + * 0 on success, errno value on failure. + */ +int +txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc, + unsigned int socket, const struct rte_eth_txconf *conf) +{ + struct priv *priv = mlx5_get_priv(dev); + struct txq tmpl = { + .priv = priv, + .socket = socket + }; + union { + struct ibv_exp_query_intf_params params; + struct ibv_exp_qp_init_attr init; + struct ibv_exp_res_domain_init_attr rd; + struct ibv_exp_cq_init_attr cq; + struct ibv_exp_qp_attr mod; + } attr; + enum ibv_exp_query_intf_status status; + int ret = 0; + + (void)conf; /* Thresholds configuration (ignored). */ + if ((desc == 0) || (desc % MLX5_PMD_SGE_WR_N)) { + ERROR("%p: invalid number of TX descriptors (must be a" + " multiple of %d)", (void *)dev, MLX5_PMD_SGE_WR_N); + return EINVAL; + } + desc /= MLX5_PMD_SGE_WR_N; + /* MRs will be registered in mp2mr[] later. 
*/ + attr.rd = (struct ibv_exp_res_domain_init_attr){ + .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL | + IBV_EXP_RES_DOMAIN_MSG_MODEL), + .thread_model = IBV_EXP_THREAD_SINGLE, + .msg_model = IBV_EXP_MSG_HIGH_BW, + }; + tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd); + if (tmpl.rd == NULL) { + ret = ENOMEM; + ERROR("%p: RD creation failure: %s", + (void *)dev, strerror(ret)); + goto error; + } + attr.cq = (struct ibv_exp_cq_init_attr){ + .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN, + .res_domain = tmpl.rd, + }; + tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq); + if (tmpl.cq == NULL) { + ret = ENOMEM; + ERROR("%p: CQ creation failure: %s", + (void *)dev, strerror(ret)); + goto error; + } + DEBUG("priv->device_attr.max_qp_wr is %d", + priv->device_attr.max_qp_wr); + DEBUG("priv->device_attr.max_sge is %d", + priv->device_attr.max_sge); + attr.init = (struct ibv_exp_qp_init_attr){ + /* CQ to be associated with the send queue. */ + .send_cq = tmpl.cq, + /* CQ to be associated with the receive queue. */ + .recv_cq = tmpl.cq, + .cap = { + /* Max number of outstanding WRs. */ + .max_send_wr = ((priv->device_attr.max_qp_wr < desc) ? + priv->device_attr.max_qp_wr : + desc), + /* Max number of scatter/gather elements in a WR. */ + .max_send_sge = ((priv->device_attr.max_sge < + MLX5_PMD_SGE_WR_N) ? + priv->device_attr.max_sge : + MLX5_PMD_SGE_WR_N), +#if MLX5_PMD_MAX_INLINE > 0 + .max_inline_data = MLX5_PMD_MAX_INLINE, +#endif + }, + .qp_type = IBV_QPT_RAW_PACKET, + /* Do *NOT* enable this, completions events are managed per + * TX burst. */ + .sq_sig_all = 0, + .pd = priv->pd, + .res_domain = tmpl.rd, + .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD | + IBV_EXP_QP_INIT_ATTR_RES_DOMAIN), + }; + tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init); + if (tmpl.qp == NULL) { + ret = (errno ? errno : EINVAL); + ERROR("%p: QP creation failure: %s", + (void *)dev, strerror(ret)); + goto error; + } +#if MLX5_PMD_MAX_INLINE > 0 + /* ibv_create_qp() updates this value. */ + tmpl.max_inline = attr.init.cap.max_inline_data; +#endif + attr.mod = (struct ibv_exp_qp_attr){ + /* Move the QP to this state. */ + .qp_state = IBV_QPS_INIT, + /* Primary port number. 
*/ + .port_num = priv->port + }; + ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, + (IBV_EXP_QP_STATE | IBV_EXP_QP_PORT)); + if (ret) { + ERROR("%p: QP state to IBV_QPS_INIT failed: %s", + (void *)dev, strerror(ret)); + goto error; + } + ret = txq_alloc_elts(&tmpl, desc); + if (ret) { + ERROR("%p: TXQ allocation failed: %s", + (void *)dev, strerror(ret)); + goto error; + } + attr.mod = (struct ibv_exp_qp_attr){ + .qp_state = IBV_QPS_RTR + }; + ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE); + if (ret) { + ERROR("%p: QP state to IBV_QPS_RTR failed: %s", + (void *)dev, strerror(ret)); + goto error; + } + attr.mod.qp_state = IBV_QPS_RTS; + ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE); + if (ret) { + ERROR("%p: QP state to IBV_QPS_RTS failed: %s", + (void *)dev, strerror(ret)); + goto error; + } + attr.params = (struct ibv_exp_query_intf_params){ + .intf_scope = IBV_EXP_INTF_GLOBAL, + .intf = IBV_EXP_INTF_CQ, + .obj = tmpl.cq, + }; + tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status); + if (tmpl.if_cq == NULL) { + ret = EINVAL; + ERROR("%p: CQ interface family query failed with status %d", + (void *)dev, status); + goto error; + } + attr.params = (struct ibv_exp_query_intf_params){ + .intf_scope = IBV_EXP_INTF_GLOBAL, + .intf = IBV_EXP_INTF_QP_BURST, + .obj = tmpl.qp, +#ifdef HAVE_VERBS_VLAN_INSERTION + .intf_version = 1, +#endif +#ifdef HAVE_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR + /* Enable multi-packet send if supported. */ + .family_flags = + (priv->mps ? + IBV_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR : + 0), +#endif + }; + tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status); + if (tmpl.if_qp == NULL) { + ret = EINVAL; + ERROR("%p: QP interface family query failed with status %d", + (void *)dev, status); + goto error; + } + /* Clean up txq in case we're reinitializing it. */ + DEBUG("%p: cleaning-up old txq just in case", (void *)txq); + txq_cleanup(txq); + *txq = tmpl; + txq->poll_cnt = txq->if_cq->poll_cnt; +#if MLX5_PMD_MAX_INLINE > 0 + txq->send_pending_inline = txq->if_qp->send_pending_inline; +#ifdef HAVE_VERBS_VLAN_INSERTION + txq->send_pending_inline_vlan = txq->if_qp->send_pending_inline_vlan; +#endif +#endif +#if MLX5_PMD_SGE_WR_N > 1 + txq->send_pending_sg_list = txq->if_qp->send_pending_sg_list; +#ifdef HAVE_VERBS_VLAN_INSERTION + txq->send_pending_sg_list_vlan = txq->if_qp->send_pending_sg_list_vlan; +#endif +#endif + txq->send_pending = txq->if_qp->send_pending; +#ifdef HAVE_VERBS_VLAN_INSERTION + txq->send_pending_vlan = txq->if_qp->send_pending_vlan; +#endif + txq->send_flush = txq->if_qp->send_flush; + DEBUG("%p: txq updated with %p", (void *)txq, (void *)&tmpl); + /* Pre-register known mempools. */ + rte_mempool_walk(txq_mp2mr_iter, txq); + assert(ret == 0); + return 0; +error: + txq_cleanup(&tmpl); + assert(ret > 0); + return ret; +} + +/** + * DPDK callback to configure a TX queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * TX queue index. + * @param desc + * Number of descriptors to configure in queue. + * @param socket + * NUMA socket on which memory must be allocated. + * @param[in] conf + * Thresholds parameters. + * + * @return + * 0 on success, negative errno value on failure. 
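txq_setup() above builds every resource into a local tmpl structure and only commits it with *txq = tmpl once all steps have succeeded; the goto error path cleans up the temporary and leaves the caller's queue untouched. A schematic of that build-then-commit pattern with placeholder resources (malloc stands in for the verbs objects):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct queue { void *cq; void *qp; };

static void queue_cleanup(struct queue *q)
{
    free(q->qp);
    free(q->cq);
    q->qp = q->cq = NULL;
}

static int queue_setup(struct queue *q, int fail_qp)
{
    struct queue tmpl = { NULL, NULL };
    int ret;

    tmpl.cq = malloc(16);
    if (tmpl.cq == NULL) {
        ret = ENOMEM;
        goto error;
    }
    tmpl.qp = fail_qp ? NULL : malloc(16);
    if (tmpl.qp == NULL) {
        ret = ENOMEM;
        goto error;
    }
    queue_cleanup(q); /* drop whatever the caller had before */
    *q = tmpl;        /* commit only now */
    return 0;
error:
    queue_cleanup(&tmpl);
    return ret;
}

int main(void)
{
    struct queue q = { NULL, NULL };
    int ret1 = queue_setup(&q, 1); /* fails, q stays untouched */
    int ret2 = queue_setup(&q, 0); /* succeeds, q is committed */

    printf("failed setup: %d, retry: %d\n", ret1, ret2);
    queue_cleanup(&q);
    return 0;
}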
+ */ +int +mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_txconf *conf) +{ + struct priv *priv = dev->data->dev_private; + struct txq *txq = (*priv->txqs)[idx]; + int ret; + + if (mlx5_is_secondary()) + return -E_RTE_SECONDARY; + + priv_lock(priv); + DEBUG("%p: configuring queue %u for %u descriptors", + (void *)dev, idx, desc); + if (idx >= priv->txqs_n) { + ERROR("%p: queue index out of range (%u >= %u)", + (void *)dev, idx, priv->txqs_n); + priv_unlock(priv); + return -EOVERFLOW; + } + if (txq != NULL) { + DEBUG("%p: reusing already allocated queue index %u (%p)", + (void *)dev, idx, (void *)txq); + if (priv->started) { + priv_unlock(priv); + return -EEXIST; + } + (*priv->txqs)[idx] = NULL; + txq_cleanup(txq); + } else { + txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0, socket); + if (txq == NULL) { + ERROR("%p: unable to allocate queue index %u", + (void *)dev, idx); + priv_unlock(priv); + return -ENOMEM; + } + } + ret = txq_setup(dev, txq, desc, socket, conf); + if (ret) + rte_free(txq); + else { + txq->stats.idx = idx; + DEBUG("%p: adding TX queue %p to list", + (void *)dev, (void *)txq); + (*priv->txqs)[idx] = txq; + /* Update send callback. */ + dev->tx_pkt_burst = mlx5_tx_burst; + } + priv_unlock(priv); + return -ret; +} + +/** + * DPDK callback to release a TX queue. + * + * @param dpdk_txq + * Generic TX queue pointer. + */ +void +mlx5_tx_queue_release(void *dpdk_txq) +{ + struct txq *txq = (struct txq *)dpdk_txq; + struct priv *priv; + unsigned int i; + + if (mlx5_is_secondary()) + return; + + if (txq == NULL) + return; + priv = txq->priv; + priv_lock(priv); + for (i = 0; (i != priv->txqs_n); ++i) + if ((*priv->txqs)[i] == txq) { + DEBUG("%p: removing TX queue %p from list", + (void *)priv->dev, (void *)txq); + (*priv->txqs)[i] = NULL; + break; + } + txq_cleanup(txq); + rte_free(txq); + priv_unlock(priv); +} + +/** + * DPDK callback for TX in secondary processes. + * + * This function configures all queues from primary process information + * if necessary before reverting to the normal TX burst callback. + * + * @param dpdk_txq + * Generic pointer to TX queue structure. + * @param[in] pkts + * Packets to transmit. + * @param pkts_n + * Number of packets in array. + * + * @return + * Number of packets successfully transmitted (<= pkts_n). + */ +uint16_t +mlx5_tx_burst_secondary_setup(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n) +{ + struct txq *txq = dpdk_txq; + struct priv *priv = mlx5_secondary_data_setup(txq->priv); + struct priv *primary_priv; + unsigned int index; + + if (priv == NULL) + return 0; + primary_priv = + mlx5_secondary_data[priv->dev->data->port_id].primary_priv; + /* Look for queue index in both private structures. */ + for (index = 0; index != priv->txqs_n; ++index) + if (((*primary_priv->txqs)[index] == txq) || + ((*priv->txqs)[index] == txq)) + break; + if (index == priv->txqs_n) + return 0; + txq = (*priv->txqs)[index]; + return priv->dev->tx_pkt_burst(txq, pkts, pkts_n); +} diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h new file mode 100644 index 00000000..a824787f --- /dev/null +++ b/drivers/net/mlx5/mlx5_utils.h @@ -0,0 +1,184 @@ +/*- + * BSD LICENSE + * + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RTE_PMD_MLX5_UTILS_H_ +#define RTE_PMD_MLX5_UTILS_H_ + +#include <stddef.h> +#include <stdio.h> +#include <limits.h> +#include <assert.h> +#include <errno.h> + +#include "mlx5_defs.h" + +/* Bit-field manipulation. */ +#define BITFIELD_DECLARE(bf, type, size) \ + type bf[(((size_t)(size) / (sizeof(type) * CHAR_BIT)) + \ + !!((size_t)(size) % (sizeof(type) * CHAR_BIT)))] +#define BITFIELD_DEFINE(bf, type, size) \ + BITFIELD_DECLARE((bf), type, (size)) = { 0 } +#define BITFIELD_SET(bf, b) \ + (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \ + (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] |= \ + ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))) +#define BITFIELD_RESET(bf, b) \ + (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \ + (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] &= \ + ~((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))) +#define BITFIELD_ISSET(bf, b) \ + (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \ + !!(((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] & \ + ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))) + +/* Save and restore errno around argument evaluation. */ +#define ERRNO_SAFE(x) ((errno = (int []){ errno, ((x), 0) }[0])) + +/* + * Helper macros to work around __VA_ARGS__ limitations in a C99 compliant + * manner. + */ +#define PMD_DRV_LOG_STRIP(a, b) a +#define PMD_DRV_LOG_OPAREN ( +#define PMD_DRV_LOG_CPAREN ) +#define PMD_DRV_LOG_COMMA , + +/* Return the file name part of a path. */ +static inline const char * +pmd_drv_log_basename(const char *s) +{ + const char *n = s; + + while (*n) + if (*(n++) == '/') + s = n; + return s; +} + +/* + * When debugging is enabled (NDEBUG not defined), file, line and function + * information replace the driver name (MLX5_DRIVER_NAME) in log messages. + */ +#ifndef NDEBUG + +#define PMD_DRV_LOG___(level, ...) \ + ERRNO_SAFE(RTE_LOG(level, PMD, __VA_ARGS__)) +#define PMD_DRV_LOG__(level, ...) \ + PMD_DRV_LOG___(level, "%s:%u: %s(): " __VA_ARGS__) +#define PMD_DRV_LOG_(level, s, ...) 
\ + PMD_DRV_LOG__(level, \ + s "\n" PMD_DRV_LOG_COMMA \ + pmd_drv_log_basename(__FILE__) PMD_DRV_LOG_COMMA \ + __LINE__ PMD_DRV_LOG_COMMA \ + __func__, \ + __VA_ARGS__) + +#else /* NDEBUG */ + +#define PMD_DRV_LOG___(level, ...) \ + ERRNO_SAFE(RTE_LOG(level, PMD, MLX5_DRIVER_NAME ": " __VA_ARGS__)) +#define PMD_DRV_LOG__(level, ...) \ + PMD_DRV_LOG___(level, __VA_ARGS__) +#define PMD_DRV_LOG_(level, s, ...) \ + PMD_DRV_LOG__(level, s "\n", __VA_ARGS__) + +#endif /* NDEBUG */ + +/* Generic printf()-like logging macro with automatic line feed. */ +#define PMD_DRV_LOG(level, ...) \ + PMD_DRV_LOG_(level, \ + __VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \ + PMD_DRV_LOG_CPAREN) + +/* + * Like assert(), DEBUG() becomes a no-op and claim_zero() does not perform + * any check when debugging is disabled. + */ +#ifndef NDEBUG + +#define DEBUG(...) PMD_DRV_LOG(DEBUG, __VA_ARGS__) +#define claim_zero(...) assert((__VA_ARGS__) == 0) + +#else /* NDEBUG */ + +#define DEBUG(...) (void)0 +#define claim_zero(...) (__VA_ARGS__) + +#endif /* NDEBUG */ + +#define INFO(...) PMD_DRV_LOG(INFO, __VA_ARGS__) +#define WARN(...) PMD_DRV_LOG(WARNING, __VA_ARGS__) +#define ERROR(...) PMD_DRV_LOG(ERR, __VA_ARGS__) + +/* Convenience macros for accessing mbuf fields. */ +#define NEXT(m) ((m)->next) +#define DATA_LEN(m) ((m)->data_len) +#define PKT_LEN(m) ((m)->pkt_len) +#define DATA_OFF(m) ((m)->data_off) +#define SET_DATA_OFF(m, o) ((m)->data_off = (o)) +#define NB_SEGS(m) ((m)->nb_segs) +#define PORT(m) ((m)->port) + +/* Transpose flags. Useful to convert IBV to DPDK flags. */ +#define TRANSPOSE(val, from, to) \ + (((from) >= (to)) ? \ + (((val) & (from)) / ((from) / (to))) : \ + (((val) & (from)) * ((to) / (from)))) + +/* Allocate a buffer on the stack and fill it with a printf format string. */ +#define MKSTR(name, ...) \ + char name[snprintf(NULL, 0, __VA_ARGS__) + 1]; \ + \ + snprintf(name, sizeof(name), __VA_ARGS__) + +/** + * Return nearest power of two above input value. + * + * @param v + * Input value. + * + * @return + * Nearest power of two above input value. + */ +static inline unsigned int +log2above(unsigned int v) +{ + unsigned int l; + unsigned int r; + + for (l = 0, r = 0; (v >> 1); ++l, v >>= 1) + r |= (v & 1); + return l + r; +} + +#endif /* RTE_PMD_MLX5_UTILS_H_ */ diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c new file mode 100644 index 00000000..ea7af1e4 --- /dev/null +++ b/drivers/net/mlx5/mlx5_vlan.c @@ -0,0 +1,234 @@ +/*- + * BSD LICENSE + * + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
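Despite its doxygen wording, log2above() above returns the rounded-up base-2 logarithm (an exponent, not the power of two itself): it counts how many times the value can be shifted right, then adds one if any lower bit was set along the way. A few spot checks, reusing the function verbatim:

#include <stdio.h>

static inline unsigned int
log2above(unsigned int v)
{
    unsigned int l;
    unsigned int r;

    for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
        r |= (v & 1);
    return l + r;
}

int main(void)
{
    unsigned int samples[] = { 1, 2, 3, 4, 5, 8, 9, 1000 };
    size_t i;

    for (i = 0; i < sizeof(samples) / sizeof(samples[0]); ++i)
        printf("log2above(%u) = %u\n", samples[i], log2above(samples[i]));
    return 0;
}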
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <stddef.h> +#include <errno.h> +#include <assert.h> +#include <stdint.h> + +/* DPDK headers don't like -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <rte_ethdev.h> +#include <rte_common.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +#include "mlx5_utils.h" +#include "mlx5.h" +#include "mlx5_autoconf.h" + +/** + * Configure a VLAN filter. + * + * @param dev + * Pointer to Ethernet device structure. + * @param vlan_id + * VLAN ID to filter. + * @param on + * Toggle filter. + * + * @return + * 0 on success, errno value on failure. + */ +static int +vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct priv *priv = dev->data->dev_private; + unsigned int i; + + DEBUG("%p: %s VLAN filter ID %" PRIu16, + (void *)dev, (on ? "enable" : "disable"), vlan_id); + assert(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter)); + for (i = 0; (i != priv->vlan_filter_n); ++i) + if (priv->vlan_filter[i] == vlan_id) + break; + /* Check if there's room for another VLAN filter. */ + if (i == RTE_DIM(priv->vlan_filter)) + return ENOMEM; + if (i < priv->vlan_filter_n) { + assert(priv->vlan_filter_n != 0); + /* Enabling an existing VLAN filter has no effect. */ + if (on) + return 0; + /* Remove VLAN filter from list. */ + --priv->vlan_filter_n; + memmove(&priv->vlan_filter[i], + &priv->vlan_filter[i + 1], + priv->vlan_filter_n - i); + priv->vlan_filter[priv->vlan_filter_n] = 0; + } else { + assert(i == priv->vlan_filter_n); + /* Disabling an unknown VLAN filter has no effect. */ + if (!on) + return 0; + /* Add new VLAN filter. */ + priv->vlan_filter[priv->vlan_filter_n] = vlan_id; + ++priv->vlan_filter_n; + } + /* Rehash flows in all hash RX queues. */ + priv_mac_addrs_disable(priv); + priv_special_flow_disable_all(priv); + return priv_rehash_flows(priv); +} + +/** + * DPDK callback to configure a VLAN filter. + * + * @param dev + * Pointer to Ethernet device structure. + * @param vlan_id + * VLAN ID to filter. + * @param on + * Toggle filter. + * + * @return + * 0 on success, negative errno value on failure. + */ +int +mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct priv *priv = dev->data->dev_private; + int ret; + + priv_lock(priv); + ret = vlan_filter_set(dev, vlan_id, on); + priv_unlock(priv); + assert(ret >= 0); + return -ret; +} + +/** + * Set/reset VLAN stripping for a specific queue. + * + * @param priv + * Pointer to private structure. + * @param idx + * RX queue index. + * @param on + * Enable/disable VLAN stripping. + */ +static void +priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on) +{ + struct rxq *rxq = (*priv->rxqs)[idx]; +#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS + struct ibv_exp_wq_attr mod; + uint16_t vlan_offloads = + (on ? 
IBV_EXP_RECEIVE_WQ_CVLAN_STRIP : 0) | + 0; + int err; + + DEBUG("set VLAN offloads 0x%x for port %d queue %d", + vlan_offloads, rxq->port_id, idx); + mod = (struct ibv_exp_wq_attr){ + .attr_mask = IBV_EXP_WQ_ATTR_VLAN_OFFLOADS, + .vlan_offloads = vlan_offloads, + }; + + err = ibv_exp_modify_wq(rxq->wq, &mod); + if (err) { + ERROR("%p: failed to modified stripping mode: %s", + (void *)priv, strerror(err)); + return; + } + +#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ + + /* Update related bits in RX queue. */ + rxq->vlan_strip = !!on; +} + +/** + * Callback to set/reset VLAN stripping for a specific queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param queue + * RX queue index. + * @param on + * Enable/disable VLAN stripping. + */ +void +mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) +{ + struct priv *priv = dev->data->dev_private; + + /* Validate hw support */ + if (!priv->hw_vlan_strip) { + ERROR("VLAN stripping is not supported"); + return; + } + + /* Validate queue number */ + if (queue >= priv->rxqs_n) { + ERROR("VLAN stripping, invalid queue number %d", queue); + return; + } + + priv_lock(priv); + priv_vlan_strip_queue_set(priv, queue, on); + priv_unlock(priv); +} + +/** + * Callback to set/reset VLAN offloads for a port. + * + * @param dev + * Pointer to Ethernet device structure. + * @param mask + * VLAN offload bit mask. + */ +void +mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + struct priv *priv = dev->data->dev_private; + unsigned int i; + + if (mask & ETH_VLAN_STRIP_MASK) { + int hw_vlan_strip = dev->data->dev_conf.rxmode.hw_vlan_strip; + + if (!priv->hw_vlan_strip) { + ERROR("VLAN stripping is not supported"); + return; + } + + /* Run on every RX queue and set/reset VLAN stripping. */ + priv_lock(priv); + for (i = 0; (i != priv->rxqs_n); i++) + priv_vlan_strip_queue_set(priv, i, hw_vlan_strip); + priv_unlock(priv); + } +} diff --git a/drivers/net/mlx5/rte_pmd_mlx5_version.map b/drivers/net/mlx5/rte_pmd_mlx5_version.map new file mode 100644 index 00000000..ad607bbe --- /dev/null +++ b/drivers/net/mlx5/rte_pmd_mlx5_version.map @@ -0,0 +1,3 @@ +DPDK_2.2 { + local: *; +}; diff --git a/drivers/net/mpipe/Makefile b/drivers/net/mpipe/Makefile new file mode 100644 index 00000000..46f046d5 --- /dev/null +++ b/drivers/net/mpipe/Makefile @@ -0,0 +1,47 @@ +# +# Copyright 2015 EZchip Semiconductor Ltd. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
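vlan_filter_set() further above keeps the configured VLAN IDs in a small fixed-size array: enabling an ID appends it, disabling one shifts the tail of the array down over it, and the flows are then rehashed so the new list takes effect. A reduced sketch of that list maintenance; the table size and element type here are illustrative, not the driver's actual definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_VLAN_IDS 8

static uint16_t vlan_filter[MAX_VLAN_IDS];
static unsigned int vlan_filter_n;

static int vlan_filter_toggle(uint16_t vlan_id, int on)
{
    unsigned int i;

    for (i = 0; i != vlan_filter_n; ++i)
        if (vlan_filter[i] == vlan_id)
            break;
    if (i == MAX_VLAN_IDS)
        return -1; /* table full */
    if (i < vlan_filter_n) {
        if (on)
            return 0; /* already enabled */
        --vlan_filter_n; /* close the gap */
        memmove(&vlan_filter[i], &vlan_filter[i + 1],
                (vlan_filter_n - i) * sizeof(vlan_filter[0]));
        vlan_filter[vlan_filter_n] = 0;
    } else {
        if (!on)
            return 0; /* already disabled */
        vlan_filter[vlan_filter_n++] = vlan_id;
    }
    return 0;
}

int main(void)
{
    vlan_filter_toggle(100, 1);
    vlan_filter_toggle(200, 1);
    vlan_filter_toggle(100, 0);
    printf("%u filter(s), first=%u\n", vlan_filter_n, vlan_filter[0]);
    return 0;
}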
IN NO EVENT SHALL THE +# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +include $(RTE_SDK)/mk/rte.vars.mk + +# library name +LIB = librte_pmd_mpipe.a + +CFLAGS += $(WERROR_FLAGS) -O3 +LDLIBS += -lgxio + +EXPORT_MAP := rte_pmd_mpipe_version.map + +LIBABIVER := 1 + +SRCS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += mpipe_tilegx.c + +DEPDIRS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += lib/librte_eal lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += lib/librte_mempool lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += lib/librte_net lib/librte_malloc + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/mpipe/mpipe_tilegx.c b/drivers/net/mpipe/mpipe_tilegx.c new file mode 100644 index 00000000..adcbc19e --- /dev/null +++ b/drivers/net/mpipe/mpipe_tilegx.c @@ -0,0 +1,1657 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2015 EZchip Semiconductor Ltd. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of EZchip Semiconductor nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <unistd.h> + +#include <rte_eal.h> +#include <rte_dev.h> +#include <rte_eal_memconfig.h> +#include <rte_ethdev.h> +#include <rte_malloc.h> +#include <rte_cycles.h> + +#include <arch/mpipe_xaui_def.h> +#include <arch/mpipe_gbe_def.h> + +#include <gxio/mpipe.h> + +#ifdef RTE_LIBRTE_MPIPE_PMD_DEBUG +#define PMD_DEBUG_RX(...) RTE_LOG(DEBUG, PMD, __VA_ARGS__) +#define PMD_DEBUG_TX(...) RTE_LOG(DEBUG, PMD, __VA_ARGS__) +#else +#define PMD_DEBUG_RX(...) +#define PMD_DEBUG_TX(...) 
+#endif + +#define MPIPE_MAX_CHANNELS 128 +#define MPIPE_TX_MAX_QUEUES 128 +#define MPIPE_RX_MAX_QUEUES 16 +#define MPIPE_TX_DESCS 512 +#define MPIPE_RX_BUCKETS 256 +#define MPIPE_RX_STACK_SIZE 65536 +#define MPIPE_RX_IP_ALIGN 2 +#define MPIPE_BSM_ALIGN 128 + +#define MPIPE_LINK_UPDATE_TIMEOUT 10 /* s */ +#define MPIPE_LINK_UPDATE_INTERVAL 100000 /* us */ + +struct mpipe_channel_config { + int enable; + int first_bucket; + int num_buckets; + int head_room; + gxio_mpipe_rules_stacks_t stacks; +}; + +struct mpipe_context { + rte_spinlock_t lock; + gxio_mpipe_context_t context; + struct mpipe_channel_config channels[MPIPE_MAX_CHANNELS]; +}; + +/* Per-core local data. */ +struct mpipe_local { + int mbuf_push_debt[RTE_MAX_ETHPORTS]; /* Buffer push debt. */ +} __rte_cache_aligned; + +#define MPIPE_BUF_DEBT_THRESHOLD 32 +static __thread struct mpipe_local mpipe_local; +static struct mpipe_context mpipe_contexts[GXIO_MPIPE_INSTANCE_MAX]; +static int mpipe_instances; +static const char *drivername = "MPIPE PMD"; + +/* Per queue statistics. */ +struct mpipe_queue_stats { + uint64_t packets, bytes, errors, nomem; +}; + +/* Common tx/rx queue fields. */ +struct mpipe_queue { + struct mpipe_dev_priv *priv; /* "priv" data of its device. */ + uint16_t nb_desc; /* Number of tx descriptors. */ + uint16_t port_id; /* Device index. */ + uint16_t stat_idx; /* Queue stats index. */ + uint8_t queue_idx; /* Queue index. */ + uint8_t link_status; /* 0 = link down. */ + struct mpipe_queue_stats stats; /* Stat data for the queue. */ +}; + +/* Transmit queue description. */ +struct mpipe_tx_queue { + struct mpipe_queue q; /* Common stuff. */ +}; + +/* Receive queue description. */ +struct mpipe_rx_queue { + struct mpipe_queue q; /* Common stuff. */ + gxio_mpipe_iqueue_t iqueue; /* mPIPE iqueue. */ + gxio_mpipe_idesc_t *next_desc; /* Next idesc to process. */ + int avail_descs; /* Number of available descs. */ + void *rx_ring_mem; /* DMA ring memory. */ +}; + +struct mpipe_dev_priv { + gxio_mpipe_context_t *context; /* mPIPE context. */ + gxio_mpipe_link_t link; /* mPIPE link for the device. */ + gxio_mpipe_equeue_t equeue; /* mPIPE equeue. */ + unsigned equeue_size; /* mPIPE equeue desc count. */ + int instance; /* mPIPE instance. */ + int ering; /* mPIPE eDMA ring. */ + int stack; /* mPIPE buffer stack. */ + int channel; /* Device channel. */ + int port_id; /* DPDK port index. */ + struct rte_eth_dev *eth_dev; /* DPDK device. */ + struct rte_mbuf **tx_comps; /* TX completion array. */ + struct rte_mempool *rx_mpool; /* mpool used by the rx queues. */ + unsigned rx_offset; /* Receive head room. */ + unsigned rx_size_code; /* mPIPE rx buffer size code. */ + int is_xaui:1, /* Is this an xgbe or gbe? */ + initialized:1, /* Initialized port? */ + running:1; /* Running port? */ + struct ether_addr mac_addr; /* MAC address. */ + unsigned nb_rx_queues; /* Configured tx queues. */ + unsigned nb_tx_queues; /* Configured rx queues. */ + int first_bucket; /* mPIPE bucket start index. */ + int first_ring; /* mPIPE notif ring start index. */ + int notif_group; /* mPIPE notif group. */ + rte_atomic32_t dp_count __rte_cache_aligned; /* DP Entry count. 
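/*
 * mpipe_local.mbuf_push_debt[] and MPIPE_BUF_DEBT_THRESHOLD above are used
 * by the RX/TX paths further down: RX may skip refilling the hardware
 * buffer stack and record a "debt" instead, and TX repays that debt by
 * letting the hardware return the transmitted buffer to the stack
 * (desc.hwb) rather than recycling the mbuf in software. A minimal sketch
 * of that accounting, with hypothetical names (DEBT_THRESHOLD, MAX_PORTS):
 */
#include <stdbool.h>

#define DEBT_THRESHOLD 32
#define MAX_PORTS 64

static __thread int push_debt[MAX_PORTS];	/* one counter per port */

/* RX side: true if refilling the buffer stack can be deferred. */
static bool rx_defer_refill(int port)
{
	if (push_debt[port] < DEBT_THRESHOLD) {
		push_debt[port]++;
		return true;	/* skip allocating a replacement mbuf */
	}
	return false;		/* caller allocates and pushes a buffer */
}

/* TX side: true if the hardware should reclaim this buffer itself. */
static bool tx_repay_debt(int port)
{
	if (push_debt[port] > 0) {
		push_debt[port]--;
		return true;	/* corresponds to setting desc.hwb = 1 */
	}
	return false;		/* keep the mbuf for software completion */
}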
*/ + int tx_stat_mapping[RTE_ETHDEV_QUEUE_STAT_CNTRS]; + int rx_stat_mapping[RTE_ETHDEV_QUEUE_STAT_CNTRS]; +}; + +#define mpipe_priv(dev) \ + ((struct mpipe_dev_priv*)(dev)->data->dev_private) + +#define mpipe_name(priv) \ + ((priv)->eth_dev->data->name) + +#define mpipe_rx_queue(priv, n) \ + ((struct mpipe_rx_queue *)(priv)->eth_dev->data->rx_queues[n]) + +#define mpipe_tx_queue(priv, n) \ + ((struct mpipe_tx_queue *)(priv)->eth_dev->data->tx_queues[n]) + +static void +mpipe_xmit_flush(struct mpipe_dev_priv *priv); + +static void +mpipe_recv_flush(struct mpipe_dev_priv *priv); + +static int mpipe_equeue_sizes[] = { + [GXIO_MPIPE_EQUEUE_ENTRY_512] = 512, + [GXIO_MPIPE_EQUEUE_ENTRY_2K] = 2048, + [GXIO_MPIPE_EQUEUE_ENTRY_8K] = 8192, + [GXIO_MPIPE_EQUEUE_ENTRY_64K] = 65536, +}; + +static int mpipe_iqueue_sizes[] = { + [GXIO_MPIPE_IQUEUE_ENTRY_128] = 128, + [GXIO_MPIPE_IQUEUE_ENTRY_512] = 512, + [GXIO_MPIPE_IQUEUE_ENTRY_2K] = 2048, + [GXIO_MPIPE_IQUEUE_ENTRY_64K] = 65536, +}; + +static int mpipe_buffer_sizes[] = { + [GXIO_MPIPE_BUFFER_SIZE_128] = 128, + [GXIO_MPIPE_BUFFER_SIZE_256] = 256, + [GXIO_MPIPE_BUFFER_SIZE_512] = 512, + [GXIO_MPIPE_BUFFER_SIZE_1024] = 1024, + [GXIO_MPIPE_BUFFER_SIZE_1664] = 1664, + [GXIO_MPIPE_BUFFER_SIZE_4096] = 4096, + [GXIO_MPIPE_BUFFER_SIZE_10368] = 10368, + [GXIO_MPIPE_BUFFER_SIZE_16384] = 16384, +}; + +static gxio_mpipe_context_t * +mpipe_context(int instance) +{ + if (instance < 0 || instance >= mpipe_instances) + return NULL; + return &mpipe_contexts[instance].context; +} + +static int mpipe_channel_config(int instance, int channel, + struct mpipe_channel_config *config) +{ + struct mpipe_channel_config *data; + struct mpipe_context *context; + gxio_mpipe_rules_t rules; + int idx, rc = 0; + + if (instance < 0 || instance >= mpipe_instances || + channel < 0 || channel >= MPIPE_MAX_CHANNELS) + return -EINVAL; + + context = &mpipe_contexts[instance]; + + rte_spinlock_lock(&context->lock); + + gxio_mpipe_rules_init(&rules, &context->context); + + for (idx = 0; idx < MPIPE_MAX_CHANNELS; idx++) { + data = (channel == idx) ? config : &context->channels[idx]; + + if (!data->enable) + continue; + + rc = gxio_mpipe_rules_begin(&rules, data->first_bucket, + data->num_buckets, &data->stacks); + if (rc < 0) { + goto done; + } + + rc = gxio_mpipe_rules_add_channel(&rules, idx); + if (rc < 0) { + goto done; + } + + rc = gxio_mpipe_rules_set_headroom(&rules, data->head_room); + if (rc < 0) { + goto done; + } + } + + rc = gxio_mpipe_rules_commit(&rules); + if (rc == 0) { + memcpy(&context->channels[channel], config, sizeof(*config)); + } + +done: + rte_spinlock_unlock(&context->lock); + + return rc; +} + +static int +mpipe_get_size_index(int *array, int count, int size, + bool roundup) +{ + int i, last = -1; + + for (i = 0; i < count && array[i] < size; i++) { + if (array[i]) + last = i; + } + + if (roundup) + return i < count ? (int)i : -ENOENT; + else + return last >= 0 ? last : -ENOENT; +} + +static int +mpipe_calc_size(int *array, int count, int size) +{ + int index = mpipe_get_size_index(array, count, size, 1); + return index < 0 ? 
index : array[index]; +} + +static int mpipe_equeue_size(int size) +{ + int result; + result = mpipe_calc_size(mpipe_equeue_sizes, + RTE_DIM(mpipe_equeue_sizes), size); + return result; +} + +static int mpipe_iqueue_size(int size) +{ + int result; + result = mpipe_calc_size(mpipe_iqueue_sizes, + RTE_DIM(mpipe_iqueue_sizes), size); + return result; +} + +static int mpipe_buffer_size_index(int size) +{ + int result; + result = mpipe_get_size_index(mpipe_buffer_sizes, + RTE_DIM(mpipe_buffer_sizes), size, 0); + return result; +} + +static inline int +mpipe_dev_atomic_read_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = link; + struct rte_eth_link *src = &(dev->data->dev_link); + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +static inline int +mpipe_dev_atomic_write_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = &(dev->data->dev_link); + struct rte_eth_link *src = link; + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +static void +mpipe_infos_get(struct rte_eth_dev *dev __rte_unused, + struct rte_eth_dev_info *dev_info) +{ + dev_info->min_rx_bufsize = 128; + dev_info->max_rx_pktlen = 1518; + dev_info->max_tx_queues = MPIPE_TX_MAX_QUEUES; + dev_info->max_rx_queues = MPIPE_RX_MAX_QUEUES; + dev_info->max_mac_addrs = 1; + dev_info->rx_offload_capa = 0; + dev_info->tx_offload_capa = 0; +} + +static int +mpipe_configure(struct rte_eth_dev *dev) +{ + struct mpipe_dev_priv *priv = mpipe_priv(dev); + + if (dev->data->nb_tx_queues > MPIPE_TX_MAX_QUEUES) { + RTE_LOG(ERR, PMD, "%s: Too many tx queues: %d > %d\n", + mpipe_name(priv), dev->data->nb_tx_queues, + MPIPE_TX_MAX_QUEUES); + return -EINVAL; + } + priv->nb_tx_queues = dev->data->nb_tx_queues; + + if (dev->data->nb_rx_queues > MPIPE_RX_MAX_QUEUES) { + RTE_LOG(ERR, PMD, "%s: Too many rx queues: %d > %d\n", + mpipe_name(priv), dev->data->nb_rx_queues, + MPIPE_RX_MAX_QUEUES); + } + priv->nb_rx_queues = dev->data->nb_rx_queues; + + return 0; +} + +static inline int +mpipe_link_compare(struct rte_eth_link *link1, + struct rte_eth_link *link2) +{ + return (*(uint64_t *)link1 == *(uint64_t *)link2) + ? 
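/*
 * mpipe_get_size_index()/mpipe_calc_size() above map an arbitrary requested
 * size onto the discrete sizes the hardware supports (equeue/iqueue entry
 * counts, buffer sizes), rounding up or down as asked. A small standalone
 * illustration of the round-up case over a sorted table of supported
 * values (the table below mirrors the iqueue sizes listed above):
 */
#include <stdio.h>

static const int supported[] = { 128, 512, 2048, 65536 };

/* Smallest supported value >= want, or -1 if nothing is large enough. */
static int round_up_supported(int want)
{
	unsigned int i;

	for (i = 0; i < sizeof(supported) / sizeof(supported[0]); i++)
		if (supported[i] >= want)
			return supported[i];
	return -1;
}

int main(void)
{
	printf("%d\n", round_up_supported(300));	/* prints 512 */
	printf("%d\n", round_up_supported(70000));	/* prints -1 */
	return 0;
}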
-1 : 0; +} + +static int +mpipe_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ + struct mpipe_dev_priv *priv = mpipe_priv(dev); + struct rte_eth_link old, new; + int64_t state, speed; + int count, rc; + + memset(&old, 0, sizeof(old)); + memset(&new, 0, sizeof(new)); + mpipe_dev_atomic_read_link_status(dev, &old); + + for (count = 0, rc = 0; count < MPIPE_LINK_UPDATE_TIMEOUT; count++) { + if (!priv->initialized) + break; + + state = gxio_mpipe_link_get_attr(&priv->link, + GXIO_MPIPE_LINK_CURRENT_STATE); + if (state < 0) + break; + + speed = state & GXIO_MPIPE_LINK_SPEED_MASK; + + new.link_autoneg = (dev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_AUTONEG); + if (speed == GXIO_MPIPE_LINK_1G) { + new.link_speed = ETH_SPEED_NUM_1G; + new.link_duplex = ETH_LINK_FULL_DUPLEX; + new.link_status = ETH_LINK_UP; + } else if (speed == GXIO_MPIPE_LINK_10G) { + new.link_speed = ETH_SPEED_NUM_10G; + new.link_duplex = ETH_LINK_FULL_DUPLEX; + new.link_status = ETH_LINK_UP; + } + + rc = mpipe_link_compare(&old, &new); + if (rc == 0 || !wait_to_complete) + break; + + rte_delay_us(MPIPE_LINK_UPDATE_INTERVAL); + } + + mpipe_dev_atomic_write_link_status(dev, &new); + return rc; +} + +static int +mpipe_set_link(struct rte_eth_dev *dev, int up) +{ + struct mpipe_dev_priv *priv = mpipe_priv(dev); + int rc; + + rc = gxio_mpipe_link_set_attr(&priv->link, + GXIO_MPIPE_LINK_DESIRED_STATE, + up ? GXIO_MPIPE_LINK_ANYSPEED : 0); + if (rc < 0) { + RTE_LOG(ERR, PMD, "%s: Failed to set link %s.\n", + mpipe_name(priv), up ? "up" : "down"); + } else { + mpipe_link_update(dev, 0); + } + + return rc; +} + +static int +mpipe_set_link_up(struct rte_eth_dev *dev) +{ + return mpipe_set_link(dev, 1); +} + +static int +mpipe_set_link_down(struct rte_eth_dev *dev) +{ + return mpipe_set_link(dev, 0); +} + +static inline void +mpipe_dp_enter(struct mpipe_dev_priv *priv) +{ + __insn_mtspr(SPR_DSTREAM_PF, 0); + rte_atomic32_inc(&priv->dp_count); +} + +static inline void +mpipe_dp_exit(struct mpipe_dev_priv *priv) +{ + rte_atomic32_dec(&priv->dp_count); +} + +static inline void +mpipe_dp_wait(struct mpipe_dev_priv *priv) +{ + while (rte_atomic32_read(&priv->dp_count) != 0) { + rte_pause(); + } +} + +static inline int +mpipe_mbuf_stack_index(struct mpipe_dev_priv *priv, struct rte_mbuf *mbuf) +{ + return (mbuf->port < RTE_MAX_ETHPORTS) ? 
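/*
 * mpipe_dp_enter()/mpipe_dp_exit()/mpipe_dp_wait() above form a lightweight
 * quiescence gate: every RX/TX burst bumps an atomic counter, and the
 * control path (see mpipe_stop() below) marks the queues link-down, issues
 * a write barrier and spins until the counter drains before tearing
 * resources down. A portable sketch of the same idea using C11 atomics:
 */
#include <stdatomic.h>

static atomic_int dp_count;

static inline void dp_enter(void) { atomic_fetch_add(&dp_count, 1); }
static inline void dp_exit(void)  { atomic_fetch_sub(&dp_count, 1); }

/* Control path: block until every in-flight datapath burst has finished. */
static inline void dp_wait(void)
{
	while (atomic_load(&dp_count) != 0)
		;	/* the driver calls rte_pause() here */
}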
+ mpipe_priv(&rte_eth_devices[mbuf->port])->stack : + priv->stack; +} + +static inline struct rte_mbuf * +mpipe_recv_mbuf(struct mpipe_dev_priv *priv, gxio_mpipe_idesc_t *idesc, + int in_port) +{ + void *va = gxio_mpipe_idesc_get_va(idesc); + uint16_t size = gxio_mpipe_idesc_get_xfer_size(idesc); + struct rte_mbuf *mbuf = RTE_PTR_SUB(va, priv->rx_offset); + + rte_pktmbuf_reset(mbuf); + mbuf->data_off = (uintptr_t)va - (uintptr_t)mbuf->buf_addr; + mbuf->port = in_port; + mbuf->data_len = size; + mbuf->pkt_len = size; + mbuf->hash.rss = gxio_mpipe_idesc_get_flow_hash(idesc); + + PMD_DEBUG_RX("%s: RX mbuf %p, buffer %p, buf_addr %p, size %d\n", + mpipe_name(priv), mbuf, va, mbuf->buf_addr, size); + + return mbuf; +} + +static inline void +mpipe_recv_push(struct mpipe_dev_priv *priv, struct rte_mbuf *mbuf) +{ + const int offset = RTE_PKTMBUF_HEADROOM + MPIPE_RX_IP_ALIGN; + void *buf_addr = RTE_PTR_ADD(mbuf->buf_addr, offset); + + gxio_mpipe_push_buffer(priv->context, priv->stack, buf_addr); + PMD_DEBUG_RX("%s: Pushed mbuf %p, buffer %p into stack %d\n", + mpipe_name(priv), mbuf, buf_addr, priv->stack); +} + +static inline void +mpipe_recv_fill_stack(struct mpipe_dev_priv *priv, int count) +{ + struct rte_mbuf *mbuf; + int i; + + for (i = 0; i < count; i++) { + mbuf = __rte_mbuf_raw_alloc(priv->rx_mpool); + if (!mbuf) + break; + mpipe_recv_push(priv, mbuf); + } + + PMD_DEBUG_RX("%s: Filled %d/%d buffers\n", mpipe_name(priv), i, count); +} + +static inline void +mpipe_recv_flush_stack(struct mpipe_dev_priv *priv) +{ + const int offset = priv->rx_offset & ~RTE_MEMPOOL_ALIGN_MASK; + uint8_t in_port = priv->port_id; + struct rte_mbuf *mbuf; + void *va; + + while (1) { + va = gxio_mpipe_pop_buffer(priv->context, priv->stack); + if (!va) + break; + mbuf = RTE_PTR_SUB(va, offset); + + PMD_DEBUG_RX("%s: Flushing mbuf %p, va %p\n", + mpipe_name(priv), mbuf, va); + + mbuf->data_off = (uintptr_t)va - (uintptr_t)mbuf->buf_addr; + mbuf->refcnt = 1; + mbuf->nb_segs = 1; + mbuf->port = in_port; + mbuf->packet_type = 0; + mbuf->data_len = 0; + mbuf->pkt_len = 0; + + __rte_mbuf_raw_free(mbuf); + } +} + +static void +mpipe_register_segment(struct mpipe_dev_priv *priv, const struct rte_memseg *ms) +{ + size_t size = ms->hugepage_sz; + uint8_t *addr, *end; + int rc; + + for (addr = ms->addr, end = addr + ms->len; addr < end; addr += size) { + rc = gxio_mpipe_register_page(priv->context, priv->stack, addr, + size, 0); + if (rc < 0) + break; + } + + if (rc < 0) { + RTE_LOG(ERR, PMD, "%s: Could not register memseg @%p, %d.\n", + mpipe_name(priv), ms->addr, rc); + } else { + RTE_LOG(DEBUG, PMD, "%s: Registered segment %p - %p\n", + mpipe_name(priv), ms->addr, + RTE_PTR_ADD(ms->addr, ms->len - 1)); + } +} + +static int +mpipe_recv_init(struct mpipe_dev_priv *priv) +{ + const struct rte_memseg *seg = rte_eal_get_physmem_layout(); + size_t stack_size; + void *stack_mem; + int rc; + + if (!priv->rx_mpool) { + RTE_LOG(ERR, PMD, "%s: No buffer pool.\n", + mpipe_name(priv)); + return -ENODEV; + } + + /* Allocate one NotifRing for each queue. */ + rc = gxio_mpipe_alloc_notif_rings(priv->context, MPIPE_RX_MAX_QUEUES, + 0, 0); + if (rc < 0) { + RTE_LOG(ERR, PMD, "%s: Failed to allocate notif rings.\n", + mpipe_name(priv)); + return rc; + } + priv->first_ring = rc; + + /* Allocate a NotifGroup. */ + rc = gxio_mpipe_alloc_notif_groups(priv->context, 1, 0, 0); + if (rc < 0) { + RTE_LOG(ERR, PMD, "%s: Failed to allocate rx group.\n", + mpipe_name(priv)); + return rc; + } + priv->notif_group = rc; + + /* Allocate required buckets. 
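/*
 * The RX helpers above hand the hardware an address that sits at a fixed
 * offset inside each mbuf (past the mbuf header, private area, headroom and
 * IP alignment), and mpipe_recv_mbuf() recovers the mbuf by subtracting
 * that constant from the descriptor's virtual address. A tiny illustration
 * of the pointer arithmetic; the struct layout and offsets are simplified
 * stand-ins, not the real rte_mbuf layout:
 */
#include <assert.h>
#include <stddef.h>

struct fake_mbuf {
	char meta[128];		/* stands in for the mbuf header + private area */
	char data[2048];	/* buffer area handed to the hardware */
};

#define RX_OFFSET offsetof(struct fake_mbuf, data)

int main(void)
{
	struct fake_mbuf m;
	void *hw_va = (char *)&m + RX_OFFSET;	/* address pushed to the HW stack */
	struct fake_mbuf *back =
		(struct fake_mbuf *)((char *)hw_va - RX_OFFSET);

	assert(back == &m);	/* descriptor VA maps back to its mbuf */
	return 0;
}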
*/ + rc = gxio_mpipe_alloc_buckets(priv->context, MPIPE_RX_BUCKETS, 0, 0); + if (rc < 0) { + RTE_LOG(ERR, PMD, "%s: Failed to allocate buckets.\n", + mpipe_name(priv)); + return rc; + } + priv->first_bucket = rc; + + rc = gxio_mpipe_alloc_buffer_stacks(priv->context, 1, 0, 0); + if (rc < 0) { + RTE_LOG(ERR, PMD, "%s: Failed to allocate buffer stack.\n", + mpipe_name(priv)); + return rc; + } + priv->stack = rc; + + while (seg && seg->addr) + mpipe_register_segment(priv, seg++); + + stack_size = gxio_mpipe_calc_buffer_stack_bytes(MPIPE_RX_STACK_SIZE); + stack_mem = rte_zmalloc(NULL, stack_size, 65536); + if (!stack_mem) { + RTE_LOG(ERR, PMD, "%s: Failed to allocate buffer memory.\n", + mpipe_name(priv)); + return -ENOMEM; + } else { + RTE_LOG(DEBUG, PMD, "%s: Buffer stack memory %p - %p.\n", + mpipe_name(priv), stack_mem, + RTE_PTR_ADD(stack_mem, stack_size - 1)); + } + + rc = gxio_mpipe_init_buffer_stack(priv->context, priv->stack, + priv->rx_size_code, stack_mem, + stack_size, 0); + if (rc < 0) { + RTE_LOG(ERR, PMD, "%s: Failed to initialize buffer stack.\n", + mpipe_name(priv)); + return rc; + } + + return 0; +} + +static int +mpipe_xmit_init(struct mpipe_dev_priv *priv) +{ + size_t ring_size; + void *ring_mem; + int rc; + + /* Allocate eDMA ring. */ + rc = gxio_mpipe_alloc_edma_rings(priv->context, 1, 0, 0); + if (rc < 0) { + RTE_LOG(ERR, PMD, "%s: Failed to alloc tx ring.\n", + mpipe_name(priv)); + return rc; + } + priv->ering = rc; + + rc = mpipe_equeue_size(MPIPE_TX_DESCS); + if (rc < 0) { + RTE_LOG(ERR, PMD, "%s: Cannot allocate %d equeue descs.\n", + mpipe_name(priv), (int)MPIPE_TX_DESCS); + return -ENOMEM; + } + priv->equeue_size = rc; + + /* Initialize completion array. */ + ring_size = sizeof(priv->tx_comps[0]) * priv->equeue_size; + priv->tx_comps = rte_zmalloc(NULL, ring_size, RTE_CACHE_LINE_SIZE); + if (!priv->tx_comps) { + RTE_LOG(ERR, PMD, "%s: Failed to allocate egress comps.\n", + mpipe_name(priv)); + return -ENOMEM; + } + + /* Allocate eDMA ring memory. */ + ring_size = sizeof(gxio_mpipe_edesc_t) * priv->equeue_size; + ring_mem = rte_zmalloc(NULL, ring_size, ring_size); + if (!ring_mem) { + RTE_LOG(ERR, PMD, "%s: Failed to allocate egress descs.\n", + mpipe_name(priv)); + return -ENOMEM; + } else { + RTE_LOG(DEBUG, PMD, "%s: eDMA ring memory %p - %p.\n", + mpipe_name(priv), ring_mem, + RTE_PTR_ADD(ring_mem, ring_size - 1)); + } + + /* Initialize eDMA ring. */ + rc = gxio_mpipe_equeue_init(&priv->equeue, priv->context, priv->ering, + priv->channel, ring_mem, ring_size, 0); + if (rc < 0) { + RTE_LOG(ERR, PMD, "%s: Failed to init equeue\n", + mpipe_name(priv)); + return rc; + } + + return 0; +} + +static int +mpipe_link_init(struct mpipe_dev_priv *priv) +{ + int rc; + + /* Open the link. */ + rc = gxio_mpipe_link_open(&priv->link, priv->context, + mpipe_name(priv), GXIO_MPIPE_LINK_AUTO_NONE); + if (rc < 0) { + RTE_LOG(ERR, PMD, "%s: Failed to open link.\n", + mpipe_name(priv)); + return rc; + } + + /* Get the channel index. 
*/ + rc = gxio_mpipe_link_channel(&priv->link); + if (rc < 0) { + RTE_LOG(ERR, PMD, "%s: Bad channel\n", + mpipe_name(priv)); + return rc; + } + priv->channel = rc; + + return 0; +} + +static int +mpipe_init(struct mpipe_dev_priv *priv) +{ + int rc; + + if (priv->initialized) + return 0; + + rc = mpipe_recv_init(priv); + if (rc < 0) { + RTE_LOG(ERR, PMD, "%s: Failed to init rx.\n", + mpipe_name(priv)); + return rc; + } + + rc = mpipe_xmit_init(priv); + if (rc < 0) { + RTE_LOG(ERR, PMD, "%s: Failed to init tx.\n", + mpipe_name(priv)); + rte_free(priv); + return rc; + } + + priv->initialized = 1; + + return 0; +} + +static int +mpipe_start(struct rte_eth_dev *dev) +{ + struct mpipe_dev_priv *priv = mpipe_priv(dev); + struct mpipe_channel_config config; + struct mpipe_rx_queue *rx_queue; + struct rte_eth_link eth_link; + unsigned queue, buffers = 0; + size_t ring_size; + void *ring_mem; + int rc; + + memset(ð_link, 0, sizeof(eth_link)); + mpipe_dev_atomic_write_link_status(dev, ð_link); + + rc = mpipe_init(priv); + if (rc < 0) + return rc; + + /* Initialize NotifRings. */ + for (queue = 0; queue < priv->nb_rx_queues; queue++) { + rx_queue = mpipe_rx_queue(priv, queue); + ring_size = rx_queue->q.nb_desc * sizeof(gxio_mpipe_idesc_t); + + ring_mem = rte_malloc(NULL, ring_size, ring_size); + if (!ring_mem) { + RTE_LOG(ERR, PMD, "%s: Failed to alloc rx descs.\n", + mpipe_name(priv)); + return -ENOMEM; + } else { + RTE_LOG(DEBUG, PMD, "%s: iDMA ring %d memory %p - %p.\n", + mpipe_name(priv), queue, ring_mem, + RTE_PTR_ADD(ring_mem, ring_size - 1)); + } + + rc = gxio_mpipe_iqueue_init(&rx_queue->iqueue, priv->context, + priv->first_ring + queue, ring_mem, + ring_size, 0); + if (rc < 0) { + RTE_LOG(ERR, PMD, "%s: Failed to init rx queue.\n", + mpipe_name(priv)); + return rc; + } + + rx_queue->rx_ring_mem = ring_mem; + buffers += rx_queue->q.nb_desc; + } + + /* Initialize ingress NotifGroup and buckets. */ + rc = gxio_mpipe_init_notif_group_and_buckets(priv->context, + priv->notif_group, priv->first_ring, priv->nb_rx_queues, + priv->first_bucket, MPIPE_RX_BUCKETS, + GXIO_MPIPE_BUCKET_STATIC_FLOW_AFFINITY); + if (rc < 0) { + RTE_LOG(ERR, PMD, "%s: Failed to init group and buckets.\n", + mpipe_name(priv)); + return rc; + } + + /* Configure the classifier to deliver packets from this port. */ + config.enable = 1; + config.first_bucket = priv->first_bucket; + config.num_buckets = MPIPE_RX_BUCKETS; + memset(&config.stacks, 0xff, sizeof(config.stacks)); + config.stacks.stacks[priv->rx_size_code] = priv->stack; + config.head_room = priv->rx_offset & RTE_MEMPOOL_ALIGN_MASK; + + rc = mpipe_channel_config(priv->instance, priv->channel, + &config); + if (rc < 0) { + RTE_LOG(ERR, PMD, "%s: Failed to setup classifier.\n", + mpipe_name(priv)); + return rc; + } + + /* Fill empty buffers into the buffer stack. */ + mpipe_recv_fill_stack(priv, buffers); + + /* Bring up the link. */ + mpipe_set_link_up(dev); + + /* Start xmit/recv on queues. 
*/ + for (queue = 0; queue < priv->nb_tx_queues; queue++) + mpipe_tx_queue(priv, queue)->q.link_status = ETH_LINK_UP; + for (queue = 0; queue < priv->nb_rx_queues; queue++) + mpipe_rx_queue(priv, queue)->q.link_status = ETH_LINK_UP; + priv->running = 1; + + return 0; +} + +static void +mpipe_stop(struct rte_eth_dev *dev) +{ + struct mpipe_dev_priv *priv = mpipe_priv(dev); + struct mpipe_channel_config config; + unsigned queue; + int rc; + + for (queue = 0; queue < priv->nb_tx_queues; queue++) + mpipe_tx_queue(priv, queue)->q.link_status = ETH_LINK_DOWN; + for (queue = 0; queue < priv->nb_rx_queues; queue++) + mpipe_rx_queue(priv, queue)->q.link_status = ETH_LINK_DOWN; + + /* Make sure the link_status writes land. */ + rte_wmb(); + + /* + * Wait for link_status change to register with straggling datapath + * threads. + */ + mpipe_dp_wait(priv); + + /* Bring down the link. */ + mpipe_set_link_down(dev); + + /* Remove classifier rules. */ + memset(&config, 0, sizeof(config)); + rc = mpipe_channel_config(priv->instance, priv->channel, + &config); + if (rc < 0) { + RTE_LOG(ERR, PMD, "%s: Failed to stop classifier.\n", + mpipe_name(priv)); + } + + /* Flush completed xmit packets. */ + mpipe_xmit_flush(priv); + + /* Flush buffer stacks. */ + mpipe_recv_flush(priv); + + priv->running = 0; +} + +static void +mpipe_close(struct rte_eth_dev *dev) +{ + struct mpipe_dev_priv *priv = mpipe_priv(dev); + if (priv->running) + mpipe_stop(dev); +} + +static void +mpipe_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct mpipe_dev_priv *priv = mpipe_priv(dev); + struct mpipe_tx_queue *tx_queue; + struct mpipe_rx_queue *rx_queue; + unsigned i; + uint16_t idx; + + memset(stats, 0, sizeof(*stats)); + + for (i = 0; i < priv->nb_tx_queues; i++) { + tx_queue = mpipe_tx_queue(priv, i); + + stats->opackets += tx_queue->q.stats.packets; + stats->obytes += tx_queue->q.stats.bytes; + stats->oerrors += tx_queue->q.stats.errors; + + idx = tx_queue->q.stat_idx; + if (idx != (uint16_t)-1) { + stats->q_opackets[idx] += tx_queue->q.stats.packets; + stats->q_obytes[idx] += tx_queue->q.stats.bytes; + stats->q_errors[idx] += tx_queue->q.stats.errors; + } + } + + for (i = 0; i < priv->nb_rx_queues; i++) { + rx_queue = mpipe_rx_queue(priv, i); + + stats->ipackets += rx_queue->q.stats.packets; + stats->ibytes += rx_queue->q.stats.bytes; + stats->ierrors += rx_queue->q.stats.errors; + stats->rx_nombuf += rx_queue->q.stats.nomem; + + idx = rx_queue->q.stat_idx; + if (idx != (uint16_t)-1) { + stats->q_ipackets[idx] += rx_queue->q.stats.packets; + stats->q_ibytes[idx] += rx_queue->q.stats.bytes; + stats->q_errors[idx] += rx_queue->q.stats.errors; + } + } +} + +static void +mpipe_stats_reset(struct rte_eth_dev *dev) +{ + struct mpipe_dev_priv *priv = mpipe_priv(dev); + struct mpipe_tx_queue *tx_queue; + struct mpipe_rx_queue *rx_queue; + unsigned i; + + for (i = 0; i < priv->nb_tx_queues; i++) { + tx_queue = mpipe_tx_queue(priv, i); + memset(&tx_queue->q.stats, 0, sizeof(tx_queue->q.stats)); + } + + for (i = 0; i < priv->nb_rx_queues; i++) { + rx_queue = mpipe_rx_queue(priv, i); + memset(&rx_queue->q.stats, 0, sizeof(rx_queue->q.stats)); + } +} + +static int +mpipe_queue_stats_mapping_set(struct rte_eth_dev *dev, uint16_t queue_id, + uint8_t stat_idx, uint8_t is_rx) +{ + struct mpipe_dev_priv *priv = mpipe_priv(dev); + + if (is_rx) { + priv->rx_stat_mapping[stat_idx] = queue_id; + } else { + priv->tx_stat_mapping[stat_idx] = queue_id; + } + + return 0; +} + +static int +mpipe_tx_queue_setup(struct rte_eth_dev 
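/*
 * mpipe_queue_stats_mapping_set() above records, for each stats counter
 * slot, which queue it should report; the arrays are pre-filled with 0xff
 * bytes (every entry -1, "unused") in rte_pmd_mpipe_devinit() below, and
 * the queue setup routines then do the reverse lookup to cache their own
 * slot. A minimal sketch of that mapping, with a hypothetical STAT_CNTRS:
 */
#include <stdint.h>
#include <string.h>

#define STAT_CNTRS 16	/* plays the role of RTE_ETHDEV_QUEUE_STAT_CNTRS */

static int stat_to_queue[STAT_CNTRS];

static void stats_mapping_init(void)
{
	/* 0xff bytes make every entry -1, i.e. "slot unused". */
	memset(stat_to_queue, 0xff, sizeof(stat_to_queue));
}

static void stats_mapping_set(uint8_t stat_idx, uint16_t queue_id)
{
	stat_to_queue[stat_idx] = queue_id;
}

/* Reverse lookup done at queue setup time: which slot reports this queue? */
static int queue_stat_slot(uint16_t queue_id)
{
	int idx;

	for (idx = 0; idx < STAT_CNTRS; idx++)
		if (stat_to_queue[idx] == queue_id)
			return idx;
	return -1;
}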
*dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + struct mpipe_tx_queue *tx_queue = dev->data->tx_queues[queue_idx]; + struct mpipe_dev_priv *priv = mpipe_priv(dev); + uint16_t idx; + + tx_queue = rte_realloc(tx_queue, sizeof(*tx_queue), + RTE_CACHE_LINE_SIZE); + if (!tx_queue) { + RTE_LOG(ERR, PMD, "%s: Failed to allocate TX queue.\n", + mpipe_name(priv)); + return -ENOMEM; + } + + memset(&tx_queue->q, 0, sizeof(tx_queue->q)); + tx_queue->q.priv = priv; + tx_queue->q.queue_idx = queue_idx; + tx_queue->q.port_id = dev->data->port_id; + tx_queue->q.nb_desc = nb_desc; + + tx_queue->q.stat_idx = -1; + for (idx = 0; idx < RTE_ETHDEV_QUEUE_STAT_CNTRS; idx++) { + if (priv->tx_stat_mapping[idx] == queue_idx) + tx_queue->q.stat_idx = idx; + } + + dev->data->tx_queues[queue_idx] = tx_queue; + + return 0; +} + +static void +mpipe_tx_queue_release(void *_txq) +{ + rte_free(_txq); +} + +static int +mpipe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mp) +{ + struct mpipe_rx_queue *rx_queue = dev->data->rx_queues[queue_idx]; + struct mpipe_dev_priv *priv = mpipe_priv(dev); + uint16_t idx; + int size, rc; + + rc = mpipe_iqueue_size(nb_desc); + if (rc < 0) { + RTE_LOG(ERR, PMD, "%s: Cannot allocate %d iqueue descs.\n", + mpipe_name(priv), (int)nb_desc); + return -ENOMEM; + } + + if (rc != nb_desc) { + RTE_LOG(WARNING, PMD, "%s: Extending RX descs from %d to %d.\n", + mpipe_name(priv), (int)nb_desc, rc); + nb_desc = rc; + } + + size = sizeof(*rx_queue); + rx_queue = rte_realloc(rx_queue, size, RTE_CACHE_LINE_SIZE); + if (!rx_queue) { + RTE_LOG(ERR, PMD, "%s: Failed to allocate RX queue.\n", + mpipe_name(priv)); + return -ENOMEM; + } + + memset(&rx_queue->q, 0, sizeof(rx_queue->q)); + rx_queue->q.priv = priv; + rx_queue->q.nb_desc = nb_desc; + rx_queue->q.port_id = dev->data->port_id; + rx_queue->q.queue_idx = queue_idx; + + if (!priv->rx_mpool) { + int size = (rte_pktmbuf_data_room_size(mp) - + RTE_PKTMBUF_HEADROOM - + MPIPE_RX_IP_ALIGN); + + priv->rx_offset = (sizeof(struct rte_mbuf) + + rte_pktmbuf_priv_size(mp) + + RTE_PKTMBUF_HEADROOM + + MPIPE_RX_IP_ALIGN); + if (size < 0) { + RTE_LOG(ERR, PMD, "%s: Bad buffer size %d.\n", + mpipe_name(priv), + rte_pktmbuf_data_room_size(mp)); + return -ENOMEM; + } + + priv->rx_size_code = mpipe_buffer_size_index(size); + priv->rx_mpool = mp; + } + + if (priv->rx_mpool != mp) { + RTE_LOG(WARNING, PMD, "%s: Ignoring multiple buffer pools.\n", + mpipe_name(priv)); + } + + rx_queue->q.stat_idx = -1; + for (idx = 0; idx < RTE_ETHDEV_QUEUE_STAT_CNTRS; idx++) { + if (priv->rx_stat_mapping[idx] == queue_idx) + rx_queue->q.stat_idx = idx; + } + + dev->data->rx_queues[queue_idx] = rx_queue; + + return 0; +} + +static void +mpipe_rx_queue_release(void *_rxq) +{ + rte_free(_rxq); +} + +#define MPIPE_XGBE_ENA_HASH_MULTI \ + (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__ENA_HASH_MULTI_SHIFT) +#define MPIPE_XGBE_ENA_HASH_UNI \ + (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__ENA_HASH_UNI_SHIFT) +#define MPIPE_XGBE_COPY_ALL \ + (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__COPY_ALL_SHIFT) +#define MPIPE_GBE_ENA_MULTI_HASH \ + (1UL << MPIPE_GBE_NETWORK_CONFIGURATION__MULTI_HASH_ENA_SHIFT) +#define MPIPE_GBE_ENA_UNI_HASH \ + (1UL << MPIPE_GBE_NETWORK_CONFIGURATION__UNI_HASH_ENA_SHIFT) +#define MPIPE_GBE_COPY_ALL \ + (1UL << 
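/*
 * mpipe_rx_queue_setup() above derives two values from the first mempool it
 * sees: rx_offset (distance from the start of the mbuf to the byte handed
 * to the hardware) and rx_size_code (the largest supported mPIPE buffer
 * size that fits in the mempool element). A worked example of the sizing
 * step, assuming a common 2048+128-byte data room and 128-byte headroom;
 * the figures are illustrative, not taken from a specific configuration:
 */
#include <stdio.h>

int main(void)
{
	static const int sizes[] =	/* mPIPE buffer sizes, as in the table above */
		{ 128, 256, 512, 1024, 1664, 4096, 10368, 16384 };
	int data_room = 2048 + 128;	/* rte_pktmbuf_data_room_size(mp) */
	int headroom = 128;		/* RTE_PKTMBUF_HEADROOM */
	int ip_align = 2;		/* MPIPE_RX_IP_ALIGN */
	int usable = data_room - headroom - ip_align;
	int pick = -1;
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		if (sizes[i] <= usable)
			pick = sizes[i];

	printf("usable %d bytes -> buffer size %d\n", usable, pick);
	/* prints: usable 2046 bytes -> buffer size 1664 */
	return 0;
}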
MPIPE_GBE_NETWORK_CONFIGURATION__COPY_ALL_SHIFT) + +static void +mpipe_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct mpipe_dev_priv *priv = mpipe_priv(dev); + int64_t reg; + int addr; + + if (priv->is_xaui) { + addr = MPIPE_XAUI_RECEIVE_CONFIGURATION; + reg = gxio_mpipe_link_mac_rd(&priv->link, addr); + reg &= ~MPIPE_XGBE_ENA_HASH_MULTI; + reg &= ~MPIPE_XGBE_ENA_HASH_UNI; + reg |= MPIPE_XGBE_COPY_ALL; + gxio_mpipe_link_mac_wr(&priv->link, addr, reg); + } else { + addr = MPIPE_GBE_NETWORK_CONFIGURATION; + reg = gxio_mpipe_link_mac_rd(&priv->link, addr); + reg &= ~MPIPE_GBE_ENA_MULTI_HASH; + reg &= ~MPIPE_GBE_ENA_UNI_HASH; + reg |= MPIPE_GBE_COPY_ALL; + gxio_mpipe_link_mac_wr(&priv->link, addr, reg); + } +} + +static void +mpipe_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct mpipe_dev_priv *priv = mpipe_priv(dev); + int64_t reg; + int addr; + + if (priv->is_xaui) { + addr = MPIPE_XAUI_RECEIVE_CONFIGURATION; + reg = gxio_mpipe_link_mac_rd(&priv->link, addr); + reg |= MPIPE_XGBE_ENA_HASH_MULTI; + reg |= MPIPE_XGBE_ENA_HASH_UNI; + reg &= ~MPIPE_XGBE_COPY_ALL; + gxio_mpipe_link_mac_wr(&priv->link, addr, reg); + } else { + addr = MPIPE_GBE_NETWORK_CONFIGURATION; + reg = gxio_mpipe_link_mac_rd(&priv->link, addr); + reg |= MPIPE_GBE_ENA_MULTI_HASH; + reg |= MPIPE_GBE_ENA_UNI_HASH; + reg &= ~MPIPE_GBE_COPY_ALL; + gxio_mpipe_link_mac_wr(&priv->link, addr, reg); + } +} + +static const struct eth_dev_ops mpipe_dev_ops = { + .dev_infos_get = mpipe_infos_get, + .dev_configure = mpipe_configure, + .dev_start = mpipe_start, + .dev_stop = mpipe_stop, + .dev_close = mpipe_close, + .stats_get = mpipe_stats_get, + .stats_reset = mpipe_stats_reset, + .queue_stats_mapping_set = mpipe_queue_stats_mapping_set, + .tx_queue_setup = mpipe_tx_queue_setup, + .rx_queue_setup = mpipe_rx_queue_setup, + .tx_queue_release = mpipe_tx_queue_release, + .rx_queue_release = mpipe_rx_queue_release, + .link_update = mpipe_link_update, + .dev_set_link_up = mpipe_set_link_up, + .dev_set_link_down = mpipe_set_link_down, + .promiscuous_enable = mpipe_promiscuous_enable, + .promiscuous_disable = mpipe_promiscuous_disable, +}; + +static inline void +mpipe_xmit_null(struct mpipe_dev_priv *priv, int64_t start, int64_t end) +{ + gxio_mpipe_edesc_t null_desc = { { .bound = 1, .ns = 1 } }; + gxio_mpipe_equeue_t *equeue = &priv->equeue; + int64_t slot; + + for (slot = start; slot < end; slot++) { + gxio_mpipe_equeue_put_at(equeue, null_desc, slot); + } +} + +static void +mpipe_xmit_flush(struct mpipe_dev_priv *priv) +{ + gxio_mpipe_equeue_t *equeue = &priv->equeue; + int64_t slot; + + /* Post a dummy descriptor and wait for its return. */ + slot = gxio_mpipe_equeue_reserve(equeue, 1); + if (slot < 0) { + RTE_LOG(ERR, PMD, "%s: Failed to reserve stop slot.\n", + mpipe_name(priv)); + return; + } + + mpipe_xmit_null(priv, slot, slot + 1); + + while (!gxio_mpipe_equeue_is_complete(equeue, slot, 1)) { + rte_pause(); + } + + for (slot = 0; slot < priv->equeue_size; slot++) { + if (priv->tx_comps[slot]) + rte_pktmbuf_free_seg(priv->tx_comps[slot]); + } +} + +static void +mpipe_recv_flush(struct mpipe_dev_priv *priv) +{ + uint8_t in_port = priv->port_id; + struct mpipe_rx_queue *rx_queue; + gxio_mpipe_iqueue_t *iqueue; + gxio_mpipe_idesc_t idesc; + struct rte_mbuf *mbuf; + unsigned queue; + + /* Release packets on the buffer stack. */ + mpipe_recv_flush_stack(priv); + + /* Flush packets sitting in recv queues. 
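/*
 * The promiscuous enable/disable callbacks above are plain read-modify-write
 * cycles on a link MAC register: clear the unicast/multicast hash-filter
 * bits and set "copy all" to enable, and the inverse to disable. A generic
 * sketch of that pattern; mac_read()/mac_write() here are stand-ins backed
 * by a fake register, not the gxio accessors:
 */
#include <stdint.h>

static uint64_t fake_reg;	/* stand-in for a link MAC register */

static uint64_t mac_read(int addr)  { (void)addr; return fake_reg; }
static void mac_write(int addr, uint64_t v) { (void)addr; fake_reg = v; }

/* Clear @clear, set @set, leave every other bit of the register alone. */
static void rmw_bits(int addr, uint64_t clear, uint64_t set)
{
	uint64_t reg = mac_read(addr);

	reg &= ~clear;
	reg |= set;
	mac_write(addr, reg);
}

/* e.g. enable: rmw_bits(addr, HASH_UNI | HASH_MULTI, COPY_ALL); (names hypothetical) */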
*/ + for (queue = 0; queue < priv->nb_rx_queues; queue++) { + rx_queue = mpipe_rx_queue(priv, queue); + iqueue = &rx_queue->iqueue; + while (gxio_mpipe_iqueue_try_get(iqueue, &idesc) >= 0) { + /* Skip idesc with the 'buffer error' bit set. */ + if (idesc.be) + continue; + mbuf = mpipe_recv_mbuf(priv, &idesc, in_port); + rte_pktmbuf_free(mbuf); + } + rte_free(rx_queue->rx_ring_mem); + } +} + +static inline uint16_t +mpipe_do_xmit(struct mpipe_tx_queue *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct mpipe_dev_priv *priv = tx_queue->q.priv; + gxio_mpipe_equeue_t *equeue = &priv->equeue; + unsigned nb_bytes = 0; + unsigned nb_sent = 0; + int nb_slots, i; + uint8_t port_id; + + PMD_DEBUG_TX("Trying to transmit %d packets on %s:%d.\n", + nb_pkts, mpipe_name(tx_queue->q.priv), + tx_queue->q.queue_idx); + + /* Optimistic assumption that we need exactly one slot per packet. */ + nb_slots = RTE_MIN(nb_pkts, MPIPE_TX_DESCS / 2); + + do { + struct rte_mbuf *mbuf = NULL, *pkt = NULL; + int64_t slot; + + /* Reserve eDMA ring slots. */ + slot = gxio_mpipe_equeue_try_reserve_fast(equeue, nb_slots); + if (unlikely(slot < 0)) { + break; + } + + for (i = 0; i < nb_slots; i++) { + unsigned idx = (slot + i) & (priv->equeue_size - 1); + rte_prefetch0(priv->tx_comps[idx]); + } + + /* Fill up slots with descriptor and completion info. */ + for (i = 0; i < nb_slots; i++) { + unsigned idx = (slot + i) & (priv->equeue_size - 1); + gxio_mpipe_edesc_t desc; + struct rte_mbuf *next; + + /* Starting on a new packet? */ + if (likely(!mbuf)) { + int room = nb_slots - i; + + pkt = mbuf = tx_pkts[nb_sent]; + + /* Bail out if we run out of descs. */ + if (unlikely(pkt->nb_segs > room)) + break; + + nb_sent++; + } + + /* We have a segment to send. */ + next = mbuf->next; + + if (priv->tx_comps[idx]) + rte_pktmbuf_free_seg(priv->tx_comps[idx]); + + port_id = (mbuf->port < RTE_MAX_ETHPORTS) ? + mbuf->port : priv->port_id; + desc = (gxio_mpipe_edesc_t) { { + .va = rte_pktmbuf_mtod(mbuf, uintptr_t), + .xfer_size = rte_pktmbuf_data_len(mbuf), + .bound = next ? 0 : 1, + .stack_idx = mpipe_mbuf_stack_index(priv, mbuf), + .size = priv->rx_size_code, + } }; + if (mpipe_local.mbuf_push_debt[port_id] > 0) { + mpipe_local.mbuf_push_debt[port_id]--; + desc.hwb = 1; + priv->tx_comps[idx] = NULL; + } else + priv->tx_comps[idx] = mbuf; + + nb_bytes += mbuf->data_len; + gxio_mpipe_equeue_put_at(equeue, desc, slot + i); + + PMD_DEBUG_TX("%s:%d: Sending packet %p, len %d\n", + mpipe_name(priv), + tx_queue->q.queue_idx, + rte_pktmbuf_mtod(mbuf, void *), + rte_pktmbuf_data_len(mbuf)); + + mbuf = next; + } + + if (unlikely(nb_sent < nb_pkts)) { + + /* Fill remaining slots with null descriptors. */ + mpipe_xmit_null(priv, slot + i, slot + nb_slots); + + /* + * Calculate exact number of descriptors needed for + * the next go around. 
+ */ + nb_slots = 0; + for (i = nb_sent; i < nb_pkts; i++) { + nb_slots += tx_pkts[i]->nb_segs; + } + + nb_slots = RTE_MIN(nb_slots, MPIPE_TX_DESCS / 2); + } + } while (nb_sent < nb_pkts); + + tx_queue->q.stats.packets += nb_sent; + tx_queue->q.stats.bytes += nb_bytes; + + return nb_sent; +} + +static inline uint16_t +mpipe_do_recv(struct mpipe_rx_queue *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct mpipe_dev_priv *priv = rx_queue->q.priv; + gxio_mpipe_iqueue_t *iqueue = &rx_queue->iqueue; + gxio_mpipe_idesc_t *first_idesc, *idesc, *last_idesc; + uint8_t in_port = rx_queue->q.port_id; + const unsigned look_ahead = 8; + int room = nb_pkts, rc = 0; + unsigned nb_packets = 0; + unsigned nb_dropped = 0; + unsigned nb_nomem = 0; + unsigned nb_bytes = 0; + unsigned nb_descs, i; + + while (room && !rc) { + if (rx_queue->avail_descs < room) { + rc = gxio_mpipe_iqueue_try_peek(iqueue, + &rx_queue->next_desc); + rx_queue->avail_descs = rc < 0 ? 0 : rc; + } + + if (unlikely(!rx_queue->avail_descs)) { + break; + } + + nb_descs = RTE_MIN(room, rx_queue->avail_descs); + + first_idesc = rx_queue->next_desc; + last_idesc = first_idesc + nb_descs; + + rx_queue->next_desc += nb_descs; + rx_queue->avail_descs -= nb_descs; + + for (i = 1; i < look_ahead; i++) { + rte_prefetch0(first_idesc + i); + } + + PMD_DEBUG_RX("%s:%d: Trying to receive %d packets\n", + mpipe_name(rx_queue->q.priv), + rx_queue->q.queue_idx, + nb_descs); + + for (idesc = first_idesc; idesc < last_idesc; idesc++) { + struct rte_mbuf *mbuf; + + PMD_DEBUG_RX("%s:%d: processing idesc %d/%d\n", + mpipe_name(priv), + rx_queue->q.queue_idx, + nb_packets, nb_descs); + + rte_prefetch0(idesc + look_ahead); + + PMD_DEBUG_RX("%s:%d: idesc %p, %s%s%s%s%s%s%s%s%s%s" + "size: %d, bkt: %d, chan: %d, ring: %d, sqn: %lu, va: %lu\n", + mpipe_name(priv), + rx_queue->q.queue_idx, + idesc, + idesc->me ? "me, " : "", + idesc->tr ? "tr, " : "", + idesc->ce ? "ce, " : "", + idesc->ct ? "ct, " : "", + idesc->cs ? "cs, " : "", + idesc->nr ? "nr, " : "", + idesc->sq ? "sq, " : "", + idesc->ts ? "ts, " : "", + idesc->ps ? "ps, " : "", + idesc->be ? "be, " : "", + idesc->l2_size, + idesc->bucket_id, + idesc->channel, + idesc->notif_ring, + (unsigned long)idesc->packet_sqn, + (unsigned long)idesc->va); + + if (unlikely(gxio_mpipe_idesc_has_error(idesc))) { + nb_dropped++; + gxio_mpipe_iqueue_drop(iqueue, idesc); + PMD_DEBUG_RX("%s:%d: Descriptor error\n", + mpipe_name(rx_queue->q.priv), + rx_queue->q.queue_idx); + continue; + } + + if (mpipe_local.mbuf_push_debt[in_port] < + MPIPE_BUF_DEBT_THRESHOLD) + mpipe_local.mbuf_push_debt[in_port]++; + else { + mbuf = __rte_mbuf_raw_alloc(priv->rx_mpool); + if (unlikely(!mbuf)) { + nb_nomem++; + gxio_mpipe_iqueue_drop(iqueue, idesc); + PMD_DEBUG_RX("%s:%d: alloc failure\n", + mpipe_name(rx_queue->q.priv), + rx_queue->q.queue_idx); + continue; + } + + mpipe_recv_push(priv, mbuf); + } + + /* Get and setup the mbuf for the received packet. */ + mbuf = mpipe_recv_mbuf(priv, idesc, in_port); + + /* Update results and statistics counters. */ + rx_pkts[nb_packets] = mbuf; + nb_bytes += mbuf->pkt_len; + nb_packets++; + } + + /* + * We release the ring in bursts, but do not track and release + * buckets. This therefore breaks dynamic flow affinity, but + * we always operate in static affinity mode, and so we're OK + * with this optimization. 
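/*
 * mpipe_do_xmit() above indexes its completion array with
 * "(slot + i) & (equeue_size - 1)", the usual power-of-two ring trick (all
 * supported equeue sizes in the table above are powers of two); a slot's
 * previous mbuf is freed lazily the next time that slot is reused. A tiny
 * self-contained illustration of the masking:
 */
#include <stdio.h>

int main(void)
{
	const unsigned int ring_size = 512;	/* always a power of two */
	const unsigned int mask = ring_size - 1;
	unsigned long long slot;

	for (slot = 510; slot < 515; slot++)	/* wraps: 510, 511, 0, 1, 2 */
		printf("slot %llu -> index %llu\n",
		       slot, (unsigned long long)(slot & mask));
	return 0;
}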
+ */ + gxio_mpipe_iqueue_advance(iqueue, nb_descs); + gxio_mpipe_credit(iqueue->context, iqueue->ring, -1, nb_descs); + + /* + * Go around once more if we haven't yet peeked the queue, and + * if we have more room to receive. + */ + room = nb_pkts - nb_packets; + } + + rx_queue->q.stats.packets += nb_packets; + rx_queue->q.stats.bytes += nb_bytes; + rx_queue->q.stats.errors += nb_dropped; + rx_queue->q.stats.nomem += nb_nomem; + + PMD_DEBUG_RX("%s:%d: RX: %d/%d pkts/bytes, %d/%d drops/nomem\n", + mpipe_name(rx_queue->q.priv), rx_queue->q.queue_idx, + nb_packets, nb_bytes, nb_dropped, nb_nomem); + + return nb_packets; +} + +static uint16_t +mpipe_recv_pkts(void *_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct mpipe_rx_queue *rx_queue = _rxq; + uint16_t result = 0; + + if (rx_queue) { + mpipe_dp_enter(rx_queue->q.priv); + if (likely(rx_queue->q.link_status)) + result = mpipe_do_recv(rx_queue, rx_pkts, nb_pkts); + mpipe_dp_exit(rx_queue->q.priv); + } + + return result; +} + +static uint16_t +mpipe_xmit_pkts(void *_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct mpipe_tx_queue *tx_queue = _txq; + uint16_t result = 0; + + if (tx_queue) { + mpipe_dp_enter(tx_queue->q.priv); + if (likely(tx_queue->q.link_status)) + result = mpipe_do_xmit(tx_queue, tx_pkts, nb_pkts); + mpipe_dp_exit(tx_queue->q.priv); + } + + return result; +} + +static int +mpipe_link_mac(const char *ifname, uint8_t *mac) +{ + int rc, idx; + char name[GXIO_MPIPE_LINK_NAME_LEN]; + + for (idx = 0, rc = 0; !rc; idx++) { + rc = gxio_mpipe_link_enumerate_mac(idx, name, mac); + if (!rc && !strncmp(name, ifname, GXIO_MPIPE_LINK_NAME_LEN)) + return 0; + } + return -ENODEV; +} + +static int +rte_pmd_mpipe_devinit(const char *ifname, + const char *params __rte_unused) +{ + gxio_mpipe_context_t *context; + struct rte_eth_dev *eth_dev; + struct mpipe_dev_priv *priv; + int instance, rc; + uint8_t *mac; + + /* Get the mPIPE instance that the device belongs to. 
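/*
 * mpipe_do_recv() above releases ring slots and flow credits once per burst
 * rather than per packet; as its in-line comment notes, this trades dynamic
 * flow affinity (which the driver does not use) for fewer hardware
 * accesses. A schematic burst loop showing that shape; peek(), process()
 * and release() are hypothetical stubs, not gxio calls:
 */
#include <stdio.h>

static int ready = 13;		/* descriptors sitting in the fake ring */

static int peek(int want) { return want < ready ? want : ready; }
static void process(int i) { (void)i; /* hand one packet to the app */ }
static void release(int n) { ready -= n; printf("released %d slots\n", n); }

/* Handle up to @budget packets, then do one bulk ring/credit release. */
static int recv_burst(int budget)
{
	int n = peek(budget);
	int i;

	for (i = 0; i < n; i++)
		process(i);
	if (n)
		release(n);
	return n;
}

int main(void)
{
	while (recv_burst(8) > 0)	/* prints "released 8", then "released 5" */
		;
	return 0;
}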
*/ + instance = gxio_mpipe_link_instance(ifname); + context = mpipe_context(instance); + if (!context) { + RTE_LOG(ERR, PMD, "%s: No device for link.\n", ifname); + return -ENODEV; + } + + priv = rte_zmalloc(NULL, sizeof(*priv), 0); + if (!priv) { + RTE_LOG(ERR, PMD, "%s: Failed to allocate priv.\n", ifname); + return -ENOMEM; + } + + memset(&priv->tx_stat_mapping, 0xff, sizeof(priv->tx_stat_mapping)); + memset(&priv->rx_stat_mapping, 0xff, sizeof(priv->rx_stat_mapping)); + priv->context = context; + priv->instance = instance; + priv->is_xaui = (strncmp(ifname, "xgbe", 4) == 0); + priv->channel = -1; + + mac = priv->mac_addr.addr_bytes; + rc = mpipe_link_mac(ifname, mac); + if (rc < 0) { + RTE_LOG(ERR, PMD, "%s: Failed to enumerate link.\n", ifname); + rte_free(priv); + return -ENODEV; + } + + eth_dev = rte_eth_dev_allocate(ifname, RTE_ETH_DEV_VIRTUAL); + if (!eth_dev) { + RTE_LOG(ERR, PMD, "%s: Failed to allocate device.\n", ifname); + rte_free(priv); + return -ENOMEM; + } + + RTE_LOG(INFO, PMD, "%s: Initialized mpipe device" + "(mac %02x:%02x:%02x:%02x:%02x:%02x).\n", + ifname, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + + priv->eth_dev = eth_dev; + priv->port_id = eth_dev->data->port_id; + eth_dev->data->dev_private = priv; + eth_dev->data->mac_addrs = &priv->mac_addr; + + eth_dev->data->dev_flags = 0; + eth_dev->data->kdrv = RTE_KDRV_NONE; + eth_dev->driver = NULL; + eth_dev->data->drv_name = drivername; + eth_dev->data->numa_node = instance; + + eth_dev->dev_ops = &mpipe_dev_ops; + eth_dev->rx_pkt_burst = &mpipe_recv_pkts; + eth_dev->tx_pkt_burst = &mpipe_xmit_pkts; + + rc = mpipe_link_init(priv); + if (rc < 0) { + RTE_LOG(ERR, PMD, "%s: Failed to init link.\n", + mpipe_name(priv)); + return rc; + } + + return 0; +} + +static struct rte_driver pmd_mpipe_xgbe_drv = { + .name = "xgbe", + .type = PMD_VDEV, + .init = rte_pmd_mpipe_devinit, +}; + +static struct rte_driver pmd_mpipe_gbe_drv = { + .name = "gbe", + .type = PMD_VDEV, + .init = rte_pmd_mpipe_devinit, +}; + +PMD_REGISTER_DRIVER(pmd_mpipe_xgbe_drv); +PMD_REGISTER_DRIVER(pmd_mpipe_gbe_drv); + +static void __attribute__((constructor, used)) +mpipe_init_contexts(void) +{ + struct mpipe_context *context; + int rc, instance; + + for (instance = 0; instance < GXIO_MPIPE_INSTANCE_MAX; instance++) { + context = &mpipe_contexts[instance]; + + rte_spinlock_init(&context->lock); + rc = gxio_mpipe_init(&context->context, instance); + if (rc < 0) + break; + } + + mpipe_instances = instance; +} diff --git a/drivers/net/mpipe/rte_pmd_mpipe_version.map b/drivers/net/mpipe/rte_pmd_mpipe_version.map new file mode 100644 index 00000000..ad607bbe --- /dev/null +++ b/drivers/net/mpipe/rte_pmd_mpipe_version.map @@ -0,0 +1,3 @@ +DPDK_2.2 { + local: *; +}; diff --git a/drivers/net/nfp/Makefile b/drivers/net/nfp/Makefile new file mode 100644 index 00000000..1dddd1fd --- /dev/null +++ b/drivers/net/nfp/Makefile @@ -0,0 +1,58 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2014 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. 
+# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_nfp.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +LDLIBS += -lm + +EXPORT_MAP := rte_pmd_nfp_version.map + +LIBABIVER := 1 + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_net.c + +# this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += lib/librte_eal lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += lib/librte_mempool lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += lib/librte_net lib/librte_malloc + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c new file mode 100644 index 00000000..bcf5fa99 --- /dev/null +++ b/drivers/net/nfp/nfp_net.c @@ -0,0 +1,2510 @@ +/* + * Copyright (c) 2014, 2015 Netronome Systems, Inc. + * All rights reserved. + * + * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/* + * vim:shiftwidth=8:noexpandtab + * + * @file dpdk/pmd/nfp_net.c + * + * Netronome vNIC DPDK Poll-Mode Driver: Main entry point + */ + +#include <math.h> + +#include <rte_byteorder.h> +#include <rte_common.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_ethdev.h> +#include <rte_dev.h> +#include <rte_ether.h> +#include <rte_malloc.h> +#include <rte_memzone.h> +#include <rte_mempool.h> +#include <rte_version.h> +#include <rte_string_fns.h> +#include <rte_alarm.h> + +#include "nfp_net_pmd.h" +#include "nfp_net_logs.h" +#include "nfp_net_ctrl.h" + +/* Prototypes */ +static void nfp_net_close(struct rte_eth_dev *dev); +static int nfp_net_configure(struct rte_eth_dev *dev); +static void nfp_net_dev_interrupt_handler(struct rte_intr_handle *handle, + void *param); +static void nfp_net_dev_interrupt_delayed_handler(void *param); +static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); +static void nfp_net_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static int nfp_net_init(struct rte_eth_dev *eth_dev); +static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete); +static void nfp_net_promisc_enable(struct rte_eth_dev *dev); +static void nfp_net_promisc_disable(struct rte_eth_dev *dev); +static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq); +static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev, + uint16_t queue_idx); +static uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +static void nfp_net_rx_queue_release(void *rxq); +static int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp); +static int nfp_net_tx_free_bufs(struct nfp_net_txq *txq); +static void nfp_net_tx_queue_release(void *txq); +static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); +static int nfp_net_start(struct rte_eth_dev *dev); +static void nfp_net_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats); +static void nfp_net_stats_reset(struct rte_eth_dev *dev); +static void nfp_net_stop(struct rte_eth_dev *dev); +static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +/* + * The offset of the queue controller queues in the PCIe Target. These + * happen to be at the same offset on the NFP6000 and the NFP3200 so + * we use a single macro here. + */ +#define NFP_PCIE_QUEUE(_q) (0x80000 + (0x800 * ((_q) & 0xff))) + +/* Maximum value which can be added to a queue with one transaction */ +#define NFP_QCP_MAX_ADD 0x7f + +#define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \ + (uint64_t)((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM) + +/* nfp_qcp_ptr - Read or Write Pointer of a queue */ +enum nfp_qcp_ptr { + NFP_QCP_READ_PTR = 0, + NFP_QCP_WRITE_PTR +}; + +/* + * nfp_qcp_ptr_add - Add the value to the selected pointer of a queue + * @q: Base address for queue structure + * @ptr: Add to the Read or Write pointer + * @val: Value to add to the queue pointer + * + * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed. 
+ */ +static inline void +nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val) +{ + uint32_t off; + + if (ptr == NFP_QCP_READ_PTR) + off = NFP_QCP_QUEUE_ADD_RPTR; + else + off = NFP_QCP_QUEUE_ADD_WPTR; + + while (val > NFP_QCP_MAX_ADD) { + nn_writel(rte_cpu_to_le_32(NFP_QCP_MAX_ADD), q + off); + val -= NFP_QCP_MAX_ADD; + } + + nn_writel(rte_cpu_to_le_32(val), q + off); +} + +/* + * nfp_qcp_read - Read the current Read/Write pointer value for a queue + * @q: Base address for queue structure + * @ptr: Read or Write pointer + */ +static inline uint32_t +nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr) +{ + uint32_t off; + uint32_t val; + + if (ptr == NFP_QCP_READ_PTR) + off = NFP_QCP_QUEUE_STS_LO; + else + off = NFP_QCP_QUEUE_STS_HI; + + val = rte_cpu_to_le_32(nn_readl(q + off)); + + if (ptr == NFP_QCP_READ_PTR) + return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask; + else + return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask; +} + +/* + * Functions to read/write from/to Config BAR + * Performs any endian conversion necessary. + */ +static inline uint8_t +nn_cfg_readb(struct nfp_net_hw *hw, int off) +{ + return nn_readb(hw->ctrl_bar + off); +} + +static inline void +nn_cfg_writeb(struct nfp_net_hw *hw, int off, uint8_t val) +{ + nn_writeb(val, hw->ctrl_bar + off); +} + +static inline uint32_t +nn_cfg_readl(struct nfp_net_hw *hw, int off) +{ + return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off)); +} + +static inline void +nn_cfg_writel(struct nfp_net_hw *hw, int off, uint32_t val) +{ + nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off); +} + +static inline uint64_t +nn_cfg_readq(struct nfp_net_hw *hw, int off) +{ + return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off)); +} + +static inline void +nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val) +{ + nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off); +} + +/* Creating memzone for hardware rings. */ +static const struct rte_memzone * +ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name, + uint16_t queue_id, uint32_t ring_size, int socket_id) +{ + char z_name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *mz; + + snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d", + dev->driver->pci_drv.name, + ring_name, dev->data->port_id, queue_id); + + mz = rte_memzone_lookup(z_name); + if (mz) + return mz; + + return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, 0, + NFP_MEMZONE_ALIGN); +} + +/* + * Atomically reads link status information from global structure rte_eth_dev. + * + * @param dev + * - Pointer to the structure rte_eth_dev to read from. + * - Pointer to the buffer to be saved with the link status. + * + * @return + * - On success, zero. + * - On failure, negative value. + */ +static inline int +nfp_net_dev_atomic_read_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = link; + struct rte_eth_link *src = &dev->data->dev_link; + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +/* + * Atomically writes the link status information into global + * structure rte_eth_dev. + * + * @param dev + * - Pointer to the structure rte_eth_dev to read from. + * - Pointer to the buffer to be saved with the link status. + * + * @return + * - On success, zero. + * - On failure, negative value. 
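/*
 * nfp_qcp_ptr_add() above can only move a queue pointer by at most
 * NFP_QCP_MAX_ADD (0x7f) per register write, so larger updates are split
 * into a loop of maximal writes plus a remainder. A standalone sketch of
 * the same chunking over a plain counter instead of a device register:
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_ADD 0x7f

static uint32_t queue_ptr;		/* stands in for the HW pointer */

static void ptr_add(uint32_t val)
{
	while (val > MAX_ADD) {		/* one register write per chunk */
		queue_ptr += MAX_ADD;
		val -= MAX_ADD;
	}
	queue_ptr += val;		/* final partial write */
}

int main(void)
{
	ptr_add(300);			/* 0x7f + 0x7f + 46 */
	printf("%u\n", queue_ptr);	/* prints 300 */
	return 0;
}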
+ */ +static inline int +nfp_net_dev_atomic_write_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = &dev->data->dev_link; + struct rte_eth_link *src = link; + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +static void +nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq) +{ + unsigned i; + + if (rxq->rxbufs == NULL) + return; + + for (i = 0; i < rxq->rx_count; i++) { + if (rxq->rxbufs[i].mbuf) { + rte_pktmbuf_free_seg(rxq->rxbufs[i].mbuf); + rxq->rxbufs[i].mbuf = NULL; + } + } +} + +static void +nfp_net_rx_queue_release(void *rx_queue) +{ + struct nfp_net_rxq *rxq = rx_queue; + + if (rxq) { + nfp_net_rx_queue_release_mbufs(rxq); + rte_free(rxq->rxbufs); + rte_free(rxq); + } +} + +static void +nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq) +{ + nfp_net_rx_queue_release_mbufs(rxq); + rxq->wr_p = 0; + rxq->rd_p = 0; + rxq->nb_rx_hold = 0; +} + +static void +nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq) +{ + unsigned i; + + if (txq->txbufs == NULL) + return; + + for (i = 0; i < txq->tx_count; i++) { + if (txq->txbufs[i].mbuf) { + rte_pktmbuf_free_seg(txq->txbufs[i].mbuf); + txq->txbufs[i].mbuf = NULL; + } + } +} + +static void +nfp_net_tx_queue_release(void *tx_queue) +{ + struct nfp_net_txq *txq = tx_queue; + + if (txq) { + nfp_net_tx_queue_release_mbufs(txq); + rte_free(txq->txbufs); + rte_free(txq); + } +} + +static void +nfp_net_reset_tx_queue(struct nfp_net_txq *txq) +{ + nfp_net_tx_queue_release_mbufs(txq); + txq->wr_p = 0; + txq->rd_p = 0; + txq->tail = 0; + txq->qcp_rd_p = 0; +} + +static int +__nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update) +{ + int cnt; + uint32_t new; + struct timespec wait; + + PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...\n", + hw->qcp_cfg); + + if (hw->qcp_cfg == NULL) + rte_panic("Bad configuration queue pointer\n"); + + nfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1); + + wait.tv_sec = 0; + wait.tv_nsec = 1000000; + + PMD_DRV_LOG(DEBUG, "Polling for update ack...\n"); + + /* Poll update field, waiting for NFP to ack the config */ + for (cnt = 0; ; cnt++) { + new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE); + if (new == 0) + break; + if (new & NFP_NET_CFG_UPDATE_ERR) { + PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x\n", new); + return -1; + } + if (cnt >= NFP_NET_POLL_TIMEOUT) { + PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after" + " %dms\n", update, cnt); + rte_panic("Exiting\n"); + } + nanosleep(&wait, 0); /* waiting for a 1ms */ + } + PMD_DRV_LOG(DEBUG, "Ack DONE\n"); + return 0; +} + +/* + * Reconfigure the NIC + * @nn: device to reconfigure + * @ctrl: The value for the ctrl field in the BAR config + * @update: The value for the update field in the BAR config + * + * Write the update word to the BAR and ping the reconfig queue. Then poll + * until the firmware has acknowledged the update by zeroing the update word. + */ +static int +nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update) +{ + uint32_t err; + + PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x\n", + ctrl, update); + + nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl); + nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update); + + rte_wmb(); + + err = __nfp_net_reconfig(hw, update); + + if (!err) + return 0; + + /* + * Reconfig errors imply situations where they can be handled. 
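/*
 * nfp_net_reconfig()/__nfp_net_reconfig() above implement a simple ack
 * handshake: write the new ctrl and update words, ring the reconfig queue,
 * then poll the update word until the firmware zeroes it, giving up on an
 * error bit or after a timeout. A schematic version of that polling loop;
 * read_update(), ERR_BIT and TIMEOUT_MS are hypothetical stand-ins for the
 * BAR accessors and constants used above:
 */
#include <stdint.h>

#define ERR_BIT    (1u << 31)
#define TIMEOUT_MS 5000

static uint32_t update_word;			/* fake config BAR word */
static uint32_t read_update(void) { return update_word; }
static void sleep_1ms(void) { /* nanosleep() in the real driver */ }

/* Return 0 once acknowledged, -1 on firmware error or timeout. */
static int wait_for_ack(void)
{
	int ms;

	for (ms = 0; ms < TIMEOUT_MS; ms++) {
		uint32_t v = read_update();

		if (v == 0)
			return 0;	/* firmware cleared the word */
		if (v & ERR_BIT)
			return -1;	/* firmware flagged an error */
		sleep_1ms();
	}
	return -1;			/* timed out */
}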
+ * Otherwise, rte_panic is called inside __nfp_net_reconfig + */ + PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x\n", + ctrl, update); + return -EIO; +} + +/* + * Configure an Ethernet device. This function must be invoked first + * before any other function in the Ethernet API. This function can + * also be re-invoked when a device is in the stopped state. + */ +static int +nfp_net_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_conf *dev_conf; + struct rte_eth_rxmode *rxmode; + struct rte_eth_txmode *txmode; + uint32_t new_ctrl = 0; + uint32_t update = 0; + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * A DPDK app sends info about how many queues to use and how + * those queues need to be configured. This is used by the + * DPDK core and it makes sure no more queues than those + * advertised by the driver are requested. This function is + * called after that internal process + */ + + PMD_INIT_LOG(DEBUG, "Configure\n"); + + dev_conf = &dev->data->dev_conf; + rxmode = &dev_conf->rxmode; + txmode = &dev_conf->txmode; + + /* Checking TX mode */ + if (txmode->mq_mode) { + PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported\n"); + return -EINVAL; + } + + /* Checking RX mode */ + if (rxmode->mq_mode & ETH_MQ_RX_RSS) { + if (hw->cap & NFP_NET_CFG_CTRL_RSS) { + update = NFP_NET_CFG_UPDATE_RSS; + new_ctrl = NFP_NET_CFG_CTRL_RSS; + } else { + PMD_INIT_LOG(INFO, "RSS not supported\n"); + return -EINVAL; + } + } + + if (rxmode->split_hdr_size) { + PMD_INIT_LOG(INFO, "rxmode does not support split header\n"); + return -EINVAL; + } + + if (rxmode->hw_ip_checksum) { + if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) { + new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM; + } else { + PMD_INIT_LOG(INFO, "RXCSUM not supported\n"); + return -EINVAL; + } + } + + if (rxmode->hw_vlan_filter) { + PMD_INIT_LOG(INFO, "VLAN filter not supported\n"); + return -EINVAL; + } + + if (rxmode->hw_vlan_strip) { + if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) { + new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN; + } else { + PMD_INIT_LOG(INFO, "hw vlan strip not supported\n"); + return -EINVAL; + } + } + + if (rxmode->hw_vlan_extend) { + PMD_INIT_LOG(INFO, "VLAN extended not supported\n"); + return -EINVAL; + } + + /* Supporting VLAN insertion by default */ + if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN) + new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN; + + if (rxmode->jumbo_frame) + /* this is handled in rte_eth_dev_configure */ + + if (rxmode->hw_strip_crc) { + PMD_INIT_LOG(INFO, "strip CRC not supported\n"); + return -EINVAL; + } + + if (rxmode->enable_scatter) { + PMD_INIT_LOG(INFO, "Scatter not supported\n"); + return -EINVAL; + } + + if (!new_ctrl) + return 0; + + update |= NFP_NET_CFG_UPDATE_GEN; + + nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl); + if (nfp_net_reconfig(hw, new_ctrl, update) < 0) + return -EIO; + + hw->ctrl = new_ctrl; + + return 0; +} + +static void +nfp_net_enable_queues(struct rte_eth_dev *dev) +{ + struct nfp_net_hw *hw; + uint64_t enabled_queues = 0; + int i; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Enabling the required TX queues in the device */ + for (i = 0; i < dev->data->nb_tx_queues; i++) + enabled_queues |= (1 << i); + + nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues); + + enabled_queues = 0; + + /* Enabling the required RX queues in the device */ + for (i = 0; i < dev->data->nb_rx_queues; i++) + enabled_queues |= (1 << i); + + nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues); +} + +static void +nfp_net_disable_queues(struct 
rte_eth_dev *dev) +{ + struct nfp_net_hw *hw; + uint32_t new_ctrl, update = 0; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0); + nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0); + + new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE; + update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING | + NFP_NET_CFG_UPDATE_MSIX; + + if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG) + new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG; + + /* If an error when reconfig we avoid to change hw state */ + if (nfp_net_reconfig(hw, new_ctrl, update) < 0) + return; + + hw->ctrl = new_ctrl; +} + +static int +nfp_net_rx_freelist_setup(struct rte_eth_dev *dev) +{ + int i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) < 0) + return -1; + } + return 0; +} + +static void +nfp_net_params_setup(struct nfp_net_hw *hw) +{ + uint32_t *mac_address; + + nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu); + nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz); + + /* A MAC address is 8 bytes long */ + mac_address = (uint32_t *)(hw->mac_addr); + + nn_cfg_writel(hw, NFP_NET_CFG_MACADDR, + rte_cpu_to_be_32(*mac_address)); + nn_cfg_writel(hw, NFP_NET_CFG_MACADDR + 4, + rte_cpu_to_be_32(*(mac_address + 4))); +} + +static void +nfp_net_cfg_queue_setup(struct nfp_net_hw *hw) +{ + hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ; +} + +static int +nfp_net_start(struct rte_eth_dev *dev) +{ + uint32_t new_ctrl, update = 0; + struct nfp_net_hw *hw; + int ret; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_LOG(DEBUG, "Start\n"); + + /* Disabling queues just in case... */ + nfp_net_disable_queues(dev); + + /* Writing configuration parameters in the device */ + nfp_net_params_setup(hw); + + /* Enabling the required queues in the device */ + nfp_net_enable_queues(dev); + + /* Enable device */ + new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE | NFP_NET_CFG_UPDATE_MSIX; + update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING; + + if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG) + new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG; + + nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl); + if (nfp_net_reconfig(hw, new_ctrl, update) < 0) + return -EIO; + + /* + * Allocating rte mbuffs for configured rx queues. + * This requires queues being enabled before + */ + if (nfp_net_rx_freelist_setup(dev) < 0) { + ret = -ENOMEM; + goto error; + } + + hw->ctrl = new_ctrl; + + return 0; + +error: + /* + * An error returned by this function should mean the app + * exiting and then the system releasing all the memory + * allocated even memory coming from hugepages. + * + * The device could be enabled at this point with some queues + * ready for getting packets. This is true if the call to + * nfp_net_rx_freelist_setup() succeeds for some queues but + * fails for subsequent queues. + * + * This should make the app exiting but better if we tell the + * device first. + */ + nfp_net_disable_queues(dev); + + return ret; +} + +/* Stop device: disable rx and tx functions to allow for reconfiguring. */ +static void +nfp_net_stop(struct rte_eth_dev *dev) +{ + int i; + + PMD_INIT_LOG(DEBUG, "Stop\n"); + + nfp_net_disable_queues(dev); + + /* Clear queues */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + nfp_net_reset_tx_queue( + (struct nfp_net_txq *)dev->data->tx_queues[i]); + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + nfp_net_reset_rx_queue( + (struct nfp_net_rxq *)dev->data->rx_queues[i]); + } +} + +/* Reset and stop device. 
The device can not be restarted. */ +static void +nfp_net_close(struct rte_eth_dev *dev) +{ + struct nfp_net_hw *hw; + + PMD_INIT_LOG(DEBUG, "Close\n"); + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * We assume that the DPDK application is stopping all the + * threads/queues before calling the device close function. + */ + + nfp_net_stop(dev); + + rte_intr_disable(&dev->pci_dev->intr_handle); + nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff); + + /* + * The ixgbe PMD driver disables the pcie master on the + * device. The i40e does not... + */ +} + +static void +nfp_net_promisc_enable(struct rte_eth_dev *dev) +{ + uint32_t new_ctrl, update = 0; + struct nfp_net_hw *hw; + + PMD_DRV_LOG(DEBUG, "Promiscuous mode enable\n"); + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) { + PMD_INIT_LOG(INFO, "Promiscuous mode not supported\n"); + return; + } + + if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) { + PMD_DRV_LOG(INFO, "Promiscuous mode already enabled\n"); + return; + } + + new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC; + update = NFP_NET_CFG_UPDATE_GEN; + + /* + * DPDK sets promiscuous mode on just after this call assuming + * it can not fail ... + */ + if (nfp_net_reconfig(hw, new_ctrl, update) < 0) + return; + + hw->ctrl = new_ctrl; +} + +static void +nfp_net_promisc_disable(struct rte_eth_dev *dev) +{ + uint32_t new_ctrl, update = 0; + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) { + PMD_DRV_LOG(INFO, "Promiscuous mode already disabled\n"); + return; + } + + new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC; + update = NFP_NET_CFG_UPDATE_GEN; + + /* + * DPDK sets promiscuous mode off just before this call + * assuming it can not fail ... + */ + if (nfp_net_reconfig(hw, new_ctrl, update) < 0) + return; + + hw->ctrl = new_ctrl; +} + +/* + * return 0 means link status changed, -1 means not changed + * + * Wait to complete is needed as it can take up to 9 seconds to get the Link + * status. 
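+ *
+ * wait_to_complete is ignored here: the VF simply reports 40G full duplex
+ * whenever the NFP_NET_CFG_STS link bit is set.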
+ */ +static int +nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) +{ + struct nfp_net_hw *hw; + struct rte_eth_link link, old; + uint32_t nn_link_status; + + PMD_DRV_LOG(DEBUG, "Link update\n"); + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + memset(&old, 0, sizeof(old)); + nfp_net_dev_atomic_read_link_status(dev, &old); + + nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS); + + memset(&link, 0, sizeof(struct rte_eth_link)); + + if (nn_link_status & NFP_NET_CFG_STS_LINK) + link.link_status = ETH_LINK_UP; + + link.link_duplex = ETH_LINK_FULL_DUPLEX; + /* Other cards can limit the tx and rx rate per VF */ + link.link_speed = ETH_SPEED_NUM_40G; + + if (old.link_status != link.link_status) { + nfp_net_dev_atomic_write_link_status(dev, &link); + if (link.link_status) + PMD_DRV_LOG(INFO, "NIC Link is Up\n"); + else + PMD_DRV_LOG(INFO, "NIC Link is Down\n"); + return 0; + } + + return -1; +} + +static void +nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + int i; + struct nfp_net_hw *hw; + struct rte_eth_stats nfp_dev_stats; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */ + + /* reading per RX ring stats */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS) + break; + + nfp_dev_stats.q_ipackets[i] = + nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i)); + + nfp_dev_stats.q_ipackets[i] -= + hw->eth_stats_base.q_ipackets[i]; + + nfp_dev_stats.q_ibytes[i] = + nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8); + + nfp_dev_stats.q_ibytes[i] -= + hw->eth_stats_base.q_ibytes[i]; + } + + /* reading per TX ring stats */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS) + break; + + nfp_dev_stats.q_opackets[i] = + nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i)); + + nfp_dev_stats.q_opackets[i] -= + hw->eth_stats_base.q_opackets[i]; + + nfp_dev_stats.q_obytes[i] = + nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8); + + nfp_dev_stats.q_obytes[i] -= + hw->eth_stats_base.q_obytes[i]; + } + + nfp_dev_stats.ipackets = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES); + + nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets; + + nfp_dev_stats.ibytes = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS); + + nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes; + + nfp_dev_stats.opackets = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES); + + nfp_dev_stats.opackets -= hw->eth_stats_base.opackets; + + nfp_dev_stats.obytes = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS); + + nfp_dev_stats.obytes -= hw->eth_stats_base.obytes; + + nfp_dev_stats.imcasts = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES); + + nfp_dev_stats.imcasts -= hw->eth_stats_base.imcasts; + + /* reading general device stats */ + nfp_dev_stats.ierrors = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS); + + nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors; + + nfp_dev_stats.oerrors = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS); + + nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors; + + /* Multicast frames received */ + nfp_dev_stats.imcasts = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES); + + nfp_dev_stats.imcasts -= hw->eth_stats_base.imcasts; + + /* RX ring mbuf allocation failures */ + nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed; + + nfp_dev_stats.imissed = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS); + + nfp_dev_stats.imissed -= hw->eth_stats_base.imissed; + + if (stats) + memcpy(stats, &nfp_dev_stats, 
sizeof(*stats)); +} + +static void +nfp_net_stats_reset(struct rte_eth_dev *dev) +{ + int i; + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * hw->eth_stats_base records the per counter starting point. + * Lets update it now + */ + + /* reading per RX ring stats */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS) + break; + + hw->eth_stats_base.q_ipackets[i] = + nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i)); + + hw->eth_stats_base.q_ibytes[i] = + nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8); + } + + /* reading per TX ring stats */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS) + break; + + hw->eth_stats_base.q_opackets[i] = + nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i)); + + hw->eth_stats_base.q_obytes[i] = + nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8); + } + + hw->eth_stats_base.ipackets = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES); + + hw->eth_stats_base.ibytes = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS); + + hw->eth_stats_base.opackets = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES); + + hw->eth_stats_base.obytes = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS); + + hw->eth_stats_base.imcasts = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES); + + /* reading general device stats */ + hw->eth_stats_base.ierrors = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS); + + hw->eth_stats_base.oerrors = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS); + + /* Multicast frames received */ + hw->eth_stats_base.imcasts = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES); + + /* RX ring mbuf allocation failures */ + dev->data->rx_mbuf_alloc_failed = 0; + + hw->eth_stats_base.imissed = + nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS); +} + +static void +nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + dev_info->driver_name = dev->driver->pci_drv.name; + dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues; + dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues; + dev_info->min_rx_bufsize = ETHER_MIN_MTU; + dev_info->max_rx_pktlen = hw->mtu; + /* Next should change when PF support is implemented */ + dev_info->max_mac_addrs = 1; + + if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP; + + if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + + if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN) + dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT; + + if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM) + dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = DEFAULT_RX_PTHRESH, + .hthresh = DEFAULT_RX_HTHRESH, + .wthresh = DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = DEFAULT_TX_PTHRESH, + .hthresh = DEFAULT_TX_HTHRESH, + .wthresh = DEFAULT_TX_WTHRESH, + }, + .tx_free_thresh = DEFAULT_TX_FREE_THRESH, + .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH, + .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | + ETH_TXQ_FLAGS_NOOFFLOADS, + }; + + dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ; + dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ; + + 
dev_info->speed_capa = ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G; +} + +static const uint32_t * +nfp_net_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + /* refers to nfp_net_set_hash() */ + RTE_PTYPE_INNER_L3_IPV4, + RTE_PTYPE_INNER_L3_IPV6, + RTE_PTYPE_INNER_L3_IPV6_EXT, + RTE_PTYPE_INNER_L4_MASK, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == nfp_net_recv_pkts) + return ptypes; + return NULL; +} + +static uint32_t +nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx) +{ + struct nfp_net_rxq *rxq; + struct nfp_net_rx_desc *rxds; + uint32_t idx; + uint32_t count; + + rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx]; + + if (rxq == NULL) { + PMD_INIT_LOG(ERR, "Bad queue: %u\n", queue_idx); + return 0; + } + + idx = rxq->rd_p % rxq->rx_count; + rxds = &rxq->rxds[idx]; + + count = 0; + + /* + * Other PMDs are just checking the DD bit in intervals of 4 + * descriptors and counting all four if the first has the DD + * bit on. Of course, this is not accurate but can be good for + * perfomance. But ideally that should be done in descriptors + * chunks belonging to the same cache line + */ + + while (count < rxq->rx_count) { + rxds = &rxq->rxds[idx]; + if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0) + break; + + count++; + idx++; + + /* Wrapping? */ + if ((idx) == rxq->rx_count) + idx = 0; + } + + return count; +} + +static void +nfp_net_dev_link_status_print(struct rte_eth_dev *dev) +{ + struct rte_eth_link link; + + memset(&link, 0, sizeof(link)); + nfp_net_dev_atomic_read_link_status(dev, &link); + if (link.link_status) + RTE_LOG(INFO, PMD, "Port %d: Link Up - speed %u Mbps - %s\n", + (int)(dev->data->port_id), (unsigned)link.link_speed, + link.link_duplex == ETH_LINK_FULL_DUPLEX + ? "full-duplex" : "half-duplex"); + else + RTE_LOG(INFO, PMD, " Port %d: Link Down\n", + (int)(dev->data->port_id)); + + RTE_LOG(INFO, PMD, "PCI Address: %04d:%02d:%02d:%d\n", + dev->pci_dev->addr.domain, dev->pci_dev->addr.bus, + dev->pci_dev->addr.devid, dev->pci_dev->addr.function); +} + +/* Interrupt configuration and handling */ + +/* + * nfp_net_irq_unmask - Unmask an interrupt + * + * If MSI-X auto-masking is enabled clear the mask bit, otherwise + * clear the ICR for the entry. 
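+ *
+ * Only the link state change (LSC) entry is unmasked by this function.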
+ */ +static void +nfp_net_irq_unmask(struct rte_eth_dev *dev) +{ + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) { + /* If MSI-X auto-masking is used, clear the entry */ + rte_wmb(); + rte_intr_enable(&dev->pci_dev->intr_handle); + } else { + /* Make sure all updates are written before un-masking */ + rte_wmb(); + nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX), + NFP_NET_CFG_ICR_UNMASKED); + } +} + +static void +nfp_net_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, + void *param) +{ + int64_t timeout; + struct rte_eth_link link; + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!\n"); + + /* get the link status */ + memset(&link, 0, sizeof(link)); + nfp_net_dev_atomic_read_link_status(dev, &link); + + nfp_net_link_update(dev, 0); + + /* likely to up */ + if (!link.link_status) { + /* handle it 1 sec later, wait it being stable */ + timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT; + /* likely to down */ + } else { + /* handle it 4 sec later, wait it being stable */ + timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT; + } + + if (rte_eal_alarm_set(timeout * 1000, + nfp_net_dev_interrupt_delayed_handler, + (void *)dev) < 0) { + RTE_LOG(ERR, PMD, "Error setting alarm"); + /* Unmasking */ + nfp_net_irq_unmask(dev); + } +} + +/* + * Interrupt handler which shall be registered for alarm callback for delayed + * handling specific interrupt to wait for the stable nic state. As the NIC + * interrupt state is not stable for nfp after link is just down, it needs + * to wait 4 seconds to get the stable status. + * + * @param handle Pointer to interrupt handle. + * @param param The address of parameter (struct rte_eth_dev *) + * + * @return void + */ +static void +nfp_net_dev_interrupt_delayed_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + nfp_net_link_update(dev, 0); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); + + nfp_net_dev_link_status_print(dev); + + /* Unmasking */ + nfp_net_irq_unmask(dev); +} + +static int +nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* check that mtu is within the allowed range */ + if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu)) + return -EINVAL; + + /* switch to jumbo mode if needed */ + if ((uint32_t)mtu > ETHER_MAX_LEN) + dev->data->dev_conf.rxmode.jumbo_frame = 1; + else + dev->data->dev_conf.rxmode.jumbo_frame = 0; + + /* update max frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu; + + /* writing to configuration space */ + nn_cfg_writel(hw, NFP_NET_CFG_MTU, (uint32_t)mtu); + + hw->mtu = mtu; + + return 0; +} + +static int +nfp_net_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + const struct rte_memzone *tz; + struct nfp_net_rxq *rxq; + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + /* Validating number of descriptors */ + if (((nb_desc * sizeof(struct nfp_net_rx_desc)) % 128) != 0 || + (nb_desc > NFP_NET_MAX_RX_DESC) || + (nb_desc < NFP_NET_MIN_RX_DESC)) { + RTE_LOG(ERR, PMD, "Wrong nb_desc value\n"); + return -EINVAL; + } + + /* + * Free memory prior to re-allocation if needed. 
This is the case after + * calling nfp_net_stop + */ + if (dev->data->rx_queues[queue_idx]) { + nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]); + dev->data->rx_queues[queue_idx] = NULL; + } + + /* Allocating rx queue data structure */ + rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq), + RTE_CACHE_LINE_SIZE, socket_id); + if (rxq == NULL) + return -ENOMEM; + + /* Hw queues mapping based on firmware confifguration */ + rxq->qidx = queue_idx; + rxq->fl_qcidx = queue_idx * hw->stride_rx; + rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1); + rxq->qcp_fl = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->fl_qcidx); + rxq->qcp_rx = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->rx_qcidx); + + /* + * Tracking mbuf size for detecting a potential mbuf overflow due to + * RX offset + */ + rxq->mem_pool = mp; + rxq->mbuf_size = rxq->mem_pool->elt_size; + rxq->mbuf_size -= (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM); + hw->flbufsz = rxq->mbuf_size; + + rxq->rx_count = nb_desc; + rxq->port_id = dev->data->port_id; + rxq->rx_free_thresh = rx_conf->rx_free_thresh; + rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 + : ETHER_CRC_LEN); + rxq->drop_en = rx_conf->rx_drop_en; + + /* + * Allocate RX ring hardware descriptors. A memzone large enough to + * handle the maximum ring size is allocated in order to allow for + * resizing in later calls to the queue setup function. + */ + tz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, + sizeof(struct nfp_net_rx_desc) * + NFP_NET_MAX_RX_DESC, socket_id); + + if (tz == NULL) { + RTE_LOG(ERR, PMD, "Error allocatig rx dma\n"); + nfp_net_rx_queue_release(rxq); + return -ENOMEM; + } + + /* Saving physical and virtual addresses for the RX ring */ + rxq->dma = (uint64_t)tz->phys_addr; + rxq->rxds = (struct nfp_net_rx_desc *)tz->addr; + + /* mbuf pointers array for referencing mbufs linked to RX descriptors */ + rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs", + sizeof(*rxq->rxbufs) * nb_desc, + RTE_CACHE_LINE_SIZE, socket_id); + if (rxq->rxbufs == NULL) { + nfp_net_rx_queue_release(rxq); + return -ENOMEM; + } + + PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n", + rxq->rxbufs, rxq->rxds, (unsigned long int)rxq->dma); + + nfp_net_reset_rx_queue(rxq); + + dev->data->rx_queues[queue_idx] = rxq; + rxq->hw = hw; + + /* + * Telling the HW about the physical address of the RX ring and number + * of descriptors in log2 format + */ + nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma); + nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), log2(nb_desc)); + + return 0; +} + +static int +nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq) +{ + struct nfp_net_rx_buff *rxe = rxq->rxbufs; + uint64_t dma_addr; + unsigned i; + + PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors\n", + rxq->rx_count); + + for (i = 0; i < rxq->rx_count; i++) { + struct nfp_net_rx_desc *rxd; + struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool); + + if (mbuf == NULL) { + RTE_LOG(ERR, PMD, "RX mbuf alloc failed queue_id=%u\n", + (unsigned)rxq->qidx); + return -ENOMEM; + } + + dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf)); + + rxd = &rxq->rxds[i]; + rxd->fld.dd = 0; + rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff; + rxd->fld.dma_addr_lo = dma_addr & 0xffffffff; + rxe[i].mbuf = mbuf; + PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64 "\n", i, dma_addr); + + rxq->wr_p++; + } + + /* Make sure all writes are flushed before telling the hardware */ + rte_wmb(); + + /* Not advertising the whole ring as the firmware gets confused if so 
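(presumably because a completely full freelist would be indistinguishable from an empty one)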
*/ + PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u\n", + rxq->rx_count - 1); + + nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1); + + return 0; +} + +static int +nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + const struct rte_memzone *tz; + struct nfp_net_txq *txq; + uint16_t tx_free_thresh; + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + /* Validating number of descriptors */ + if (((nb_desc * sizeof(struct nfp_net_tx_desc)) % 128) != 0 || + (nb_desc > NFP_NET_MAX_TX_DESC) || + (nb_desc < NFP_NET_MIN_TX_DESC)) { + RTE_LOG(ERR, PMD, "Wrong nb_desc value\n"); + return -EINVAL; + } + + tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ? + tx_conf->tx_free_thresh : + DEFAULT_TX_FREE_THRESH); + + if (tx_free_thresh > (nb_desc)) { + RTE_LOG(ERR, PMD, + "tx_free_thresh must be less than the number of TX " + "descriptors. (tx_free_thresh=%u port=%d " + "queue=%d)\n", (unsigned int)tx_free_thresh, + (int)dev->data->port_id, (int)queue_idx); + return -(EINVAL); + } + + /* + * Free memory prior to re-allocation if needed. This is the case after + * calling nfp_net_stop + */ + if (dev->data->tx_queues[queue_idx]) { + PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d\n", + queue_idx); + nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]); + dev->data->tx_queues[queue_idx] = NULL; + } + + /* Allocating tx queue data structure */ + txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq), + RTE_CACHE_LINE_SIZE, socket_id); + if (txq == NULL) { + RTE_LOG(ERR, PMD, "Error allocating tx dma\n"); + return -ENOMEM; + } + + /* + * Allocate TX ring hardware descriptors. A memzone large enough to + * handle the maximum ring size is allocated in order to allow for + * resizing in later calls to the queue setup function. 
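+ * ring_dma_zone_reserve() just returns the already existing memzone on
+ * subsequent calls.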
+ */ + tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx, + sizeof(struct nfp_net_tx_desc) * + NFP_NET_MAX_TX_DESC, socket_id); + if (tz == NULL) { + RTE_LOG(ERR, PMD, "Error allocating tx dma\n"); + nfp_net_tx_queue_release(txq); + return -ENOMEM; + } + + txq->tx_count = nb_desc; + txq->tail = 0; + txq->tx_free_thresh = tx_free_thresh; + txq->tx_pthresh = tx_conf->tx_thresh.pthresh; + txq->tx_hthresh = tx_conf->tx_thresh.hthresh; + txq->tx_wthresh = tx_conf->tx_thresh.wthresh; + + /* queue mapping based on firmware configuration */ + txq->qidx = queue_idx; + txq->tx_qcidx = queue_idx * hw->stride_tx; + txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx); + + txq->port_id = dev->data->port_id; + txq->txq_flags = tx_conf->txq_flags; + + /* Saving physical and virtual addresses for the TX ring */ + txq->dma = (uint64_t)tz->phys_addr; + txq->txds = (struct nfp_net_tx_desc *)tz->addr; + + /* mbuf pointers array for referencing mbufs linked to TX descriptors */ + txq->txbufs = rte_zmalloc_socket("txq->txbufs", + sizeof(*txq->txbufs) * nb_desc, + RTE_CACHE_LINE_SIZE, socket_id); + if (txq->txbufs == NULL) { + nfp_net_tx_queue_release(txq); + return -ENOMEM; + } + PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n", + txq->txbufs, txq->txds, (unsigned long int)txq->dma); + + nfp_net_reset_tx_queue(txq); + + dev->data->tx_queues[queue_idx] = txq; + txq->hw = hw; + + /* + * Telling the HW about the physical address of the TX ring and number + * of descriptors in log2 format + */ + nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma); + nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), log2(nb_desc)); + + return 0; +} + +/* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */ +static inline void +nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd, + struct rte_mbuf *mb) +{ + uint64_t ol_flags; + struct nfp_net_hw *hw = txq->hw; + + if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM)) + return; + + ol_flags = mb->ol_flags; + + /* IPv6 does not need checksum */ + if (ol_flags & PKT_TX_IP_CKSUM) + txd->flags |= PCIE_DESC_TX_IP4_CSUM; + + switch (ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_UDP_CKSUM: + txd->flags |= PCIE_DESC_TX_UDP_CSUM; + break; + case PKT_TX_TCP_CKSUM: + txd->flags |= PCIE_DESC_TX_TCP_CSUM; + break; + } + + if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)) + txd->flags |= PCIE_DESC_TX_CSUM; +} + +/* nfp_net_rx_cksum - set mbuf checksum flags based on RX descriptor flags */ +static inline void +nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd, + struct rte_mbuf *mb) +{ + struct nfp_net_hw *hw = rxq->hw; + + if (!(hw->ctrl & NFP_NET_CFG_CTRL_RXCSUM)) + return; + + /* If IPv4 and IP checksum error, fail */ + if ((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) && + !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK)) + mb->ol_flags |= PKT_RX_IP_CKSUM_BAD; + + /* If neither UDP nor TCP return */ + if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) && + !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM)) + return; + + if ((rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) && + !(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK)) + mb->ol_flags |= PKT_RX_L4_CKSUM_BAD; + + if ((rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM) && + !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK)) + mb->ol_flags |= PKT_RX_L4_CKSUM_BAD; +} + +#define NFP_HASH_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 4) +#define NFP_HASH_TYPE_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 8) + +/* + * nfp_net_set_hash - Set mbuf hash data + * + * The RSS hash and hash-type are pre-pended to the 
packet data. + * Extract and decode it and set the mbuf fields. + */ +static inline void +nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd, + struct rte_mbuf *mbuf) +{ + uint32_t hash; + uint32_t hash_type; + struct nfp_net_hw *hw = rxq->hw; + + if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) + return; + + if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS)) + return; + + hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET); + hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET); + + /* + * hash type is sharing the same word with input port info + * 31-8: input port + * 7:0: hash type + */ + hash_type &= 0xff; + mbuf->hash.rss = hash; + mbuf->ol_flags |= PKT_RX_RSS_HASH; + + switch (hash_type) { + case NFP_NET_RSS_IPV4: + mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4; + break; + case NFP_NET_RSS_IPV6: + mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6; + break; + case NFP_NET_RSS_IPV6_EX: + mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT; + break; + default: + mbuf->packet_type |= RTE_PTYPE_INNER_L4_MASK; + } +} + +/* nfp_net_check_port - Set mbuf in_port field */ +static void +nfp_net_check_port(struct nfp_net_rx_desc *rxd, struct rte_mbuf *mbuf) +{ + uint32_t port; + + if (!(rxd->rxd.flags & PCIE_DESC_RX_INGRESS_PORT)) { + mbuf->port = 0; + return; + } + + port = rte_be_to_cpu_32(*(uint32_t *)((uint8_t *)mbuf->buf_addr + + mbuf->data_off - 8)); + + /* + * hash type is sharing the same word with input port info + * 31-8: input port + * 7:0: hash type + */ + port = (uint8_t)(port >> 8); + mbuf->port = port; +} + +static inline void +nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq) +{ + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; +} + +#define NFP_DESC_META_LEN(d) (d->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK) + +/* + * RX path design: + * + * There are some decissions to take: + * 1) How to check DD RX descriptors bit + * 2) How and when to allocate new mbufs + * + * Current implementation checks just one single DD bit each loop. As each + * descriptor is 8 bytes, it is likely a good idea to check descriptors in + * a single cache line instead. Tests with this change have not shown any + * performance improvement but it requires further investigation. For example, + * depending on which descriptor is next, the number of descriptors could be + * less than 8 for just checking those in the same cache line. This implies + * extra work which could be counterproductive by itself. Indeed, last firmware + * changes are just doing this: writing several descriptors with the DD bit + * for saving PCIe bandwidth and DMA operations from the NFP. + * + * Mbuf allocation is done when a new packet is received. Then the descriptor + * is automatically linked with the new mbuf and the old one is given to the + * user. The main drawback with this design is mbuf allocation is heavier than + * using bulk allocations allowed by DPDK with rte_mempool_get_bulk. From the + * cache point of view it does not seem allocating the mbuf early on as we are + * doing now have any benefit at all. Again, tests with this change have not + * shown any improvement. 
Also, rte_mempool_get_bulk returns all or nothing + * so looking at the implications of this type of allocation should be studied + * deeply + */ + +static uint16_t +nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct nfp_net_rxq *rxq; + struct nfp_net_rx_desc *rxds; + struct nfp_net_rx_buff *rxb; + struct nfp_net_hw *hw; + struct rte_mbuf *mb; + struct rte_mbuf *new_mb; + int idx; + uint16_t nb_hold; + uint64_t dma_addr; + int avail; + + rxq = rx_queue; + if (unlikely(rxq == NULL)) { + /* + * DPDK just checks the queue is lower than max queues + * enabled. But the queue needs to be configured + */ + RTE_LOG(ERR, PMD, "RX Bad queue\n"); + return -EINVAL; + } + + hw = rxq->hw; + avail = 0; + nb_hold = 0; + + while (avail < nb_pkts) { + idx = rxq->rd_p % rxq->rx_count; + + rxb = &rxq->rxbufs[idx]; + if (unlikely(rxb == NULL)) { + RTE_LOG(ERR, PMD, "rxb does not exist!\n"); + break; + } + + /* + * Memory barrier to ensure that we won't do other + * reads before the DD bit. + */ + rte_rmb(); + + rxds = &rxq->rxds[idx]; + if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0) + break; + + /* + * We got a packet. Let's alloc a new mbuff for refilling the + * free descriptor ring as soon as possible + */ + new_mb = rte_pktmbuf_alloc(rxq->mem_pool); + if (unlikely(new_mb == NULL)) { + RTE_LOG(DEBUG, PMD, "RX mbuf alloc failed port_id=%u " + "queue_id=%u\n", (unsigned)rxq->port_id, + (unsigned)rxq->qidx); + nfp_net_mbuf_alloc_failed(rxq); + break; + } + + nb_hold++; + + /* + * Grab the mbuff and refill the descriptor with the + * previously allocated mbuff + */ + mb = rxb->mbuf; + rxb->mbuf = new_mb; + + PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u\n", + rxds->rxd.data_len, rxq->mbuf_size); + + /* Size of this segment */ + mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds); + /* Size of the whole packet. We just support 1 segment */ + mb->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds); + + if (unlikely((mb->data_len + hw->rx_offset) > + rxq->mbuf_size)) { + /* + * This should not happen and the user has the + * responsibility of avoiding it. 
But we have + * to give some info about the error + */ + RTE_LOG(ERR, PMD, + "mbuf overflow likely due to the RX offset.\n" + "\t\tYour mbuf size should have extra space for" + " RX offset=%u bytes.\n" + "\t\tCurrently you just have %u bytes available" + " but the received packet is %u bytes long", + hw->rx_offset, + rxq->mbuf_size - hw->rx_offset, + mb->data_len); + return -EINVAL; + } + + /* Filling the received mbuff with packet info */ + if (hw->rx_offset) + mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset; + else + mb->data_off = RTE_PKTMBUF_HEADROOM + + NFP_DESC_META_LEN(rxds); + + /* No scatter mode supported */ + mb->nb_segs = 1; + mb->next = NULL; + + /* Checking the RSS flag */ + nfp_net_set_hash(rxq, rxds, mb); + + /* Checking the checksum flag */ + nfp_net_rx_cksum(rxq, rxds, mb); + + /* Checking the port flag */ + nfp_net_check_port(rxds, mb); + + if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) && + (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) { + mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan); + mb->ol_flags |= PKT_RX_VLAN_PKT; + } + + /* Adding the mbuff to the mbuff array passed by the app */ + rx_pkts[avail++] = mb; + + /* Now resetting and updating the descriptor */ + rxds->vals[0] = 0; + rxds->vals[1] = 0; + dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb)); + rxds->fld.dd = 0; + rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff; + rxds->fld.dma_addr_lo = dma_addr & 0xffffffff; + + rxq->rd_p++; + } + + if (nb_hold == 0) + return nb_hold; + + PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received\n", + (unsigned)rxq->port_id, (unsigned)rxq->qidx, nb_hold); + + nb_hold += rxq->nb_rx_hold; + + /* + * FL descriptors needs to be written before incrementing the + * FL queue WR pointer + */ + rte_wmb(); + if (nb_hold > rxq->rx_free_thresh) { + PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u\n", + (unsigned)rxq->port_id, (unsigned)rxq->qidx, + (unsigned)nb_hold, (unsigned)avail); + nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + + return avail; +} + +/* + * nfp_net_tx_free_bufs - Check for descriptors with a complete + * status + * @txq: TX queue to work with + * Returns number of descriptors freed + */ +int +nfp_net_tx_free_bufs(struct nfp_net_txq *txq) +{ + uint32_t qcp_rd_p; + int todo; + + PMD_TX_LOG(DEBUG, "queue %u. 
Check for descriptor with a complete" + " status\n", txq->qidx); + + /* Work out how many packets have been sent */ + qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR); + + if (qcp_rd_p == txq->qcp_rd_p) { + PMD_TX_LOG(DEBUG, "queue %u: It seems harrier is not sending " + "packets (%u, %u)\n", txq->qidx, + qcp_rd_p, txq->qcp_rd_p); + return 0; + } + + if (qcp_rd_p > txq->qcp_rd_p) + todo = qcp_rd_p - txq->qcp_rd_p; + else + todo = qcp_rd_p + txq->tx_count - txq->qcp_rd_p; + + PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->qcp_rd_p: %u, qcp->rd_p: %u\n", + qcp_rd_p, txq->qcp_rd_p, txq->rd_p); + + if (todo == 0) + return todo; + + txq->qcp_rd_p += todo; + txq->qcp_rd_p %= txq->tx_count; + txq->rd_p += todo; + + return todo; +} + +/* Leaving always free descriptors for avoiding wrapping confusion */ +#define NFP_FREE_TX_DESC(t) (t->tx_count - (t->wr_p - t->rd_p) - 8) + +/* + * nfp_net_txq_full - Check if the TX queue free descriptors + * is below tx_free_threshold + * + * @txq: TX queue to check + * + * This function uses the host copy* of read/write pointers + */ +static inline +int nfp_net_txq_full(struct nfp_net_txq *txq) +{ + return NFP_FREE_TX_DESC(txq) < txq->tx_free_thresh; +} + +static uint16_t +nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct nfp_net_txq *txq; + struct nfp_net_hw *hw; + struct nfp_net_tx_desc *txds; + struct rte_mbuf *pkt; + uint64_t dma_addr; + int pkt_size, dma_size; + uint16_t free_descs, issued_descs; + struct rte_mbuf **lmbuf; + int i; + + txq = tx_queue; + hw = txq->hw; + txds = &txq->txds[txq->tail]; + + PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets\n", + txq->qidx, txq->tail, nb_pkts); + + if ((NFP_FREE_TX_DESC(txq) < nb_pkts) || (nfp_net_txq_full(txq))) + nfp_net_tx_free_bufs(txq); + + free_descs = (uint16_t)NFP_FREE_TX_DESC(txq); + if (unlikely(free_descs == 0)) + return 0; + + pkt = *tx_pkts; + + i = 0; + issued_descs = 0; + PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets\n", + txq->qidx, nb_pkts); + /* Sending packets */ + while ((i < nb_pkts) && free_descs) { + /* Grabbing the mbuf linked to the current descriptor */ + lmbuf = &txq->txbufs[txq->tail].mbuf; + /* Warming the cache for releasing the mbuf later on */ + RTE_MBUF_PREFETCH_TO_FREE(*lmbuf); + + pkt = *(tx_pkts + i); + + if (unlikely((pkt->nb_segs > 1) && + !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) { + PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set\n"); + rte_panic("Multisegment packet unsupported\n"); + } + + /* Checking if we have enough descriptors */ + if (unlikely(pkt->nb_segs > free_descs)) + goto xmit_end; + + /* + * Checksum and VLAN flags just in the first descriptor for a + * multisegment packet + */ + nfp_net_tx_cksum(txq, txds, pkt); + + if ((pkt->ol_flags & PKT_TX_VLAN_PKT) && + (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) { + txds->flags |= PCIE_DESC_TX_VLAN; + txds->vlan = pkt->vlan_tci; + } + + if (pkt->ol_flags & PKT_TX_TCP_SEG) + rte_panic("TSO is not supported\n"); + + /* + * mbuf data_len is the data in one segment and pkt_len data + * in the whole packet. 
When the packet is just one segment, + * then data_len = pkt_len + */ + pkt_size = pkt->pkt_len; + + while (pkt_size) { + /* Releasing mbuf which was prefetched above */ + if (*lmbuf) + rte_pktmbuf_free_seg(*lmbuf); + + dma_size = pkt->data_len; + dma_addr = rte_mbuf_data_dma_addr(pkt); + PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:" + "%" PRIx64 "\n", dma_addr); + + /* Filling descriptors fields */ + txds->dma_len = dma_size; + txds->data_len = pkt->pkt_len; + txds->dma_addr_hi = (dma_addr >> 32) & 0xff; + txds->dma_addr_lo = (dma_addr & 0xffffffff); + ASSERT(free_descs > 0); + free_descs--; + + /* + * Linking mbuf with descriptor for being released + * next time descriptor is used + */ + *lmbuf = pkt; + + txq->wr_p++; + txq->tail++; + if (unlikely(txq->tail == txq->tx_count)) /* wrapping?*/ + txq->tail = 0; + + pkt_size -= dma_size; + if (!pkt_size) { + /* End of packet */ + txds->offset_eop |= PCIE_DESC_TX_EOP; + } else { + txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK; + pkt = pkt->next; + } + /* Referencing next free TX descriptor */ + txds = &txq->txds[txq->tail]; + issued_descs++; + } + i++; + } + +xmit_end: + /* Increment write pointers. Force memory write before we let HW know */ + rte_wmb(); + nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued_descs); + + return i; +} + +static void +nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + uint32_t new_ctrl, update; + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + new_ctrl = 0; + + if ((mask & ETH_VLAN_FILTER_OFFLOAD) || + (mask & ETH_VLAN_FILTER_OFFLOAD)) + RTE_LOG(INFO, PMD, "Not support for ETH_VLAN_FILTER_OFFLOAD or" + " ETH_VLAN_FILTER_EXTEND"); + + /* Enable vlan strip if it is not configured yet */ + if ((mask & ETH_VLAN_STRIP_OFFLOAD) && + !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) + new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN; + + /* Disable vlan strip just if it is configured */ + if (!(mask & ETH_VLAN_STRIP_OFFLOAD) && + (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) + new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN; + + if (new_ctrl == 0) + return; + + update = NFP_NET_CFG_UPDATE_GEN; + + if (nfp_net_reconfig(hw, new_ctrl, update) < 0) + return; + + hw->ctrl = new_ctrl; +} + +/* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */ +static int +nfp_net_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + uint32_t reta, mask; + int i, j; + int idx, shift; + uint32_t update; + struct nfp_net_hw *hw = + NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) + return -EINVAL; + + if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) { + RTE_LOG(ERR, PMD, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ); + return -EINVAL; + } + + /* + * Update Redirection Table. 
There are 128 8bit-entries which can be + * manage as 32 32bit-entries + */ + for (i = 0; i < reta_size; i += 4) { + /* Handling 4 RSS entries per loop */ + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF); + + if (!mask) + continue; + + reta = 0; + /* If all 4 entries were set, don't need read RETA register */ + if (mask != 0xF) + reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i); + + for (j = 0; j < 4; j++) { + if (!(mask & (0x1 << j))) + continue; + if (mask != 0xF) + /* Clearing the entry bits */ + reta &= ~(0xFF << (8 * j)); + reta |= reta_conf[idx].reta[shift + j] << (8 * j); + } + nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + shift, reta); + } + + update = NFP_NET_CFG_UPDATE_RSS; + + if (nfp_net_reconfig(hw, hw->ctrl, update) < 0) + return -EIO; + + return 0; +} + + /* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */ +static int +nfp_net_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + uint8_t i, j, mask; + int idx, shift; + uint32_t reta; + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) + return -EINVAL; + + if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) { + RTE_LOG(ERR, PMD, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ); + return -EINVAL; + } + + /* + * Reading Redirection Table. There are 128 8bit-entries which can be + * manage as 32 32bit-entries + */ + for (i = 0; i < reta_size; i += 4) { + /* Handling 4 RSS entries per loop */ + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF); + + if (!mask) + continue; + + reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + shift); + for (j = 0; j < 4; j++) { + if (!(mask & (0x1 << j))) + continue; + reta_conf->reta[shift + j] = + (uint8_t)((reta >> (8 * j)) & 0xF); + } + } + return 0; +} + +static int +nfp_net_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + uint32_t update; + uint32_t cfg_rss_ctrl = 0; + uint8_t key; + uint64_t rss_hf; + int i; + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + rss_hf = rss_conf->rss_hf; + + /* Checking if RSS is enabled */ + if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) { + if (rss_hf != 0) { /* Enable RSS? 
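It was not enabled at configure time, so the request cannot be honored.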
*/ + RTE_LOG(ERR, PMD, "RSS unsupported\n"); + return -EINVAL; + } + return 0; /* Nothing to do */ + } + + if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) { + RTE_LOG(ERR, PMD, "hash key too long\n"); + return -EINVAL; + } + + if (rss_hf & ETH_RSS_IPV4) + cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4 | + NFP_NET_CFG_RSS_IPV4_TCP | + NFP_NET_CFG_RSS_IPV4_UDP; + + if (rss_hf & ETH_RSS_IPV6) + cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6 | + NFP_NET_CFG_RSS_IPV6_TCP | + NFP_NET_CFG_RSS_IPV6_UDP; + + /* configuring where to apply the RSS hash */ + nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl); + + /* Writing the key byte a byte */ + for (i = 0; i < rss_conf->rss_key_len; i++) { + memcpy(&key, &rss_conf->rss_key[i], 1); + nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key); + } + + /* Writing the key size */ + nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len); + + update = NFP_NET_CFG_UPDATE_RSS; + + if (nfp_net_reconfig(hw, hw->ctrl, update) < 0) + return -EIO; + + return 0; +} + +static int +nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + uint64_t rss_hf; + uint32_t cfg_rss_ctrl; + uint8_t key; + int i; + struct nfp_net_hw *hw; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) + return -EINVAL; + + rss_hf = rss_conf->rss_hf; + cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL); + + if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4) + rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP; + + if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP) + rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; + + if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP) + rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; + + if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP) + rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; + + if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP) + rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; + + if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6) + rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP; + + /* Reading the key size */ + rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ); + + /* Reading the key byte a byte */ + for (i = 0; i < rss_conf->rss_key_len; i++) { + key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i); + memcpy(&rss_conf->rss_key[i], &key, 1); + } + + return 0; +} + +/* Initialise and register driver with DPDK Application */ +static const struct eth_dev_ops nfp_net_eth_dev_ops = { + .dev_configure = nfp_net_configure, + .dev_start = nfp_net_start, + .dev_stop = nfp_net_stop, + .dev_close = nfp_net_close, + .promiscuous_enable = nfp_net_promisc_enable, + .promiscuous_disable = nfp_net_promisc_disable, + .link_update = nfp_net_link_update, + .stats_get = nfp_net_stats_get, + .stats_reset = nfp_net_stats_reset, + .dev_infos_get = nfp_net_infos_get, + .dev_supported_ptypes_get = nfp_net_supported_ptypes_get, + .mtu_set = nfp_net_dev_mtu_set, + .vlan_offload_set = nfp_net_vlan_offload_set, + .reta_update = nfp_net_reta_update, + .reta_query = nfp_net_reta_query, + .rss_hash_update = nfp_net_rss_hash_update, + .rss_hash_conf_get = nfp_net_rss_hash_conf_get, + .rx_queue_setup = nfp_net_rx_queue_setup, + .rx_queue_release = nfp_net_rx_queue_release, + .rx_queue_count = nfp_net_rx_queue_count, + .tx_queue_setup = nfp_net_tx_queue_setup, + .tx_queue_release = nfp_net_tx_queue_release, +}; + +static int +nfp_net_init(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev; + struct nfp_net_hw *hw; + + uint32_t tx_bar_off, rx_bar_off; + uint32_t start_q; + int stride = 4; + + PMD_INIT_FUNC_TRACE(); + + hw = 
NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + + eth_dev->dev_ops = &nfp_net_eth_dev_ops; + eth_dev->rx_pkt_burst = &nfp_net_recv_pkts; + eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts; + + /* For secondary processes, the primary has done all the work */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + pci_dev = eth_dev->pci_dev; + rte_eth_copy_pci_info(eth_dev, pci_dev); + + hw->device_id = pci_dev->id.device_id; + hw->vendor_id = pci_dev->id.vendor_id; + hw->subsystem_device_id = pci_dev->id.subsystem_device_id; + hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; + + PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u\n", + pci_dev->id.vendor_id, pci_dev->id.device_id, + pci_dev->addr.domain, pci_dev->addr.bus, + pci_dev->addr.devid, pci_dev->addr.function); + + hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr; + if (hw->ctrl_bar == NULL) { + RTE_LOG(ERR, PMD, + "hw->ctrl_bar is NULL. BAR0 not configured\n"); + return -ENODEV; + } + hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS); + hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS); + + /* Work out where in the BAR the queues start. */ + switch (pci_dev->id.device_id) { + case PCI_DEVICE_ID_NFP6000_VF_NIC: + start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ); + tx_bar_off = NFP_PCIE_QUEUE(start_q); + start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ); + rx_bar_off = NFP_PCIE_QUEUE(start_q); + break; + default: + RTE_LOG(ERR, PMD, "nfp_net: no device ID matching\n"); + return -ENODEV; + } + + PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%08x\n", tx_bar_off); + PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%08x\n", rx_bar_off); + + hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off; + hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off; + + PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p\n", + hw->ctrl_bar, hw->tx_bar, hw->rx_bar); + + nfp_net_cfg_queue_setup(hw); + + /* Get some of the read-only fields from the config BAR */ + hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION); + hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP); + hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU); + hw->mtu = hw->max_mtu; + + if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2) + hw->rx_offset = NFP_NET_RX_OFFSET; + else + hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR); + + PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d\n", + hw->ver, hw->max_mtu); + PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s\n", hw->cap, + hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "", + hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "", + hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "", + hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "", + hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "", + hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "", + hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "", + hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "", + hw->cap & NFP_NET_CFG_CTRL_RSS ? 
"RSS " : ""); + + pci_dev = eth_dev->pci_dev; + hw->ctrl = 0; + + hw->stride_rx = stride; + hw->stride_tx = stride; + + PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u\n", + hw->max_rx_queues, hw->max_tx_queues); + + /* Allocating memory for mac addr */ + eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, "Failed to space for MAC address"); + return -ENOMEM; + } + + /* Using random mac addresses for VFs */ + eth_random_addr(&hw->mac_addr[0]); + + /* Copying mac address to DPDK eth_dev struct */ + ether_addr_copy(ð_dev->data->mac_addrs[0], + (struct ether_addr *)hw->mac_addr); + + PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x " + "mac=%02x:%02x:%02x:%02x:%02x:%02x", + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id, + hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2], + hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]); + + /* Registering LSC interrupt handler */ + rte_intr_callback_register(&pci_dev->intr_handle, + nfp_net_dev_interrupt_handler, + (void *)eth_dev); + + /* enable uio intr after callback register */ + rte_intr_enable(&pci_dev->intr_handle); + + /* Telling the firmware about the LSC interrupt entry */ + nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); + + /* Recording current stats counters values */ + nfp_net_stats_reset(eth_dev); + + return 0; +} + +static struct rte_pci_id pci_id_nfp_net_map[] = { + { + .vendor_id = PCI_VENDOR_ID_NETRONOME, + .device_id = PCI_DEVICE_ID_NFP6000_PF_NIC, + .subsystem_vendor_id = PCI_ANY_ID, + .subsystem_device_id = PCI_ANY_ID, + }, + { + .vendor_id = PCI_VENDOR_ID_NETRONOME, + .device_id = PCI_DEVICE_ID_NFP6000_VF_NIC, + .subsystem_vendor_id = PCI_ANY_ID, + .subsystem_device_id = PCI_ANY_ID, + }, + { + .vendor_id = 0, + }, +}; + +static struct eth_driver rte_nfp_net_pmd = { + { + .name = "rte_nfp_net_pmd", + .id_table = pci_id_nfp_net_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + }, + .eth_dev_init = nfp_net_init, + .dev_private_size = sizeof(struct nfp_net_adapter), +}; + +static int +nfp_net_pmd_init(const char *name __rte_unused, + const char *params __rte_unused) +{ + PMD_INIT_FUNC_TRACE(); + PMD_INIT_LOG(INFO, "librte_pmd_nfp_net version %s\n", + NFP_NET_PMD_VERSION); + + rte_eth_driver_register(&rte_nfp_net_pmd); + return 0; +} + +static struct rte_driver rte_nfp_net_driver = { + .type = PMD_PDEV, + .init = nfp_net_pmd_init, +}; + +PMD_REGISTER_DRIVER(rte_nfp_net_driver); + +/* + * Local variables: + * c-file-style: "Linux" + * indent-tabs-mode: t + * End: + */ diff --git a/drivers/net/nfp/nfp_net_ctrl.h b/drivers/net/nfp/nfp_net_ctrl.h new file mode 100644 index 00000000..fce82515 --- /dev/null +++ b/drivers/net/nfp/nfp_net_ctrl.h @@ -0,0 +1,324 @@ +/* + * Copyright (c) 2014, 2015 Netronome Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * vim:shiftwidth=8:noexpandtab + * + * Netronome network device driver: Control BAR layout + */ +#ifndef _NFP_NET_CTRL_H_ +#define _NFP_NET_CTRL_H_ + +/* + * Configuration BAR size. + * + * The configuration BAR is 8K in size, but on the NFP6000, due to + * THB-350, 32k needs to be reserved. + */ +#ifdef __NFP_IS_6000 +#define NFP_NET_CFG_BAR_SZ (32 * 1024) +#else +#define NFP_NET_CFG_BAR_SZ (8 * 1024) +#endif + +/* Offset in Freelist buffer where packet starts on RX */ +#define NFP_NET_RX_OFFSET 32 + +/* Hash type pre-pended when a RSS hash was computed */ +#define NFP_NET_RSS_NONE 0 +#define NFP_NET_RSS_IPV4 1 +#define NFP_NET_RSS_IPV6 2 +#define NFP_NET_RSS_IPV6_EX 3 +#define NFP_NET_RSS_IPV4_TCP 4 +#define NFP_NET_RSS_IPV6_TCP 5 +#define NFP_NET_RSS_IPV6_EX_TCP 6 +#define NFP_NET_RSS_IPV4_UDP 7 +#define NFP_NET_RSS_IPV6_UDP 8 +#define NFP_NET_RSS_IPV6_EX_UDP 9 + +/* + * @NFP_NET_TXR_MAX: Maximum number of TX rings + * @NFP_NET_TXR_MASK: Mask for TX rings + * @NFP_NET_RXR_MAX: Maximum number of RX rings + * @NFP_NET_RXR_MASK: Mask for RX rings + */ +#define NFP_NET_TXR_MAX 64 +#define NFP_NET_TXR_MASK (NFP_NET_TXR_MAX - 1) +#define NFP_NET_RXR_MAX 64 +#define NFP_NET_RXR_MASK (NFP_NET_RXR_MAX - 1) + +/* + * Read/Write config words (0x0000 - 0x002c) + * @NFP_NET_CFG_CTRL: Global control + * @NFP_NET_CFG_UPDATE: Indicate which fields are updated + * @NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings + * @NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings + * @NFP_NET_CFG_MTU: Set MTU size + * @NFP_NET_CFG_FLBUFSZ: Set freelist buffer size (must be larger than MTU) + * @NFP_NET_CFG_EXN: MSI-X table entry for exceptions + * @NFP_NET_CFG_LSC: MSI-X table entry for link state changes + * @NFP_NET_CFG_MACADDR: MAC address + * + * TODO: + * - define Error details in UPDATE + */ +#define NFP_NET_CFG_CTRL 0x0000 +#define NFP_NET_CFG_CTRL_ENABLE (0x1 << 0) /* Global enable */ +#define NFP_NET_CFG_CTRL_PROMISC (0x1 << 1) /* Enable Promisc mode */ +#define NFP_NET_CFG_CTRL_L2BC (0x1 << 2) /* Allow L2 Broadcast */ +#define NFP_NET_CFG_CTRL_L2MC (0x1 << 3) /* Allow L2 Multicast */ +#define NFP_NET_CFG_CTRL_RXCSUM (0x1 << 4) /* Enable RX Checksum */ +#define NFP_NET_CFG_CTRL_TXCSUM (0x1 << 5) /* Enable TX Checksum */ +#define NFP_NET_CFG_CTRL_RXVLAN (0x1 << 6) /* Enable VLAN strip */ +#define NFP_NET_CFG_CTRL_TXVLAN (0x1 << 7) /* Enable VLAN insert */ +#define NFP_NET_CFG_CTRL_SCATTER (0x1 << 8) /* Scatter DMA */ +#define NFP_NET_CFG_CTRL_GATHER (0x1 << 9) /* Gather DMA */ +#define NFP_NET_CFG_CTRL_LSO 
(0x1 << 10) /* LSO/TSO */ +#define NFP_NET_CFG_CTRL_RINGCFG (0x1 << 16) /* Ring runtime changes */ +#define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS */ +#define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */ +#define NFP_NET_CFG_CTRL_RINGPRIO (0x1 << 19) /* Ring priorities */ +#define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */ +#define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring*/ +#define NFP_NET_CFG_CTRL_L2SWITCH (0x1 << 22) /* L2 Switch */ +#define NFP_NET_CFG_CTRL_L2SWITCH_LOCAL (0x1 << 23) /* Switch to local */ +#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* Enable VXLAN */ +#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* Enable NVGRE */ +#define NFP_NET_CFG_UPDATE 0x0004 +#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */ +#define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */ +#define NFP_NET_CFG_UPDATE_RSS (0x1 << 2) /* RSS config change */ +#define NFP_NET_CFG_UPDATE_TXRPRIO (0x1 << 3) /* TX Ring prio change */ +#define NFP_NET_CFG_UPDATE_RXRPRIO (0x1 << 4) /* RX Ring prio change */ +#define NFP_NET_CFG_UPDATE_MSIX (0x1 << 5) /* MSI-X change */ +#define NFP_NET_CFG_UPDATE_L2SWITCH (0x1 << 6) /* Switch changes */ +#define NFP_NET_CFG_UPDATE_RESET (0x1 << 7) /* Update due to FLR */ +#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */ +#define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */ +#define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* A error occurred */ +#define NFP_NET_CFG_TXRS_ENABLE 0x0008 +#define NFP_NET_CFG_RXRS_ENABLE 0x0010 +#define NFP_NET_CFG_MTU 0x0018 +#define NFP_NET_CFG_FLBUFSZ 0x001c +#define NFP_NET_CFG_EXN 0x001f +#define NFP_NET_CFG_LSC 0x0020 +#define NFP_NET_CFG_MACADDR 0x0024 + +/* + * Read-only words (0x0030 - 0x0050): + * @NFP_NET_CFG_VERSION: Firmware version number + * @NFP_NET_CFG_STS: Status + * @NFP_NET_CFG_CAP: Capabilities (same bits as @NFP_NET_CFG_CTRL) + * @NFP_NET_MAX_TXRINGS: Maximum number of TX rings + * @NFP_NET_MAX_RXRINGS: Maximum number of RX rings + * @NFP_NET_MAX_MTU: Maximum support MTU + * @NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only) + * @NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only) + * + * TODO: + * - define more STS bits + */ +#define NFP_NET_CFG_VERSION 0x0030 +#define NFP_NET_CFG_VERSION_RESERVED_MASK (0xff << 24) +#define NFP_NET_CFG_VERSION_CLASS_MASK (0xff << 16) +#define NFP_NET_CFG_VERSION_CLASS(x) (((x) & 0xff) << 16) +#define NFP_NET_CFG_VERSION_CLASS_GENERIC 0 +#define NFP_NET_CFG_VERSION_MAJOR_MASK (0xff << 8) +#define NFP_NET_CFG_VERSION_MAJOR(x) (((x) & 0xff) << 8) +#define NFP_NET_CFG_VERSION_MINOR_MASK (0xff << 0) +#define NFP_NET_CFG_VERSION_MINOR(x) (((x) & 0xff) << 0) +#define NFP_NET_CFG_STS 0x0034 +#define NFP_NET_CFG_STS_LINK (0x1 << 0) /* Link up or down */ +#define NFP_NET_CFG_CAP 0x0038 +#define NFP_NET_CFG_MAX_TXRINGS 0x003c +#define NFP_NET_CFG_MAX_RXRINGS 0x0040 +#define NFP_NET_CFG_MAX_MTU 0x0044 +/* Next two words are being used by VFs for solving THB350 issue */ +#define NFP_NET_CFG_START_TXQ 0x0048 +#define NFP_NET_CFG_START_RXQ 0x004c + +/* + * NFP-3200 workaround (0x0050 - 0x0058) + * @NFP_NET_CFG_SPARE_ADDR: DMA address for ME code to use (e.g. 
YDS-155 fix) + */ +#define NFP_NET_CFG_SPARE_ADDR 0x0050 +/** + * NFP6000/NFP4000 - Prepend configuration + */ +#define NFP_NET_CFG_RX_OFFSET 0x0050 +#define NFP_NET_CFG_RX_OFFSET_DYNAMIC 0 /* Prepend mode */ + +/** + * Reuse spare address to contain the offset from the start of + * the host buffer where the first byte of the received frame + * will land. Any metadata will come prior to that offset. If the + * value in this field is 0, it means that that the metadata will + * always land starting at the first byte of the host buffer and + * packet data will immediately follow the metadata. As always, + * the RX descriptor indicates the presence or absence of metadata + * along with the length thereof. + */ +#define NFP_NET_CFG_RX_OFFSET_ADDR 0x0050 + +#define NFP_NET_CFG_VXLAN_PORT 0x0060 +#define NFP_NET_CFG_VXLAN_SZ 0x0008 + +/* Offload definitions */ +#define NFP_NET_N_VXLAN_PORTS (NFP_NET_CFG_VXLAN_SZ / sizeof(uint16_t)) + +/** + * 64B reserved for future use (0x0080 - 0x00c0) + */ +#define NFP_NET_CFG_RESERVED 0x0080 +#define NFP_NET_CFG_RESERVED_SZ 0x0040 + +/* + * RSS configuration (0x0100 - 0x01ac): + * Used only when NFP_NET_CFG_CTRL_RSS is enabled + * @NFP_NET_CFG_RSS_CFG: RSS configuration word + * @NFP_NET_CFG_RSS_KEY: RSS "secret" key + * @NFP_NET_CFG_RSS_ITBL: RSS indirection table + */ +#define NFP_NET_CFG_RSS_BASE 0x0100 +#define NFP_NET_CFG_RSS_CTRL NFP_NET_CFG_RSS_BASE +#define NFP_NET_CFG_RSS_MASK (0x7f) +#define NFP_NET_CFG_RSS_MASK_of(_x) ((_x) & 0x7f) +#define NFP_NET_CFG_RSS_IPV4 (1 << 8) /* RSS for IPv4 */ +#define NFP_NET_CFG_RSS_IPV6 (1 << 9) /* RSS for IPv6 */ +#define NFP_NET_CFG_RSS_IPV4_TCP (1 << 10) /* RSS for IPv4/TCP */ +#define NFP_NET_CFG_RSS_IPV4_UDP (1 << 11) /* RSS for IPv4/UDP */ +#define NFP_NET_CFG_RSS_IPV6_TCP (1 << 12) /* RSS for IPv6/TCP */ +#define NFP_NET_CFG_RSS_IPV6_UDP (1 << 13) /* RSS for IPv6/UDP */ +#define NFP_NET_CFG_RSS_TOEPLITZ (1 << 24) /* Use Toeplitz hash */ +#define NFP_NET_CFG_RSS_KEY (NFP_NET_CFG_RSS_BASE + 0x4) +#define NFP_NET_CFG_RSS_KEY_SZ 0x28 +#define NFP_NET_CFG_RSS_ITBL (NFP_NET_CFG_RSS_BASE + 0x4 + \ + NFP_NET_CFG_RSS_KEY_SZ) +#define NFP_NET_CFG_RSS_ITBL_SZ 0x80 + +/* + * TX ring configuration (0x200 - 0x800) + * @NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration + * @NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries) + * @NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries) + * @NFP_NET_CFG_TXR_SZ: Per TX ring ring size (1B entries) + * @NFP_NET_CFG_TXR_VEC: Per TX ring MSI-X table entry (1B entries) + * @NFP_NET_CFG_TXR_PRIO: Per TX ring priority (1B entries) + * @NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation (4B entries) + */ +#define NFP_NET_CFG_TXR_BASE 0x0200 +#define NFP_NET_CFG_TXR_ADDR(_x) (NFP_NET_CFG_TXR_BASE + ((_x) * 0x8)) +#define NFP_NET_CFG_TXR_WB_ADDR(_x) (NFP_NET_CFG_TXR_BASE + 0x200 + \ + ((_x) * 0x8)) +#define NFP_NET_CFG_TXR_SZ(_x) (NFP_NET_CFG_TXR_BASE + 0x400 + (_x)) +#define NFP_NET_CFG_TXR_VEC(_x) (NFP_NET_CFG_TXR_BASE + 0x440 + (_x)) +#define NFP_NET_CFG_TXR_PRIO(_x) (NFP_NET_CFG_TXR_BASE + 0x480 + (_x)) +#define NFP_NET_CFG_TXR_IRQ_MOD(_x) (NFP_NET_CFG_TXR_BASE + 0x500 + \ + ((_x) * 0x4)) + +/* + * RX ring configuration (0x0800 - 0x0c00) + * @NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration + * @NFP_NET_CFG_RXR_ADDR: Per TX ring DMA address (8B entries) + * @NFP_NET_CFG_RXR_SZ: Per TX ring ring size (1B entries) + * @NFP_NET_CFG_RXR_VEC: Per TX ring MSI-X table entry (1B entries) + * @NFP_NET_CFG_RXR_PRIO: Per TX ring priority (1B 
entries) + * @NFP_NET_CFG_RXR_IRQ_MOD: Per TX ring interrupt moderation (4B entries) + */ +#define NFP_NET_CFG_RXR_BASE 0x0800 +#define NFP_NET_CFG_RXR_ADDR(_x) (NFP_NET_CFG_RXR_BASE + ((_x) * 0x8)) +#define NFP_NET_CFG_RXR_SZ(_x) (NFP_NET_CFG_RXR_BASE + 0x200 + (_x)) +#define NFP_NET_CFG_RXR_VEC(_x) (NFP_NET_CFG_RXR_BASE + 0x240 + (_x)) +#define NFP_NET_CFG_RXR_PRIO(_x) (NFP_NET_CFG_RXR_BASE + 0x280 + (_x)) +#define NFP_NET_CFG_RXR_IRQ_MOD(_x) (NFP_NET_CFG_RXR_BASE + 0x300 + \ + ((_x) * 0x4)) + +/* + * Interrupt Control/Cause registers (0x0c00 - 0x0d00) + * These registers are only used when MSI-X auto-masking is not + * enabled (@NFP_NET_CFG_CTRL_MSIXAUTO not set). The array is index + * by MSI-X entry and are 1B in size. If an entry is zero, the + * corresponding entry is enabled. If the FW generates an interrupt, + * it writes a cause into the corresponding field. This also masks + * the MSI-X entry and the host driver must clear the register to + * re-enable the interrupt. + */ +#define NFP_NET_CFG_ICR_BASE 0x0c00 +#define NFP_NET_CFG_ICR(_x) (NFP_NET_CFG_ICR_BASE + (_x)) +#define NFP_NET_CFG_ICR_UNMASKED 0x0 +#define NFP_NET_CFG_ICR_RXTX 0x1 +#define NFP_NET_CFG_ICR_LSC 0x2 + +/* + * General device stats (0x0d00 - 0x0d90) + * all counters are 64bit. + */ +#define NFP_NET_CFG_STATS_BASE 0x0d00 +#define NFP_NET_CFG_STATS_RX_DISCARDS (NFP_NET_CFG_STATS_BASE + 0x00) +#define NFP_NET_CFG_STATS_RX_ERRORS (NFP_NET_CFG_STATS_BASE + 0x08) +#define NFP_NET_CFG_STATS_RX_OCTETS (NFP_NET_CFG_STATS_BASE + 0x10) +#define NFP_NET_CFG_STATS_RX_UC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x18) +#define NFP_NET_CFG_STATS_RX_MC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x20) +#define NFP_NET_CFG_STATS_RX_BC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x28) +#define NFP_NET_CFG_STATS_RX_FRAMES (NFP_NET_CFG_STATS_BASE + 0x30) +#define NFP_NET_CFG_STATS_RX_MC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x38) +#define NFP_NET_CFG_STATS_RX_BC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x40) + +#define NFP_NET_CFG_STATS_TX_DISCARDS (NFP_NET_CFG_STATS_BASE + 0x48) +#define NFP_NET_CFG_STATS_TX_ERRORS (NFP_NET_CFG_STATS_BASE + 0x50) +#define NFP_NET_CFG_STATS_TX_OCTETS (NFP_NET_CFG_STATS_BASE + 0x58) +#define NFP_NET_CFG_STATS_TX_UC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x60) +#define NFP_NET_CFG_STATS_TX_MC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x68) +#define NFP_NET_CFG_STATS_TX_BC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x70) +#define NFP_NET_CFG_STATS_TX_FRAMES (NFP_NET_CFG_STATS_BASE + 0x78) +#define NFP_NET_CFG_STATS_TX_MC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x80) +#define NFP_NET_CFG_STATS_TX_BC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x88) + +/* + * Per ring stats (0x1000 - 0x1800) + * options, 64bit per entry + * @NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count) + * @NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count) + */ +#define NFP_NET_CFG_TXR_STATS_BASE 0x1000 +#define NFP_NET_CFG_TXR_STATS(_x) (NFP_NET_CFG_TXR_STATS_BASE + \ + ((_x) * 0x10)) +#define NFP_NET_CFG_RXR_STATS_BASE 0x1400 +#define NFP_NET_CFG_RXR_STATS(_x) (NFP_NET_CFG_RXR_STATS_BASE + \ + ((_x) * 0x10)) + +#endif /* _NFP_NET_CTRL_H_ */ +/* + * Local variables: + * c-file-style: "Linux" + * indent-tabs-mode: t + * End: + */ diff --git a/drivers/net/nfp/nfp_net_logs.h b/drivers/net/nfp/nfp_net_logs.h new file mode 100644 index 00000000..0b966e43 --- /dev/null +++ b/drivers/net/nfp/nfp_net_logs.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2014, 2015 Netronome Systems, Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _NFP_NET_LOGS_H_ +#define _NFP_NET_LOGS_H_ + +#include <rte_log.h> + +#define RTE_LIBRTE_NFP_NET_DEBUG_INIT 1 + +#ifdef RTE_LIBRTE_NFP_NET_DEBUG_INIT +#define PMD_INIT_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") +#else +#define PMD_INIT_LOG(level, fmt, args...) do { } while (0) +#define PMD_INIT_FUNC_TRACE() do { } while (0) +#endif + +#ifdef RTE_LIBRTE_NFP_NET_DEBUG_RX +#define PMD_RX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s() rx: " fmt, __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_NFP_NET_DEBUG_TX +#define PMD_TX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s() tx: " fmt, __func__, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_NFP_NET_DEBUG_DRIVER +#define PMD_DRV_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args) +#else +#define PMD_DRV_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_NFP_NET_DEBUG_INIT +#define ASSERT(x) if (!(x)) rte_panic("NFP_NET: x") +#else +#define ASSERT(x) do { } while (0) +#endif + +#endif /* _NFP_NET_LOGS_H_ */ diff --git a/drivers/net/nfp/nfp_net_pmd.h b/drivers/net/nfp/nfp_net_pmd.h new file mode 100644 index 00000000..232ce5ca --- /dev/null +++ b/drivers/net/nfp/nfp_net_pmd.h @@ -0,0 +1,450 @@ +/* + * Copyright (c) 2014, 2015 Netronome Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * vim:shiftwidth=8:noexpandtab + * + * @file dpdk/pmd/nfp_net_pmd.h + * + * Netronome NFP_NET PDM driver + */ + +#ifndef _NFP_NET_PMD_H_ +#define _NFP_NET_PMD_H_ + +#define NFP_NET_PMD_VERSION "0.1" +#define PCI_VENDOR_ID_NETRONOME 0x19ee +#define PCI_DEVICE_ID_NFP6000_PF_NIC 0x6000 +#define PCI_DEVICE_ID_NFP6000_VF_NIC 0x6003 + +/* Forward declaration */ +struct nfp_net_adapter; + +/* + * The maximum number of descriptors is limited by design as + * DPDK uses uint16_t variables for these values + */ +#define NFP_NET_MAX_TX_DESC (32 * 1024) +#define NFP_NET_MIN_TX_DESC 64 + +#define NFP_NET_MAX_RX_DESC (32 * 1024) +#define NFP_NET_MIN_RX_DESC 64 + +/* Bar allocation */ +#define NFP_NET_CRTL_BAR 0 +#define NFP_NET_TX_BAR 2 +#define NFP_NET_RX_BAR 2 + +/* Macros for accessing the Queue Controller Peripheral 'CSRs' */ +#define NFP_QCP_QUEUE_OFF(_x) ((_x) * 0x800) +#define NFP_QCP_QUEUE_ADD_RPTR 0x0000 +#define NFP_QCP_QUEUE_ADD_WPTR 0x0004 +#define NFP_QCP_QUEUE_STS_LO 0x0008 +#define NFP_QCP_QUEUE_STS_LO_READPTR_mask (0x3ffff) +#define NFP_QCP_QUEUE_STS_HI 0x000c +#define NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask (0x3ffff) + +/* Interrupt definitions */ +#define NFP_NET_IRQ_LSC_IDX 0 + +/* Default values for RX/TX configuration */ +#define DEFAULT_RX_FREE_THRESH 32 +#define DEFAULT_RX_PTHRESH 8 +#define DEFAULT_RX_HTHRESH 8 +#define DEFAULT_RX_WTHRESH 0 + +#define DEFAULT_TX_RS_THRESH 32 +#define DEFAULT_TX_FREE_THRESH 32 +#define DEFAULT_TX_PTHRESH 32 +#define DEFAULT_TX_HTHRESH 0 +#define DEFAULT_TX_WTHRESH 0 +#define DEFAULT_TX_RSBIT_THRESH 32 + +/* Alignment for dma zones */ +#define NFP_MEMZONE_ALIGN 128 + +/* + * This is used by the reconfig protocol. It sets the maximum time waiting in + * milliseconds before a reconfig timeout happens. 
+ */ +#define NFP_NET_POLL_TIMEOUT 5000 + +#define NFP_QCP_QUEUE_ADDR_SZ (0x800) + +#define NFP_NET_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */ +#define NFP_NET_LINK_UP_CHECK_TIMEOUT 1000 /* ms */ + +/* Version number helper defines */ +#define NFD_CFG_CLASS_VER_msk 0xff +#define NFD_CFG_CLASS_VER_shf 24 +#define NFD_CFG_CLASS_VER(x) (((x) & 0xff) << 24) +#define NFD_CFG_CLASS_VER_of(x) (((x) >> 24) & 0xff) +#define NFD_CFG_CLASS_TYPE_msk 0xff +#define NFD_CFG_CLASS_TYPE_shf 16 +#define NFD_CFG_CLASS_TYPE(x) (((x) & 0xff) << 16) +#define NFD_CFG_CLASS_TYPE_of(x) (((x) >> 16) & 0xff) +#define NFD_CFG_MAJOR_VERSION_msk 0xff +#define NFD_CFG_MAJOR_VERSION_shf 8 +#define NFD_CFG_MAJOR_VERSION(x) (((x) & 0xff) << 8) +#define NFD_CFG_MAJOR_VERSION_of(x) (((x) >> 8) & 0xff) +#define NFD_CFG_MINOR_VERSION_msk 0xff +#define NFD_CFG_MINOR_VERSION_shf 0 +#define NFD_CFG_MINOR_VERSION(x) (((x) & 0xff) << 0) +#define NFD_CFG_MINOR_VERSION_of(x) (((x) >> 0) & 0xff) + +#include <linux/types.h> + +static inline uint8_t nn_readb(volatile const void *addr) +{ + return *((volatile const uint8_t *)(addr)); +} + +static inline void nn_writeb(uint8_t val, volatile void *addr) +{ + *((volatile uint8_t *)(addr)) = val; +} + +static inline uint32_t nn_readl(volatile const void *addr) +{ + return *((volatile const uint32_t *)(addr)); +} + +static inline void nn_writel(uint32_t val, volatile void *addr) +{ + *((volatile uint32_t *)(addr)) = val; +} + +static inline uint64_t nn_readq(volatile void *addr) +{ + const volatile uint32_t *p = addr; + uint32_t low, high; + + high = nn_readl((volatile const void *)(p + 1)); + low = nn_readl((volatile const void *)p); + + return low + ((uint64_t)high << 32); +} + +static inline void nn_writeq(uint64_t val, volatile void *addr) +{ + nn_writel(val >> 32, (volatile char *)addr + 4); + nn_writel(val, addr); +} + +/* TX descriptor format */ +#define PCIE_DESC_TX_EOP (1 << 7) +#define PCIE_DESC_TX_OFFSET_MASK (0x7f) + +/* Flags in the host TX descriptor */ +#define PCIE_DESC_TX_CSUM (1 << 7) +#define PCIE_DESC_TX_IP4_CSUM (1 << 6) +#define PCIE_DESC_TX_TCP_CSUM (1 << 5) +#define PCIE_DESC_TX_UDP_CSUM (1 << 4) +#define PCIE_DESC_TX_VLAN (1 << 3) +#define PCIE_DESC_TX_LSO (1 << 2) +#define PCIE_DESC_TX_ENCAP_NONE (0) +#define PCIE_DESC_TX_ENCAP_VXLAN (1 << 1) +#define PCIE_DESC_TX_ENCAP_GRE (1 << 0) + +struct nfp_net_tx_desc { + union { + struct { + uint8_t dma_addr_hi; /* High bits of host buf address */ + __le16 dma_len; /* Length to DMA for this desc */ + uint8_t offset_eop; /* Offset in buf where pkt starts + + * highest bit is eop flag. + */ + __le32 dma_addr_lo; /* Low 32bit of host buf addr */ + + __le16 lso; /* MSS to be used for LSO */ + uint8_t l4_offset; /* LSO, where the L4 data starts */ + uint8_t flags; /* TX Flags, see @PCIE_DESC_TX_* */ + + __le16 vlan; /* VLAN tag to add if indicated */ + __le16 data_len; /* Length of frame + meta data */ + } __attribute__((__packed__)); + __le32 vals[4]; + }; +}; + +struct nfp_net_txq { + struct nfp_net_hw *hw; /* Backpointer to nfp_net structure */ + + /* + * Queue information: @qidx is the queue index from Linux's + * perspective. @tx_qcidx is the index of the Queue + * Controller Peripheral queue relative to the TX queue BAR. + * @cnt is the size of the queue in number of + * descriptors. @qcp_q is a pointer to the base of the queue + * structure on the NFP + */ + uint8_t *qcp_q; + + /* + * Read and Write pointers. 
@wr_p and @rd_p are host side pointer, + * they are free running and have little relation to the QCP pointers * + * @qcp_rd_p is a local copy queue controller peripheral read pointer + */ + + uint32_t wr_p; + uint32_t rd_p; + uint32_t qcp_rd_p; + + uint32_t tx_count; + + uint32_t tx_free_thresh; + uint32_t tail; + + /* + * For each descriptor keep a reference to the mbuff and + * DMA address used until completion is signalled. + */ + struct { + struct rte_mbuf *mbuf; + } *txbufs; + + /* + * Information about the host side queue location. @txds is + * the virtual address for the queue, @dma is the DMA address + * of the queue and @size is the size in bytes for the queue + * (needed for free) + */ + struct nfp_net_tx_desc *txds; + + /* + * At this point 56 bytes have been used for all the fields in the + * TX critical path. We have room for 8 bytes and still all placed + * in a cache line. We are not using the threshold values below nor + * the txq_flags but if we need to, we can add the most used in the + * remaining bytes. + */ + uint32_t tx_rs_thresh; /* not used by now. Future? */ + uint32_t tx_pthresh; /* not used by now. Future? */ + uint32_t tx_hthresh; /* not used by now. Future? */ + uint32_t tx_wthresh; /* not used by now. Future? */ + uint32_t txq_flags; /* not used by now. Future? */ + uint8_t port_id; + int qidx; + int tx_qcidx; + __le64 dma; +} __attribute__ ((__aligned__(64))); + +/* RX and freelist descriptor format */ +#define PCIE_DESC_RX_DD (1 << 7) +#define PCIE_DESC_RX_META_LEN_MASK (0x7f) + +/* Flags in the RX descriptor */ +#define PCIE_DESC_RX_RSS (1 << 15) +#define PCIE_DESC_RX_I_IP4_CSUM (1 << 14) +#define PCIE_DESC_RX_I_IP4_CSUM_OK (1 << 13) +#define PCIE_DESC_RX_I_TCP_CSUM (1 << 12) +#define PCIE_DESC_RX_I_TCP_CSUM_OK (1 << 11) +#define PCIE_DESC_RX_I_UDP_CSUM (1 << 10) +#define PCIE_DESC_RX_I_UDP_CSUM_OK (1 << 9) +#define PCIE_DESC_RX_INGRESS_PORT (1 << 8) +#define PCIE_DESC_RX_EOP (1 << 7) +#define PCIE_DESC_RX_IP4_CSUM (1 << 6) +#define PCIE_DESC_RX_IP4_CSUM_OK (1 << 5) +#define PCIE_DESC_RX_TCP_CSUM (1 << 4) +#define PCIE_DESC_RX_TCP_CSUM_OK (1 << 3) +#define PCIE_DESC_RX_UDP_CSUM (1 << 2) +#define PCIE_DESC_RX_UDP_CSUM_OK (1 << 1) +#define PCIE_DESC_RX_VLAN (1 << 0) + +struct nfp_net_rx_desc { + union { + /* Freelist descriptor */ + struct { + uint8_t dma_addr_hi; + __le16 spare; + uint8_t dd; + + __le32 dma_addr_lo; + } __attribute__((__packed__)) fld; + + /* RX descriptor */ + struct { + __le16 data_len; + uint8_t reserved; + uint8_t meta_len_dd; + + __le16 flags; + __le16 vlan; + } __attribute__((__packed__)) rxd; + + __le32 vals[2]; + }; +}; + +struct nfp_net_rx_buff { + struct rte_mbuf *mbuf; +}; + +struct nfp_net_rxq { + struct nfp_net_hw *hw; /* Backpointer to nfp_net structure */ + + /* + * @qcp_fl and @qcp_rx are pointers to the base addresses of the + * freelist and RX queue controller peripheral queue structures on the + * NFP + */ + uint8_t *qcp_fl; + uint8_t *qcp_rx; + + /* + * Read and Write pointers. @wr_p and @rd_p are host side + * pointer, they are free running and have little relation to + * the QCP pointers. @wr_p is where the driver adds new + * freelist descriptors and @rd_p is where the driver start + * reading descriptors for newly arrive packets from. + */ + uint32_t wr_p; + uint32_t rd_p; + + /* + * For each buffer placed on the freelist, record the + * associated SKB + */ + struct nfp_net_rx_buff *rxbufs; + + /* + * Information about the host side queue location. 
@rxds is + * the virtual address for the queue + */ + struct nfp_net_rx_desc *rxds; + + /* + * The mempool is created by the user specifying a mbuf size. + * We save here the reference of the mempool needed in the RX + * path and the mbuf size for checking received packets can be + * safely copied to the mbuf using the NFP_NET_RX_OFFSET + */ + struct rte_mempool *mem_pool; + uint16_t mbuf_size; + + /* + * Next two fields are used for giving more free descriptors + * to the NFP + */ + uint16_t rx_free_thresh; + uint16_t nb_rx_hold; + + /* the size of the queue in number of descriptors */ + uint16_t rx_count; + + /* + * Fields above this point fit in a single cache line and are all used + * in the RX critical path. Fields below this point are just used + * during queue configuration or not used at all (yet) + */ + + /* referencing dev->data->port_id */ + uint16_t port_id; + + uint8_t crc_len; /* Not used by now */ + uint8_t drop_en; /* Not used by now */ + + /* DMA address of the queue */ + __le64 dma; + + /* + * Queue information: @qidx is the queue index from Linux's + * perspective. @fl_qcidx is the index of the Queue + * Controller peripheral queue relative to the RX queue BAR + * used for the freelist and @rx_qcidx is the Queue Controller + * Peripheral index for the RX queue. + */ + int qidx; + int fl_qcidx; + int rx_qcidx; +} __attribute__ ((__aligned__(64))); + +struct nfp_net_hw { + /* Info from the firmware */ + uint32_t ver; + uint32_t cap; + uint32_t max_mtu; + uint32_t mtu; + uint32_t rx_offset; + + /* Current values for control */ + uint32_t ctrl; + + uint8_t *ctrl_bar; + uint8_t *tx_bar; + uint8_t *rx_bar; + + int stride_rx; + int stride_tx; + + uint8_t *qcp_cfg; + + uint32_t max_tx_queues; + uint32_t max_rx_queues; + uint16_t flbufsz; + uint16_t device_id; + uint16_t vendor_id; + uint16_t subsystem_device_id; + uint16_t subsystem_vendor_id; +#if defined(DSTQ_SELECTION) +#if DSTQ_SELECTION + uint16_t device_function; +#endif +#endif + + uint8_t mac_addr[ETHER_ADDR_LEN]; + + /* Records starting point for counters */ + struct rte_eth_stats eth_stats_base; + +#ifdef NFP_NET_LIBNFP + struct nfp_cpp *cpp; + struct nfp_cpp_area *ctrl_area; + struct nfp_cpp_area *tx_area; + struct nfp_cpp_area *rx_area; + struct nfp_cpp_area *msix_area; +#endif +}; + +struct nfp_net_adapter { + struct nfp_net_hw hw; +}; + +#define NFP_NET_DEV_PRIVATE_TO_HW(adapter)\ + (&((struct nfp_net_adapter *)adapter)->hw) + +#endif /* _NFP_NET_PMD_H_ */ +/* + * Local variables: + * c-file-style: "Linux" + * indent-tabs-mode: t + * End: + */ diff --git a/drivers/net/nfp/rte_pmd_nfp_version.map b/drivers/net/nfp/rte_pmd_nfp_version.map new file mode 100644 index 00000000..ad607bbe --- /dev/null +++ b/drivers/net/nfp/rte_pmd_nfp_version.map @@ -0,0 +1,3 @@ +DPDK_2.2 { + local: *; +}; diff --git a/drivers/net/null/Makefile b/drivers/net/null/Makefile new file mode 100644 index 00000000..22023891 --- /dev/null +++ b/drivers/net/null/Makefile @@ -0,0 +1,61 @@ +# BSD LICENSE +# +# Copyright (C) IGEL Co.,Ltd. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of IGEL Co.,Ltd. nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_null.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +EXPORT_MAP := rte_pmd_null_version.map + +LIBABIVER := 1 + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += rte_eth_null.c + +# +# Export include files +# +SYMLINK-y-include += rte_eth_null.h + +# this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += lib/librte_kvargs + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c new file mode 100644 index 00000000..5e8e203c --- /dev/null +++ b/drivers/net/null/rte_eth_null.c @@ -0,0 +1,692 @@ +/*- + * BSD LICENSE + * + * Copyright (C) IGEL Co.,Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of IGEL Co.,Ltd. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <rte_mbuf.h> +#include <rte_ethdev.h> +#include <rte_malloc.h> +#include <rte_memcpy.h> +#include <rte_dev.h> +#include <rte_kvargs.h> +#include <rte_spinlock.h> + +#include "rte_eth_null.h" + +#define ETH_NULL_PACKET_SIZE_ARG "size" +#define ETH_NULL_PACKET_COPY_ARG "copy" + +static unsigned default_packet_size = 64; +static unsigned default_packet_copy; + +static const char *valid_arguments[] = { + ETH_NULL_PACKET_SIZE_ARG, + ETH_NULL_PACKET_COPY_ARG, + NULL +}; + +struct pmd_internals; + +struct null_queue { + struct pmd_internals *internals; + + struct rte_mempool *mb_pool; + struct rte_mbuf *dummy_packet; + + rte_atomic64_t rx_pkts; + rte_atomic64_t tx_pkts; + rte_atomic64_t err_pkts; +}; + +struct pmd_internals { + unsigned packet_size; + unsigned packet_copy; + + struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT]; + struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT]; + + /** Bit mask of RSS offloads, the bit offset also means flow type */ + uint64_t flow_type_rss_offloads; + + rte_spinlock_t rss_lock; + + uint16_t reta_size; + struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 / + RTE_RETA_GROUP_SIZE]; + + uint8_t rss_key[40]; /**< 40-byte hash key. */ +}; + + +static struct ether_addr eth_addr = { .addr_bytes = {0} }; +static const char *drivername = "Null PMD"; +static struct rte_eth_link pmd_link = { + .link_speed = ETH_SPEED_NUM_10G, + .link_duplex = ETH_LINK_FULL_DUPLEX, + .link_status = ETH_LINK_DOWN, + .link_autoneg = ETH_LINK_SPEED_AUTONEG, +}; + +static uint16_t +eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) +{ + int i; + struct null_queue *h = q; + unsigned packet_size; + + if ((q == NULL) || (bufs == NULL)) + return 0; + + packet_size = h->internals->packet_size; + for (i = 0; i < nb_bufs; i++) { + bufs[i] = rte_pktmbuf_alloc(h->mb_pool); + if (!bufs[i]) + break; + bufs[i]->data_len = (uint16_t)packet_size; + bufs[i]->pkt_len = packet_size; + bufs[i]->nb_segs = 1; + bufs[i]->next = NULL; + } + + rte_atomic64_add(&(h->rx_pkts), i); + + return i; +} + +static uint16_t +eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) +{ + int i; + struct null_queue *h = q; + unsigned packet_size; + + if ((q == NULL) || (bufs == NULL)) + return 0; + + packet_size = h->internals->packet_size; + for (i = 0; i < nb_bufs; i++) { + bufs[i] = rte_pktmbuf_alloc(h->mb_pool); + if (!bufs[i]) + break; + rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet, + packet_size); + bufs[i]->data_len = (uint16_t)packet_size; + bufs[i]->pkt_len = packet_size; + bufs[i]->nb_segs = 1; + bufs[i]->next = NULL; + } + + rte_atomic64_add(&(h->rx_pkts), i); + + return i; +} + +static uint16_t +eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) +{ + int i; + struct null_queue *h = q; + + if ((q == NULL) || (bufs == NULL)) + return 0; + + for (i = 0; i < nb_bufs; i++) + rte_pktmbuf_free(bufs[i]); + + rte_atomic64_add(&(h->tx_pkts), i); + + return i; +} + +static uint16_t 
+eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) +{ + int i; + struct null_queue *h = q; + unsigned packet_size; + + if ((q == NULL) || (bufs == NULL)) + return 0; + + packet_size = h->internals->packet_size; + for (i = 0; i < nb_bufs; i++) { + rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *), + packet_size); + rte_pktmbuf_free(bufs[i]); + } + + rte_atomic64_add(&(h->tx_pkts), i); + + return i; +} + +static int +eth_dev_configure(struct rte_eth_dev *dev __rte_unused) +{ + return 0; +} + +static int +eth_dev_start(struct rte_eth_dev *dev) +{ + if (dev == NULL) + return -EINVAL; + + dev->data->dev_link.link_status = ETH_LINK_UP; + return 0; +} + +static void +eth_dev_stop(struct rte_eth_dev *dev) +{ + if (dev == NULL) + return; + + dev->data->dev_link.link_status = ETH_LINK_DOWN; +} + +static int +eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mb_pool) +{ + struct rte_mbuf *dummy_packet; + struct pmd_internals *internals; + unsigned packet_size; + + if ((dev == NULL) || (mb_pool == NULL)) + return -EINVAL; + + internals = dev->data->dev_private; + + if (rx_queue_id >= dev->data->nb_rx_queues) + return -ENODEV; + + packet_size = internals->packet_size; + + internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool; + dev->data->rx_queues[rx_queue_id] = + &internals->rx_null_queues[rx_queue_id]; + dummy_packet = rte_zmalloc_socket(NULL, + packet_size, 0, dev->data->numa_node); + if (dummy_packet == NULL) + return -ENOMEM; + + internals->rx_null_queues[rx_queue_id].internals = internals; + internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet; + + return 0; +} + +static int +eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + struct rte_mbuf *dummy_packet; + struct pmd_internals *internals; + unsigned packet_size; + + if (dev == NULL) + return -EINVAL; + + internals = dev->data->dev_private; + + if (tx_queue_id >= dev->data->nb_tx_queues) + return -ENODEV; + + packet_size = internals->packet_size; + + dev->data->tx_queues[tx_queue_id] = + &internals->tx_null_queues[tx_queue_id]; + dummy_packet = rte_zmalloc_socket(NULL, + packet_size, 0, dev->data->numa_node); + if (dummy_packet == NULL) + return -ENOMEM; + + internals->tx_null_queues[tx_queue_id].internals = internals; + internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet; + + return 0; +} + + +static void +eth_dev_info(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct pmd_internals *internals; + + if ((dev == NULL) || (dev_info == NULL)) + return; + + internals = dev->data->dev_private; + dev_info->driver_name = drivername; + dev_info->max_mac_addrs = 1; + dev_info->max_rx_pktlen = (uint32_t)-1; + dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues); + dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues); + dev_info->min_rx_bufsize = 0; + dev_info->pci_dev = NULL; + dev_info->reta_size = internals->reta_size; + dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads; +} + +static void +eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats) +{ + unsigned i, num_stats; + unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0; + const struct pmd_internals *internal; + + if ((dev == NULL) || (igb_stats == 
NULL)) + return; + + internal = dev->data->dev_private; + num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS, + RTE_MIN(dev->data->nb_rx_queues, + RTE_DIM(internal->rx_null_queues))); + for (i = 0; i < num_stats; i++) { + igb_stats->q_ipackets[i] = + internal->rx_null_queues[i].rx_pkts.cnt; + rx_total += igb_stats->q_ipackets[i]; + } + + num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS, + RTE_MIN(dev->data->nb_tx_queues, + RTE_DIM(internal->tx_null_queues))); + for (i = 0; i < num_stats; i++) { + igb_stats->q_opackets[i] = + internal->tx_null_queues[i].tx_pkts.cnt; + igb_stats->q_errors[i] = + internal->tx_null_queues[i].err_pkts.cnt; + tx_total += igb_stats->q_opackets[i]; + tx_err_total += igb_stats->q_errors[i]; + } + + igb_stats->ipackets = rx_total; + igb_stats->opackets = tx_total; + igb_stats->oerrors = tx_err_total; +} + +static void +eth_stats_reset(struct rte_eth_dev *dev) +{ + unsigned i; + struct pmd_internals *internal; + + if (dev == NULL) + return; + + internal = dev->data->dev_private; + for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++) + internal->rx_null_queues[i].rx_pkts.cnt = 0; + for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) { + internal->tx_null_queues[i].tx_pkts.cnt = 0; + internal->tx_null_queues[i].err_pkts.cnt = 0; + } +} + +static void +eth_queue_release(void *q) +{ + struct null_queue *nq; + + if (q == NULL) + return; + + nq = q; + rte_free(nq->dummy_packet); +} + +static int +eth_link_update(struct rte_eth_dev *dev __rte_unused, + int wait_to_complete __rte_unused) { return 0; } + +static int +eth_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) +{ + int i, j; + struct pmd_internals *internal = dev->data->dev_private; + + if (reta_size != internal->reta_size) + return -EINVAL; + + rte_spinlock_lock(&internal->rss_lock); + + /* Copy RETA table */ + for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) { + internal->reta_conf[i].mask = reta_conf[i].mask; + for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) + if ((reta_conf[i].mask >> j) & 0x01) + internal->reta_conf[i].reta[j] = reta_conf[i].reta[j]; + } + + rte_spinlock_unlock(&internal->rss_lock); + + return 0; +} + +static int +eth_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) +{ + int i, j; + struct pmd_internals *internal = dev->data->dev_private; + + if (reta_size != internal->reta_size) + return -EINVAL; + + rte_spinlock_lock(&internal->rss_lock); + + /* Copy RETA table */ + for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) { + for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) + if ((reta_conf[i].mask >> j) & 0x01) + reta_conf[i].reta[j] = internal->reta_conf[i].reta[j]; + } + + rte_spinlock_unlock(&internal->rss_lock); + + return 0; +} + +static int +eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf) +{ + struct pmd_internals *internal = dev->data->dev_private; + + rte_spinlock_lock(&internal->rss_lock); + + if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0) + dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = + rss_conf->rss_hf & internal->flow_type_rss_offloads; + + if (rss_conf->rss_key) + rte_memcpy(internal->rss_key, rss_conf->rss_key, 40); + + rte_spinlock_unlock(&internal->rss_lock); + + return 0; +} + +static int +eth_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct pmd_internals *internal = dev->data->dev_private; + + 
rte_spinlock_lock(&internal->rss_lock); + + rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf; + if (rss_conf->rss_key) + rte_memcpy(rss_conf->rss_key, internal->rss_key, 40); + + rte_spinlock_unlock(&internal->rss_lock); + + return 0; +} + +static const struct eth_dev_ops ops = { + .dev_start = eth_dev_start, + .dev_stop = eth_dev_stop, + .dev_configure = eth_dev_configure, + .dev_infos_get = eth_dev_info, + .rx_queue_setup = eth_rx_queue_setup, + .tx_queue_setup = eth_tx_queue_setup, + .rx_queue_release = eth_queue_release, + .tx_queue_release = eth_queue_release, + .link_update = eth_link_update, + .stats_get = eth_stats_get, + .stats_reset = eth_stats_reset, + .reta_update = eth_rss_reta_update, + .reta_query = eth_rss_reta_query, + .rss_hash_update = eth_rss_hash_update, + .rss_hash_conf_get = eth_rss_hash_conf_get +}; + +int +eth_dev_null_create(const char *name, + const unsigned numa_node, + unsigned packet_size, + unsigned packet_copy) +{ + const unsigned nb_rx_queues = 1; + const unsigned nb_tx_queues = 1; + struct rte_eth_dev_data *data = NULL; + struct pmd_internals *internals = NULL; + struct rte_eth_dev *eth_dev = NULL; + + static const uint8_t default_rss_key[40] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D, + 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B, + 0xBE, 0xAC, 0x01, 0xFA + }; + + if (name == NULL) + return -EINVAL; + + RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n", + numa_node); + + /* now do all data allocation - for eth_dev structure, dummy pci driver + * and internal (private) data + */ + data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node); + if (data == NULL) + goto error; + + internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node); + if (internals == NULL) + goto error; + + /* reserve an ethdev entry */ + eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL); + if (eth_dev == NULL) + goto error; + + /* now put it all together + * - store queue data in internals, + * - store numa_node info in ethdev data + * - point eth_dev_data to internals + * - and point eth_dev structure to new eth_dev_data structure + */ + /* NOTE: we'll replace the data element of the originally allocated eth_dev + * so the nulls are local per-process */ + + internals->packet_size = packet_size; + internals->packet_copy = packet_copy; + + internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK; + internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE; + + rte_memcpy(internals->rss_key, default_rss_key, 40); + + data->dev_private = internals; + data->port_id = eth_dev->data->port_id; + data->nb_rx_queues = (uint16_t)nb_rx_queues; + data->nb_tx_queues = (uint16_t)nb_tx_queues; + data->dev_link = pmd_link; + data->mac_addrs = &eth_addr; + strncpy(data->name, eth_dev->data->name, strlen(eth_dev->data->name)); + + eth_dev->data = data; + eth_dev->dev_ops = &ops; + + TAILQ_INIT(&eth_dev->link_intr_cbs); + + eth_dev->driver = NULL; + data->dev_flags = RTE_ETH_DEV_DETACHABLE; + data->kdrv = RTE_KDRV_NONE; + data->drv_name = drivername; + data->numa_node = numa_node; + + /* finally assign rx and tx ops */ + if (packet_copy) { + eth_dev->rx_pkt_burst = eth_null_copy_rx; + eth_dev->tx_pkt_burst = eth_null_copy_tx; + } else { + eth_dev->rx_pkt_burst = eth_null_rx; + eth_dev->tx_pkt_burst = eth_null_tx; + } + + return 0; + +error: + rte_free(data); + rte_free(internals); + + return -1; +} + +static 
inline int +get_packet_size_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + const char *a = value; + unsigned *packet_size = extra_args; + + if ((value == NULL) || (extra_args == NULL)) + return -EINVAL; + + *packet_size = (unsigned)strtoul(a, NULL, 0); + if (*packet_size == UINT_MAX) + return -1; + + return 0; +} + +static inline int +get_packet_copy_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + const char *a = value; + unsigned *packet_copy = extra_args; + + if ((value == NULL) || (extra_args == NULL)) + return -EINVAL; + + *packet_copy = (unsigned)strtoul(a, NULL, 0); + if (*packet_copy == UINT_MAX) + return -1; + + return 0; +} + +static int +rte_pmd_null_devinit(const char *name, const char *params) +{ + unsigned numa_node; + unsigned packet_size = default_packet_size; + unsigned packet_copy = default_packet_copy; + struct rte_kvargs *kvlist = NULL; + int ret; + + if (name == NULL) + return -EINVAL; + + RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name); + + numa_node = rte_socket_id(); + + if (params != NULL) { + kvlist = rte_kvargs_parse(params, valid_arguments); + if (kvlist == NULL) + return -1; + + if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) { + + ret = rte_kvargs_process(kvlist, + ETH_NULL_PACKET_SIZE_ARG, + &get_packet_size_arg, &packet_size); + if (ret < 0) + goto free_kvlist; + } + + if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) { + + ret = rte_kvargs_process(kvlist, + ETH_NULL_PACKET_COPY_ARG, + &get_packet_copy_arg, &packet_copy); + if (ret < 0) + goto free_kvlist; + } + } + + RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %d, " + "packet copy is %s\n", packet_size, + packet_copy ? "enabled" : "disabled"); + + ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy); + +free_kvlist: + if (kvlist) + rte_kvargs_free(kvlist); + return ret; +} + +static int +rte_pmd_null_devuninit(const char *name) +{ + struct rte_eth_dev *eth_dev = NULL; + + if (name == NULL) + return -EINVAL; + + RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n", + rte_socket_id()); + + /* find the ethdev entry */ + eth_dev = rte_eth_dev_allocated(name); + if (eth_dev == NULL) + return -1; + + rte_free(eth_dev->data->dev_private); + rte_free(eth_dev->data); + + rte_eth_dev_release_port(eth_dev); + + return 0; +} + +static struct rte_driver pmd_null_drv = { + .name = "eth_null", + .type = PMD_VDEV, + .init = rte_pmd_null_devinit, + .uninit = rte_pmd_null_devuninit, +}; + +PMD_REGISTER_DRIVER(pmd_null_drv); diff --git a/drivers/net/null/rte_eth_null.h b/drivers/net/null/rte_eth_null.h new file mode 100644 index 00000000..abada8c2 --- /dev/null +++ b/drivers/net/null/rte_eth_null.h @@ -0,0 +1,40 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RTE_ETH_NULL_H_ +#define RTE_ETH_NULL_H_ + +int eth_dev_null_create(const char *name, const unsigned numa_node, + unsigned packet_size, unsigned packet_copy); + +#endif /* RTE_ETH_NULL_H_ */ diff --git a/drivers/net/null/rte_pmd_null_version.map b/drivers/net/null/rte_pmd_null_version.map new file mode 100644 index 00000000..84b1d0fe --- /dev/null +++ b/drivers/net/null/rte_pmd_null_version.map @@ -0,0 +1,11 @@ +DPDK_2.0 { + + local: *; +}; + +DPDK_2.2 { + global: + + eth_dev_null_create; + +} DPDK_2.0; diff --git a/drivers/net/pcap/Makefile b/drivers/net/pcap/Makefile new file mode 100644 index 00000000..b41d8a27 --- /dev/null +++ b/drivers/net/pcap/Makefile @@ -0,0 +1,63 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2014 Intel Corporation. All rights reserved. +# Copyright(c) 2014 6WIND S.A. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
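For illustration only, not part of the patch itself: the rte_eth_null.h header above exports eth_dev_null_create(), so an application linked with this PMD can create a null port directly from code. A minimal sketch, assuming EAL has already been initialized; the port name and the wrapper function below are hypothetical:

#include <rte_lcore.h>
#include <rte_eth_null.h>

/* Create one null ethdev on the caller's NUMA socket:
 * 64-byte dummy packets, no payload copy (the fastest mode).
 * Returns 0 on success, negative on failure. */
static int
create_null_port(void)
{
	return eth_dev_null_create("eth_null_test", rte_socket_id(), 64, 0);
}

The same kind of port can also be created from the EAL command line, e.g. --vdev=eth_null0,size=64,copy=1, which reaches rte_pmd_null_devinit() and is parsed through the "size" and "copy" kvargs shown earlier.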
+ +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_pcap.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +LDLIBS += -lpcap + +EXPORT_MAP := rte_pmd_pcap_version.map + +LIBABIVER := 1 + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += rte_eth_pcap.c + +# +# Export include files +# +SYMLINK-y-include += + +# this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_kvargs + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c new file mode 100644 index 00000000..c98e2341 --- /dev/null +++ b/drivers/net/pcap/rte_eth_pcap.c @@ -0,0 +1,1093 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * Copyright(c) 2014 6WIND S.A. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <time.h> +#include <rte_mbuf.h> +#include <rte_ethdev.h> +#include <rte_malloc.h> +#include <rte_memcpy.h> +#include <rte_string_fns.h> +#include <rte_cycles.h> +#include <rte_kvargs.h> +#include <rte_dev.h> + +#include <net/if.h> + +#include <pcap.h> + +#define RTE_ETH_PCAP_SNAPSHOT_LEN 65535 +#define RTE_ETH_PCAP_SNAPLEN ETHER_MAX_JUMBO_FRAME_LEN +#define RTE_ETH_PCAP_PROMISC 1 +#define RTE_ETH_PCAP_TIMEOUT -1 +#define ETH_PCAP_RX_PCAP_ARG "rx_pcap" +#define ETH_PCAP_TX_PCAP_ARG "tx_pcap" +#define ETH_PCAP_RX_IFACE_ARG "rx_iface" +#define ETH_PCAP_TX_IFACE_ARG "tx_iface" +#define ETH_PCAP_IFACE_ARG "iface" + +#define ETH_PCAP_ARG_MAXLEN 64 + +static char errbuf[PCAP_ERRBUF_SIZE]; +static unsigned char tx_pcap_data[RTE_ETH_PCAP_SNAPLEN]; +static struct timeval start_time; +static uint64_t start_cycles; +static uint64_t hz; + +struct pcap_rx_queue { + pcap_t *pcap; + uint8_t in_port; + struct rte_mempool *mb_pool; + volatile unsigned long rx_pkts; + volatile unsigned long rx_bytes; + volatile unsigned long err_pkts; + char name[PATH_MAX]; + char type[ETH_PCAP_ARG_MAXLEN]; +}; + +struct pcap_tx_queue { + pcap_dumper_t *dumper; + pcap_t *pcap; + volatile unsigned long tx_pkts; + volatile unsigned long tx_bytes; + volatile unsigned long err_pkts; + char name[PATH_MAX]; + char type[ETH_PCAP_ARG_MAXLEN]; +}; + +struct rx_pcaps { + unsigned num_of_rx; + pcap_t *pcaps[RTE_PMD_RING_MAX_RX_RINGS]; + const char *names[RTE_PMD_RING_MAX_RX_RINGS]; + const char *types[RTE_PMD_RING_MAX_RX_RINGS]; +}; + +struct tx_pcaps { + unsigned num_of_tx; + pcap_dumper_t *dumpers[RTE_PMD_RING_MAX_TX_RINGS]; + pcap_t *pcaps[RTE_PMD_RING_MAX_RX_RINGS]; + const char *names[RTE_PMD_RING_MAX_RX_RINGS]; + const char *types[RTE_PMD_RING_MAX_RX_RINGS]; +}; + +struct pmd_internals { + struct pcap_rx_queue rx_queue[RTE_PMD_RING_MAX_RX_RINGS]; + struct pcap_tx_queue tx_queue[RTE_PMD_RING_MAX_TX_RINGS]; + int if_index; + int single_iface; +}; + +const char *valid_arguments[] = { + ETH_PCAP_RX_PCAP_ARG, + ETH_PCAP_TX_PCAP_ARG, + ETH_PCAP_RX_IFACE_ARG, + ETH_PCAP_TX_IFACE_ARG, + ETH_PCAP_IFACE_ARG, + NULL +}; + +static int open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper); +static int open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap); +static int open_single_iface(const char *iface, pcap_t **pcap); + +static struct ether_addr eth_addr = { .addr_bytes = { 0, 0, 0, 0x1, 0x2, 0x3 } }; +static const char *drivername = "Pcap PMD"; +static struct rte_eth_link pmd_link = { + .link_speed = ETH_SPEED_NUM_10G, + .link_duplex = ETH_LINK_FULL_DUPLEX, + .link_status = ETH_LINK_DOWN, + .link_autoneg = ETH_LINK_SPEED_FIXED, +}; + +static int +eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, + struct rte_mbuf *mbuf, + const u_char *data, + uint16_t data_len) +{ + struct rte_mbuf *m = mbuf; + + /* Copy the first segment. */ + uint16_t len = rte_pktmbuf_tailroom(mbuf); + + rte_memcpy(rte_pktmbuf_append(mbuf, len), data, len); + data_len -= len; + data += len; + + while (data_len > 0) { + /* Allocate next mbuf and point to that. */ + m->next = rte_pktmbuf_alloc(mb_pool); + + if (unlikely(!m->next)) + return -1; + + m = m->next; + + /* Headroom is not needed in chained mbufs. */ + rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m)); + m->pkt_len = 0; + m->data_len = 0; + + /* Copy next segment. 
*/ + len = RTE_MIN(rte_pktmbuf_tailroom(m), data_len); + rte_memcpy(rte_pktmbuf_append(m, len), data, len); + + mbuf->nb_segs++; + data_len -= len; + data += len; + } + + return mbuf->nb_segs; +} + +/* Copy data from mbuf chain to a buffer suitable for writing to a PCAP file. */ +static void +eth_pcap_gather_data(unsigned char *data, struct rte_mbuf *mbuf) +{ + uint16_t data_len = 0; + + while (mbuf) { + rte_memcpy(data + data_len, rte_pktmbuf_mtod(mbuf, void *), + mbuf->data_len); + + data_len += mbuf->data_len; + mbuf = mbuf->next; + } +} + +static uint16_t +eth_pcap_rx(void *queue, + struct rte_mbuf **bufs, + uint16_t nb_pkts) +{ + unsigned i; + struct pcap_pkthdr header; + const u_char *packet; + struct rte_mbuf *mbuf; + struct pcap_rx_queue *pcap_q = queue; + uint16_t num_rx = 0; + uint16_t buf_size; + uint32_t rx_bytes = 0; + + if (unlikely(pcap_q->pcap == NULL || nb_pkts == 0)) + return 0; + + /* Reads the given number of packets from the pcap file one by one + * and copies the packet data into a newly allocated mbuf to return. + */ + for (i = 0; i < nb_pkts; i++) { + /* Get the next PCAP packet */ + packet = pcap_next(pcap_q->pcap, &header); + if (unlikely(packet == NULL)) + break; + else + mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool); + if (unlikely(mbuf == NULL)) + break; + + /* Now get the space available for data in the mbuf */ + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(pcap_q->mb_pool) - + RTE_PKTMBUF_HEADROOM); + + if (header.caplen <= buf_size) { + /* pcap packet will fit in the mbuf, go ahead and copy */ + rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet, + header.caplen); + mbuf->data_len = (uint16_t)header.caplen; + } else { + /* Try read jumbo frame into multi mbufs. */ + if (unlikely(eth_pcap_rx_jumbo(pcap_q->mb_pool, + mbuf, + packet, + header.caplen) == -1)) + break; + } + + mbuf->pkt_len = (uint16_t)header.caplen; + mbuf->port = pcap_q->in_port; + bufs[num_rx] = mbuf; + num_rx++; + rx_bytes += header.caplen; + } + pcap_q->rx_pkts += num_rx; + pcap_q->rx_bytes += rx_bytes; + return num_rx; +} + +static inline void +calculate_timestamp(struct timeval *ts) { + uint64_t cycles; + struct timeval cur_time; + + cycles = rte_get_timer_cycles() - start_cycles; + cur_time.tv_sec = cycles / hz; + cur_time.tv_usec = (cycles % hz) * 10e6 / hz; + timeradd(&start_time, &cur_time, ts); +} + +/* + * Callback to handle writing packets to a pcap file. + */ +static uint16_t +eth_pcap_tx_dumper(void *queue, + struct rte_mbuf **bufs, + uint16_t nb_pkts) +{ + unsigned i; + struct rte_mbuf *mbuf; + struct pcap_tx_queue *dumper_q = queue; + uint16_t num_tx = 0; + uint32_t tx_bytes = 0; + struct pcap_pkthdr header; + + if (dumper_q->dumper == NULL || nb_pkts == 0) + return 0; + + /* writes the nb_pkts packets to the previously opened pcap file dumper */ + for (i = 0; i < nb_pkts; i++) { + mbuf = bufs[i]; + calculate_timestamp(&header.ts); + header.len = mbuf->pkt_len; + header.caplen = header.len; + + if (likely(mbuf->nb_segs == 1)) { + pcap_dump((u_char *)dumper_q->dumper, &header, + rte_pktmbuf_mtod(mbuf, void*)); + } else { + if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) { + eth_pcap_gather_data(tx_pcap_data, mbuf); + pcap_dump((u_char *)dumper_q->dumper, &header, + tx_pcap_data); + } else { + RTE_LOG(ERR, PMD, + "Dropping PCAP packet. 
" + "Size (%d) > max jumbo size (%d).\n", + mbuf->pkt_len, + ETHER_MAX_JUMBO_FRAME_LEN); + + rte_pktmbuf_free(mbuf); + break; + } + } + + rte_pktmbuf_free(mbuf); + num_tx++; + tx_bytes += mbuf->pkt_len; + } + + /* + * Since there's no place to hook a callback when the forwarding + * process stops and to make sure the pcap file is actually written, + * we flush the pcap dumper within each burst. + */ + pcap_dump_flush(dumper_q->dumper); + dumper_q->tx_pkts += num_tx; + dumper_q->tx_bytes += tx_bytes; + dumper_q->err_pkts += nb_pkts - num_tx; + return num_tx; +} + +/* + * Callback to handle sending packets through a real NIC. + */ +static uint16_t +eth_pcap_tx(void *queue, + struct rte_mbuf **bufs, + uint16_t nb_pkts) +{ + unsigned i; + int ret; + struct rte_mbuf *mbuf; + struct pcap_tx_queue *tx_queue = queue; + uint16_t num_tx = 0; + uint32_t tx_bytes = 0; + + if (unlikely(nb_pkts == 0 || tx_queue->pcap == NULL)) + return 0; + + for (i = 0; i < nb_pkts; i++) { + mbuf = bufs[i]; + + if (likely(mbuf->nb_segs == 1)) { + ret = pcap_sendpacket(tx_queue->pcap, + rte_pktmbuf_mtod(mbuf, u_char *), + mbuf->pkt_len); + } else { + if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) { + eth_pcap_gather_data(tx_pcap_data, mbuf); + ret = pcap_sendpacket(tx_queue->pcap, + tx_pcap_data, + mbuf->pkt_len); + } else { + RTE_LOG(ERR, PMD, + "Dropping PCAP packet. " + "Size (%d) > max jumbo size (%d).\n", + mbuf->pkt_len, + ETHER_MAX_JUMBO_FRAME_LEN); + + rte_pktmbuf_free(mbuf); + break; + } + } + + if (unlikely(ret != 0)) + break; + num_tx++; + tx_bytes += mbuf->pkt_len; + rte_pktmbuf_free(mbuf); + } + + tx_queue->tx_pkts += num_tx; + tx_queue->tx_bytes += tx_bytes; + tx_queue->err_pkts += nb_pkts - num_tx; + return num_tx; +} + +static int +eth_dev_start(struct rte_eth_dev *dev) +{ + unsigned i; + struct pmd_internals *internals = dev->data->dev_private; + struct pcap_tx_queue *tx; + struct pcap_rx_queue *rx; + + /* Special iface case. Single pcap is open and shared between tx/rx. */ + if (internals->single_iface) { + tx = &internals->tx_queue[0]; + rx = &internals->rx_queue[0]; + + if (!tx->pcap && strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) { + if (open_single_iface(tx->name, &tx->pcap) < 0) + return -1; + rx->pcap = tx->pcap; + } + goto status_up; + } + + /* If not open already, open tx pcaps/dumpers */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + tx = &internals->tx_queue[i]; + + if (!tx->dumper && strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) { + if (open_single_tx_pcap(tx->name, &tx->dumper) < 0) + return -1; + } + + else if (!tx->pcap && strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) { + if (open_single_iface(tx->name, &tx->pcap) < 0) + return -1; + } + } + + /* If not open already, open rx pcaps */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rx = &internals->rx_queue[i]; + + if (rx->pcap != NULL) + continue; + + if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) { + if (open_single_rx_pcap(rx->name, &rx->pcap) < 0) + return -1; + } + + else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) { + if (open_single_iface(rx->name, &rx->pcap) < 0) + return -1; + } + } + +status_up: + + dev->data->dev_link.link_status = ETH_LINK_UP; + return 0; +} + +/* + * This function gets called when the current port gets stopped. + * Is the only place for us to close all the tx streams dumpers. + * If not called the dumpers will be flushed within each tx burst. 
+ */ +static void +eth_dev_stop(struct rte_eth_dev *dev) +{ + unsigned i; + struct pmd_internals *internals = dev->data->dev_private; + struct pcap_tx_queue *tx; + struct pcap_rx_queue *rx; + + /* Special iface case. Single pcap is open and shared between tx/rx. */ + if (internals->single_iface) { + tx = &internals->tx_queue[0]; + rx = &internals->rx_queue[0]; + pcap_close(tx->pcap); + tx->pcap = NULL; + rx->pcap = NULL; + goto status_down; + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + tx = &internals->tx_queue[i]; + + if (tx->dumper != NULL) { + pcap_dump_close(tx->dumper); + tx->dumper = NULL; + } + + if (tx->pcap != NULL) { + pcap_close(tx->pcap); + tx->pcap = NULL; + } + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rx = &internals->rx_queue[i]; + + if (rx->pcap != NULL) { + pcap_close(rx->pcap); + rx->pcap = NULL; + } + } + +status_down: + dev->data->dev_link.link_status = ETH_LINK_DOWN; +} + +static int +eth_dev_configure(struct rte_eth_dev *dev __rte_unused) +{ + return 0; +} + +static void +eth_dev_info(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct pmd_internals *internals = dev->data->dev_private; + dev_info->driver_name = drivername; + dev_info->if_index = internals->if_index; + dev_info->max_mac_addrs = 1; + dev_info->max_rx_pktlen = (uint32_t) -1; + dev_info->max_rx_queues = dev->data->nb_rx_queues; + dev_info->max_tx_queues = dev->data->nb_tx_queues; + dev_info->min_rx_bufsize = 0; + dev_info->pci_dev = NULL; +} + +static void +eth_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *igb_stats) +{ + unsigned i; + unsigned long rx_packets_total = 0, rx_bytes_total = 0; + unsigned long tx_packets_total = 0, tx_bytes_total = 0; + unsigned long tx_packets_err_total = 0; + const struct pmd_internals *internal = dev->data->dev_private; + + for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && + i < dev->data->nb_rx_queues; i++) { + igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts; + igb_stats->q_ibytes[i] = internal->rx_queue[i].rx_bytes; + rx_packets_total += igb_stats->q_ipackets[i]; + rx_bytes_total += igb_stats->q_ibytes[i]; + } + + for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && + i < dev->data->nb_tx_queues; i++) { + igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts; + igb_stats->q_obytes[i] = internal->tx_queue[i].tx_bytes; + igb_stats->q_errors[i] = internal->tx_queue[i].err_pkts; + tx_packets_total += igb_stats->q_opackets[i]; + tx_bytes_total += igb_stats->q_obytes[i]; + tx_packets_err_total += igb_stats->q_errors[i]; + } + + igb_stats->ipackets = rx_packets_total; + igb_stats->ibytes = rx_bytes_total; + igb_stats->opackets = tx_packets_total; + igb_stats->obytes = tx_bytes_total; + igb_stats->oerrors = tx_packets_err_total; +} + +static void +eth_stats_reset(struct rte_eth_dev *dev) +{ + unsigned i; + struct pmd_internals *internal = dev->data->dev_private; + for (i = 0; i < dev->data->nb_rx_queues; i++) { + internal->rx_queue[i].rx_pkts = 0; + internal->rx_queue[i].rx_bytes = 0; + } + for (i = 0; i < dev->data->nb_tx_queues; i++) { + internal->tx_queue[i].tx_pkts = 0; + internal->tx_queue[i].tx_bytes = 0; + internal->tx_queue[i].err_pkts = 0; + } +} + +static void +eth_dev_close(struct rte_eth_dev *dev __rte_unused) +{ +} + +static void +eth_queue_release(void *q __rte_unused) +{ +} + +static int +eth_link_update(struct rte_eth_dev *dev __rte_unused, + int wait_to_complete __rte_unused) +{ + return 0; +} + +static int +eth_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t rx_queue_id, + uint16_t nb_rx_desc 
__rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mb_pool) +{ + struct pmd_internals *internals = dev->data->dev_private; + struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id]; + pcap_q->mb_pool = mb_pool; + dev->data->rx_queues[rx_queue_id] = pcap_q; + pcap_q->in_port = dev->data->port_id; + return 0; +} + +static int +eth_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t tx_queue_id, + uint16_t nb_tx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + + struct pmd_internals *internals = dev->data->dev_private; + dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id]; + return 0; +} + +static const struct eth_dev_ops ops = { + .dev_start = eth_dev_start, + .dev_stop = eth_dev_stop, + .dev_close = eth_dev_close, + .dev_configure = eth_dev_configure, + .dev_infos_get = eth_dev_info, + .rx_queue_setup = eth_rx_queue_setup, + .tx_queue_setup = eth_tx_queue_setup, + .rx_queue_release = eth_queue_release, + .tx_queue_release = eth_queue_release, + .link_update = eth_link_update, + .stats_get = eth_stats_get, + .stats_reset = eth_stats_reset, +}; + +/* + * Function handler that opens the pcap file for reading a stores a + * reference of it for use it later on. + */ +static int +open_rx_pcap(const char *key, const char *value, void *extra_args) +{ + unsigned i; + const char *pcap_filename = value; + struct rx_pcaps *pcaps = extra_args; + pcap_t *pcap = NULL; + + for (i = 0; i < pcaps->num_of_rx; i++) { + if (open_single_rx_pcap(pcap_filename, &pcap) < 0) + return -1; + + pcaps->pcaps[i] = pcap; + pcaps->names[i] = pcap_filename; + pcaps->types[i] = key; + } + + return 0; +} + +static int +open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap) +{ + if ((*pcap = pcap_open_offline(pcap_filename, errbuf)) == NULL) { + RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", pcap_filename, errbuf); + return -1; + } + return 0; +} + +/* + * Opens a pcap file for writing and stores a reference to it + * for use it later on. + */ +static int +open_tx_pcap(const char *key, const char *value, void *extra_args) +{ + unsigned i; + const char *pcap_filename = value; + struct tx_pcaps *dumpers = extra_args; + pcap_dumper_t *dumper; + + for (i = 0; i < dumpers->num_of_tx; i++) { + if (open_single_tx_pcap(pcap_filename, &dumper) < 0) + return -1; + + dumpers->dumpers[i] = dumper; + dumpers->names[i] = pcap_filename; + dumpers->types[i] = key; + } + + return 0; +} + +static int +open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper) +{ + pcap_t *tx_pcap; + /* + * We need to create a dummy empty pcap_t to use it + * with pcap_dump_open(). We create big enough an Ethernet + * pcap holder. 
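+ * libpcap only takes the link type and snaplen from this handle when it
+ * writes the capture file's global header; no capture device is opened.
+ * This is the path taken when a port is created with a tx_pcap=<file>
+ * argument, e.g. --vdev 'eth_pcap0,rx_iface=eth0,tx_pcap=/tmp/tx.pcap'
+ * (the vdev name, interface and path here are purely illustrative).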
+ */ + + if ((tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN)) + == NULL) { + RTE_LOG(ERR, PMD, "Couldn't create dead pcap\n"); + return -1; + } + + /* The dumper is created using the previous pcap_t reference */ + if ((*dumper = pcap_dump_open(tx_pcap, pcap_filename)) == NULL) { + RTE_LOG(ERR, PMD, "Couldn't open %s for writing.\n", pcap_filename); + return -1; + } + + return 0; +} + +/* + * pcap_open_live wrapper function + */ +static inline int +open_iface_live(const char *iface, pcap_t **pcap) { + *pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN, + RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf); + + if (*pcap == NULL) { + RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", iface, errbuf); + return -1; + } + return 0; +} + +/* + * Opens an interface for reading and writing + */ +static inline int +open_rx_tx_iface(const char *key, const char *value, void *extra_args) +{ + const char *iface = value; + struct rx_pcaps *pcaps = extra_args; + pcap_t *pcap = NULL; + + if (open_single_iface(iface, &pcap) < 0) + return -1; + + pcaps->pcaps[0] = pcap; + pcaps->names[0] = iface; + pcaps->types[0] = key; + + return 0; +} + +/* + * Opens a NIC for reading packets from it + */ +static inline int +open_rx_iface(const char *key, const char *value, void *extra_args) +{ + unsigned i; + const char *iface = value; + struct rx_pcaps *pcaps = extra_args; + pcap_t *pcap = NULL; + + for (i = 0; i < pcaps->num_of_rx; i++) { + if (open_single_iface(iface, &pcap) < 0) + return -1; + pcaps->pcaps[i] = pcap; + pcaps->names[i] = iface; + pcaps->types[i] = key; + } + + return 0; +} + +/* + * Opens a NIC for writing packets to it + */ +static int +open_tx_iface(const char *key, const char *value, void *extra_args) +{ + unsigned i; + const char *iface = value; + struct tx_pcaps *pcaps = extra_args; + pcap_t *pcap; + + for (i = 0; i < pcaps->num_of_tx; i++) { + if (open_single_iface(iface, &pcap) < 0) + return -1; + pcaps->pcaps[i] = pcap; + pcaps->names[i] = iface; + pcaps->types[i] = key; + } + + return 0; +} + +static int +open_single_iface(const char *iface, pcap_t **pcap) +{ + if (open_iface_live(iface, pcap) < 0) { + RTE_LOG(ERR, PMD, "Couldn't open interface %s\n", iface); + return -1; + } + + return 0; +} + +static int +rte_pmd_init_internals(const char *name, const unsigned nb_rx_queues, + const unsigned nb_tx_queues, + const unsigned numa_node, + struct pmd_internals **internals, + struct rte_eth_dev **eth_dev, + struct rte_kvargs *kvlist) +{ + struct rte_eth_dev_data *data = NULL; + unsigned k_idx; + struct rte_kvargs_pair *pair = NULL; + + for (k_idx = 0; k_idx < kvlist->count; k_idx++) { + pair = &kvlist->pairs[k_idx]; + if (strstr(pair->key, ETH_PCAP_IFACE_ARG) != NULL) + break; + } + + RTE_LOG(INFO, PMD, + "Creating pcap-backed ethdev on numa socket %u\n", numa_node); + + /* now do all data allocation - for eth_dev structure + * and internal (private) data + */ + data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node); + if (data == NULL) + goto error; + + *internals = rte_zmalloc_socket(name, sizeof(**internals), 0, numa_node); + if (*internals == NULL) + goto error; + + /* reserve an ethdev entry */ + *eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL); + if (*eth_dev == NULL) + goto error; + + /* check length of device name */ + if ((strlen((*eth_dev)->data->name) + 1) > sizeof(data->name)) + goto error; + + /* now put it all together + * - store queue data in internals, + * - store numa_node info in eth_dev + * - point eth_dev_data to internals + * - and point eth_dev 
structure to new eth_dev_data structure + */ + /* NOTE: we'll replace the data element, of originally allocated eth_dev + * so the rings are local per-process */ + + if (pair == NULL) + (*internals)->if_index = 0; + else + (*internals)->if_index = if_nametoindex(pair->value); + + data->dev_private = *internals; + data->port_id = (*eth_dev)->data->port_id; + snprintf(data->name, sizeof(data->name), "%s", (*eth_dev)->data->name); + data->nb_rx_queues = (uint16_t)nb_rx_queues; + data->nb_tx_queues = (uint16_t)nb_tx_queues; + data->dev_link = pmd_link; + data->mac_addrs = ð_addr; + strncpy(data->name, + (*eth_dev)->data->name, strlen((*eth_dev)->data->name)); + + (*eth_dev)->data = data; + (*eth_dev)->dev_ops = &ops; + (*eth_dev)->driver = NULL; + data->dev_flags = RTE_ETH_DEV_DETACHABLE; + data->kdrv = RTE_KDRV_NONE; + data->drv_name = drivername; + data->numa_node = numa_node; + + return 0; + +error: + rte_free(data); + rte_free(*internals); + + return -1; +} + +static int +rte_eth_from_pcaps_common(const char *name, struct rx_pcaps *rx_queues, + const unsigned nb_rx_queues, struct tx_pcaps *tx_queues, + const unsigned nb_tx_queues, const unsigned numa_node, + struct rte_kvargs *kvlist, struct pmd_internals **internals, + struct rte_eth_dev **eth_dev) +{ + unsigned i; + + /* do some parameter checking */ + if (rx_queues == NULL && nb_rx_queues > 0) + return -1; + if (tx_queues == NULL && nb_tx_queues > 0) + return -1; + + if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, numa_node, + internals, eth_dev, kvlist) < 0) + return -1; + + for (i = 0; i < nb_rx_queues; i++) { + (*internals)->rx_queue[i].pcap = rx_queues->pcaps[i]; + snprintf((*internals)->rx_queue[i].name, + sizeof((*internals)->rx_queue[i].name), "%s", + rx_queues->names[i]); + snprintf((*internals)->rx_queue[i].type, + sizeof((*internals)->rx_queue[i].type), "%s", + rx_queues->types[i]); + } + for (i = 0; i < nb_tx_queues; i++) { + (*internals)->tx_queue[i].dumper = tx_queues->dumpers[i]; + snprintf((*internals)->tx_queue[i].name, + sizeof((*internals)->tx_queue[i].name), "%s", + tx_queues->names[i]); + snprintf((*internals)->tx_queue[i].type, + sizeof((*internals)->tx_queue[i].type), "%s", + tx_queues->types[i]); + } + + return 0; +} + +static int +rte_eth_from_pcaps_n_dumpers(const char *name, + struct rx_pcaps *rx_queues, + const unsigned nb_rx_queues, + struct tx_pcaps *tx_queues, + const unsigned nb_tx_queues, + const unsigned numa_node, + struct rte_kvargs *kvlist) +{ + struct pmd_internals *internals = NULL; + struct rte_eth_dev *eth_dev = NULL; + int ret; + + ret = rte_eth_from_pcaps_common(name, rx_queues, nb_rx_queues, + tx_queues, nb_tx_queues, numa_node, kvlist, + &internals, ð_dev); + + if (ret < 0) + return ret; + + /* using multiple pcaps/interfaces */ + internals->single_iface = 0; + + eth_dev->rx_pkt_burst = eth_pcap_rx; + eth_dev->tx_pkt_burst = eth_pcap_tx_dumper; + + return 0; +} + +static int +rte_eth_from_pcaps(const char *name, + struct rx_pcaps *rx_queues, + const unsigned nb_rx_queues, + struct tx_pcaps *tx_queues, + const unsigned nb_tx_queues, + const unsigned numa_node, + struct rte_kvargs *kvlist, + int single_iface) +{ + struct pmd_internals *internals = NULL; + struct rte_eth_dev *eth_dev = NULL; + int ret; + + ret = rte_eth_from_pcaps_common(name, rx_queues, nb_rx_queues, + tx_queues, nb_tx_queues, numa_node, kvlist, + &internals, ð_dev); + + if (ret < 0) + return ret; + + /* store wether we are using a single interface for rx/tx or not */ + internals->single_iface = single_iface; + + 
eth_dev->rx_pkt_burst = eth_pcap_rx; + eth_dev->tx_pkt_burst = eth_pcap_tx; + + return 0; +} + + +static int +rte_pmd_pcap_devinit(const char *name, const char *params) +{ + unsigned numa_node, using_dumpers = 0; + int ret; + struct rte_kvargs *kvlist; + struct rx_pcaps pcaps; + struct tx_pcaps dumpers; + + RTE_LOG(INFO, PMD, "Initializing pmd_pcap for %s\n", name); + + numa_node = rte_socket_id(); + + gettimeofday(&start_time, NULL); + start_cycles = rte_get_timer_cycles(); + hz = rte_get_timer_hz(); + + kvlist = rte_kvargs_parse(params, valid_arguments); + if (kvlist == NULL) + return -1; + + /* + * If iface argument is passed we open the NICs and use them for + * reading / writing + */ + if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) { + + ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG, + &open_rx_tx_iface, &pcaps); + if (ret < 0) + goto free_kvlist; + dumpers.pcaps[0] = pcaps.pcaps[0]; + dumpers.names[0] = pcaps.names[0]; + dumpers.types[0] = pcaps.types[0]; + ret = rte_eth_from_pcaps(name, &pcaps, 1, &dumpers, 1, + numa_node, kvlist, 1); + goto free_kvlist; + } + + /* + * We check whether we want to open a RX stream from a real NIC or a + * pcap file + */ + if ((pcaps.num_of_rx = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG))) { + ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG, + &open_rx_pcap, &pcaps); + } else { + pcaps.num_of_rx = rte_kvargs_count(kvlist, + ETH_PCAP_RX_IFACE_ARG); + ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG, + &open_rx_iface, &pcaps); + } + + if (ret < 0) + goto free_kvlist; + + /* + * We check whether we want to open a TX stream to a real NIC or a + * pcap file + */ + if ((dumpers.num_of_tx = rte_kvargs_count(kvlist, + ETH_PCAP_TX_PCAP_ARG))) { + ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG, + &open_tx_pcap, &dumpers); + using_dumpers = 1; + } else { + dumpers.num_of_tx = rte_kvargs_count(kvlist, + ETH_PCAP_TX_IFACE_ARG); + ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG, + &open_tx_iface, &dumpers); + } + + if (ret < 0) + goto free_kvlist; + + if (using_dumpers) + ret = rte_eth_from_pcaps_n_dumpers(name, &pcaps, pcaps.num_of_rx, + &dumpers, dumpers.num_of_tx, numa_node, kvlist); + else + ret = rte_eth_from_pcaps(name, &pcaps, pcaps.num_of_rx, &dumpers, + dumpers.num_of_tx, numa_node, kvlist, 0); + +free_kvlist: + rte_kvargs_free(kvlist); + return ret; +} + +static int +rte_pmd_pcap_devuninit(const char *name) +{ + struct rte_eth_dev *eth_dev = NULL; + + RTE_LOG(INFO, PMD, "Closing pcap ethdev on numa socket %u\n", + rte_socket_id()); + + if (name == NULL) + return -1; + + /* reserve an ethdev entry */ + eth_dev = rte_eth_dev_allocated(name); + if (eth_dev == NULL) + return -1; + + rte_free(eth_dev->data->dev_private); + rte_free(eth_dev->data); + + rte_eth_dev_release_port(eth_dev); + + return 0; +} + +static struct rte_driver pmd_pcap_drv = { + .name = "eth_pcap", + .type = PMD_VDEV, + .init = rte_pmd_pcap_devinit, + .uninit = rte_pmd_pcap_devuninit, +}; + +PMD_REGISTER_DRIVER(pmd_pcap_drv); diff --git a/drivers/net/pcap/rte_pmd_pcap_version.map b/drivers/net/pcap/rte_pmd_pcap_version.map new file mode 100644 index 00000000..ef353984 --- /dev/null +++ b/drivers/net/pcap/rte_pmd_pcap_version.map @@ -0,0 +1,4 @@ +DPDK_2.0 { + + local: *; +}; diff --git a/drivers/net/ring/Makefile b/drivers/net/ring/Makefile new file mode 100644 index 00000000..ae835052 --- /dev/null +++ b/drivers/net/ring/Makefile @@ -0,0 +1,61 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2014 Intel Corporation. All rights reserved. 
+# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_ring.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +EXPORT_MAP := rte_eth_ring_version.map + +LIBABIVER := 2 + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_PMD_RING) += rte_eth_ring.c + +# +# Export include files +# +SYMLINK-y-include += rte_eth_ring.h + +# this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += lib/librte_eal lib/librte_ring +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += lib/librte_mbuf lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += lib/librte_kvargs + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c new file mode 100644 index 00000000..b1783c3e --- /dev/null +++ b/drivers/net/ring/rte_eth_ring.c @@ -0,0 +1,633 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "rte_eth_ring.h" +#include <rte_mbuf.h> +#include <rte_ethdev.h> +#include <rte_malloc.h> +#include <rte_memcpy.h> +#include <rte_memzone.h> +#include <rte_string_fns.h> +#include <rte_dev.h> +#include <rte_kvargs.h> +#include <rte_errno.h> + +#define ETH_RING_NUMA_NODE_ACTION_ARG "nodeaction" +#define ETH_RING_ACTION_CREATE "CREATE" +#define ETH_RING_ACTION_ATTACH "ATTACH" + +static const char *valid_arguments[] = { + ETH_RING_NUMA_NODE_ACTION_ARG, + NULL +}; + +enum dev_action { + DEV_CREATE, + DEV_ATTACH +}; + +struct ring_queue { + struct rte_ring *rng; + rte_atomic64_t rx_pkts; + rte_atomic64_t tx_pkts; + rte_atomic64_t err_pkts; +}; + +struct pmd_internals { + unsigned max_rx_queues; + unsigned max_tx_queues; + + struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS]; + struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS]; + + struct ether_addr address; + enum dev_action action; +}; + + +static const char *drivername = "Rings PMD"; +static struct rte_eth_link pmd_link = { + .link_speed = ETH_SPEED_NUM_10G, + .link_duplex = ETH_LINK_FULL_DUPLEX, + .link_status = ETH_LINK_DOWN, + .link_autoneg = ETH_LINK_SPEED_AUTONEG +}; + +static uint16_t +eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) +{ + void **ptrs = (void *)&bufs[0]; + struct ring_queue *r = q; + const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng, + ptrs, nb_bufs); + if (r->rng->flags & RING_F_SC_DEQ) + r->rx_pkts.cnt += nb_rx; + else + rte_atomic64_add(&(r->rx_pkts), nb_rx); + return nb_rx; +} + +static uint16_t +eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) +{ + void **ptrs = (void *)&bufs[0]; + struct ring_queue *r = q; + const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng, + ptrs, nb_bufs); + if (r->rng->flags & RING_F_SP_ENQ) { + r->tx_pkts.cnt += nb_tx; + r->err_pkts.cnt += nb_bufs - nb_tx; + } else { + rte_atomic64_add(&(r->tx_pkts), nb_tx); + rte_atomic64_add(&(r->err_pkts), nb_bufs - nb_tx); + } + return nb_tx; +} + +static int +eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; } + +static int +eth_dev_start(struct rte_eth_dev *dev) +{ + dev->data->dev_link.link_status = ETH_LINK_UP; + return 0; +} + +static void +eth_dev_stop(struct rte_eth_dev *dev) +{ + dev->data->dev_link.link_status = ETH_LINK_DOWN; +} + +static int +eth_dev_set_link_down(struct rte_eth_dev *dev) +{ + dev->data->dev_link.link_status = ETH_LINK_DOWN; + return 0; +} + +static int +eth_dev_set_link_up(struct rte_eth_dev *dev) +{ + dev->data->dev_link.link_status = ETH_LINK_UP; + return 0; +} + +static int +eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mb_pool __rte_unused) +{ + struct pmd_internals *internals = dev->data->dev_private; + dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id]; + return 0; +} + +static int 
+eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + struct pmd_internals *internals = dev->data->dev_private; + dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id]; + return 0; +} + + +static void +eth_dev_info(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct pmd_internals *internals = dev->data->dev_private; + dev_info->driver_name = drivername; + dev_info->max_mac_addrs = 1; + dev_info->max_rx_pktlen = (uint32_t)-1; + dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues; + dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues; + dev_info->min_rx_bufsize = 0; + dev_info->pci_dev = NULL; +} + +static void +eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + unsigned i; + unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0; + const struct pmd_internals *internal = dev->data->dev_private; + + for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && + i < dev->data->nb_rx_queues; i++) { + stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt; + rx_total += stats->q_ipackets[i]; + } + + for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && + i < dev->data->nb_tx_queues; i++) { + stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt; + stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts.cnt; + tx_total += stats->q_opackets[i]; + tx_err_total += stats->q_errors[i]; + } + + stats->ipackets = rx_total; + stats->opackets = tx_total; + stats->oerrors = tx_err_total; +} + +static void +eth_stats_reset(struct rte_eth_dev *dev) +{ + unsigned i; + struct pmd_internals *internal = dev->data->dev_private; + for (i = 0; i < dev->data->nb_rx_queues; i++) + internal->rx_ring_queues[i].rx_pkts.cnt = 0; + for (i = 0; i < dev->data->nb_tx_queues; i++) { + internal->tx_ring_queues[i].tx_pkts.cnt = 0; + internal->tx_ring_queues[i].err_pkts.cnt = 0; + } +} + +static void +eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused, + uint32_t index __rte_unused) +{ +} + +static void +eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused, + struct ether_addr *mac_addr __rte_unused, + uint32_t index __rte_unused, + uint32_t vmdq __rte_unused) +{ +} + +static void +eth_queue_release(void *q __rte_unused) { ; } +static int +eth_link_update(struct rte_eth_dev *dev __rte_unused, + int wait_to_complete __rte_unused) { return 0; } + +static const struct eth_dev_ops ops = { + .dev_start = eth_dev_start, + .dev_stop = eth_dev_stop, + .dev_set_link_up = eth_dev_set_link_up, + .dev_set_link_down = eth_dev_set_link_down, + .dev_configure = eth_dev_configure, + .dev_infos_get = eth_dev_info, + .rx_queue_setup = eth_rx_queue_setup, + .tx_queue_setup = eth_tx_queue_setup, + .rx_queue_release = eth_queue_release, + .tx_queue_release = eth_queue_release, + .link_update = eth_link_update, + .stats_get = eth_stats_get, + .stats_reset = eth_stats_reset, + .mac_addr_remove = eth_mac_addr_remove, + .mac_addr_add = eth_mac_addr_add, +}; + +static int +do_eth_dev_ring_create(const char *name, + struct rte_ring * const rx_queues[], const unsigned nb_rx_queues, + struct rte_ring *const tx_queues[], const unsigned nb_tx_queues, + const unsigned numa_node, enum dev_action action) +{ + struct rte_eth_dev_data *data = NULL; + struct pmd_internals *internals = NULL; + struct rte_eth_dev *eth_dev = NULL; + unsigned i; + + RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n", + numa_node); + 
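+	/*
+	 * A minimal usage sketch of how callers reach this function (all
+	 * names below are illustrative and error handling is omitted): an
+	 * application holding two existing rings can expose them as an
+	 * ethdev port with
+	 *
+	 *	struct rte_ring *rx[1] = { rte_ring_lookup("demo_rx") };
+	 *	struct rte_ring *tx[1] = { rte_ring_lookup("demo_tx") };
+	 *	int port = rte_eth_from_rings("eth_ring_demo", rx, 1, tx, 1,
+	 *			rte_socket_id());
+	 *
+	 * which lands here with action == DEV_ATTACH, while the eth_ring
+	 * vdev path below creates its own rings and arrives with DEV_CREATE.
+	 */
+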
+ /* now do all data allocation - for eth_dev structure, dummy pci driver + * and internal (private) data + */ + data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node); + if (data == NULL) { + rte_errno = ENOMEM; + goto error; + } + + data->rx_queues = rte_zmalloc_socket(name, + sizeof(void *) * nb_rx_queues, 0, numa_node); + if (data->rx_queues == NULL) { + rte_errno = ENOMEM; + goto error; + } + + data->tx_queues = rte_zmalloc_socket(name, + sizeof(void *) * nb_tx_queues, 0, numa_node); + if (data->tx_queues == NULL) { + rte_errno = ENOMEM; + goto error; + } + + internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node); + if (internals == NULL) { + rte_errno = ENOMEM; + goto error; + } + + /* reserve an ethdev entry */ + eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL); + if (eth_dev == NULL) { + rte_errno = ENOSPC; + goto error; + } + + /* now put it all together + * - store queue data in internals, + * - store numa_node info in eth_dev_data + * - point eth_dev_data to internals + * - and point eth_dev structure to new eth_dev_data structure + */ + /* NOTE: we'll replace the data element, of originally allocated eth_dev + * so the rings are local per-process */ + + internals->action = action; + internals->max_rx_queues = nb_rx_queues; + internals->max_tx_queues = nb_tx_queues; + for (i = 0; i < nb_rx_queues; i++) { + internals->rx_ring_queues[i].rng = rx_queues[i]; + data->rx_queues[i] = &internals->rx_ring_queues[i]; + } + for (i = 0; i < nb_tx_queues; i++) { + internals->tx_ring_queues[i].rng = tx_queues[i]; + data->tx_queues[i] = &internals->tx_ring_queues[i]; + } + + data->dev_private = internals; + data->port_id = eth_dev->data->port_id; + memmove(data->name, eth_dev->data->name, sizeof(data->name)); + data->nb_rx_queues = (uint16_t)nb_rx_queues; + data->nb_tx_queues = (uint16_t)nb_tx_queues; + data->dev_link = pmd_link; + data->mac_addrs = &internals->address; + + eth_dev->data = data; + eth_dev->driver = NULL; + eth_dev->dev_ops = &ops; + data->dev_flags = RTE_ETH_DEV_DETACHABLE; + data->kdrv = RTE_KDRV_NONE; + data->drv_name = drivername; + data->numa_node = numa_node; + + TAILQ_INIT(&(eth_dev->link_intr_cbs)); + + /* finally assign rx and tx ops */ + eth_dev->rx_pkt_burst = eth_ring_rx; + eth_dev->tx_pkt_burst = eth_ring_tx; + + return data->port_id; + +error: + if (data) { + rte_free(data->rx_queues); + rte_free(data->tx_queues); + } + rte_free(data); + rte_free(internals); + + return -1; +} + +int +rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[], + const unsigned nb_rx_queues, + struct rte_ring *const tx_queues[], + const unsigned nb_tx_queues, + const unsigned numa_node) +{ + /* do some parameter checking */ + if (rx_queues == NULL && nb_rx_queues > 0) { + rte_errno = EINVAL; + return -1; + } + if (tx_queues == NULL && nb_tx_queues > 0) { + rte_errno = EINVAL; + return -1; + } + if (nb_rx_queues > RTE_PMD_RING_MAX_RX_RINGS) { + rte_errno = EINVAL; + return -1; + } + + return do_eth_dev_ring_create(name, rx_queues, nb_rx_queues, + tx_queues, nb_tx_queues, numa_node, DEV_ATTACH); +} + +int +rte_eth_from_ring(struct rte_ring *r) +{ + return rte_eth_from_rings(r->name, &r, 1, &r, 1, + r->memzone ? r->memzone->socket_id : SOCKET_ID_ANY); +} + +static int +eth_dev_ring_create(const char *name, const unsigned numa_node, + enum dev_action action) +{ + /* rx and tx are so-called from point of view of first port. 
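+ * (Within one port the same ETH_RXTX<i>_<name> ring, created or looked
+ * up below, backs both RX queue i and TX queue i.)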
+ * They are inverted from the point of view of second port + */ + struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS]; + unsigned i; + char rng_name[RTE_RING_NAMESIZE]; + unsigned num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS, + RTE_PMD_RING_MAX_TX_RINGS); + + for (i = 0; i < num_rings; i++) { + snprintf(rng_name, sizeof(rng_name), "ETH_RXTX%u_%s", i, name); + rxtx[i] = (action == DEV_CREATE) ? + rte_ring_create(rng_name, 1024, numa_node, + RING_F_SP_ENQ|RING_F_SC_DEQ) : + rte_ring_lookup(rng_name); + if (rxtx[i] == NULL) + return -1; + } + + if (do_eth_dev_ring_create(name, rxtx, num_rings, rxtx, num_rings, + numa_node, action) < 0) + return -1; + + return 0; +} + +struct node_action_pair { + char name[PATH_MAX]; + unsigned node; + enum dev_action action; +}; + +struct node_action_list { + unsigned total; + unsigned count; + struct node_action_pair *list; +}; + +static int parse_kvlist (const char *key __rte_unused, const char *value, void *data) +{ + struct node_action_list *info = data; + int ret; + char *name; + char *action; + char *node; + char *end; + + name = strdup(value); + + ret = -EINVAL; + + if (!name) { + RTE_LOG(WARNING, PMD, "command line paramter is empty for ring pmd!\n"); + goto out; + } + + node = strchr(name, ':'); + if (!node) { + RTE_LOG(WARNING, PMD, "could not parse node value from %s", name); + goto out; + } + + *node = '\0'; + node++; + + action = strchr(node, ':'); + if (!action) { + RTE_LOG(WARNING, PMD, "could not action value from %s", node); + goto out; + } + + *action = '\0'; + action++; + + /* + * Need to do some sanity checking here + */ + + if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0) + info->list[info->count].action = DEV_ATTACH; + else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0) + info->list[info->count].action = DEV_CREATE; + else + goto out; + + errno = 0; + info->list[info->count].node = strtol(node, &end, 10); + + if ((errno != 0) || (*end != '\0')) { + RTE_LOG(WARNING, PMD, "node value %s is unparseable as a number\n", node); + goto out; + } + + snprintf(info->list[info->count].name, sizeof(info->list[info->count].name), "%s", name); + + info->count++; + + ret = 0; +out: + free(name); + return ret; +} + +static int +rte_pmd_ring_devinit(const char *name, const char *params) +{ + struct rte_kvargs *kvlist = NULL; + int ret = 0; + struct node_action_list *info = NULL; + + RTE_LOG(INFO, PMD, "Initializing pmd_ring for %s\n", name); + + if (params == NULL || params[0] == '\0') { + ret = eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE); + if (ret == -1) { + RTE_LOG(INFO, PMD, + "Attach to pmd_ring for %s\n", name); + ret = eth_dev_ring_create(name, rte_socket_id(), + DEV_ATTACH); + } + } + else { + kvlist = rte_kvargs_parse(params, valid_arguments); + + if (!kvlist) { + RTE_LOG(INFO, PMD, "Ignoring unsupported parameters when creating" + " rings-backed ethernet device\n"); + ret = eth_dev_ring_create(name, rte_socket_id(), + DEV_CREATE); + if (ret == -1) { + RTE_LOG(INFO, PMD, + "Attach to pmd_ring for %s\n", + name); + ret = eth_dev_ring_create(name, rte_socket_id(), + DEV_ATTACH); + } + return ret; + } else { + ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG); + info = rte_zmalloc("struct node_action_list", + sizeof(struct node_action_list) + + (sizeof(struct node_action_pair) * ret), + 0); + if (!info) + goto out_free; + + info->total = ret; + info->list = (struct node_action_pair*)(info + 1); + + ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG, + parse_kvlist, info); + + if (ret < 0) + goto out_free; + + for 
(info->count = 0; info->count < info->total; info->count++) { + ret = eth_dev_ring_create(name, + info->list[info->count].node, + info->list[info->count].action); + if ((ret == -1) && + (info->list[info->count].action == DEV_CREATE)) { + RTE_LOG(INFO, PMD, + "Attach to pmd_ring for %s\n", + name); + ret = eth_dev_ring_create(name, + info->list[info->count].node, + DEV_ATTACH); + } + } + } + } + +out_free: + rte_kvargs_free(kvlist); + rte_free(info); + return ret; +} + +static int +rte_pmd_ring_devuninit(const char *name) +{ + struct rte_eth_dev *eth_dev = NULL; + struct pmd_internals *internals = NULL; + struct ring_queue *r = NULL; + uint16_t i; + + RTE_LOG(INFO, PMD, "Un-Initializing pmd_ring for %s\n", name); + + if (name == NULL) + return -EINVAL; + + /* find an ethdev entry */ + eth_dev = rte_eth_dev_allocated(name); + if (eth_dev == NULL) + return -ENODEV; + + eth_dev_stop(eth_dev); + + if (eth_dev->data) { + internals = eth_dev->data->dev_private; + if (internals->action == DEV_CREATE) { + /* + * it is only necessary to delete the rings in rx_queues because + * they are the same used in tx_queues + */ + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + r = eth_dev->data->rx_queues[i]; + rte_ring_free(r->rng); + } + } + + rte_free(eth_dev->data->rx_queues); + rte_free(eth_dev->data->tx_queues); + rte_free(eth_dev->data->dev_private); + } + + rte_free(eth_dev->data); + + rte_eth_dev_release_port(eth_dev); + return 0; +} + +static struct rte_driver pmd_ring_drv = { + .name = "eth_ring", + .type = PMD_VDEV, + .init = rte_pmd_ring_devinit, + .uninit = rte_pmd_ring_devuninit, +}; + +PMD_REGISTER_DRIVER(pmd_ring_drv); diff --git a/drivers/net/ring/rte_eth_ring.h b/drivers/net/ring/rte_eth_ring.h new file mode 100644 index 00000000..4ff83eca --- /dev/null +++ b/drivers/net/ring/rte_eth_ring.h @@ -0,0 +1,86 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _RTE_ETH_RING_H_ +#define _RTE_ETH_RING_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include <rte_ring.h> + +/** + * Create a new ethdev port from a set of rings + * + * @param name + * name to be given to the new ethdev port + * @param rx_queues + * pointer to array of rte_rings to be used as RX queues + * @param nb_rx_queues + * number of elements in the rx_queues array + * @param tx_queues + * pointer to array of rte_rings to be used as TX queues + * @param nb_tx_queues + * number of elements in the tx_queues array + * @param numa_node + * the numa node on which the memory for this port is to be allocated + * @return + * the port number of the newly created the ethdev or -1 on error. + */ +int rte_eth_from_rings(const char *name, + struct rte_ring * const rx_queues[], + const unsigned nb_rx_queues, + struct rte_ring *const tx_queues[], + const unsigned nb_tx_queues, + const unsigned numa_node); + +/** + * Create a new ethdev port from a ring + * + * This function is a shortcut call for rte_eth_from_rings for the + * case where one wants to take a single rte_ring and use it as though + * it were an ethdev + * + * @param ring + * the ring to be used as an ethdev + * @return + * the port number of the newly created ethdev, or -1 on error + */ +int rte_eth_from_ring(struct rte_ring *r); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ring/rte_eth_ring_version.map b/drivers/net/ring/rte_eth_ring_version.map new file mode 100644 index 00000000..1f785d94 --- /dev/null +++ b/drivers/net/ring/rte_eth_ring_version.map @@ -0,0 +1,14 @@ +DPDK_2.0 { + global: + + rte_eth_from_rings; + + local: *; +}; + +DPDK_2.2 { + global: + + rte_eth_from_ring; + +} DPDK_2.0; diff --git a/drivers/net/szedata2/Makefile b/drivers/net/szedata2/Makefile new file mode 100644 index 00000000..963a8d67 --- /dev/null +++ b/drivers/net/szedata2/Makefile @@ -0,0 +1,63 @@ +# BSD LICENSE +# +# Copyright (c) 2015 CESNET +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of CESNET nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
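+# Note: this PMD links against CESNET's libsze2 (see LDLIBS below), so the
+# sze2 headers and library are assumed to be installed on the build host.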
+ +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_szedata2.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +LDLIBS += -lsze2 + +EXPORT_MAP := rte_pmd_szedata2_version.map + +LIBABIVER := 1 + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += rte_eth_szedata2.c + +# +# Export include files +# +SYMLINK-y-include += + +# this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_malloc +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_kvargs + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c new file mode 100644 index 00000000..78c43b0c --- /dev/null +++ b/drivers/net/szedata2/rte_eth_szedata2.c @@ -0,0 +1,1604 @@ +/*- + * BSD LICENSE + * + * Copyright (c) 2015 - 2016 CESNET + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of CESNET nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <stdint.h> +#include <unistd.h> +#include <stdbool.h> +#include <err.h> +#include <sys/types.h> +#include <dirent.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <sys/mman.h> + +#include <libsze2.h> + +#include <rte_mbuf.h> +#include <rte_ethdev.h> +#include <rte_malloc.h> +#include <rte_memcpy.h> +#include <rte_kvargs.h> +#include <rte_dev.h> +#include <rte_atomic.h> + +#include "rte_eth_szedata2.h" + +#define RTE_ETH_SZEDATA2_MAX_RX_QUEUES 32 +#define RTE_ETH_SZEDATA2_MAX_TX_QUEUES 32 +#define RTE_ETH_SZEDATA2_TX_LOCK_SIZE (32 * 1024 * 1024) + +/** + * size of szedata2_packet header with alignment + */ +#define RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED 8 + +#define RTE_SZEDATA2_DRIVER_NAME "rte_szedata2_pmd" +#define RTE_SZEDATA2_PCI_DRIVER_NAME "rte_szedata2_pmd" + +#define SZEDATA2_DEV_PATH_FMT "/dev/szedataII%u" + +struct szedata2_rx_queue { + struct szedata *sze; + uint8_t rx_channel; + uint8_t in_port; + struct rte_mempool *mb_pool; + volatile uint64_t rx_pkts; + volatile uint64_t rx_bytes; + volatile uint64_t err_pkts; +}; + +struct szedata2_tx_queue { + struct szedata *sze; + uint8_t tx_channel; + volatile uint64_t tx_pkts; + volatile uint64_t tx_bytes; + volatile uint64_t err_pkts; +}; + +struct pmd_internals { + struct szedata2_rx_queue rx_queue[RTE_ETH_SZEDATA2_MAX_RX_QUEUES]; + struct szedata2_tx_queue tx_queue[RTE_ETH_SZEDATA2_MAX_TX_QUEUES]; + uint16_t max_rx_queues; + uint16_t max_tx_queues; + char sze_dev[PATH_MAX]; +}; + +static struct ether_addr eth_addr = { + .addr_bytes = { 0x00, 0x11, 0x17, 0x00, 0x00, 0x00 } +}; + +static uint16_t +eth_szedata2_rx(void *queue, + struct rte_mbuf **bufs, + uint16_t nb_pkts) +{ + unsigned int i; + struct rte_mbuf *mbuf; + struct szedata2_rx_queue *sze_q = queue; + struct rte_pktmbuf_pool_private *mbp_priv; + uint16_t num_rx = 0; + uint16_t buf_size; + uint16_t sg_size; + uint16_t hw_size; + uint16_t packet_size; + uint64_t num_bytes = 0; + struct szedata *sze = sze_q->sze; + uint8_t *header_ptr = NULL; /* header of packet */ + uint8_t *packet_ptr1 = NULL; + uint8_t *packet_ptr2 = NULL; + uint16_t packet_len1 = 0; + uint16_t packet_len2 = 0; + uint16_t hw_data_align; + + if (unlikely(sze_q->sze == NULL || nb_pkts == 0)) + return 0; + + /* + * Reads the given number of packets from szedata2 channel given + * by queue and copies the packet data into a newly allocated mbuf + * to return. 
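+ * Each szedata2 segment starts with a header whose first two
+ * little-endian 16-bit words give the whole segment length (sg_size)
+ * and the length of the hardware-specific metadata (hw_size); the
+ * Ethernet frame follows the 8-byte-aligned header + metadata region,
+ * which is what the sg_size/hw_size arithmetic below computes.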
+ */ + for (i = 0; i < nb_pkts; i++) { + mbuf = rte_pktmbuf_alloc(sze_q->mb_pool); + + if (unlikely(mbuf == NULL)) + break; + + /* get the next sze packet */ + if (sze->ct_rx_lck != NULL && !sze->ct_rx_rem_bytes && + sze->ct_rx_lck->next == NULL) { + /* unlock old data */ + szedata_rx_unlock_data(sze_q->sze, sze->ct_rx_lck_orig); + sze->ct_rx_lck_orig = NULL; + sze->ct_rx_lck = NULL; + } + + if (!sze->ct_rx_rem_bytes && sze->ct_rx_lck_orig == NULL) { + /* nothing to read, lock new data */ + sze->ct_rx_lck = szedata_rx_lock_data(sze_q->sze, ~0U); + sze->ct_rx_lck_orig = sze->ct_rx_lck; + + if (sze->ct_rx_lck == NULL) { + /* nothing to lock */ + rte_pktmbuf_free(mbuf); + break; + } + + sze->ct_rx_cur_ptr = sze->ct_rx_lck->start; + sze->ct_rx_rem_bytes = sze->ct_rx_lck->len; + + if (!sze->ct_rx_rem_bytes) { + rte_pktmbuf_free(mbuf); + break; + } + } + + if (sze->ct_rx_rem_bytes < RTE_SZE2_PACKET_HEADER_SIZE) { + /* + * cut in header + * copy parts of header to merge buffer + */ + if (sze->ct_rx_lck->next == NULL) { + rte_pktmbuf_free(mbuf); + break; + } + + /* copy first part of header */ + rte_memcpy(sze->ct_rx_buffer, sze->ct_rx_cur_ptr, + sze->ct_rx_rem_bytes); + + /* copy second part of header */ + sze->ct_rx_lck = sze->ct_rx_lck->next; + sze->ct_rx_cur_ptr = sze->ct_rx_lck->start; + rte_memcpy(sze->ct_rx_buffer + sze->ct_rx_rem_bytes, + sze->ct_rx_cur_ptr, + RTE_SZE2_PACKET_HEADER_SIZE - + sze->ct_rx_rem_bytes); + + sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE - + sze->ct_rx_rem_bytes; + sze->ct_rx_rem_bytes = sze->ct_rx_lck->len - + RTE_SZE2_PACKET_HEADER_SIZE + + sze->ct_rx_rem_bytes; + + header_ptr = (uint8_t *)sze->ct_rx_buffer; + } else { + /* not cut */ + header_ptr = (uint8_t *)sze->ct_rx_cur_ptr; + sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE; + sze->ct_rx_rem_bytes -= RTE_SZE2_PACKET_HEADER_SIZE; + } + + sg_size = le16toh(*((uint16_t *)header_ptr)); + hw_size = le16toh(*(((uint16_t *)header_ptr) + 1)); + packet_size = sg_size - + RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size); + + + /* checks if packet all right */ + if (!sg_size) + errx(5, "Zero segsize"); + + /* check sg_size and hwsize */ + if (hw_size > sg_size - RTE_SZE2_PACKET_HEADER_SIZE) { + errx(10, "Hwsize bigger than expected. 
Segsize: %d, " + "hwsize: %d", sg_size, hw_size); + } + + hw_data_align = + RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size) - + RTE_SZE2_PACKET_HEADER_SIZE; + + if (sze->ct_rx_rem_bytes >= + (uint16_t)(sg_size - + RTE_SZE2_PACKET_HEADER_SIZE)) { + /* no cut */ + /* one packet ready - go to another */ + packet_ptr1 = sze->ct_rx_cur_ptr + hw_data_align; + packet_len1 = packet_size; + packet_ptr2 = NULL; + packet_len2 = 0; + + sze->ct_rx_cur_ptr += RTE_SZE2_ALIGN8(sg_size) - + RTE_SZE2_PACKET_HEADER_SIZE; + sze->ct_rx_rem_bytes -= RTE_SZE2_ALIGN8(sg_size) - + RTE_SZE2_PACKET_HEADER_SIZE; + } else { + /* cut in data */ + if (sze->ct_rx_lck->next == NULL) { + errx(6, "Need \"next\" lock, " + "but it is missing: %u", + sze->ct_rx_rem_bytes); + } + + /* skip hw data */ + if (sze->ct_rx_rem_bytes <= hw_data_align) { + uint16_t rem_size = hw_data_align - + sze->ct_rx_rem_bytes; + + /* MOVE to next lock */ + sze->ct_rx_lck = sze->ct_rx_lck->next; + sze->ct_rx_cur_ptr = + (void *)(((uint8_t *) + (sze->ct_rx_lck->start)) + rem_size); + + packet_ptr1 = sze->ct_rx_cur_ptr; + packet_len1 = packet_size; + packet_ptr2 = NULL; + packet_len2 = 0; + + sze->ct_rx_cur_ptr += + RTE_SZE2_ALIGN8(packet_size); + sze->ct_rx_rem_bytes = sze->ct_rx_lck->len - + rem_size - RTE_SZE2_ALIGN8(packet_size); + } else { + /* get pointer and length from first part */ + packet_ptr1 = sze->ct_rx_cur_ptr + + hw_data_align; + packet_len1 = sze->ct_rx_rem_bytes - + hw_data_align; + + /* MOVE to next lock */ + sze->ct_rx_lck = sze->ct_rx_lck->next; + sze->ct_rx_cur_ptr = sze->ct_rx_lck->start; + + /* get pointer and length from second part */ + packet_ptr2 = sze->ct_rx_cur_ptr; + packet_len2 = packet_size - packet_len1; + + sze->ct_rx_cur_ptr += + RTE_SZE2_ALIGN8(packet_size) - + packet_len1; + sze->ct_rx_rem_bytes = sze->ct_rx_lck->len - + (RTE_SZE2_ALIGN8(packet_size) - + packet_len1); + } + } + + if (unlikely(packet_ptr1 == NULL)) { + rte_pktmbuf_free(mbuf); + break; + } + + /* get the space available for data in the mbuf */ + mbp_priv = rte_mempool_get_priv(sze_q->mb_pool); + buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size - + RTE_PKTMBUF_HEADROOM); + + if (packet_size <= buf_size) { + /* sze packet will fit in one mbuf, go ahead and copy */ + rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), + packet_ptr1, packet_len1); + if (packet_ptr2 != NULL) { + rte_memcpy((void *)(rte_pktmbuf_mtod(mbuf, + uint8_t *) + packet_len1), + packet_ptr2, packet_len2); + } + mbuf->data_len = (uint16_t)packet_size; + + mbuf->pkt_len = packet_size; + mbuf->port = sze_q->in_port; + bufs[num_rx] = mbuf; + num_rx++; + num_bytes += packet_size; + } else { + /* + * sze packet will not fit in one mbuf, + * scattered mode is not enabled, drop packet + */ + RTE_LOG(ERR, PMD, + "SZE segment %d bytes will not fit in one mbuf " + "(%d bytes), scattered mode is not enabled, " + "drop packet!!\n", + packet_size, buf_size); + rte_pktmbuf_free(mbuf); + } + } + + sze_q->rx_pkts += num_rx; + sze_q->rx_bytes += num_bytes; + return num_rx; +} + +static uint16_t +eth_szedata2_rx_scattered(void *queue, + struct rte_mbuf **bufs, + uint16_t nb_pkts) +{ + unsigned int i; + struct rte_mbuf *mbuf; + struct szedata2_rx_queue *sze_q = queue; + struct rte_pktmbuf_pool_private *mbp_priv; + uint16_t num_rx = 0; + uint16_t buf_size; + uint16_t sg_size; + uint16_t hw_size; + uint16_t packet_size; + uint64_t num_bytes = 0; + struct szedata *sze = sze_q->sze; + uint8_t *header_ptr = NULL; /* header of packet */ + uint8_t *packet_ptr1 = NULL; + uint8_t *packet_ptr2 = NULL; + 
uint16_t packet_len1 = 0; + uint16_t packet_len2 = 0; + uint16_t hw_data_align; + + if (unlikely(sze_q->sze == NULL || nb_pkts == 0)) + return 0; + + /* + * Reads the given number of packets from szedata2 channel given + * by queue and copies the packet data into a newly allocated mbuf + * to return. + */ + for (i = 0; i < nb_pkts; i++) { + const struct szedata_lock *ct_rx_lck_backup; + unsigned int ct_rx_rem_bytes_backup; + unsigned char *ct_rx_cur_ptr_backup; + + /* get the next sze packet */ + if (sze->ct_rx_lck != NULL && !sze->ct_rx_rem_bytes && + sze->ct_rx_lck->next == NULL) { + /* unlock old data */ + szedata_rx_unlock_data(sze_q->sze, sze->ct_rx_lck_orig); + sze->ct_rx_lck_orig = NULL; + sze->ct_rx_lck = NULL; + } + + /* + * Store items from sze structure which can be changed + * before mbuf allocating. Use these items in case of mbuf + * allocating failure. + */ + ct_rx_lck_backup = sze->ct_rx_lck; + ct_rx_rem_bytes_backup = sze->ct_rx_rem_bytes; + ct_rx_cur_ptr_backup = sze->ct_rx_cur_ptr; + + if (!sze->ct_rx_rem_bytes && sze->ct_rx_lck_orig == NULL) { + /* nothing to read, lock new data */ + sze->ct_rx_lck = szedata_rx_lock_data(sze_q->sze, ~0U); + sze->ct_rx_lck_orig = sze->ct_rx_lck; + + /* + * Backup items from sze structure must be updated + * after locking to contain pointers to new locks. + */ + ct_rx_lck_backup = sze->ct_rx_lck; + ct_rx_rem_bytes_backup = sze->ct_rx_rem_bytes; + ct_rx_cur_ptr_backup = sze->ct_rx_cur_ptr; + + if (sze->ct_rx_lck == NULL) + /* nothing to lock */ + break; + + sze->ct_rx_cur_ptr = sze->ct_rx_lck->start; + sze->ct_rx_rem_bytes = sze->ct_rx_lck->len; + + if (!sze->ct_rx_rem_bytes) + break; + } + + if (sze->ct_rx_rem_bytes < RTE_SZE2_PACKET_HEADER_SIZE) { + /* + * cut in header - copy parts of header to merge buffer + */ + if (sze->ct_rx_lck->next == NULL) + break; + + /* copy first part of header */ + rte_memcpy(sze->ct_rx_buffer, sze->ct_rx_cur_ptr, + sze->ct_rx_rem_bytes); + + /* copy second part of header */ + sze->ct_rx_lck = sze->ct_rx_lck->next; + sze->ct_rx_cur_ptr = sze->ct_rx_lck->start; + rte_memcpy(sze->ct_rx_buffer + sze->ct_rx_rem_bytes, + sze->ct_rx_cur_ptr, + RTE_SZE2_PACKET_HEADER_SIZE - + sze->ct_rx_rem_bytes); + + sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE - + sze->ct_rx_rem_bytes; + sze->ct_rx_rem_bytes = sze->ct_rx_lck->len - + RTE_SZE2_PACKET_HEADER_SIZE + + sze->ct_rx_rem_bytes; + + header_ptr = (uint8_t *)sze->ct_rx_buffer; + } else { + /* not cut */ + header_ptr = (uint8_t *)sze->ct_rx_cur_ptr; + sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE; + sze->ct_rx_rem_bytes -= RTE_SZE2_PACKET_HEADER_SIZE; + } + + sg_size = le16toh(*((uint16_t *)header_ptr)); + hw_size = le16toh(*(((uint16_t *)header_ptr) + 1)); + packet_size = sg_size - + RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size); + + + /* checks if packet all right */ + if (!sg_size) + errx(5, "Zero segsize"); + + /* check sg_size and hwsize */ + if (hw_size > sg_size - RTE_SZE2_PACKET_HEADER_SIZE) { + errx(10, "Hwsize bigger than expected. 
Segsize: %d, " + "hwsize: %d", sg_size, hw_size); + } + + hw_data_align = + RTE_SZE2_ALIGN8((RTE_SZE2_PACKET_HEADER_SIZE + + hw_size)) - RTE_SZE2_PACKET_HEADER_SIZE; + + if (sze->ct_rx_rem_bytes >= + (uint16_t)(sg_size - + RTE_SZE2_PACKET_HEADER_SIZE)) { + /* no cut */ + /* one packet ready - go to another */ + packet_ptr1 = sze->ct_rx_cur_ptr + hw_data_align; + packet_len1 = packet_size; + packet_ptr2 = NULL; + packet_len2 = 0; + + sze->ct_rx_cur_ptr += RTE_SZE2_ALIGN8(sg_size) - + RTE_SZE2_PACKET_HEADER_SIZE; + sze->ct_rx_rem_bytes -= RTE_SZE2_ALIGN8(sg_size) - + RTE_SZE2_PACKET_HEADER_SIZE; + } else { + /* cut in data */ + if (sze->ct_rx_lck->next == NULL) { + errx(6, "Need \"next\" lock, but it is " + "missing: %u", sze->ct_rx_rem_bytes); + } + + /* skip hw data */ + if (sze->ct_rx_rem_bytes <= hw_data_align) { + uint16_t rem_size = hw_data_align - + sze->ct_rx_rem_bytes; + + /* MOVE to next lock */ + sze->ct_rx_lck = sze->ct_rx_lck->next; + sze->ct_rx_cur_ptr = + (void *)(((uint8_t *) + (sze->ct_rx_lck->start)) + rem_size); + + packet_ptr1 = sze->ct_rx_cur_ptr; + packet_len1 = packet_size; + packet_ptr2 = NULL; + packet_len2 = 0; + + sze->ct_rx_cur_ptr += + RTE_SZE2_ALIGN8(packet_size); + sze->ct_rx_rem_bytes = sze->ct_rx_lck->len - + rem_size - RTE_SZE2_ALIGN8(packet_size); + } else { + /* get pointer and length from first part */ + packet_ptr1 = sze->ct_rx_cur_ptr + + hw_data_align; + packet_len1 = sze->ct_rx_rem_bytes - + hw_data_align; + + /* MOVE to next lock */ + sze->ct_rx_lck = sze->ct_rx_lck->next; + sze->ct_rx_cur_ptr = sze->ct_rx_lck->start; + + /* get pointer and length from second part */ + packet_ptr2 = sze->ct_rx_cur_ptr; + packet_len2 = packet_size - packet_len1; + + sze->ct_rx_cur_ptr += + RTE_SZE2_ALIGN8(packet_size) - + packet_len1; + sze->ct_rx_rem_bytes = sze->ct_rx_lck->len - + (RTE_SZE2_ALIGN8(packet_size) - + packet_len1); + } + } + + if (unlikely(packet_ptr1 == NULL)) + break; + + mbuf = rte_pktmbuf_alloc(sze_q->mb_pool); + + if (unlikely(mbuf == NULL)) { + /* + * Restore items from sze structure to state after + * unlocking (eventually locking). + */ + sze->ct_rx_lck = ct_rx_lck_backup; + sze->ct_rx_rem_bytes = ct_rx_rem_bytes_backup; + sze->ct_rx_cur_ptr = ct_rx_cur_ptr_backup; + break; + } + + /* get the space available for data in the mbuf */ + mbp_priv = rte_mempool_get_priv(sze_q->mb_pool); + buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size - + RTE_PKTMBUF_HEADROOM); + + if (packet_size <= buf_size) { + /* sze packet will fit in one mbuf, go ahead and copy */ + rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), + packet_ptr1, packet_len1); + if (packet_ptr2 != NULL) { + rte_memcpy((void *) + (rte_pktmbuf_mtod(mbuf, uint8_t *) + + packet_len1), packet_ptr2, packet_len2); + } + mbuf->data_len = (uint16_t)packet_size; + } else { + /* + * sze packet will not fit in one mbuf, + * scatter packet into more mbufs + */ + struct rte_mbuf *m = mbuf; + uint16_t len = rte_pktmbuf_tailroom(mbuf); + + /* copy first part of packet */ + /* fill first mbuf */ + rte_memcpy(rte_pktmbuf_append(mbuf, len), packet_ptr1, + len); + packet_len1 -= len; + packet_ptr1 = ((uint8_t *)packet_ptr1) + len; + + while (packet_len1 > 0) { + /* fill new mbufs */ + m->next = rte_pktmbuf_alloc(sze_q->mb_pool); + + if (unlikely(m->next == NULL)) { + rte_pktmbuf_free(mbuf); + /* + * Restore items from sze structure + * to state after unlocking (eventually + * locking). 
+ */ + sze->ct_rx_lck = ct_rx_lck_backup; + sze->ct_rx_rem_bytes = + ct_rx_rem_bytes_backup; + sze->ct_rx_cur_ptr = + ct_rx_cur_ptr_backup; + goto finish; + } + + m = m->next; + + len = RTE_MIN(rte_pktmbuf_tailroom(m), + packet_len1); + rte_memcpy(rte_pktmbuf_append(mbuf, len), + packet_ptr1, len); + + (mbuf->nb_segs)++; + packet_len1 -= len; + packet_ptr1 = ((uint8_t *)packet_ptr1) + len; + } + + if (packet_ptr2 != NULL) { + /* copy second part of packet, if exists */ + /* fill the rest of currently last mbuf */ + len = rte_pktmbuf_tailroom(m); + rte_memcpy(rte_pktmbuf_append(mbuf, len), + packet_ptr2, len); + packet_len2 -= len; + packet_ptr2 = ((uint8_t *)packet_ptr2) + len; + + while (packet_len2 > 0) { + /* fill new mbufs */ + m->next = rte_pktmbuf_alloc( + sze_q->mb_pool); + + if (unlikely(m->next == NULL)) { + rte_pktmbuf_free(mbuf); + /* + * Restore items from sze + * structure to state after + * unlocking (eventually + * locking). + */ + sze->ct_rx_lck = + ct_rx_lck_backup; + sze->ct_rx_rem_bytes = + ct_rx_rem_bytes_backup; + sze->ct_rx_cur_ptr = + ct_rx_cur_ptr_backup; + goto finish; + } + + m = m->next; + + len = RTE_MIN(rte_pktmbuf_tailroom(m), + packet_len2); + rte_memcpy( + rte_pktmbuf_append(mbuf, len), + packet_ptr2, len); + + (mbuf->nb_segs)++; + packet_len2 -= len; + packet_ptr2 = ((uint8_t *)packet_ptr2) + + len; + } + } + } + mbuf->pkt_len = packet_size; + mbuf->port = sze_q->in_port; + bufs[num_rx] = mbuf; + num_rx++; + num_bytes += packet_size; + } + +finish: + sze_q->rx_pkts += num_rx; + sze_q->rx_bytes += num_bytes; + return num_rx; +} + +static uint16_t +eth_szedata2_tx(void *queue, + struct rte_mbuf **bufs, + uint16_t nb_pkts) +{ + struct rte_mbuf *mbuf; + struct szedata2_tx_queue *sze_q = queue; + uint16_t num_tx = 0; + uint64_t num_bytes = 0; + + const struct szedata_lock *lck; + uint32_t lock_size; + uint32_t lock_size2; + void *dst; + uint32_t pkt_len; + uint32_t hwpkt_len; + uint32_t unlock_size; + uint32_t rem_len; + uint8_t mbuf_segs; + uint16_t pkt_left = nb_pkts; + + if (sze_q->sze == NULL || nb_pkts == 0) + return 0; + + while (pkt_left > 0) { + unlock_size = 0; + lck = szedata_tx_lock_data(sze_q->sze, + RTE_ETH_SZEDATA2_TX_LOCK_SIZE, + sze_q->tx_channel); + if (lck == NULL) + continue; + + dst = lck->start; + lock_size = lck->len; + lock_size2 = lck->next ? 
lck->next->len : 0; + +next_packet: + mbuf = bufs[nb_pkts - pkt_left]; + + pkt_len = mbuf->pkt_len; + mbuf_segs = mbuf->nb_segs; + + hwpkt_len = RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED + + RTE_SZE2_ALIGN8(pkt_len); + + if (lock_size + lock_size2 < hwpkt_len) { + szedata_tx_unlock_data(sze_q->sze, lck, unlock_size); + continue; + } + + num_bytes += pkt_len; + + if (lock_size > hwpkt_len) { + void *tmp_dst; + + rem_len = 0; + + /* write packet length at first 2 bytes in 8B header */ + *((uint16_t *)dst) = htole16( + RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED + + pkt_len); + *(((uint16_t *)dst) + 1) = htole16(0); + + /* copy packet from mbuf */ + tmp_dst = ((uint8_t *)(dst)) + + RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED; + if (mbuf_segs == 1) { + /* + * non-scattered packet, + * transmit from one mbuf + */ + rte_memcpy(tmp_dst, + rte_pktmbuf_mtod(mbuf, const void *), + pkt_len); + } else { + /* scattered packet, transmit from more mbufs */ + struct rte_mbuf *m = mbuf; + while (m) { + rte_memcpy(tmp_dst, + rte_pktmbuf_mtod(m, + const void *), + m->data_len); + tmp_dst = ((uint8_t *)(tmp_dst)) + + m->data_len; + m = m->next; + } + } + + + dst = ((uint8_t *)dst) + hwpkt_len; + unlock_size += hwpkt_len; + lock_size -= hwpkt_len; + + rte_pktmbuf_free(mbuf); + num_tx++; + pkt_left--; + if (pkt_left == 0) { + szedata_tx_unlock_data(sze_q->sze, lck, + unlock_size); + break; + } + goto next_packet; + } else if (lock_size + lock_size2 >= hwpkt_len) { + void *tmp_dst; + uint16_t write_len; + + /* write packet length at first 2 bytes in 8B header */ + *((uint16_t *)dst) = + htole16(RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED + + pkt_len); + *(((uint16_t *)dst) + 1) = htole16(0); + + /* + * If the raw packet (pkt_len) is smaller than lock_size + * get the correct length for memcpy + */ + write_len = + pkt_len < lock_size - + RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED ? + pkt_len : + lock_size - RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED; + + rem_len = hwpkt_len - lock_size; + + tmp_dst = ((uint8_t *)(dst)) + + RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED; + if (mbuf_segs == 1) { + /* + * non-scattered packet, + * transmit from one mbuf + */ + /* copy part of packet to first area */ + rte_memcpy(tmp_dst, + rte_pktmbuf_mtod(mbuf, const void *), + write_len); + + if (lck->next) + dst = lck->next->start; + + /* copy part of packet to second area */ + rte_memcpy(dst, + (const void *)(rte_pktmbuf_mtod(mbuf, + const uint8_t *) + + write_len), pkt_len - write_len); + } else { + /* scattered packet, transmit from more mbufs */ + struct rte_mbuf *m = mbuf; + uint16_t written = 0; + uint16_t to_write = 0; + bool new_mbuf = true; + uint16_t write_off = 0; + + /* copy part of packet to first area */ + while (m && written < write_len) { + to_write = RTE_MIN(m->data_len, + write_len - written); + rte_memcpy(tmp_dst, + rte_pktmbuf_mtod(m, + const void *), + to_write); + + tmp_dst = ((uint8_t *)(tmp_dst)) + + to_write; + if (m->data_len <= write_len - + written) { + m = m->next; + new_mbuf = true; + } else { + new_mbuf = false; + } + written += to_write; + } + + if (lck->next) + dst = lck->next->start; + + tmp_dst = dst; + written = 0; + write_off = new_mbuf ? 
0 : to_write; + + /* copy part of packet to second area */ + while (m && written < pkt_len - write_len) { + rte_memcpy(tmp_dst, (const void *) + (rte_pktmbuf_mtod(m, + uint8_t *) + write_off), + m->data_len - write_off); + + tmp_dst = ((uint8_t *)(tmp_dst)) + + (m->data_len - write_off); + written += m->data_len - write_off; + m = m->next; + write_off = 0; + } + } + + dst = ((uint8_t *)dst) + rem_len; + unlock_size += hwpkt_len; + lock_size = lock_size2 - rem_len; + lock_size2 = 0; + + rte_pktmbuf_free(mbuf); + num_tx++; + } + + szedata_tx_unlock_data(sze_q->sze, lck, unlock_size); + pkt_left--; + } + + sze_q->tx_pkts += num_tx; + sze_q->err_pkts += nb_pkts - num_tx; + sze_q->tx_bytes += num_bytes; + return num_tx; +} + +static int +eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rxq_id) +{ + struct szedata2_rx_queue *rxq = dev->data->rx_queues[rxq_id]; + int ret; + struct pmd_internals *internals = (struct pmd_internals *) + dev->data->dev_private; + + if (rxq->sze == NULL) { + uint32_t rx = 1 << rxq->rx_channel; + uint32_t tx = 0; + rxq->sze = szedata_open(internals->sze_dev); + if (rxq->sze == NULL) + return -EINVAL; + ret = szedata_subscribe3(rxq->sze, &rx, &tx); + if (ret != 0 || rx == 0) + goto err; + } + + ret = szedata_start(rxq->sze); + if (ret != 0) + goto err; + dev->data->rx_queue_state[rxq_id] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; + +err: + szedata_close(rxq->sze); + rxq->sze = NULL; + return -EINVAL; +} + +static int +eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rxq_id) +{ + struct szedata2_rx_queue *rxq = dev->data->rx_queues[rxq_id]; + + if (rxq->sze != NULL) { + szedata_close(rxq->sze); + rxq->sze = NULL; + } + + dev->data->rx_queue_state[rxq_id] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; +} + +static int +eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t txq_id) +{ + struct szedata2_tx_queue *txq = dev->data->tx_queues[txq_id]; + int ret; + struct pmd_internals *internals = (struct pmd_internals *) + dev->data->dev_private; + + if (txq->sze == NULL) { + uint32_t rx = 0; + uint32_t tx = 1 << txq->tx_channel; + txq->sze = szedata_open(internals->sze_dev); + if (txq->sze == NULL) + return -EINVAL; + ret = szedata_subscribe3(txq->sze, &rx, &tx); + if (ret != 0 || tx == 0) + goto err; + } + + ret = szedata_start(txq->sze); + if (ret != 0) + goto err; + dev->data->tx_queue_state[txq_id] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; + +err: + szedata_close(txq->sze); + txq->sze = NULL; + return -EINVAL; +} + +static int +eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t txq_id) +{ + struct szedata2_tx_queue *txq = dev->data->tx_queues[txq_id]; + + if (txq->sze != NULL) { + szedata_close(txq->sze); + txq->sze = NULL; + } + + dev->data->tx_queue_state[txq_id] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; +} + +static int +eth_dev_start(struct rte_eth_dev *dev) +{ + int ret; + uint16_t i; + uint16_t nb_rx = dev->data->nb_rx_queues; + uint16_t nb_tx = dev->data->nb_tx_queues; + + for (i = 0; i < nb_rx; i++) { + ret = eth_rx_queue_start(dev, i); + if (ret != 0) + goto err_rx; + } + + for (i = 0; i < nb_tx; i++) { + ret = eth_tx_queue_start(dev, i); + if (ret != 0) + goto err_tx; + } + + return 0; + +err_tx: + for (i = 0; i < nb_tx; i++) + eth_tx_queue_stop(dev, i); +err_rx: + for (i = 0; i < nb_rx; i++) + eth_rx_queue_stop(dev, i); + return ret; +} + +static void +eth_dev_stop(struct rte_eth_dev *dev) +{ + uint16_t i; + uint16_t nb_rx = dev->data->nb_rx_queues; + uint16_t nb_tx = dev->data->nb_tx_queues; + + for (i = 0; i < nb_tx; i++) + eth_tx_queue_stop(dev, i); + + 
for (i = 0; i < nb_rx; i++) + eth_rx_queue_stop(dev, i); +} + +static int +eth_dev_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_dev_data *data = dev->data; + if (data->dev_conf.rxmode.enable_scatter == 1) { + dev->rx_pkt_burst = eth_szedata2_rx_scattered; + data->scattered_rx = 1; + } else { + dev->rx_pkt_burst = eth_szedata2_rx; + data->scattered_rx = 0; + } + return 0; +} + +static void +eth_dev_info(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct pmd_internals *internals = dev->data->dev_private; + dev_info->if_index = 0; + dev_info->max_mac_addrs = 1; + dev_info->max_rx_pktlen = (uint32_t)-1; + dev_info->max_rx_queues = internals->max_rx_queues; + dev_info->max_tx_queues = internals->max_tx_queues; + dev_info->min_rx_bufsize = 0; + dev_info->speed_capa = ETH_LINK_SPEED_100G; +} + +static void +eth_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats) +{ + uint16_t i; + uint16_t nb_rx = dev->data->nb_rx_queues; + uint16_t nb_tx = dev->data->nb_tx_queues; + uint64_t rx_total = 0; + uint64_t tx_total = 0; + uint64_t tx_err_total = 0; + uint64_t rx_total_bytes = 0; + uint64_t tx_total_bytes = 0; + const struct pmd_internals *internals = dev->data->dev_private; + + for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < nb_rx; i++) { + stats->q_ipackets[i] = internals->rx_queue[i].rx_pkts; + stats->q_ibytes[i] = internals->rx_queue[i].rx_bytes; + rx_total += stats->q_ipackets[i]; + rx_total_bytes += stats->q_ibytes[i]; + } + + for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < nb_tx; i++) { + stats->q_opackets[i] = internals->tx_queue[i].tx_pkts; + stats->q_obytes[i] = internals->tx_queue[i].tx_bytes; + stats->q_errors[i] = internals->tx_queue[i].err_pkts; + tx_total += stats->q_opackets[i]; + tx_total_bytes += stats->q_obytes[i]; + tx_err_total += stats->q_errors[i]; + } + + stats->ipackets = rx_total; + stats->opackets = tx_total; + stats->ibytes = rx_total_bytes; + stats->obytes = tx_total_bytes; + stats->oerrors = tx_err_total; +} + +static void +eth_stats_reset(struct rte_eth_dev *dev) +{ + uint16_t i; + uint16_t nb_rx = dev->data->nb_rx_queues; + uint16_t nb_tx = dev->data->nb_tx_queues; + struct pmd_internals *internals = dev->data->dev_private; + + for (i = 0; i < nb_rx; i++) { + internals->rx_queue[i].rx_pkts = 0; + internals->rx_queue[i].rx_bytes = 0; + internals->rx_queue[i].err_pkts = 0; + } + for (i = 0; i < nb_tx; i++) { + internals->tx_queue[i].tx_pkts = 0; + internals->tx_queue[i].tx_bytes = 0; + internals->tx_queue[i].err_pkts = 0; + } +} + +static void +eth_rx_queue_release(void *q) +{ + struct szedata2_rx_queue *rxq = (struct szedata2_rx_queue *)q; + if (rxq->sze != NULL) { + szedata_close(rxq->sze); + rxq->sze = NULL; + } +} + +static void +eth_tx_queue_release(void *q) +{ + struct szedata2_tx_queue *txq = (struct szedata2_tx_queue *)q; + if (txq->sze != NULL) { + szedata_close(txq->sze); + txq->sze = NULL; + } +} + +static void +eth_dev_close(struct rte_eth_dev *dev) +{ + uint16_t i; + uint16_t nb_rx = dev->data->nb_rx_queues; + uint16_t nb_tx = dev->data->nb_tx_queues; + + eth_dev_stop(dev); + + for (i = 0; i < nb_rx; i++) { + eth_rx_queue_release(dev->data->rx_queues[i]); + dev->data->rx_queues[i] = NULL; + } + dev->data->nb_rx_queues = 0; + for (i = 0; i < nb_tx; i++) { + eth_tx_queue_release(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = NULL; + } + dev->data->nb_tx_queues = 0; +} + +static int +eth_link_update(struct rte_eth_dev *dev, + int wait_to_complete __rte_unused) +{ + struct rte_eth_link link; + struct 
rte_eth_link *link_ptr = &link; + struct rte_eth_link *dev_link = &dev->data->dev_link; + volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR( + dev, SZEDATA2_CGMII_IBUF_BASE_OFF, + volatile struct szedata2_cgmii_ibuf *); + + switch (cgmii_link_speed(ibuf)) { + case SZEDATA2_LINK_SPEED_10G: + link.link_speed = ETH_SPEED_NUM_10G; + break; + case SZEDATA2_LINK_SPEED_40G: + link.link_speed = ETH_SPEED_NUM_40G; + break; + case SZEDATA2_LINK_SPEED_100G: + link.link_speed = ETH_SPEED_NUM_100G; + break; + default: + link.link_speed = ETH_SPEED_NUM_10G; + break; + } + + /* szedata2 uses only full duplex */ + link.link_duplex = ETH_LINK_FULL_DUPLEX; + + link.link_status = (cgmii_ibuf_is_enabled(ibuf) && + cgmii_ibuf_is_link_up(ibuf)) ? ETH_LINK_UP : ETH_LINK_DOWN; + + link.link_autoneg = ETH_LINK_SPEED_FIXED; + + rte_atomic64_cmpset((uint64_t *)dev_link, *(uint64_t *)dev_link, + *(uint64_t *)link_ptr); + + return 0; +} + +static int +eth_dev_set_link_up(struct rte_eth_dev *dev) +{ + volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR( + dev, SZEDATA2_CGMII_IBUF_BASE_OFF, + volatile struct szedata2_cgmii_ibuf *); + volatile struct szedata2_cgmii_obuf *obuf = SZEDATA2_PCI_RESOURCE_PTR( + dev, SZEDATA2_CGMII_OBUF_BASE_OFF, + volatile struct szedata2_cgmii_obuf *); + + cgmii_ibuf_enable(ibuf); + cgmii_obuf_enable(obuf); + return 0; +} + +static int +eth_dev_set_link_down(struct rte_eth_dev *dev) +{ + volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR( + dev, SZEDATA2_CGMII_IBUF_BASE_OFF, + volatile struct szedata2_cgmii_ibuf *); + volatile struct szedata2_cgmii_obuf *obuf = SZEDATA2_PCI_RESOURCE_PTR( + dev, SZEDATA2_CGMII_OBUF_BASE_OFF, + volatile struct szedata2_cgmii_obuf *); + + cgmii_ibuf_disable(ibuf); + cgmii_obuf_disable(obuf); + return 0; +} + +static int +eth_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t rx_queue_id, + uint16_t nb_rx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mb_pool) +{ + struct pmd_internals *internals = dev->data->dev_private; + struct szedata2_rx_queue *rxq = &internals->rx_queue[rx_queue_id]; + int ret; + uint32_t rx = 1 << rx_queue_id; + uint32_t tx = 0; + + rxq->sze = szedata_open(internals->sze_dev); + if (rxq->sze == NULL) + return -EINVAL; + ret = szedata_subscribe3(rxq->sze, &rx, &tx); + if (ret != 0 || rx == 0) { + szedata_close(rxq->sze); + rxq->sze = NULL; + return -EINVAL; + } + rxq->rx_channel = rx_queue_id; + rxq->in_port = dev->data->port_id; + rxq->mb_pool = mb_pool; + rxq->rx_pkts = 0; + rxq->rx_bytes = 0; + rxq->err_pkts = 0; + + dev->data->rx_queues[rx_queue_id] = rxq; + return 0; +} + +static int +eth_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t tx_queue_id, + uint16_t nb_tx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + struct pmd_internals *internals = dev->data->dev_private; + struct szedata2_tx_queue *txq = &internals->tx_queue[tx_queue_id]; + int ret; + uint32_t rx = 0; + uint32_t tx = 1 << tx_queue_id; + + txq->sze = szedata_open(internals->sze_dev); + if (txq->sze == NULL) + return -EINVAL; + ret = szedata_subscribe3(txq->sze, &rx, &tx); + if (ret != 0 || tx == 0) { + szedata_close(txq->sze); + txq->sze = NULL; + return -EINVAL; + } + txq->tx_channel = tx_queue_id; + txq->tx_pkts = 0; + txq->tx_bytes = 0; + txq->err_pkts = 0; + + dev->data->tx_queues[tx_queue_id] = txq; + return 0; +} + +static void +eth_mac_addr_set(struct 
rte_eth_dev *dev __rte_unused, + struct ether_addr *mac_addr __rte_unused) +{ +} + +static void +eth_promiscuous_enable(struct rte_eth_dev *dev) +{ + volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR( + dev, SZEDATA2_CGMII_IBUF_BASE_OFF, + volatile struct szedata2_cgmii_ibuf *); + cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_PROMISC); +} + +static void +eth_promiscuous_disable(struct rte_eth_dev *dev) +{ + volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR( + dev, SZEDATA2_CGMII_IBUF_BASE_OFF, + volatile struct szedata2_cgmii_ibuf *); + cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_ONLY_VALID); +} + +static void +eth_allmulticast_enable(struct rte_eth_dev *dev) +{ + volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR( + dev, SZEDATA2_CGMII_IBUF_BASE_OFF, + volatile struct szedata2_cgmii_ibuf *); + cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_ALL_MULTICAST); +} + +static void +eth_allmulticast_disable(struct rte_eth_dev *dev) +{ + volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR( + dev, SZEDATA2_CGMII_IBUF_BASE_OFF, + volatile struct szedata2_cgmii_ibuf *); + cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_ONLY_VALID); +} + +static const struct eth_dev_ops ops = { + .dev_start = eth_dev_start, + .dev_stop = eth_dev_stop, + .dev_set_link_up = eth_dev_set_link_up, + .dev_set_link_down = eth_dev_set_link_down, + .dev_close = eth_dev_close, + .dev_configure = eth_dev_configure, + .dev_infos_get = eth_dev_info, + .promiscuous_enable = eth_promiscuous_enable, + .promiscuous_disable = eth_promiscuous_disable, + .allmulticast_enable = eth_allmulticast_enable, + .allmulticast_disable = eth_allmulticast_disable, + .rx_queue_start = eth_rx_queue_start, + .rx_queue_stop = eth_rx_queue_stop, + .tx_queue_start = eth_tx_queue_start, + .tx_queue_stop = eth_tx_queue_stop, + .rx_queue_setup = eth_rx_queue_setup, + .tx_queue_setup = eth_tx_queue_setup, + .rx_queue_release = eth_rx_queue_release, + .tx_queue_release = eth_tx_queue_release, + .link_update = eth_link_update, + .stats_get = eth_stats_get, + .stats_reset = eth_stats_reset, + .mac_addr_set = eth_mac_addr_set, +}; + +/* + * This function goes through sysfs and looks for an index of szedata2 + * device file (/dev/szedataIIX, where X is the index). + * + * @return + * 0 on success + * -1 on error + */ +static int +get_szedata2_index(struct rte_eth_dev *dev, uint32_t *index) +{ + DIR *dir; + struct dirent *entry; + int ret; + uint32_t tmp_index; + FILE *fd; + char pcislot_path[PATH_MAX]; + struct rte_pci_addr pcislot_addr = dev->pci_dev->addr; + uint32_t domain; + uint32_t bus; + uint32_t devid; + uint32_t function; + + dir = opendir("/sys/class/combo"); + if (dir == NULL) + return -1; + + /* + * Iterate through all combosixX directories. + * When the value in /sys/class/combo/combosixX/device/pcislot + * file is the location of the ethernet device dev, "X" is the + * index of the device. 
+ */ + while ((entry = readdir(dir)) != NULL) { + ret = sscanf(entry->d_name, "combosix%u", &tmp_index); + if (ret != 1) + continue; + + snprintf(pcislot_path, PATH_MAX, + "/sys/class/combo/combosix%u/device/pcislot", + tmp_index); + + fd = fopen(pcislot_path, "r"); + if (fd == NULL) + continue; + + ret = fscanf(fd, "%4" PRIx16 ":%2" PRIx8 ":%2" PRIx8 ".%" PRIx8, + &domain, &bus, &devid, &function); + fclose(fd); + if (ret != 4) + continue; + + if (pcislot_addr.domain == domain && + pcislot_addr.bus == bus && + pcislot_addr.devid == devid && + pcislot_addr.function == function) { + *index = tmp_index; + closedir(dir); + return 0; + } + } + + closedir(dir); + return -1; +} + +static int +rte_szedata2_eth_dev_init(struct rte_eth_dev *dev) +{ + struct rte_eth_dev_data *data = dev->data; + struct pmd_internals *internals = (struct pmd_internals *) + data->dev_private; + struct szedata *szedata_temp; + int ret; + uint32_t szedata2_index; + struct rte_pci_addr *pci_addr = &dev->pci_dev->addr; + struct rte_pci_resource *pci_rsc = + &dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER]; + char rsc_filename[PATH_MAX]; + void *pci_resource_ptr = NULL; + int fd; + + RTE_LOG(INFO, PMD, "Initializing szedata2 device (" PCI_PRI_FMT ")\n", + pci_addr->domain, pci_addr->bus, pci_addr->devid, + pci_addr->function); + + /* Get index of szedata2 device file and create path to device file */ + ret = get_szedata2_index(dev, &szedata2_index); + if (ret != 0) { + RTE_LOG(ERR, PMD, "Failed to get szedata2 device index!\n"); + return -ENODEV; + } + snprintf(internals->sze_dev, PATH_MAX, SZEDATA2_DEV_PATH_FMT, + szedata2_index); + + RTE_LOG(INFO, PMD, "SZEDATA2 path: %s\n", internals->sze_dev); + + /* + * Get number of available DMA RX and TX channels, which is maximum + * number of queues that can be created and store it in private device + * data structure. 
+ */ + szedata_temp = szedata_open(internals->sze_dev); + if (szedata_temp == NULL) { + RTE_LOG(ERR, PMD, "szedata_open(): failed to open %s", + internals->sze_dev); + return -EINVAL; + } + internals->max_rx_queues = szedata_ifaces_available(szedata_temp, + SZE2_DIR_RX); + internals->max_tx_queues = szedata_ifaces_available(szedata_temp, + SZE2_DIR_TX); + szedata_close(szedata_temp); + + RTE_LOG(INFO, PMD, "Available DMA channels RX: %u TX: %u\n", + internals->max_rx_queues, internals->max_tx_queues); + + /* Set rx, tx burst functions */ + if (data->dev_conf.rxmode.enable_scatter == 1 || + data->scattered_rx == 1) { + dev->rx_pkt_burst = eth_szedata2_rx_scattered; + data->scattered_rx = 1; + } else { + dev->rx_pkt_burst = eth_szedata2_rx; + data->scattered_rx = 0; + } + dev->tx_pkt_burst = eth_szedata2_tx; + + /* Set function callbacks for Ethernet API */ + dev->dev_ops = &ops; + + rte_eth_copy_pci_info(dev, dev->pci_dev); + + /* mmap pci resource0 file to rte_pci_resource structure */ + if (dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].phys_addr == + 0) { + RTE_LOG(ERR, PMD, "Missing resource%u file\n", + PCI_RESOURCE_NUMBER); + return -EINVAL; + } + snprintf(rsc_filename, PATH_MAX, + SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/resource%u", + pci_addr->domain, pci_addr->bus, + pci_addr->devid, pci_addr->function, PCI_RESOURCE_NUMBER); + fd = open(rsc_filename, O_RDWR); + if (fd < 0) { + RTE_LOG(ERR, PMD, "Could not open file %s\n", rsc_filename); + return -EINVAL; + } + + pci_resource_ptr = mmap(0, + dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len, + PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + close(fd); + if (pci_resource_ptr == NULL) { + RTE_LOG(ERR, PMD, "Could not mmap file %s (fd = %d)\n", + rsc_filename, fd); + return -EINVAL; + } + dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr = + pci_resource_ptr; + + RTE_LOG(DEBUG, PMD, "resource%u phys_addr = 0x%llx len = %llu " + "virt addr = %llx\n", PCI_RESOURCE_NUMBER, + (unsigned long long)pci_rsc->phys_addr, + (unsigned long long)pci_rsc->len, + (unsigned long long)pci_rsc->addr); + + /* Get link state */ + eth_link_update(dev, 0); + + /* Allocate space for one mac address */ + data->mac_addrs = rte_zmalloc(data->name, sizeof(struct ether_addr), + RTE_CACHE_LINE_SIZE); + if (data->mac_addrs == NULL) { + RTE_LOG(ERR, PMD, "Could not alloc space for MAC address!\n"); + munmap(dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr, + dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len); + return -EINVAL; + } + + ether_addr_copy(ð_addr, data->mac_addrs); + + /* At initial state COMBO card is in promiscuous mode so disable it */ + eth_promiscuous_disable(dev); + + RTE_LOG(INFO, PMD, "szedata2 device (" + PCI_PRI_FMT ") successfully initialized\n", + pci_addr->domain, pci_addr->bus, pci_addr->devid, + pci_addr->function); + + return 0; +} + +static int +rte_szedata2_eth_dev_uninit(struct rte_eth_dev *dev) +{ + struct rte_pci_addr *pci_addr = &dev->pci_dev->addr; + + rte_free(dev->data->mac_addrs); + dev->data->mac_addrs = NULL; + munmap(dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr, + dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len); + + RTE_LOG(INFO, PMD, "szedata2 device (" + PCI_PRI_FMT ") successfully uninitialized\n", + pci_addr->domain, pci_addr->bus, pci_addr->devid, + pci_addr->function); + + return 0; +} + +static const struct rte_pci_id rte_szedata2_pci_id_table[] = { + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE, + PCI_DEVICE_ID_NETCOPE_COMBO80G) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE, + 
PCI_DEVICE_ID_NETCOPE_COMBO100G) + }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE, + PCI_DEVICE_ID_NETCOPE_COMBO100G2) + }, + { + .vendor_id = 0, + } +}; + +static struct eth_driver szedata2_eth_driver = { + .pci_drv = { + .name = RTE_SZEDATA2_PCI_DRIVER_NAME, + .id_table = rte_szedata2_pci_id_table, + }, + .eth_dev_init = rte_szedata2_eth_dev_init, + .eth_dev_uninit = rte_szedata2_eth_dev_uninit, + .dev_private_size = sizeof(struct pmd_internals), +}; + +static int +rte_szedata2_init(const char *name __rte_unused, + const char *args __rte_unused) +{ + rte_eth_driver_register(&szedata2_eth_driver); + return 0; +} + +static int +rte_szedata2_uninit(const char *name __rte_unused) +{ + return 0; +} + +static struct rte_driver rte_szedata2_driver = { + .type = PMD_PDEV, + .name = RTE_SZEDATA2_DRIVER_NAME, + .init = rte_szedata2_init, + .uninit = rte_szedata2_uninit, +}; + +PMD_REGISTER_DRIVER(rte_szedata2_driver); diff --git a/drivers/net/szedata2/rte_eth_szedata2.h b/drivers/net/szedata2/rte_eth_szedata2.h new file mode 100644 index 00000000..522cf47f --- /dev/null +++ b/drivers/net/szedata2/rte_eth_szedata2.h @@ -0,0 +1,462 @@ +/*- + * BSD LICENSE + * + * Copyright (c) 2015 - 2016 CESNET + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of CESNET nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RTE_PMD_SZEDATA2_H_ +#define RTE_PMD_SZEDATA2_H_ + +#include <stdbool.h> + +#include <rte_byteorder.h> + +/* PCI Vendor ID */ +#define PCI_VENDOR_ID_NETCOPE 0x1b26 + +/* PCI Device IDs */ +#define PCI_DEVICE_ID_NETCOPE_COMBO80G 0xcb80 +#define PCI_DEVICE_ID_NETCOPE_COMBO100G 0xc1c1 +#define PCI_DEVICE_ID_NETCOPE_COMBO100G2 0xc2c1 + +/* number of PCI resource used by COMBO card */ +#define PCI_RESOURCE_NUMBER 0 + +/* szedata2_packet header length == 4 bytes == 2B segment size + 2B hw size */ +#define RTE_SZE2_PACKET_HEADER_SIZE 4 + +#define RTE_SZE2_MMIO_MAX 10 + +/*! + * Round 'what' to the nearest larger (or equal) multiple of '8' + * (szedata2 packet is aligned to 8 bytes) + */ +#define RTE_SZE2_ALIGN8(what) (((what) + ((8) - 1)) & (~((8) - 1))) + +/*! 
main handle structure */ +struct szedata { + int fd; + struct sze2_instance_info *info; + uint32_t *write_size; + void *space[RTE_SZE2_MMIO_MAX]; + struct szedata_lock lock[2][2]; + + __u32 *rx_asize, *tx_asize; + + /* szedata_read_next variables - to keep context (ct) */ + + /* + * rx + */ + /** initial sze lock ptr */ + const struct szedata_lock *ct_rx_lck_orig; + /** current sze lock ptr (initial or next) */ + const struct szedata_lock *ct_rx_lck; + /** remaining bytes (not read) within current lock */ + unsigned int ct_rx_rem_bytes; + /** current pointer to locked memory */ + unsigned char *ct_rx_cur_ptr; + /** + * allocated buffer to store RX packet if it was split + * into 2 buffers + */ + unsigned char *ct_rx_buffer; + /** registered function to provide filtering based on hwdata */ + int (*ct_rx_filter)(u_int16_t hwdata_len, u_char *hwdata); + + /* + * tx + */ + /** + * buffer for tx - packet is prepared here + * (in future for burst write) + */ + unsigned char *ct_tx_buffer; + /** initial sze TX lock ptrs - number according to TX interfaces */ + const struct szedata_lock **ct_tx_lck_orig; + /** current sze TX lock ptrs - number according to TX interfaces */ + const struct szedata_lock **ct_tx_lck; + /** already written bytes in both locks */ + unsigned int *ct_tx_written_bytes; + /** remaining bytes (not written) within current lock */ + unsigned int *ct_tx_rem_bytes; + /** current pointers to locked memory */ + unsigned char **ct_tx_cur_ptr; + /** NUMA node closest to PCIe device, or -1 */ + int numa_node; +}; + +/* + * @return Byte from PCI resource at offset "offset". + */ +static inline uint8_t +pci_resource_read8(struct rte_eth_dev *dev, uint32_t offset) +{ + return *((uint8_t *)((uint8_t *) + dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr + + offset)); +} + +/* + * @return Two bytes from PCI resource starting at offset "offset". + */ +static inline uint16_t +pci_resource_read16(struct rte_eth_dev *dev, uint32_t offset) +{ + return rte_le_to_cpu_16(*((uint16_t *)((uint8_t *) + dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr + + offset))); +} + +/* + * @return Four bytes from PCI resource starting at offset "offset". + */ +static inline uint32_t +pci_resource_read32(struct rte_eth_dev *dev, uint32_t offset) +{ + return rte_le_to_cpu_32(*((uint32_t *)((uint8_t *) + dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr + + offset))); +} + +/* + * @return Eight bytes from PCI resource starting at offset "offset". + */ +static inline uint64_t +pci_resource_read64(struct rte_eth_dev *dev, uint32_t offset) +{ + return rte_le_to_cpu_64(*((uint64_t *)((uint8_t *) + dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr + + offset))); +} + +/* + * Write one byte to PCI resource address space at offset "offset". + */ +static inline void +pci_resource_write8(struct rte_eth_dev *dev, uint32_t offset, uint8_t val) +{ + *((uint8_t *)((uint8_t *) + dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr + + offset)) = val; +} + +/* + * Write two bytes to PCI resource address space at offset "offset". + */ +static inline void +pci_resource_write16(struct rte_eth_dev *dev, uint32_t offset, uint16_t val) +{ + *((uint16_t *)((uint8_t *) + dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr + + offset)) = rte_cpu_to_le_16(val); +} + +/* + * Write four bytes to PCI resource address space at offset "offset". 
+ */ +static inline void +pci_resource_write32(struct rte_eth_dev *dev, uint32_t offset, uint32_t val) +{ + *((uint32_t *)((uint8_t *) + dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr + + offset)) = rte_cpu_to_le_32(val); +} + +/* + * Write eight bytes to PCI resource address space at offset "offset". + */ +static inline void +pci_resource_write64(struct rte_eth_dev *dev, uint32_t offset, uint64_t val) +{ + *((uint64_t *)((uint8_t *) + dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr + + offset)) = rte_cpu_to_le_64(val); +} + +#define SZEDATA2_PCI_RESOURCE_PTR(dev, offset, type) \ + ((type)((uint8_t *) \ + ((dev)->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr) \ + + (offset))) + +enum szedata2_link_speed { + SZEDATA2_LINK_SPEED_DEFAULT = 0, + SZEDATA2_LINK_SPEED_10G, + SZEDATA2_LINK_SPEED_40G, + SZEDATA2_LINK_SPEED_100G, +}; + +enum szedata2_mac_check_mode { + SZEDATA2_MAC_CHMODE_PROMISC = 0x0, + SZEDATA2_MAC_CHMODE_ONLY_VALID = 0x1, + SZEDATA2_MAC_CHMODE_ALL_BROADCAST = 0x2, + SZEDATA2_MAC_CHMODE_ALL_MULTICAST = 0x3, +}; + +/* + * Structure describes CGMII IBUF address space + */ +struct szedata2_cgmii_ibuf { + /** Total Received Frames Counter low part */ + uint32_t trfcl; + /** Correct Frames Counter low part */ + uint32_t cfcl; + /** Discarded Frames Counter low part */ + uint32_t dfcl; + /** Counter of frames discarded due to buffer overflow low part */ + uint32_t bodfcl; + /** Total Received Frames Counter high part */ + uint32_t trfch; + /** Correct Frames Counter high part */ + uint32_t cfch; + /** Discarded Frames Counter high part */ + uint32_t dfch; + /** Counter of frames discarded due to buffer overflow high part */ + uint32_t bodfch; + /** IBUF enable register */ + uint32_t ibuf_en; + /** Error mask register */ + uint32_t err_mask; + /** IBUF status register */ + uint32_t ibuf_st; + /** IBUF command register */ + uint32_t ibuf_cmd; + /** Minimum frame length allowed */ + uint32_t mfla; + /** Frame MTU */ + uint32_t mtu; + /** MAC address check mode */ + uint32_t mac_chmode; + /** Octets Received OK Counter low part */ + uint32_t orocl; + /** Octets Received OK Counter high part */ + uint32_t oroch; +} __rte_packed; + +/* Offset of CGMII IBUF memory for MAC addresses */ +#define SZEDATA2_CGMII_IBUF_MAC_MEM_OFF 0x80 + +/* + * @return + * true if IBUF is enabled + * false if IBUF is disabled + */ +static inline bool +cgmii_ibuf_is_enabled(volatile struct szedata2_cgmii_ibuf *ibuf) +{ + return ((rte_le_to_cpu_32(ibuf->ibuf_en) & 0x1) != 0) ? true : false; +} + +/* + * Enables IBUF. + */ +static inline void +cgmii_ibuf_enable(volatile struct szedata2_cgmii_ibuf *ibuf) +{ + ibuf->ibuf_en = + rte_cpu_to_le_32(rte_le_to_cpu_32(ibuf->ibuf_en) | 0x1); +} + +/* + * Disables IBUF. + */ +static inline void +cgmii_ibuf_disable(volatile struct szedata2_cgmii_ibuf *ibuf) +{ + ibuf->ibuf_en = + rte_cpu_to_le_32(rte_le_to_cpu_32(ibuf->ibuf_en) & ~0x1); +} + +/* + * @return + * true if ibuf link is up + * false if ibuf link is down + */ +static inline bool +cgmii_ibuf_is_link_up(volatile struct szedata2_cgmii_ibuf *ibuf) +{ + return ((rte_le_to_cpu_32(ibuf->ibuf_st) & 0x80) != 0) ? 
true : false; +} + +/* + * @return + * MAC address check mode + */ +static inline enum szedata2_mac_check_mode +cgmii_ibuf_mac_mode_read(volatile struct szedata2_cgmii_ibuf *ibuf) +{ + switch (rte_le_to_cpu_32(ibuf->mac_chmode) & 0x3) { + case 0x0: + return SZEDATA2_MAC_CHMODE_PROMISC; + case 0x1: + return SZEDATA2_MAC_CHMODE_ONLY_VALID; + case 0x2: + return SZEDATA2_MAC_CHMODE_ALL_BROADCAST; + case 0x3: + return SZEDATA2_MAC_CHMODE_ALL_MULTICAST; + default: + return SZEDATA2_MAC_CHMODE_PROMISC; + } +} + +/* + * Writes "mode" in MAC address check mode register. + */ +static inline void +cgmii_ibuf_mac_mode_write(volatile struct szedata2_cgmii_ibuf *ibuf, + enum szedata2_mac_check_mode mode) +{ + ibuf->mac_chmode = rte_cpu_to_le_32( + (rte_le_to_cpu_32(ibuf->mac_chmode) & ~0x3) | mode); +} + +/* + * Structure describes CGMII OBUF address space + */ +struct szedata2_cgmii_obuf { + /** Total Sent Frames Counter low part */ + uint32_t tsfcl; + /** Octets Sent Counter low part */ + uint32_t oscl; + /** Total Discarded Frames Counter low part */ + uint32_t tdfcl; + /** reserved */ + uint32_t reserved1; + /** Total Sent Frames Counter high part */ + uint32_t tsfch; + /** Octets Sent Counter high part */ + uint32_t osch; + /** Total Discarded Frames Counter high part */ + uint32_t tdfch; + /** reserved */ + uint32_t reserved2; + /** OBUF enable register */ + uint32_t obuf_en; + /** reserved */ + uint64_t reserved3; + /** OBUF control register */ + uint32_t ctrl; + /** OBUF status register */ + uint32_t obuf_st; +} __rte_packed; + +/* + * @return + * true if OBUF is enabled + * false if OBUF is disabled + */ +static inline bool +cgmii_obuf_is_enabled(volatile struct szedata2_cgmii_obuf *obuf) +{ + return ((rte_le_to_cpu_32(obuf->obuf_en) & 0x1) != 0) ? true : false; +} + +/* + * Enables OBUF. + */ +static inline void +cgmii_obuf_enable(volatile struct szedata2_cgmii_obuf *obuf) +{ + obuf->obuf_en = + rte_cpu_to_le_32(rte_le_to_cpu_32(obuf->obuf_en) | 0x1); +} + +/* + * Disables OBUF. + */ +static inline void +cgmii_obuf_disable(volatile struct szedata2_cgmii_obuf *obuf) +{ + obuf->obuf_en = + rte_cpu_to_le_32(rte_le_to_cpu_32(obuf->obuf_en) & ~0x1); +} + +/* + * Function takes value from IBUF status register. Values in IBUF and OBUF + * should be same. + * + * @return Link speed constant. + */ +static inline enum szedata2_link_speed +cgmii_link_speed(volatile struct szedata2_cgmii_ibuf *ibuf) +{ + uint32_t speed = (rte_le_to_cpu_32(ibuf->ibuf_st) & 0x70) >> 4; + switch (speed) { + case 0x03: + return SZEDATA2_LINK_SPEED_10G; + case 0x04: + return SZEDATA2_LINK_SPEED_40G; + case 0x05: + return SZEDATA2_LINK_SPEED_100G; + default: + return SZEDATA2_LINK_SPEED_DEFAULT; + } +} + +/* + * IBUFs and OBUFs can generally be located at different offsets in different + * firmwares. + * This part defines base offsets of IBUFs and OBUFs through various firmwares. + * Currently one firmware type is supported. + * Type of firmware is set through configuration option + * CONFIG_RTE_LIBRTE_PMD_SZEDATA_AS. + * Possible values are: + * 0 - for firmwares: + * NIC_100G1_LR4 + * HANIC_100G1_LR4 + * HANIC_100G1_SR10 + */ +#if !defined(RTE_LIBRTE_PMD_SZEDATA2_AS) +#error "RTE_LIBRTE_PMD_SZEDATA2_AS has to be defined" +#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 0 + +/* + * CGMII IBUF offset from the beginning of PCI resource address space. + */ +#define SZEDATA2_CGMII_IBUF_BASE_OFF 0x8000 +/* + * Size of CGMII IBUF. 
+ */ +#define SZEDATA2_CGMII_IBUF_SIZE 0x200 + +/* + * GCMII OBUF offset from the beginning of PCI resource address space. + */ +#define SZEDATA2_CGMII_OBUF_BASE_OFF 0x9000 +/* + * Size of CGMII OBUF. + */ +#define SZEDATA2_CGMII_OBUF_SIZE 0x100 + +#else +#error "RTE_LIBRTE_PMD_SZEDATA2_AS has wrong value, see comments in config file" +#endif + +#endif diff --git a/drivers/net/szedata2/rte_pmd_szedata2_version.map b/drivers/net/szedata2/rte_pmd_szedata2_version.map new file mode 100644 index 00000000..ad607bbe --- /dev/null +++ b/drivers/net/szedata2/rte_pmd_szedata2_version.map @@ -0,0 +1,3 @@ +DPDK_2.2 { + local: *; +}; diff --git a/drivers/net/vhost/Makefile b/drivers/net/vhost/Makefile new file mode 100644 index 00000000..f49a69b3 --- /dev/null +++ b/drivers/net/vhost/Makefile @@ -0,0 +1,62 @@ +# BSD LICENSE +# +# Copyright (c) 2010-2016 Intel Corporation. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_vhost.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +EXPORT_MAP := rte_pmd_vhost_version.map + +LIBABIVER := 1 + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += rte_eth_vhost.c + +# +# Export include files +# +SYMLINK-y-include += rte_eth_vhost.h + +# this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_kvargs +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_vhost + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c new file mode 100644 index 00000000..310cbefc --- /dev/null +++ b/drivers/net/vhost/rte_eth_vhost.c @@ -0,0 +1,927 @@ +/*- + * BSD LICENSE + * + * Copyright (c) 2016 IGEL Co., Ltd. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of IGEL Co.,Ltd. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include <unistd.h> +#include <pthread.h> +#include <stdbool.h> +#ifdef RTE_LIBRTE_VHOST_NUMA +#include <numaif.h> +#endif + +#include <rte_mbuf.h> +#include <rte_ethdev.h> +#include <rte_malloc.h> +#include <rte_memcpy.h> +#include <rte_dev.h> +#include <rte_kvargs.h> +#include <rte_virtio_net.h> +#include <rte_spinlock.h> + +#include "rte_eth_vhost.h" + +#define ETH_VHOST_IFACE_ARG "iface" +#define ETH_VHOST_QUEUES_ARG "queues" + +static const char *drivername = "VHOST PMD"; + +static const char *valid_arguments[] = { + ETH_VHOST_IFACE_ARG, + ETH_VHOST_QUEUES_ARG, + NULL +}; + +static struct ether_addr base_eth_addr = { + .addr_bytes = { + 0x56 /* V */, + 0x48 /* H */, + 0x4F /* O */, + 0x53 /* S */, + 0x54 /* T */, + 0x00 + } +}; + +struct vhost_queue { + rte_atomic32_t allow_queuing; + rte_atomic32_t while_queuing; + struct virtio_net *device; + struct pmd_internal *internal; + struct rte_mempool *mb_pool; + uint8_t port; + uint16_t virtqueue_id; + uint64_t rx_pkts; + uint64_t tx_pkts; + uint64_t missed_pkts; + uint64_t rx_bytes; + uint64_t tx_bytes; +}; + +struct pmd_internal { + char *dev_name; + char *iface_name; + uint16_t max_queues; + + volatile uint16_t once; +}; + +struct internal_list { + TAILQ_ENTRY(internal_list) next; + struct rte_eth_dev *eth_dev; +}; + +TAILQ_HEAD(internal_list_head, internal_list); +static struct internal_list_head internal_list = + TAILQ_HEAD_INITIALIZER(internal_list); + +static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER; + +static rte_atomic16_t nb_started_ports; +static pthread_t session_th; + +static struct rte_eth_link pmd_link = { + .link_speed = 10000, + .link_duplex = ETH_LINK_FULL_DUPLEX, + .link_status = ETH_LINK_DOWN +}; + +struct rte_vhost_vring_state { + rte_spinlock_t lock; + + bool cur[RTE_MAX_QUEUES_PER_PORT * 2]; + bool seen[RTE_MAX_QUEUES_PER_PORT * 2]; + unsigned int index; + unsigned int max_vring; +}; + +static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS]; + +static uint16_t +eth_vhost_rx(void *q, struct 
rte_mbuf **bufs, uint16_t nb_bufs) +{ + struct vhost_queue *r = q; + uint16_t i, nb_rx = 0; + + if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0)) + return 0; + + rte_atomic32_set(&r->while_queuing, 1); + + if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0)) + goto out; + + /* Dequeue packets from guest TX queue */ + nb_rx = rte_vhost_dequeue_burst(r->device, + r->virtqueue_id, r->mb_pool, bufs, nb_bufs); + + r->rx_pkts += nb_rx; + + for (i = 0; likely(i < nb_rx); i++) { + bufs[i]->port = r->port; + r->rx_bytes += bufs[i]->pkt_len; + } + +out: + rte_atomic32_set(&r->while_queuing, 0); + + return nb_rx; +} + +static uint16_t +eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) +{ + struct vhost_queue *r = q; + uint16_t i, nb_tx = 0; + + if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0)) + return 0; + + rte_atomic32_set(&r->while_queuing, 1); + + if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0)) + goto out; + + /* Enqueue packets to guest RX queue */ + nb_tx = rte_vhost_enqueue_burst(r->device, + r->virtqueue_id, bufs, nb_bufs); + + r->tx_pkts += nb_tx; + r->missed_pkts += nb_bufs - nb_tx; + + for (i = 0; likely(i < nb_tx); i++) + r->tx_bytes += bufs[i]->pkt_len; + + for (i = 0; likely(i < nb_tx); i++) + rte_pktmbuf_free(bufs[i]); +out: + rte_atomic32_set(&r->while_queuing, 0); + + return nb_tx; +} + +static int +eth_dev_configure(struct rte_eth_dev *dev __rte_unused) +{ + return 0; +} + +static inline struct internal_list * +find_internal_resource(char *ifname) +{ + int found = 0; + struct internal_list *list; + struct pmd_internal *internal; + + if (ifname == NULL) + return NULL; + + pthread_mutex_lock(&internal_list_lock); + + TAILQ_FOREACH(list, &internal_list, next) { + internal = list->eth_dev->data->dev_private; + if (!strcmp(internal->iface_name, ifname)) { + found = 1; + break; + } + } + + pthread_mutex_unlock(&internal_list_lock); + + if (!found) + return NULL; + + return list; +} + +static int +new_device(struct virtio_net *dev) +{ + struct rte_eth_dev *eth_dev; + struct internal_list *list; + struct pmd_internal *internal; + struct vhost_queue *vq; + unsigned i; +#ifdef RTE_LIBRTE_VHOST_NUMA + int newnode, ret; +#endif + + if (dev == NULL) { + RTE_LOG(INFO, PMD, "Invalid argument\n"); + return -1; + } + + list = find_internal_resource(dev->ifname); + if (list == NULL) { + RTE_LOG(INFO, PMD, "Invalid device name\n"); + return -1; + } + + eth_dev = list->eth_dev; + internal = eth_dev->data->dev_private; + +#ifdef RTE_LIBRTE_VHOST_NUMA + ret = get_mempolicy(&newnode, NULL, 0, dev, + MPOL_F_NODE | MPOL_F_ADDR); + if (ret < 0) { + RTE_LOG(ERR, PMD, "Unknown numa node\n"); + return -1; + } + + eth_dev->data->numa_node = newnode; +#endif + + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + vq = eth_dev->data->rx_queues[i]; + if (vq == NULL) + continue; + vq->device = dev; + vq->internal = internal; + vq->port = eth_dev->data->port_id; + } + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + vq = eth_dev->data->tx_queues[i]; + if (vq == NULL) + continue; + vq->device = dev; + vq->internal = internal; + vq->port = eth_dev->data->port_id; + } + + for (i = 0; i < dev->virt_qp_nb * VIRTIO_QNUM; i++) + rte_vhost_enable_guest_notification(dev, i, 0); + + dev->flags |= VIRTIO_DEV_RUNNING; + dev->priv = eth_dev; + eth_dev->data->dev_link.link_status = ETH_LINK_UP; + + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + vq = eth_dev->data->rx_queues[i]; + if (vq == NULL) + continue; + rte_atomic32_set(&vq->allow_queuing, 1); + } + for (i = 0; i < 
eth_dev->data->nb_tx_queues; i++) { + vq = eth_dev->data->tx_queues[i]; + if (vq == NULL) + continue; + rte_atomic32_set(&vq->allow_queuing, 1); + } + + RTE_LOG(INFO, PMD, "New connection established\n"); + + _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC); + + return 0; +} + +static void +destroy_device(volatile struct virtio_net *dev) +{ + struct rte_eth_dev *eth_dev; + struct vhost_queue *vq; + unsigned i; + + if (dev == NULL) { + RTE_LOG(INFO, PMD, "Invalid argument\n"); + return; + } + + eth_dev = (struct rte_eth_dev *)dev->priv; + if (eth_dev == NULL) { + RTE_LOG(INFO, PMD, "Failed to find a ethdev\n"); + return; + } + + /* Wait until rx/tx_pkt_burst stops accessing vhost device */ + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + vq = eth_dev->data->rx_queues[i]; + if (vq == NULL) + continue; + rte_atomic32_set(&vq->allow_queuing, 0); + while (rte_atomic32_read(&vq->while_queuing)) + rte_pause(); + } + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + vq = eth_dev->data->tx_queues[i]; + if (vq == NULL) + continue; + rte_atomic32_set(&vq->allow_queuing, 0); + while (rte_atomic32_read(&vq->while_queuing)) + rte_pause(); + } + + eth_dev->data->dev_link.link_status = ETH_LINK_DOWN; + + dev->priv = NULL; + dev->flags &= ~VIRTIO_DEV_RUNNING; + + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + vq = eth_dev->data->rx_queues[i]; + if (vq == NULL) + continue; + vq->device = NULL; + } + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + vq = eth_dev->data->tx_queues[i]; + if (vq == NULL) + continue; + vq->device = NULL; + } + + RTE_LOG(INFO, PMD, "Connection closed\n"); + + _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC); +} + +static int +vring_state_changed(struct virtio_net *dev, uint16_t vring, int enable) +{ + struct rte_vhost_vring_state *state; + struct rte_eth_dev *eth_dev; + struct internal_list *list; + + if (dev == NULL) { + RTE_LOG(ERR, PMD, "Invalid argument\n"); + return -1; + } + + list = find_internal_resource(dev->ifname); + if (list == NULL) { + RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", dev->ifname); + return -1; + } + + eth_dev = list->eth_dev; + /* won't be NULL */ + state = vring_states[eth_dev->data->port_id]; + rte_spinlock_lock(&state->lock); + state->cur[vring] = enable; + state->max_vring = RTE_MAX(vring, state->max_vring); + rte_spinlock_unlock(&state->lock); + + RTE_LOG(INFO, PMD, "vring%u is %s\n", + vring, enable ? 
"enabled" : "disabled"); + + _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE); + + return 0; +} + +int +rte_eth_vhost_get_queue_event(uint8_t port_id, + struct rte_eth_vhost_queue_event *event) +{ + struct rte_vhost_vring_state *state; + unsigned int i; + int idx; + + if (port_id >= RTE_MAX_ETHPORTS) { + RTE_LOG(ERR, PMD, "Invalid port id\n"); + return -1; + } + + state = vring_states[port_id]; + if (!state) { + RTE_LOG(ERR, PMD, "Unused port\n"); + return -1; + } + + rte_spinlock_lock(&state->lock); + for (i = 0; i <= state->max_vring; i++) { + idx = state->index++ % (state->max_vring + 1); + + if (state->cur[idx] != state->seen[idx]) { + state->seen[idx] = state->cur[idx]; + event->queue_id = idx / 2; + event->rx = idx & 1; + event->enable = state->cur[idx]; + rte_spinlock_unlock(&state->lock); + return 0; + } + } + rte_spinlock_unlock(&state->lock); + + return -1; +} + +static void * +vhost_driver_session(void *param __rte_unused) +{ + static struct virtio_net_device_ops vhost_ops; + + /* set vhost arguments */ + vhost_ops.new_device = new_device; + vhost_ops.destroy_device = destroy_device; + vhost_ops.vring_state_changed = vring_state_changed; + if (rte_vhost_driver_callback_register(&vhost_ops) < 0) + RTE_LOG(ERR, PMD, "Can't register callbacks\n"); + + /* start event handling */ + rte_vhost_driver_session_start(); + + return NULL; +} + +static int +vhost_driver_session_start(void) +{ + int ret; + + ret = pthread_create(&session_th, + NULL, vhost_driver_session, NULL); + if (ret) + RTE_LOG(ERR, PMD, "Can't create a thread\n"); + + return ret; +} + +static void +vhost_driver_session_stop(void) +{ + int ret; + + ret = pthread_cancel(session_th); + if (ret) + RTE_LOG(ERR, PMD, "Can't cancel the thread\n"); + + ret = pthread_join(session_th, NULL); + if (ret) + RTE_LOG(ERR, PMD, "Can't join the thread\n"); +} + +static int +eth_dev_start(struct rte_eth_dev *dev) +{ + struct pmd_internal *internal = dev->data->dev_private; + int ret = 0; + + if (rte_atomic16_cmpset(&internal->once, 0, 1)) { + ret = rte_vhost_driver_register(internal->iface_name); + if (ret) + return ret; + } + + /* We need only one message handling thread */ + if (rte_atomic16_add_return(&nb_started_ports, 1) == 1) + ret = vhost_driver_session_start(); + + return ret; +} + +static void +eth_dev_stop(struct rte_eth_dev *dev) +{ + struct pmd_internal *internal = dev->data->dev_private; + + if (rte_atomic16_cmpset(&internal->once, 1, 0)) + rte_vhost_driver_unregister(internal->iface_name); + + if (rte_atomic16_sub_return(&nb_started_ports, 1) == 0) + vhost_driver_session_stop(); +} + +static int +eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc __rte_unused, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mb_pool) +{ + struct vhost_queue *vq; + + vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue), + RTE_CACHE_LINE_SIZE, socket_id); + if (vq == NULL) { + RTE_LOG(ERR, PMD, "Failed to allocate memory for rx queue\n"); + return -ENOMEM; + } + + vq->mb_pool = mb_pool; + vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ; + dev->data->rx_queues[rx_queue_id] = vq; + + return 0; +} + +static int +eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc __rte_unused, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + struct vhost_queue *vq; + + vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue), + RTE_CACHE_LINE_SIZE, socket_id); + if (vq == 
NULL) { + RTE_LOG(ERR, PMD, "Failed to allocate memory for tx queue\n"); + return -ENOMEM; + } + + vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ; + dev->data->tx_queues[tx_queue_id] = vq; + + return 0; +} + +static void +eth_dev_info(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct pmd_internal *internal; + + internal = dev->data->dev_private; + if (internal == NULL) { + RTE_LOG(ERR, PMD, "Invalid device specified\n"); + return; + } + + dev_info->driver_name = drivername; + dev_info->max_mac_addrs = 1; + dev_info->max_rx_pktlen = (uint32_t)-1; + dev_info->max_rx_queues = internal->max_queues; + dev_info->max_tx_queues = internal->max_queues; + dev_info->min_rx_bufsize = 0; +} + +static void +eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + unsigned i; + unsigned long rx_total = 0, tx_total = 0, tx_missed_total = 0; + unsigned long rx_total_bytes = 0, tx_total_bytes = 0; + struct vhost_queue *vq; + + for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && + i < dev->data->nb_rx_queues; i++) { + if (dev->data->rx_queues[i] == NULL) + continue; + vq = dev->data->rx_queues[i]; + stats->q_ipackets[i] = vq->rx_pkts; + rx_total += stats->q_ipackets[i]; + + stats->q_ibytes[i] = vq->rx_bytes; + rx_total_bytes += stats->q_ibytes[i]; + } + + for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && + i < dev->data->nb_tx_queues; i++) { + if (dev->data->tx_queues[i] == NULL) + continue; + vq = dev->data->tx_queues[i]; + stats->q_opackets[i] = vq->tx_pkts; + tx_missed_total += vq->missed_pkts; + tx_total += stats->q_opackets[i]; + + stats->q_obytes[i] = vq->tx_bytes; + tx_total_bytes += stats->q_obytes[i]; + } + + stats->ipackets = rx_total; + stats->opackets = tx_total; + stats->imissed = tx_missed_total; + stats->ibytes = rx_total_bytes; + stats->obytes = tx_total_bytes; +} + +static void +eth_stats_reset(struct rte_eth_dev *dev) +{ + struct vhost_queue *vq; + unsigned i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + if (dev->data->rx_queues[i] == NULL) + continue; + vq = dev->data->rx_queues[i]; + vq->rx_pkts = 0; + vq->rx_bytes = 0; + } + for (i = 0; i < dev->data->nb_tx_queues; i++) { + if (dev->data->tx_queues[i] == NULL) + continue; + vq = dev->data->tx_queues[i]; + vq->tx_pkts = 0; + vq->tx_bytes = 0; + vq->missed_pkts = 0; + } +} + +static void +eth_queue_release(void *q) +{ + rte_free(q); +} + +static int +eth_link_update(struct rte_eth_dev *dev __rte_unused, + int wait_to_complete __rte_unused) +{ + return 0; +} + +/** + * Disable features in feature_mask. Returns 0 on success. + */ +int +rte_eth_vhost_feature_disable(uint64_t feature_mask) +{ + return rte_vhost_feature_disable(feature_mask); +} + +/** + * Enable features in feature_mask. Returns 0 on success. 
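+ * Like rte_eth_vhost_feature_disable() above, this is a thin wrapper
+ * around the librte_vhost feature API, so it applies to all vhost ports
+ * in the process.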
+ */ +int +rte_eth_vhost_feature_enable(uint64_t feature_mask) +{ + return rte_vhost_feature_enable(feature_mask); +} + +/* Returns currently supported vhost features */ +uint64_t +rte_eth_vhost_feature_get(void) +{ + return rte_vhost_feature_get(); +} + +static const struct eth_dev_ops ops = { + .dev_start = eth_dev_start, + .dev_stop = eth_dev_stop, + .dev_configure = eth_dev_configure, + .dev_infos_get = eth_dev_info, + .rx_queue_setup = eth_rx_queue_setup, + .tx_queue_setup = eth_tx_queue_setup, + .rx_queue_release = eth_queue_release, + .tx_queue_release = eth_queue_release, + .link_update = eth_link_update, + .stats_get = eth_stats_get, + .stats_reset = eth_stats_reset, +}; + +static int +eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues, + const unsigned numa_node) +{ + struct rte_eth_dev_data *data = NULL; + struct pmd_internal *internal = NULL; + struct rte_eth_dev *eth_dev = NULL; + struct ether_addr *eth_addr = NULL; + struct rte_vhost_vring_state *vring_state = NULL; + struct internal_list *list = NULL; + + RTE_LOG(INFO, PMD, "Creating VHOST-USER backend on numa socket %u\n", + numa_node); + + /* now do all data allocation - for eth_dev structure, dummy pci driver + * and internal (private) data + */ + data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node); + if (data == NULL) + goto error; + + internal = rte_zmalloc_socket(name, sizeof(*internal), 0, numa_node); + if (internal == NULL) + goto error; + + list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node); + if (list == NULL) + goto error; + + /* reserve an ethdev entry */ + eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL); + if (eth_dev == NULL) + goto error; + + eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node); + if (eth_addr == NULL) + goto error; + *eth_addr = base_eth_addr; + eth_addr->addr_bytes[5] = eth_dev->data->port_id; + + vring_state = rte_zmalloc_socket(name, + sizeof(*vring_state), 0, numa_node); + if (vring_state == NULL) + goto error; + + TAILQ_INIT(ð_dev->link_intr_cbs); + + /* now put it all together + * - store queue data in internal, + * - store numa_node info in ethdev data + * - point eth_dev_data to internals + * - and point eth_dev structure to new eth_dev_data structure + */ + internal->dev_name = strdup(name); + if (internal->dev_name == NULL) + goto error; + internal->iface_name = strdup(iface_name); + if (internal->iface_name == NULL) + goto error; + + list->eth_dev = eth_dev; + pthread_mutex_lock(&internal_list_lock); + TAILQ_INSERT_TAIL(&internal_list, list, next); + pthread_mutex_unlock(&internal_list_lock); + + rte_spinlock_init(&vring_state->lock); + vring_states[eth_dev->data->port_id] = vring_state; + + data->dev_private = internal; + data->port_id = eth_dev->data->port_id; + memmove(data->name, eth_dev->data->name, sizeof(data->name)); + data->nb_rx_queues = queues; + data->nb_tx_queues = queues; + internal->max_queues = queues; + data->dev_link = pmd_link; + data->mac_addrs = eth_addr; + + /* We'll replace the 'data' originally allocated by eth_dev. So the + * vhost PMD resources won't be shared between multi processes. 
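+ * (It is not an entry in the shared rte_eth_dev_data array, which is
+ * why a secondary process cannot use this port.)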
+ */ + eth_dev->data = data; + eth_dev->dev_ops = &ops; + eth_dev->driver = NULL; + data->dev_flags = + RTE_ETH_DEV_DETACHABLE | RTE_ETH_DEV_INTR_LSC; + data->kdrv = RTE_KDRV_NONE; + data->drv_name = internal->dev_name; + data->numa_node = numa_node; + + /* finally assign rx and tx ops */ + eth_dev->rx_pkt_burst = eth_vhost_rx; + eth_dev->tx_pkt_burst = eth_vhost_tx; + + return data->port_id; + +error: + if (internal) + free(internal->dev_name); + rte_free(vring_state); + rte_free(eth_addr); + if (eth_dev) + rte_eth_dev_release_port(eth_dev); + rte_free(internal); + rte_free(list); + rte_free(data); + + return -1; +} + +static inline int +open_iface(const char *key __rte_unused, const char *value, void *extra_args) +{ + const char **iface_name = extra_args; + + if (value == NULL) + return -1; + + *iface_name = value; + + return 0; +} + +static inline int +open_queues(const char *key __rte_unused, const char *value, void *extra_args) +{ + uint16_t *q = extra_args; + + if (value == NULL || extra_args == NULL) + return -EINVAL; + + *q = (uint16_t)strtoul(value, NULL, 0); + if (*q == USHRT_MAX && errno == ERANGE) + return -1; + + if (*q > RTE_MAX_QUEUES_PER_PORT) + return -1; + + return 0; +} + +static int +rte_pmd_vhost_devinit(const char *name, const char *params) +{ + struct rte_kvargs *kvlist = NULL; + int ret = 0; + char *iface_name; + uint16_t queues; + + RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n", name); + + kvlist = rte_kvargs_parse(params, valid_arguments); + if (kvlist == NULL) + return -1; + + if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) { + ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG, + &open_iface, &iface_name); + if (ret < 0) + goto out_free; + } else { + ret = -1; + goto out_free; + } + + if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) { + ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG, + &open_queues, &queues); + if (ret < 0) + goto out_free; + + } else + queues = 1; + + eth_dev_vhost_create(name, iface_name, queues, rte_socket_id()); + +out_free: + rte_kvargs_free(kvlist); + return ret; +} + +static int +rte_pmd_vhost_devuninit(const char *name) +{ + struct rte_eth_dev *eth_dev = NULL; + struct pmd_internal *internal; + struct internal_list *list; + unsigned int i; + + RTE_LOG(INFO, PMD, "Un-Initializing pmd_vhost for %s\n", name); + + /* find an ethdev entry */ + eth_dev = rte_eth_dev_allocated(name); + if (eth_dev == NULL) + return -ENODEV; + + internal = eth_dev->data->dev_private; + if (internal == NULL) + return -ENODEV; + + list = find_internal_resource(internal->iface_name); + if (list == NULL) + return -ENODEV; + + pthread_mutex_lock(&internal_list_lock); + TAILQ_REMOVE(&internal_list, list, next); + pthread_mutex_unlock(&internal_list_lock); + rte_free(list); + + eth_dev_stop(eth_dev); + + rte_free(vring_states[eth_dev->data->port_id]); + vring_states[eth_dev->data->port_id] = NULL; + + free(internal->dev_name); + free(internal->iface_name); + + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) + rte_free(eth_dev->data->rx_queues[i]); + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) + rte_free(eth_dev->data->tx_queues[i]); + + rte_free(eth_dev->data->mac_addrs); + rte_free(eth_dev->data); + rte_free(internal); + + rte_eth_dev_release_port(eth_dev); + + return 0; +} + +static struct rte_driver pmd_vhost_drv = { + .name = "eth_vhost", + .type = PMD_VDEV, + .init = rte_pmd_vhost_devinit, + .uninit = rte_pmd_vhost_devuninit, +}; + +PMD_REGISTER_DRIVER(pmd_vhost_drv); diff --git a/drivers/net/vhost/rte_eth_vhost.h 
b/drivers/net/vhost/rte_eth_vhost.h new file mode 100644 index 00000000..ff5d877b --- /dev/null +++ b/drivers/net/vhost/rte_eth_vhost.h @@ -0,0 +1,109 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 IGEL Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of IGEL Co., Ltd. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _RTE_ETH_VHOST_H_ +#define _RTE_ETH_VHOST_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include <stdint.h> +#include <stdbool.h> + +#include <rte_virtio_net.h> + +/** + * Disable features in feature_mask. + * + * @param feature_mask + * Vhost features defined in "linux/virtio_net.h". + * @return + * - On success, zero. + * - On failure, a negative value. + */ +int rte_eth_vhost_feature_disable(uint64_t feature_mask); + +/** + * Enable features in feature_mask. + * + * @param feature_mask + * Vhost features defined in "linux/virtio_net.h". + * @return + * - On success, zero. + * - On failure, a negative value. + */ +int rte_eth_vhost_feature_enable(uint64_t feature_mask); + +/** + * Returns currently supported vhost features. + * + * @return + * Vhost features defined in "linux/virtio_net.h". + */ +uint64_t rte_eth_vhost_feature_get(void); + +/* + * Event description. + */ +struct rte_eth_vhost_queue_event { + uint16_t queue_id; + bool rx; + bool enable; +}; + +/** + * Get queue events from specified port. + * If a callback for below event is registered by + * rte_eth_dev_callback_register(), this function will describe what was + * changed. + * - RTE_ETH_EVENT_QUEUE_STATE + * Multiple events may cause only one callback kicking, so call this function + * while returning 0. + * + * @param port_id + * Port id. + * @param event + * Pointer to a rte_eth_vhost_queue_event structure. + * @return + * - On success, zero. + * - On failure, a negative value. 
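+ *
+ * A minimal usage sketch (only this API is real; port_id and the printf
+ * are illustrative):
+ *
+ *   struct rte_eth_vhost_queue_event ev;
+ *
+ *   while (rte_eth_vhost_get_queue_event(port_id, &ev) == 0)
+ *       printf("queue %u (%s) is now %s\n", (unsigned)ev.queue_id,
+ *              ev.rx ? "rx" : "tx", ev.enable ? "enabled" : "disabled");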
+ */ +int rte_eth_vhost_get_queue_event(uint8_t port_id, + struct rte_eth_vhost_queue_event *event); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/vhost/rte_pmd_vhost_version.map b/drivers/net/vhost/rte_pmd_vhost_version.map new file mode 100644 index 00000000..65bf3a8c --- /dev/null +++ b/drivers/net/vhost/rte_pmd_vhost_version.map @@ -0,0 +1,10 @@ +DPDK_16.04 { + global: + + rte_eth_vhost_feature_disable; + rte_eth_vhost_feature_enable; + rte_eth_vhost_feature_get; + rte_eth_vhost_get_queue_event; + + local: *; +}; diff --git a/drivers/net/virtio/Makefile b/drivers/net/virtio/Makefile new file mode 100644 index 00000000..ef84f604 --- /dev/null +++ b/drivers/net/virtio/Makefile @@ -0,0 +1,63 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2014 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
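+# Note: virtio_rxtx_simple.c (the vectorized RX/TX path) is only added to
+# SRCS-y below when RTE_MACHINE_CPUFLAG_SSSE3 is present in CFLAGS.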
+ +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_virtio.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +EXPORT_MAP := rte_pmd_virtio_version.map + +LIBABIVER := 1 + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtqueue.c +SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_pci.c +SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_ethdev.c + +ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSSE3,$(CFLAGS)),RTE_MACHINE_CPUFLAG_SSSE3) +SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_rxtx_simple.c +endif + +# this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_eal lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_mempool lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_net + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/virtio/rte_pmd_virtio_version.map b/drivers/net/virtio/rte_pmd_virtio_version.map new file mode 100644 index 00000000..ef353984 --- /dev/null +++ b/drivers/net/virtio/rte_pmd_virtio_version.map @@ -0,0 +1,4 @@ +DPDK_2.0 { + + local: *; +}; diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c new file mode 100644 index 00000000..63a368ac --- /dev/null +++ b/drivers/net/virtio/virtio_ethdev.c @@ -0,0 +1,1461 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <stdint.h> +#include <string.h> +#include <stdio.h> +#include <errno.h> +#include <unistd.h> + +#include <rte_ethdev.h> +#include <rte_memcpy.h> +#include <rte_string_fns.h> +#include <rte_memzone.h> +#include <rte_malloc.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_pci.h> +#include <rte_ether.h> +#include <rte_common.h> +#include <rte_errno.h> + +#include <rte_memory.h> +#include <rte_eal.h> +#include <rte_dev.h> + +#include "virtio_ethdev.h" +#include "virtio_pci.h" +#include "virtio_logs.h" +#include "virtqueue.h" +#include "virtio_rxtx.h" + + +static int eth_virtio_dev_init(struct rte_eth_dev *eth_dev); +static int eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev); +static int virtio_dev_configure(struct rte_eth_dev *dev); +static int virtio_dev_start(struct rte_eth_dev *dev); +static void virtio_dev_stop(struct rte_eth_dev *dev); +static void virtio_dev_promiscuous_enable(struct rte_eth_dev *dev); +static void virtio_dev_promiscuous_disable(struct rte_eth_dev *dev); +static void virtio_dev_allmulticast_enable(struct rte_eth_dev *dev); +static void virtio_dev_allmulticast_disable(struct rte_eth_dev *dev); +static void virtio_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static int virtio_dev_link_update(struct rte_eth_dev *dev, + __rte_unused int wait_to_complete); + +static void virtio_set_hwaddr(struct virtio_hw *hw); +static void virtio_get_hwaddr(struct virtio_hw *hw); + +static void virtio_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats); +static int virtio_dev_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstats *xstats, unsigned n); +static void virtio_dev_stats_reset(struct rte_eth_dev *dev); +static void virtio_dev_free_mbufs(struct rte_eth_dev *dev); +static int virtio_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, int on); +static void virtio_mac_addr_add(struct rte_eth_dev *dev, + struct ether_addr *mac_addr, + uint32_t index, uint32_t vmdq __rte_unused); +static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index); +static void virtio_mac_addr_set(struct rte_eth_dev *dev, + struct ether_addr *mac_addr); + +static int virtio_dev_queue_stats_mapping_set( + __rte_unused struct rte_eth_dev *eth_dev, + __rte_unused uint16_t queue_id, + __rte_unused uint8_t stat_idx, + __rte_unused uint8_t is_rx); + +/* + * The set of PCI devices this driver supports + */ +static const struct rte_pci_id pci_id_virtio_map[] = { + +#define RTE_PCI_DEV_ID_DECL_VIRTIO(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, +#include "rte_pci_dev_ids.h" + +{ .vendor_id = 0, /* sentinel */ }, +}; + +struct rte_virtio_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + unsigned offset; +}; + +/* [rt]x_qX_ is prepended to the name string here */ +static const struct rte_virtio_xstats_name_off rte_virtio_q_stat_strings[] = { + {"good_packets", offsetof(struct virtqueue, packets)}, + {"good_bytes", offsetof(struct virtqueue, bytes)}, + {"errors", offsetof(struct virtqueue, errors)}, + {"multicast_packets", offsetof(struct virtqueue, multicast)}, + {"broadcast_packets", offsetof(struct virtqueue, broadcast)}, + {"undersize_packets", offsetof(struct virtqueue, size_bins[0])}, + {"size_64_packets", offsetof(struct virtqueue, size_bins[1])}, + {"size_65_127_packets", offsetof(struct virtqueue, size_bins[2])}, + {"size_128_255_packets", offsetof(struct virtqueue, size_bins[3])}, + {"size_256_511_packets", offsetof(struct virtqueue, size_bins[4])}, + {"size_512_1023_packets", 
offsetof(struct virtqueue, size_bins[5])}, + {"size_1024_1517_packets", offsetof(struct virtqueue, size_bins[6])}, + {"size_1518_max_packets", offsetof(struct virtqueue, size_bins[7])}, +}; + +#define VIRTIO_NB_Q_XSTATS (sizeof(rte_virtio_q_stat_strings) / \ + sizeof(rte_virtio_q_stat_strings[0])) + +static int +virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl, + int *dlen, int pkt_num) +{ + uint32_t head, i; + int k, sum = 0; + virtio_net_ctrl_ack status = ~0; + struct virtio_pmd_ctrl result; + + ctrl->status = status; + + if (!(vq && vq->hw->cvq)) { + PMD_INIT_LOG(ERR, "Control queue is not supported."); + return -1; + } + head = vq->vq_desc_head_idx; + + PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, " + "vq->hw->cvq = %p vq = %p", + vq->vq_desc_head_idx, status, vq->hw->cvq, vq); + + if ((vq->vq_free_cnt < ((uint32_t)pkt_num + 2)) || (pkt_num < 1)) + return -1; + + memcpy(vq->virtio_net_hdr_mz->addr, ctrl, + sizeof(struct virtio_pmd_ctrl)); + + /* + * Format is enforced in qemu code: + * One TX packet for header; + * At least one TX packet per argument; + * One RX packet for ACK. + */ + vq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT; + vq->vq_ring.desc[head].addr = vq->virtio_net_hdr_mz->phys_addr; + vq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr); + vq->vq_free_cnt--; + i = vq->vq_ring.desc[head].next; + + for (k = 0; k < pkt_num; k++) { + vq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT; + vq->vq_ring.desc[i].addr = vq->virtio_net_hdr_mz->phys_addr + + sizeof(struct virtio_net_ctrl_hdr) + + sizeof(ctrl->status) + sizeof(uint8_t)*sum; + vq->vq_ring.desc[i].len = dlen[k]; + sum += dlen[k]; + vq->vq_free_cnt--; + i = vq->vq_ring.desc[i].next; + } + + vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE; + vq->vq_ring.desc[i].addr = vq->virtio_net_hdr_mz->phys_addr + + sizeof(struct virtio_net_ctrl_hdr); + vq->vq_ring.desc[i].len = sizeof(ctrl->status); + vq->vq_free_cnt--; + + vq->vq_desc_head_idx = vq->vq_ring.desc[i].next; + + vq_update_avail_ring(vq, head); + vq_update_avail_idx(vq); + + PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index); + + virtqueue_notify(vq); + + rte_rmb(); + while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) { + rte_rmb(); + usleep(100); + } + + while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) { + uint32_t idx, desc_idx, used_idx; + struct vring_used_elem *uep; + + used_idx = (uint32_t)(vq->vq_used_cons_idx + & (vq->vq_nentries - 1)); + uep = &vq->vq_ring.used->ring[used_idx]; + idx = (uint32_t) uep->id; + desc_idx = idx; + + while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) { + desc_idx = vq->vq_ring.desc[desc_idx].next; + vq->vq_free_cnt++; + } + + vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx; + vq->vq_desc_head_idx = idx; + + vq->vq_used_cons_idx++; + vq->vq_free_cnt++; + } + + PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d", + vq->vq_free_cnt, vq->vq_desc_head_idx); + + memcpy(&result, vq->virtio_net_hdr_mz->addr, + sizeof(struct virtio_pmd_ctrl)); + + return result.status; +} + +static int +virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues) +{ + struct virtio_hw *hw = dev->data->dev_private; + struct virtio_pmd_ctrl ctrl; + int dlen[1]; + int ret; + + ctrl.hdr.class = VIRTIO_NET_CTRL_MQ; + ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET; + memcpy(ctrl.data, &nb_queues, sizeof(uint16_t)); + + dlen[0] = sizeof(uint16_t); + + ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1); + if (ret) { + PMD_INIT_LOG(ERR, "Multiqueue configured 
but send command " + "failed, this is too late now..."); + return -EINVAL; + } + + return 0; +} + +void +virtio_dev_queue_release(struct virtqueue *vq) { + struct virtio_hw *hw; + + if (vq) { + hw = vq->hw; + hw->vtpci_ops->del_queue(hw, vq); + + rte_free(vq->sw_ring); + rte_free(vq); + } +} + +int virtio_dev_queue_setup(struct rte_eth_dev *dev, + int queue_type, + uint16_t queue_idx, + uint16_t vtpci_queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + struct virtqueue **pvq) +{ + char vq_name[VIRTQUEUE_MAX_NAME_SZ]; + const struct rte_memzone *mz; + unsigned int vq_size, size; + struct virtio_hw *hw = dev->data->dev_private; + struct virtqueue *vq = NULL; + + PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx); + + /* + * Read the virtqueue size from the Queue Size field + * Always power of 2 and if 0 virtqueue does not exist + */ + vq_size = hw->vtpci_ops->get_queue_num(hw, vtpci_queue_idx); + PMD_INIT_LOG(DEBUG, "vq_size: %u nb_desc:%u", vq_size, nb_desc); + if (vq_size == 0) { + PMD_INIT_LOG(ERR, "virtqueue does not exist"); + return -EINVAL; + } + + if (!rte_is_power_of_2(vq_size)) { + PMD_INIT_LOG(ERR, "virtqueue size is not powerof 2"); + return -EINVAL; + } + + if (queue_type == VTNET_RQ) { + snprintf(vq_name, sizeof(vq_name), "port%d_rvq%d", + dev->data->port_id, queue_idx); + vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) + + vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE); + vq->sw_ring = rte_zmalloc_socket("rxq->sw_ring", + (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) * + sizeof(vq->sw_ring[0]), RTE_CACHE_LINE_SIZE, socket_id); + } else if (queue_type == VTNET_TQ) { + snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d", + dev->data->port_id, queue_idx); + vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) + + vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE); + } else if (queue_type == VTNET_CQ) { + snprintf(vq_name, sizeof(vq_name), "port%d_cvq", + dev->data->port_id); + vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) + + vq_size * sizeof(struct vq_desc_extra), + RTE_CACHE_LINE_SIZE); + } + if (vq == NULL) { + PMD_INIT_LOG(ERR, "Can not allocate virtqueue"); + return -ENOMEM; + } + if (queue_type == VTNET_RQ && vq->sw_ring == NULL) { + PMD_INIT_LOG(ERR, "Can not allocate RX soft ring"); + rte_free(vq); + return -ENOMEM; + } + + vq->hw = hw; + vq->port_id = dev->data->port_id; + vq->queue_id = queue_idx; + vq->vq_queue_index = vtpci_queue_idx; + vq->vq_nentries = vq_size; + + if (nb_desc == 0 || nb_desc > vq_size) + nb_desc = vq_size; + vq->vq_free_cnt = nb_desc; + + /* + * Reserve a memzone for vring elements + */ + size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN); + vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN); + PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d", size, vq->vq_ring_size); + + mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size, + socket_id, 0, VIRTIO_PCI_VRING_ALIGN); + if (mz == NULL) { + if (rte_errno == EEXIST) + mz = rte_memzone_lookup(vq_name); + if (mz == NULL) { + rte_free(vq); + return -ENOMEM; + } + } + + /* + * Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit, + * and only accepts 32 bit page frame number. + * Check if the allocated physical memory exceeds 16TB. 
+ */ + if ((mz->phys_addr + vq->vq_ring_size - 1) >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) { + PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!"); + rte_free(vq); + return -ENOMEM; + } + + memset(mz->addr, 0, sizeof(mz->len)); + vq->mz = mz; + vq->vq_ring_mem = mz->phys_addr; + vq->vq_ring_virt_mem = mz->addr; + PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%"PRIx64, (uint64_t)mz->phys_addr); + PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%"PRIx64, (uint64_t)(uintptr_t)mz->addr); + vq->virtio_net_hdr_mz = NULL; + vq->virtio_net_hdr_mem = 0; + + if (queue_type == VTNET_TQ) { + const struct rte_memzone *hdr_mz; + struct virtio_tx_region *txr; + unsigned int i; + + /* + * For each xmit packet, allocate a virtio_net_hdr + * and indirect ring elements + */ + snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d_hdrzone", + dev->data->port_id, queue_idx); + hdr_mz = rte_memzone_reserve_aligned(vq_name, + vq_size * sizeof(*txr), + socket_id, 0, + RTE_CACHE_LINE_SIZE); + if (hdr_mz == NULL) { + if (rte_errno == EEXIST) + hdr_mz = rte_memzone_lookup(vq_name); + if (hdr_mz == NULL) { + rte_free(vq); + return -ENOMEM; + } + } + vq->virtio_net_hdr_mz = hdr_mz; + vq->virtio_net_hdr_mem = hdr_mz->phys_addr; + + txr = hdr_mz->addr; + memset(txr, 0, vq_size * sizeof(*txr)); + for (i = 0; i < vq_size; i++) { + struct vring_desc *start_dp = txr[i].tx_indir; + + vring_desc_init(start_dp, RTE_DIM(txr[i].tx_indir)); + + /* first indirect descriptor is always the tx header */ + start_dp->addr = vq->virtio_net_hdr_mem + + i * sizeof(*txr) + + offsetof(struct virtio_tx_region, tx_hdr); + + start_dp->len = vq->hw->vtnet_hdr_size; + start_dp->flags = VRING_DESC_F_NEXT; + } + + } else if (queue_type == VTNET_CQ) { + /* Allocate a page for control vq command, data and status */ + snprintf(vq_name, sizeof(vq_name), "port%d_cvq_hdrzone", + dev->data->port_id); + vq->virtio_net_hdr_mz = rte_memzone_reserve_aligned(vq_name, + PAGE_SIZE, socket_id, 0, RTE_CACHE_LINE_SIZE); + if (vq->virtio_net_hdr_mz == NULL) { + if (rte_errno == EEXIST) + vq->virtio_net_hdr_mz = + rte_memzone_lookup(vq_name); + if (vq->virtio_net_hdr_mz == NULL) { + rte_free(vq); + return -ENOMEM; + } + } + vq->virtio_net_hdr_mem = + vq->virtio_net_hdr_mz->phys_addr; + memset(vq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE); + } + + hw->vtpci_ops->setup_queue(hw, vq); + + *pvq = vq; + return 0; +} + +static int +virtio_dev_cq_queue_setup(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx, + uint32_t socket_id) +{ + struct virtqueue *vq; + int ret; + struct virtio_hw *hw = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + ret = virtio_dev_queue_setup(dev, VTNET_CQ, VTNET_SQ_CQ_QUEUE_IDX, + vtpci_queue_idx, 0, socket_id, &vq); + if (ret < 0) { + PMD_INIT_LOG(ERR, "control vq initialization failed"); + return ret; + } + + hw->cvq = vq; + return 0; +} + +static void +virtio_free_queues(struct rte_eth_dev *dev) +{ + unsigned int i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) + virtio_dev_rx_queue_release(dev->data->rx_queues[i]); + + dev->data->nb_rx_queues = 0; + + for (i = 0; i < dev->data->nb_tx_queues; i++) + virtio_dev_tx_queue_release(dev->data->tx_queues[i]); + + dev->data->nb_tx_queues = 0; +} + +static void +virtio_dev_close(struct rte_eth_dev *dev) +{ + struct virtio_hw *hw = dev->data->dev_private; + struct rte_pci_device *pci_dev = dev->pci_dev; + + PMD_INIT_LOG(DEBUG, "virtio_dev_close"); + + if (hw->started == 1) + virtio_dev_stop(dev); + + /* reset the NIC */ + if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC) + vtpci_irq_config(hw, 
VIRTIO_MSI_NO_VECTOR); + vtpci_reset(hw); + virtio_dev_free_mbufs(dev); + virtio_free_queues(dev); +} + +static void +virtio_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct virtio_hw *hw = dev->data->dev_private; + struct virtio_pmd_ctrl ctrl; + int dlen[1]; + int ret; + + if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) { + PMD_INIT_LOG(INFO, "host does not support rx control\n"); + return; + } + + ctrl.hdr.class = VIRTIO_NET_CTRL_RX; + ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC; + ctrl.data[0] = 1; + dlen[0] = 1; + + ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1); + if (ret) + PMD_INIT_LOG(ERR, "Failed to enable promisc"); +} + +static void +virtio_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct virtio_hw *hw = dev->data->dev_private; + struct virtio_pmd_ctrl ctrl; + int dlen[1]; + int ret; + + if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) { + PMD_INIT_LOG(INFO, "host does not support rx control\n"); + return; + } + + ctrl.hdr.class = VIRTIO_NET_CTRL_RX; + ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC; + ctrl.data[0] = 0; + dlen[0] = 1; + + ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1); + if (ret) + PMD_INIT_LOG(ERR, "Failed to disable promisc"); +} + +static void +virtio_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct virtio_hw *hw = dev->data->dev_private; + struct virtio_pmd_ctrl ctrl; + int dlen[1]; + int ret; + + if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) { + PMD_INIT_LOG(INFO, "host does not support rx control\n"); + return; + } + + ctrl.hdr.class = VIRTIO_NET_CTRL_RX; + ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI; + ctrl.data[0] = 1; + dlen[0] = 1; + + ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1); + if (ret) + PMD_INIT_LOG(ERR, "Failed to enable allmulticast"); +} + +static void +virtio_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct virtio_hw *hw = dev->data->dev_private; + struct virtio_pmd_ctrl ctrl; + int dlen[1]; + int ret; + + if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) { + PMD_INIT_LOG(INFO, "host does not support rx control\n"); + return; + } + + ctrl.hdr.class = VIRTIO_NET_CTRL_RX; + ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI; + ctrl.data[0] = 0; + dlen[0] = 1; + + ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1); + if (ret) + PMD_INIT_LOG(ERR, "Failed to disable allmulticast"); +} + +/* + * dev_ops for virtio, bare necessities for basic operation + */ +static const struct eth_dev_ops virtio_eth_dev_ops = { + .dev_configure = virtio_dev_configure, + .dev_start = virtio_dev_start, + .dev_stop = virtio_dev_stop, + .dev_close = virtio_dev_close, + .promiscuous_enable = virtio_dev_promiscuous_enable, + .promiscuous_disable = virtio_dev_promiscuous_disable, + .allmulticast_enable = virtio_dev_allmulticast_enable, + .allmulticast_disable = virtio_dev_allmulticast_disable, + + .dev_infos_get = virtio_dev_info_get, + .stats_get = virtio_dev_stats_get, + .xstats_get = virtio_dev_xstats_get, + .stats_reset = virtio_dev_stats_reset, + .xstats_reset = virtio_dev_stats_reset, + .link_update = virtio_dev_link_update, + .rx_queue_setup = virtio_dev_rx_queue_setup, + .rx_queue_release = virtio_dev_rx_queue_release, + .tx_queue_setup = virtio_dev_tx_queue_setup, + .tx_queue_release = virtio_dev_tx_queue_release, + /* collect stats per queue */ + .queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set, + .vlan_filter_set = virtio_vlan_filter_set, + .mac_addr_add = virtio_mac_addr_add, + .mac_addr_remove = virtio_mac_addr_remove, + .mac_addr_set = virtio_mac_addr_set, +}; + +static inline int 
+virtio_dev_atomic_read_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = link; + struct rte_eth_link *src = &(dev->data->dev_link); + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +/** + * Atomically writes the link status information into global + * structure rte_eth_dev. + * + * @param dev + * - Pointer to the structure rte_eth_dev to read from. + * - Pointer to the buffer to be saved with the link status. + * + * @return + * - On success, zero. + * - On failure, negative value. + */ +static inline int +virtio_dev_atomic_write_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = &(dev->data->dev_link); + struct rte_eth_link *src = link; + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +static void +virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + unsigned i; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + const struct virtqueue *txvq = dev->data->tx_queues[i]; + if (txvq == NULL) + continue; + + stats->opackets += txvq->packets; + stats->obytes += txvq->bytes; + stats->oerrors += txvq->errors; + + if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { + stats->q_opackets[i] = txvq->packets; + stats->q_obytes[i] = txvq->bytes; + } + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + const struct virtqueue *rxvq = dev->data->rx_queues[i]; + if (rxvq == NULL) + continue; + + stats->ipackets += rxvq->packets; + stats->ibytes += rxvq->bytes; + stats->ierrors += rxvq->errors; + + if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { + stats->q_ipackets[i] = rxvq->packets; + stats->q_ibytes[i] = rxvq->bytes; + } + } + + stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; +} + +static int +virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, + unsigned n) +{ + unsigned i; + unsigned count = 0; + + unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_Q_XSTATS + + dev->data->nb_rx_queues * VIRTIO_NB_Q_XSTATS; + + if (n < nstats) + return nstats; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct virtqueue *rxvq = dev->data->rx_queues[i]; + + if (rxvq == NULL) + continue; + + unsigned t; + + for (t = 0; t < VIRTIO_NB_Q_XSTATS; t++) { + snprintf(xstats[count].name, sizeof(xstats[count].name), + "rx_q%u_%s", i, + rte_virtio_q_stat_strings[t].name); + xstats[count].value = *(uint64_t *)(((char *)rxvq) + + rte_virtio_q_stat_strings[t].offset); + count++; + } + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct virtqueue *txvq = dev->data->tx_queues[i]; + + if (txvq == NULL) + continue; + + unsigned t; + + for (t = 0; t < VIRTIO_NB_Q_XSTATS; t++) { + snprintf(xstats[count].name, sizeof(xstats[count].name), + "tx_q%u_%s", i, + rte_virtio_q_stat_strings[t].name); + xstats[count].value = *(uint64_t *)(((char *)txvq) + + rte_virtio_q_stat_strings[t].offset); + count++; + } + } + + return count; +} + +static void +virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + virtio_update_stats(dev, stats); +} + +static void +virtio_dev_stats_reset(struct rte_eth_dev *dev) +{ + unsigned int i; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct virtqueue *txvq = dev->data->tx_queues[i]; + if (txvq == NULL) + continue; + + txvq->packets = 0; + txvq->bytes = 0; + txvq->errors = 0; + txvq->multicast = 0; + txvq->broadcast = 0; + memset(txvq->size_bins, 0, sizeof(txvq->size_bins[0]) * 8); + } + 
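+ /* Likewise clear the per-queue RX counters. */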
+ for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct virtqueue *rxvq = dev->data->rx_queues[i]; + if (rxvq == NULL) + continue; + + rxvq->packets = 0; + rxvq->bytes = 0; + rxvq->errors = 0; + rxvq->multicast = 0; + rxvq->broadcast = 0; + memset(rxvq->size_bins, 0, sizeof(rxvq->size_bins[0]) * 8); + } +} + +static void +virtio_set_hwaddr(struct virtio_hw *hw) +{ + vtpci_write_dev_config(hw, + offsetof(struct virtio_net_config, mac), + &hw->mac_addr, ETHER_ADDR_LEN); +} + +static void +virtio_get_hwaddr(struct virtio_hw *hw) +{ + if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) { + vtpci_read_dev_config(hw, + offsetof(struct virtio_net_config, mac), + &hw->mac_addr, ETHER_ADDR_LEN); + } else { + eth_random_addr(&hw->mac_addr[0]); + virtio_set_hwaddr(hw); + } +} + +static void +virtio_mac_table_set(struct virtio_hw *hw, + const struct virtio_net_ctrl_mac *uc, + const struct virtio_net_ctrl_mac *mc) +{ + struct virtio_pmd_ctrl ctrl; + int err, len[2]; + + if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) { + PMD_DRV_LOG(INFO, "host does not support mac table\n"); + return; + } + + ctrl.hdr.class = VIRTIO_NET_CTRL_MAC; + ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET; + + len[0] = uc->entries * ETHER_ADDR_LEN + sizeof(uc->entries); + memcpy(ctrl.data, uc, len[0]); + + len[1] = mc->entries * ETHER_ADDR_LEN + sizeof(mc->entries); + memcpy(ctrl.data + len[0], mc, len[1]); + + err = virtio_send_command(hw->cvq, &ctrl, len, 2); + if (err != 0) + PMD_DRV_LOG(NOTICE, "mac table set failed: %d", err); +} + +static void +virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + uint32_t index, uint32_t vmdq __rte_unused) +{ + struct virtio_hw *hw = dev->data->dev_private; + const struct ether_addr *addrs = dev->data->mac_addrs; + unsigned int i; + struct virtio_net_ctrl_mac *uc, *mc; + + if (index >= VIRTIO_MAX_MAC_ADDRS) { + PMD_DRV_LOG(ERR, "mac address index %u out of range", index); + return; + } + + uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries)); + uc->entries = 0; + mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries)); + mc->entries = 0; + + for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) { + const struct ether_addr *addr + = (i == index) ? mac_addr : addrs + i; + struct virtio_net_ctrl_mac *tbl + = is_multicast_ether_addr(addr) ? mc : uc; + + memcpy(&tbl->macs[tbl->entries++], addr, ETHER_ADDR_LEN); + } + + virtio_mac_table_set(hw, uc, mc); +} + +static void +virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + struct virtio_hw *hw = dev->data->dev_private; + struct ether_addr *addrs = dev->data->mac_addrs; + struct virtio_net_ctrl_mac *uc, *mc; + unsigned int i; + + if (index >= VIRTIO_MAX_MAC_ADDRS) { + PMD_DRV_LOG(ERR, "mac address index %u out of range", index); + return; + } + + uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries)); + uc->entries = 0; + mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries)); + mc->entries = 0; + + for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) { + struct virtio_net_ctrl_mac *tbl; + + if (i == index || is_zero_ether_addr(addrs + i)) + continue; + + tbl = is_multicast_ether_addr(addrs + i) ? 
mc : uc; + memcpy(&tbl->macs[tbl->entries++], addrs + i, ETHER_ADDR_LEN); + } + + virtio_mac_table_set(hw, uc, mc); +} + +static void +virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) +{ + struct virtio_hw *hw = dev->data->dev_private; + + memcpy(hw->mac_addr, mac_addr, ETHER_ADDR_LEN); + + /* Use atomic update if available */ + if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) { + struct virtio_pmd_ctrl ctrl; + int len = ETHER_ADDR_LEN; + + ctrl.hdr.class = VIRTIO_NET_CTRL_MAC; + ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET; + + memcpy(ctrl.data, mac_addr, ETHER_ADDR_LEN); + virtio_send_command(hw->cvq, &ctrl, &len, 1); + } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) + virtio_set_hwaddr(hw); +} + +static int +virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct virtio_hw *hw = dev->data->dev_private; + struct virtio_pmd_ctrl ctrl; + int len; + + if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) + return -ENOTSUP; + + ctrl.hdr.class = VIRTIO_NET_CTRL_VLAN; + ctrl.hdr.cmd = on ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL; + memcpy(ctrl.data, &vlan_id, sizeof(vlan_id)); + len = sizeof(vlan_id); + + return virtio_send_command(hw->cvq, &ctrl, &len, 1); +} + +static int +virtio_negotiate_features(struct virtio_hw *hw) +{ + uint64_t host_features; + + /* Prepare guest_features: feature that driver wants to support */ + hw->guest_features = VIRTIO_PMD_GUEST_FEATURES; + PMD_INIT_LOG(DEBUG, "guest_features before negotiate = %" PRIx64, + hw->guest_features); + + /* Read device(host) feature bits */ + host_features = hw->vtpci_ops->get_features(hw); + PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64, + host_features); + + /* + * Negotiate features: Subset of device feature bits are written back + * guest feature bits. + */ + hw->guest_features = vtpci_negotiate_features(hw, host_features); + PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64, + hw->guest_features); + + if (hw->modern) { + if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) { + PMD_INIT_LOG(ERR, + "VIRTIO_F_VERSION_1 features is not enabled."); + return -1; + } + vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK); + if (!(vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) { + PMD_INIT_LOG(ERR, + "failed to set FEATURES_OK status!"); + return -1; + } + } + + return 0; +} + +/* + * Process Virtio Config changed interrupt and call the callback + * if link state changed. + */ +static void +virtio_interrupt_handler(__rte_unused struct rte_intr_handle *handle, + void *param) +{ + struct rte_eth_dev *dev = param; + struct virtio_hw *hw = dev->data->dev_private; + uint8_t isr; + + /* Read interrupt status which clears interrupt */ + isr = vtpci_isr(hw); + PMD_DRV_LOG(INFO, "interrupt status = %#x", isr); + + if (rte_intr_enable(&dev->pci_dev->intr_handle) < 0) + PMD_DRV_LOG(ERR, "interrupt enable failed"); + + if (isr & VIRTIO_PCI_ISR_CONFIG) { + if (virtio_dev_link_update(dev, 0) == 0) + _rte_eth_dev_callback_process(dev, + RTE_ETH_EVENT_INTR_LSC); + } + +} + +static void +rx_func_get(struct rte_eth_dev *eth_dev) +{ + struct virtio_hw *hw = eth_dev->data->dev_private; + if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) + eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts; + else + eth_dev->rx_pkt_burst = &virtio_recv_pkts; +} + +/* + * This function is based on probe() function in virtio_pci.c + * It returns 0 on success. 
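+ *
+ * In outline: allocate the MAC address table, initialize the PCI
+ * resources, reset the device, raise the ACK and DRIVER status bits,
+ * negotiate features, read back the MAC address and device config, set
+ * up the control queue when VIRTIO_NET_F_CTRL_VQ was negotiated, and
+ * register the LSC interrupt callback when the driver supports it.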
+ */ +static int +eth_virtio_dev_init(struct rte_eth_dev *eth_dev) +{ + struct virtio_hw *hw = eth_dev->data->dev_private; + struct virtio_net_config *config; + struct virtio_net_config local_config; + struct rte_pci_device *pci_dev; + int ret; + + RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr)); + + eth_dev->dev_ops = &virtio_eth_dev_ops; + eth_dev->tx_pkt_burst = &virtio_xmit_pkts; + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + rx_func_get(eth_dev); + return 0; + } + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc("virtio", VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, + "Failed to allocate %d bytes needed to store MAC addresses", + VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN); + return -ENOMEM; + } + + pci_dev = eth_dev->pci_dev; + + ret = vtpci_init(pci_dev, hw); + if (ret) + return ret; + + /* Reset the device although not necessary at startup */ + vtpci_reset(hw); + + /* Tell the host we've noticed this device. */ + vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK); + + /* Tell the host we've known how to drive the device. */ + vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER); + if (virtio_negotiate_features(hw) < 0) + return -1; + + /* If host does not support status then disable LSC */ + if (!vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) + pci_dev->driver->drv_flags &= ~RTE_PCI_DRV_INTR_LSC; + + rte_eth_copy_pci_info(eth_dev, pci_dev); + + rx_func_get(eth_dev); + + /* Setting up rx_header size for the device */ + if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) || + vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) + hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf); + else + hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr); + + /* Copy the permanent MAC address to: virtio_hw */ + virtio_get_hwaddr(hw); + ether_addr_copy((struct ether_addr *) hw->mac_addr, + ð_dev->data->mac_addrs[0]); + PMD_INIT_LOG(DEBUG, + "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X", + hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2], + hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]); + + if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) { + config = &local_config; + + vtpci_read_dev_config(hw, + offsetof(struct virtio_net_config, mac), + &config->mac, sizeof(config->mac)); + + if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) { + vtpci_read_dev_config(hw, + offsetof(struct virtio_net_config, status), + &config->status, sizeof(config->status)); + } else { + PMD_INIT_LOG(DEBUG, + "VIRTIO_NET_F_STATUS is not supported"); + config->status = 0; + } + + if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) { + vtpci_read_dev_config(hw, + offsetof(struct virtio_net_config, max_virtqueue_pairs), + &config->max_virtqueue_pairs, + sizeof(config->max_virtqueue_pairs)); + } else { + PMD_INIT_LOG(DEBUG, + "VIRTIO_NET_F_MQ is not supported"); + config->max_virtqueue_pairs = 1; + } + + hw->max_rx_queues = + (VIRTIO_MAX_RX_QUEUES < config->max_virtqueue_pairs) ? + VIRTIO_MAX_RX_QUEUES : config->max_virtqueue_pairs; + hw->max_tx_queues = + (VIRTIO_MAX_TX_QUEUES < config->max_virtqueue_pairs) ? 
+ VIRTIO_MAX_TX_QUEUES : config->max_virtqueue_pairs; + + virtio_dev_cq_queue_setup(eth_dev, + config->max_virtqueue_pairs * 2, + SOCKET_ID_ANY); + + PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d", + config->max_virtqueue_pairs); + PMD_INIT_LOG(DEBUG, "config->status=%d", config->status); + PMD_INIT_LOG(DEBUG, + "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X", + config->mac[0], config->mac[1], + config->mac[2], config->mac[3], + config->mac[4], config->mac[5]); + } else { + hw->max_rx_queues = 1; + hw->max_tx_queues = 1; + } + + PMD_INIT_LOG(DEBUG, "hw->max_rx_queues=%d hw->max_tx_queues=%d", + hw->max_rx_queues, hw->max_tx_queues); + PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x", + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id); + + /* Setup interrupt callback */ + if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC) + rte_intr_callback_register(&pci_dev->intr_handle, + virtio_interrupt_handler, eth_dev); + + virtio_dev_cq_start(eth_dev); + + return 0; +} + +static int +eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev; + struct virtio_hw *hw = eth_dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) + return -EPERM; + + /* Close it anyway since there's no way to know if closed */ + virtio_dev_close(eth_dev); + + pci_dev = eth_dev->pci_dev; + + eth_dev->dev_ops = NULL; + eth_dev->tx_pkt_burst = NULL; + eth_dev->rx_pkt_burst = NULL; + + virtio_dev_queue_release(hw->cvq); + + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + + /* reset interrupt callback */ + if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC) + rte_intr_callback_unregister(&pci_dev->intr_handle, + virtio_interrupt_handler, + eth_dev); + rte_eal_pci_unmap_device(pci_dev); + + PMD_INIT_LOG(DEBUG, "dev_uninit completed"); + + return 0; +} + +static struct eth_driver rte_virtio_pmd = { + .pci_drv = { + .name = "rte_virtio_pmd", + .id_table = pci_id_virtio_map, + .drv_flags = RTE_PCI_DRV_DETACHABLE, + }, + .eth_dev_init = eth_virtio_dev_init, + .eth_dev_uninit = eth_virtio_dev_uninit, + .dev_private_size = sizeof(struct virtio_hw), +}; + +/* + * Driver initialization routine. + * Invoked once at EAL init time. + * Register itself as the [Poll Mode] Driver of PCI virtio devices. + * Returns 0 on success. + */ +static int +rte_virtio_pmd_init(const char *name __rte_unused, + const char *param __rte_unused) +{ + if (rte_eal_iopl_init() != 0) { + PMD_INIT_LOG(ERR, "IOPL call failed - cannot use virtio PMD"); + return -1; + } + + rte_eth_driver_register(&rte_virtio_pmd); + return 0; +} + +/* + * Configure virtio device + * It returns 0 on success. 
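+ *
+ * It only checks what this PMD can honour: HW IP checksum offload is
+ * rejected, the VLAN strip setting is recorded, VLAN filtering requires
+ * VIRTIO_NET_F_CTRL_VLAN on the host, and the config-change interrupt
+ * vector is programmed when LSC is supported.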
+ */ +static int +virtio_dev_configure(struct rte_eth_dev *dev) +{ + const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + struct virtio_hw *hw = dev->data->dev_private; + struct rte_pci_device *pci_dev = dev->pci_dev; + + PMD_INIT_LOG(DEBUG, "configure"); + + if (rxmode->hw_ip_checksum) { + PMD_DRV_LOG(ERR, "HW IP checksum not supported"); + return -EINVAL; + } + + hw->vlan_strip = rxmode->hw_vlan_strip; + + if (rxmode->hw_vlan_filter + && !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) { + PMD_DRV_LOG(NOTICE, + "vlan filtering not available on this host"); + return -ENOTSUP; + } + + if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC) + if (vtpci_irq_config(hw, 0) == VIRTIO_MSI_NO_VECTOR) { + PMD_DRV_LOG(ERR, "failed to set config vector"); + return -EBUSY; + } + + return 0; +} + + +static int +virtio_dev_start(struct rte_eth_dev *dev) +{ + uint16_t nb_queues, i; + struct virtio_hw *hw = dev->data->dev_private; + struct rte_pci_device *pci_dev = dev->pci_dev; + + /* check if lsc interrupt feature is enabled */ + if (dev->data->dev_conf.intr_conf.lsc) { + if (!(pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)) { + PMD_DRV_LOG(ERR, "link status not supported by host"); + return -ENOTSUP; + } + + if (rte_intr_enable(&dev->pci_dev->intr_handle) < 0) { + PMD_DRV_LOG(ERR, "interrupt enable failed"); + return -EIO; + } + } + + /* Initialize Link state */ + virtio_dev_link_update(dev, 0); + + /* On restart after stop do not touch queues */ + if (hw->started) + return 0; + + /* Do final configuration before rx/tx engine starts */ + virtio_dev_rxtx_start(dev); + vtpci_reinit_complete(hw); + + hw->started = 1; + + /*Notify the backend + *Otherwise the tap backend might already stop its queue due to fullness. + *vhost backend will have no chance to be waked up + */ + nb_queues = dev->data->nb_rx_queues; + if (nb_queues > 1) { + if (virtio_set_multiple_queues(dev, nb_queues) != 0) + return -EINVAL; + } + + PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues); + + for (i = 0; i < nb_queues; i++) + virtqueue_notify(dev->data->rx_queues[i]); + + PMD_INIT_LOG(DEBUG, "Notified backend at initialization"); + + for (i = 0; i < dev->data->nb_rx_queues; i++) + VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]); + + for (i = 0; i < dev->data->nb_tx_queues; i++) + VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]); + + return 0; +} + +static void virtio_dev_free_mbufs(struct rte_eth_dev *dev) +{ + struct rte_mbuf *buf; + int i, mbuf_num = 0; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + PMD_INIT_LOG(DEBUG, + "Before freeing rxq[%d] used and unused buf", i); + VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]); + + PMD_INIT_LOG(DEBUG, "rx_queues[%d]=%p", + i, dev->data->rx_queues[i]); + while ((buf = (struct rte_mbuf *)virtqueue_detatch_unused( + dev->data->rx_queues[i])) != NULL) { + rte_pktmbuf_free(buf); + mbuf_num++; + } + + PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num); + PMD_INIT_LOG(DEBUG, + "After freeing rxq[%d] used and unused buf", i); + VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]); + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + PMD_INIT_LOG(DEBUG, + "Before freeing txq[%d] used and unused bufs", + i); + VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]); + + mbuf_num = 0; + while ((buf = (struct rte_mbuf *)virtqueue_detatch_unused( + dev->data->tx_queues[i])) != NULL) { + rte_pktmbuf_free(buf); + + mbuf_num++; + } + + PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num); + PMD_INIT_LOG(DEBUG, + "After freeing txq[%d] used 
and unused buf", i); + VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]); + } +} + +/* + * Stop device: disable interrupt and mark link down + */ +static void +virtio_dev_stop(struct rte_eth_dev *dev) +{ + struct rte_eth_link link; + struct virtio_hw *hw = dev->data->dev_private; + + PMD_INIT_LOG(DEBUG, "stop"); + + hw->started = 0; + + if (dev->data->dev_conf.intr_conf.lsc) + rte_intr_disable(&dev->pci_dev->intr_handle); + + memset(&link, 0, sizeof(link)); + virtio_dev_atomic_write_link_status(dev, &link); +} + +static int +virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) +{ + struct rte_eth_link link, old; + uint16_t status; + struct virtio_hw *hw = dev->data->dev_private; + memset(&link, 0, sizeof(link)); + virtio_dev_atomic_read_link_status(dev, &link); + old = link; + link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_speed = SPEED_10G; + + if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) { + PMD_INIT_LOG(DEBUG, "Get link status from hw"); + vtpci_read_dev_config(hw, + offsetof(struct virtio_net_config, status), + &status, sizeof(status)); + if ((status & VIRTIO_NET_S_LINK_UP) == 0) { + link.link_status = ETH_LINK_DOWN; + PMD_INIT_LOG(DEBUG, "Port %d is down", + dev->data->port_id); + } else { + link.link_status = ETH_LINK_UP; + PMD_INIT_LOG(DEBUG, "Port %d is up", + dev->data->port_id); + } + } else { + link.link_status = ETH_LINK_UP; + } + virtio_dev_atomic_write_link_status(dev, &link); + + return (old.link_status == link.link_status) ? -1 : 0; +} + +static void +virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct virtio_hw *hw = dev->data->dev_private; + + dev_info->driver_name = dev->driver->pci_drv.name; + dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues; + dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues; + dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE; + dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN; + dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS; + dev_info->default_txconf = (struct rte_eth_txconf) { + .txq_flags = ETH_TXQ_FLAGS_NOOFFLOADS + }; +} + +/* + * It enables testpmd to collect per queue stats. + */ +static int +virtio_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *eth_dev, +__rte_unused uint16_t queue_id, __rte_unused uint8_t stat_idx, +__rte_unused uint8_t is_rx) +{ + return 0; +} + +static struct rte_driver rte_virtio_driver = { + .type = PMD_PDEV, + .init = rte_virtio_pmd_init, +}; + +PMD_REGISTER_DRIVER(rte_virtio_driver); diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h new file mode 100644 index 00000000..66423a07 --- /dev/null +++ b/drivers/net/virtio/virtio_ethdev.h @@ -0,0 +1,125 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _VIRTIO_ETHDEV_H_ +#define _VIRTIO_ETHDEV_H_ + +#include <stdint.h> + +#include "virtio_pci.h" + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_10G 10000 + +#ifndef PAGE_SIZE +#define PAGE_SIZE 4096 +#endif + +#define VIRTIO_MAX_RX_QUEUES 128 +#define VIRTIO_MAX_TX_QUEUES 128 +#define VIRTIO_MAX_MAC_ADDRS 64 +#define VIRTIO_MIN_RX_BUFSIZE 64 +#define VIRTIO_MAX_RX_PKTLEN 9728 + +/* Features desired/implemented by this driver. */ +#define VIRTIO_PMD_GUEST_FEATURES \ + (1u << VIRTIO_NET_F_MAC | \ + 1u << VIRTIO_NET_F_STATUS | \ + 1u << VIRTIO_NET_F_MQ | \ + 1u << VIRTIO_NET_F_CTRL_MAC_ADDR | \ + 1u << VIRTIO_NET_F_CTRL_VQ | \ + 1u << VIRTIO_NET_F_CTRL_RX | \ + 1u << VIRTIO_NET_F_CTRL_VLAN | \ + 1u << VIRTIO_NET_F_MRG_RXBUF | \ + 1ULL << VIRTIO_F_VERSION_1) + +/* + * CQ function prototype + */ +void virtio_dev_cq_start(struct rte_eth_dev *dev); + +/* + * RX/TX function prototypes + */ +void virtio_dev_rxtx_start(struct rte_eth_dev *dev); + +int virtio_dev_queue_setup(struct rte_eth_dev *dev, + int queue_type, + uint16_t queue_idx, + uint16_t vtpci_queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + struct virtqueue **pvq); + +void virtio_dev_queue_release(struct virtqueue *vq); + +int virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool); + +void virtio_dev_rx_queue_release(void *rxq); + +int virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); + +void virtio_dev_tx_queue_release(void *txq); + +uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +uint16_t virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +uint16_t virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +/* + * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us + * frames larger than 1514 bytes. We do not yet support software LRO + * via tcp_lro_rx(). 
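/*
 * Sketch of how VIRTIO_PMD_GUEST_FEATURES is meant to be consumed at init
 * time (assumption based on the prototypes above, not part of this patch):
 * the host feature set is intersected with it and the RX path is chosen
 * from the result.  Note the 1ULL shift for VIRTIO_F_VERSION_1, bit 32.
 */
hw->guest_features = VIRTIO_PMD_GUEST_FEATURES;
host_features = hw->vtpci_ops->get_features(hw);
hw->guest_features = vtpci_negotiate_features(hw, host_features);

if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
	eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
else
	eth_dev->rx_pkt_burst = &virtio_recv_pkts;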
+ */ +#define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \ + VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN) + + +#endif /* _VIRTIO_ETHDEV_H_ */ diff --git a/drivers/net/virtio/virtio_logs.h b/drivers/net/virtio/virtio_logs.h new file mode 100644 index 00000000..d6c33f7b --- /dev/null +++ b/drivers/net/virtio/virtio_logs.h @@ -0,0 +1,70 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _VIRTIO_LOGS_H_ +#define _VIRTIO_LOGS_H_ + +#include <rte_log.h> + +#ifdef RTE_LIBRTE_VIRTIO_DEBUG_INIT +#define PMD_INIT_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") +#else +#define PMD_INIT_LOG(level, fmt, args...) do { } while(0) +#define PMD_INIT_FUNC_TRACE() do { } while(0) +#endif + +#ifdef RTE_LIBRTE_VIRTIO_DEBUG_RX +#define PMD_RX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s() rx: " fmt , __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_VIRTIO_DEBUG_TX +#define PMD_TX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s() tx: " fmt , __func__, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while(0) +#endif + + +#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DRIVER +#define PMD_DRV_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt , __func__, ## args) +#else +#define PMD_DRV_LOG(level, fmt, args...) do { } while(0) +#endif + +#endif /* _VIRTIO_LOGS_H_ */ diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c new file mode 100644 index 00000000..c007959f --- /dev/null +++ b/drivers/net/virtio/virtio_pci.c @@ -0,0 +1,666 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. 
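/*
 * Sketch (assumed build-config names, not in the original patch): the
 * PMD_*_LOG macros in virtio_logs.h compile to nothing unless the matching
 * option is enabled in the DPDK config, e.g.
 * CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_INIT=y or CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_DRIVER=y.
 * Typical call sites, as used throughout this driver:
 */
PMD_INIT_FUNC_TRACE();
PMD_INIT_LOG(DEBUG, "configure");
PMD_DRV_LOG(ERR, "interrupt enable failed");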
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include <stdint.h> + +#ifdef RTE_EXEC_ENV_LINUXAPP + #include <dirent.h> + #include <fcntl.h> +#endif + +#include "virtio_pci.h" +#include "virtio_logs.h" +#include "virtqueue.h" + +/* + * Following macros are derived from linux/pci_regs.h, however, + * we can't simply include that header here, as there is no such + * file for non-Linux platform. + */ +#define PCI_CAPABILITY_LIST 0x34 +#define PCI_CAP_ID_VNDR 0x09 + +/* + * The remaining space is defined by each driver as the per-driver + * configuration space. + */ +#define VIRTIO_PCI_CONFIG(hw) (((hw)->use_msix) ? 
24 : 20) + +static void +legacy_read_dev_config(struct virtio_hw *hw, size_t offset, + void *dst, int length) +{ + rte_eal_pci_ioport_read(&hw->io, dst, length, + VIRTIO_PCI_CONFIG(hw) + offset); +} + +static void +legacy_write_dev_config(struct virtio_hw *hw, size_t offset, + const void *src, int length) +{ + rte_eal_pci_ioport_write(&hw->io, src, length, + VIRTIO_PCI_CONFIG(hw) + offset); +} + +static uint64_t +legacy_get_features(struct virtio_hw *hw) +{ + uint32_t dst; + + rte_eal_pci_ioport_read(&hw->io, &dst, 4, VIRTIO_PCI_HOST_FEATURES); + return dst; +} + +static void +legacy_set_features(struct virtio_hw *hw, uint64_t features) +{ + if ((features >> 32) != 0) { + PMD_DRV_LOG(ERR, + "only 32 bit features are allowed for legacy virtio!"); + return; + } + rte_eal_pci_ioport_write(&hw->io, &features, 4, + VIRTIO_PCI_GUEST_FEATURES); +} + +static uint8_t +legacy_get_status(struct virtio_hw *hw) +{ + uint8_t dst; + + rte_eal_pci_ioport_read(&hw->io, &dst, 1, VIRTIO_PCI_STATUS); + return dst; +} + +static void +legacy_set_status(struct virtio_hw *hw, uint8_t status) +{ + rte_eal_pci_ioport_write(&hw->io, &status, 1, VIRTIO_PCI_STATUS); +} + +static void +legacy_reset(struct virtio_hw *hw) +{ + legacy_set_status(hw, VIRTIO_CONFIG_STATUS_RESET); +} + +static uint8_t +legacy_get_isr(struct virtio_hw *hw) +{ + uint8_t dst; + + rte_eal_pci_ioport_read(&hw->io, &dst, 1, VIRTIO_PCI_ISR); + return dst; +} + +/* Enable one vector (0) for Link State Intrerrupt */ +static uint16_t +legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec) +{ + uint16_t dst; + + rte_eal_pci_ioport_write(&hw->io, &vec, 2, VIRTIO_MSI_CONFIG_VECTOR); + rte_eal_pci_ioport_read(&hw->io, &dst, 2, VIRTIO_MSI_CONFIG_VECTOR); + return dst; +} + +static uint16_t +legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id) +{ + uint16_t dst; + + rte_eal_pci_ioport_write(&hw->io, &queue_id, 2, VIRTIO_PCI_QUEUE_SEL); + rte_eal_pci_ioport_read(&hw->io, &dst, 2, VIRTIO_PCI_QUEUE_NUM); + return dst; +} + +static void +legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq) +{ + uint32_t src; + + rte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2, + VIRTIO_PCI_QUEUE_SEL); + src = vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT; + rte_eal_pci_ioport_write(&hw->io, &src, 4, VIRTIO_PCI_QUEUE_PFN); +} + +static void +legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq) +{ + uint32_t src = 0; + + rte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2, + VIRTIO_PCI_QUEUE_SEL); + rte_eal_pci_ioport_write(&hw->io, &src, 4, VIRTIO_PCI_QUEUE_PFN); +} + +static void +legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq) +{ + rte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2, + VIRTIO_PCI_QUEUE_NOTIFY); +} + +#ifdef RTE_EXEC_ENV_LINUXAPP +static int +legacy_virtio_has_msix(const struct rte_pci_addr *loc) +{ + DIR *d; + char dirname[PATH_MAX]; + + snprintf(dirname, sizeof(dirname), + SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/msi_irqs", + loc->domain, loc->bus, loc->devid, loc->function); + + d = opendir(dirname); + if (d) + closedir(d); + + return d != NULL; +} +#else +static int +legacy_virtio_has_msix(const struct rte_pci_addr *loc __rte_unused) +{ + /* nic_uio does not enable interrupts, return 0 (false). 
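/*
 * Worked sketch for legacy_setup_queue() above: the 4 KB-aligned ring
 * address is published as a page frame number, i.e. shifted right by
 * VIRTIO_PCI_QUEUE_ADDR_SHIFT (12); writing 0 back releases the queue.
 */
uint32_t pfn = (uint32_t)(vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);

rte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2, VIRTIO_PCI_QUEUE_SEL);
rte_eal_pci_ioport_write(&hw->io, &pfn, 4, VIRTIO_PCI_QUEUE_PFN);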
*/ + return 0; +} +#endif + +static int +legacy_virtio_resource_init(struct rte_pci_device *pci_dev, + struct virtio_hw *hw) +{ + if (rte_eal_pci_ioport_map(pci_dev, 0, &hw->io) < 0) + return -1; + + if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UNKNOWN) + pci_dev->driver->drv_flags |= RTE_PCI_DRV_INTR_LSC; + else + pci_dev->driver->drv_flags &= ~RTE_PCI_DRV_INTR_LSC; + + return 0; +} + +static const struct virtio_pci_ops legacy_ops = { + .read_dev_cfg = legacy_read_dev_config, + .write_dev_cfg = legacy_write_dev_config, + .reset = legacy_reset, + .get_status = legacy_get_status, + .set_status = legacy_set_status, + .get_features = legacy_get_features, + .set_features = legacy_set_features, + .get_isr = legacy_get_isr, + .set_config_irq = legacy_set_config_irq, + .get_queue_num = legacy_get_queue_num, + .setup_queue = legacy_setup_queue, + .del_queue = legacy_del_queue, + .notify_queue = legacy_notify_queue, +}; + + +static inline uint8_t +io_read8(uint8_t *addr) +{ + return *(volatile uint8_t *)addr; +} + +static inline void +io_write8(uint8_t val, uint8_t *addr) +{ + *(volatile uint8_t *)addr = val; +} + +static inline uint16_t +io_read16(uint16_t *addr) +{ + return *(volatile uint16_t *)addr; +} + +static inline void +io_write16(uint16_t val, uint16_t *addr) +{ + *(volatile uint16_t *)addr = val; +} + +static inline uint32_t +io_read32(uint32_t *addr) +{ + return *(volatile uint32_t *)addr; +} + +static inline void +io_write32(uint32_t val, uint32_t *addr) +{ + *(volatile uint32_t *)addr = val; +} + +static inline void +io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi) +{ + io_write32(val & ((1ULL << 32) - 1), lo); + io_write32(val >> 32, hi); +} + +static void +modern_read_dev_config(struct virtio_hw *hw, size_t offset, + void *dst, int length) +{ + int i; + uint8_t *p; + uint8_t old_gen, new_gen; + + do { + old_gen = io_read8(&hw->common_cfg->config_generation); + + p = dst; + for (i = 0; i < length; i++) + *p++ = io_read8((uint8_t *)hw->dev_cfg + offset + i); + + new_gen = io_read8(&hw->common_cfg->config_generation); + } while (old_gen != new_gen); +} + +static void +modern_write_dev_config(struct virtio_hw *hw, size_t offset, + const void *src, int length) +{ + int i; + const uint8_t *p = src; + + for (i = 0; i < length; i++) + io_write8(*p++, (uint8_t *)hw->dev_cfg + offset + i); +} + +static uint64_t +modern_get_features(struct virtio_hw *hw) +{ + uint32_t features_lo, features_hi; + + io_write32(0, &hw->common_cfg->device_feature_select); + features_lo = io_read32(&hw->common_cfg->device_feature); + + io_write32(1, &hw->common_cfg->device_feature_select); + features_hi = io_read32(&hw->common_cfg->device_feature); + + return ((uint64_t)features_hi << 32) | features_lo; +} + +static void +modern_set_features(struct virtio_hw *hw, uint64_t features) +{ + io_write32(0, &hw->common_cfg->guest_feature_select); + io_write32(features & ((1ULL << 32) - 1), + &hw->common_cfg->guest_feature); + + io_write32(1, &hw->common_cfg->guest_feature_select); + io_write32(features >> 32, + &hw->common_cfg->guest_feature); +} + +static uint8_t +modern_get_status(struct virtio_hw *hw) +{ + return io_read8(&hw->common_cfg->device_status); +} + +static void +modern_set_status(struct virtio_hw *hw, uint8_t status) +{ + io_write8(status, &hw->common_cfg->device_status); +} + +static void +modern_reset(struct virtio_hw *hw) +{ + modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET); + modern_get_status(hw); +} + +static uint8_t +modern_get_isr(struct virtio_hw *hw) +{ + return io_read8(hw->isr); 
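/*
 * Worked sketch: io_write64_twopart() above splits a 64-bit guest-physical
 * address into two little-endian 32-bit registers, e.g. 0x123456000 is
 * written as lo = 0x23456000 and hi = 0x1.
 */
io_write64_twopart(0x123456000ULL,
		   &hw->common_cfg->queue_desc_lo,
		   &hw->common_cfg->queue_desc_hi);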
+} + +static uint16_t +modern_set_config_irq(struct virtio_hw *hw, uint16_t vec) +{ + io_write16(vec, &hw->common_cfg->msix_config); + return io_read16(&hw->common_cfg->msix_config); +} + +static uint16_t +modern_get_queue_num(struct virtio_hw *hw, uint16_t queue_id) +{ + io_write16(queue_id, &hw->common_cfg->queue_select); + return io_read16(&hw->common_cfg->queue_size); +} + +static void +modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq) +{ + uint64_t desc_addr, avail_addr, used_addr; + uint16_t notify_off; + + desc_addr = vq->mz->phys_addr; + avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc); + used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail, + ring[vq->vq_nentries]), + VIRTIO_PCI_VRING_ALIGN); + + io_write16(vq->vq_queue_index, &hw->common_cfg->queue_select); + + io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo, + &hw->common_cfg->queue_desc_hi); + io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo, + &hw->common_cfg->queue_avail_hi); + io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo, + &hw->common_cfg->queue_used_hi); + + notify_off = io_read16(&hw->common_cfg->queue_notify_off); + vq->notify_addr = (void *)((uint8_t *)hw->notify_base + + notify_off * hw->notify_off_multiplier); + + io_write16(1, &hw->common_cfg->queue_enable); + + PMD_INIT_LOG(DEBUG, "queue %u addresses:", vq->vq_queue_index); + PMD_INIT_LOG(DEBUG, "\t desc_addr: %" PRIx64, desc_addr); + PMD_INIT_LOG(DEBUG, "\t aval_addr: %" PRIx64, avail_addr); + PMD_INIT_LOG(DEBUG, "\t used_addr: %" PRIx64, used_addr); + PMD_INIT_LOG(DEBUG, "\t notify addr: %p (notify offset: %u)", + vq->notify_addr, notify_off); +} + +static void +modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq) +{ + io_write16(vq->vq_queue_index, &hw->common_cfg->queue_select); + + io_write64_twopart(0, &hw->common_cfg->queue_desc_lo, + &hw->common_cfg->queue_desc_hi); + io_write64_twopart(0, &hw->common_cfg->queue_avail_lo, + &hw->common_cfg->queue_avail_hi); + io_write64_twopart(0, &hw->common_cfg->queue_used_lo, + &hw->common_cfg->queue_used_hi); + + io_write16(0, &hw->common_cfg->queue_enable); +} + +static void +modern_notify_queue(struct virtio_hw *hw __rte_unused, struct virtqueue *vq) +{ + io_write16(1, vq->notify_addr); +} + +static const struct virtio_pci_ops modern_ops = { + .read_dev_cfg = modern_read_dev_config, + .write_dev_cfg = modern_write_dev_config, + .reset = modern_reset, + .get_status = modern_get_status, + .set_status = modern_set_status, + .get_features = modern_get_features, + .set_features = modern_set_features, + .get_isr = modern_get_isr, + .set_config_irq = modern_set_config_irq, + .get_queue_num = modern_get_queue_num, + .setup_queue = modern_setup_queue, + .del_queue = modern_del_queue, + .notify_queue = modern_notify_queue, +}; + + +void +vtpci_read_dev_config(struct virtio_hw *hw, size_t offset, + void *dst, int length) +{ + hw->vtpci_ops->read_dev_cfg(hw, offset, dst, length); +} + +void +vtpci_write_dev_config(struct virtio_hw *hw, size_t offset, + const void *src, int length) +{ + hw->vtpci_ops->write_dev_cfg(hw, offset, src, length); +} + +uint64_t +vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features) +{ + uint64_t features; + + /* + * Limit negotiated features to what the driver, virtqueue, and + * host all support. 
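/*
 * Worked sketch for the notify address computed in modern_setup_queue()
 * above: with notify_off_multiplier == 4 and queue_notify_off == 3 the
 * doorbell for the selected queue sits 12 bytes into the notify region,
 * and a 16-bit write of 1 kicks it.
 */
uint16_t off = io_read16(&hw->common_cfg->queue_notify_off);	/* e.g. 3 */
uint16_t *db = (uint16_t *)((uint8_t *)hw->notify_base +
			    off * hw->notify_off_multiplier);	/* base + 12 */

io_write16(1, db);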
+ */ + features = host_features & hw->guest_features; + hw->vtpci_ops->set_features(hw, features); + + return features; +} + +void +vtpci_reset(struct virtio_hw *hw) +{ + hw->vtpci_ops->set_status(hw, VIRTIO_CONFIG_STATUS_RESET); + /* flush status write */ + hw->vtpci_ops->get_status(hw); +} + +void +vtpci_reinit_complete(struct virtio_hw *hw) +{ + vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK); +} + +void +vtpci_set_status(struct virtio_hw *hw, uint8_t status) +{ + if (status != VIRTIO_CONFIG_STATUS_RESET) + status |= hw->vtpci_ops->get_status(hw); + + hw->vtpci_ops->set_status(hw, status); +} + +uint8_t +vtpci_get_status(struct virtio_hw *hw) +{ + return hw->vtpci_ops->get_status(hw); +} + +uint8_t +vtpci_isr(struct virtio_hw *hw) +{ + return hw->vtpci_ops->get_isr(hw); +} + + +/* Enable one vector (0) for Link State Intrerrupt */ +uint16_t +vtpci_irq_config(struct virtio_hw *hw, uint16_t vec) +{ + return hw->vtpci_ops->set_config_irq(hw, vec); +} + +static void * +get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap) +{ + uint8_t bar = cap->bar; + uint32_t length = cap->length; + uint32_t offset = cap->offset; + uint8_t *base; + + if (bar > 5) { + PMD_INIT_LOG(ERR, "invalid bar: %u", bar); + return NULL; + } + + if (offset + length < offset) { + PMD_INIT_LOG(ERR, "offset(%u) + length(%u) overflows", + offset, length); + return NULL; + } + + if (offset + length > dev->mem_resource[bar].len) { + PMD_INIT_LOG(ERR, + "invalid cap: overflows bar space: %u > %" PRIu64, + offset + length, dev->mem_resource[bar].len); + return NULL; + } + + base = dev->mem_resource[bar].addr; + if (base == NULL) { + PMD_INIT_LOG(ERR, "bar %u base addr is NULL", bar); + return NULL; + } + + return base + offset; +} + +static int +virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw) +{ + uint8_t pos; + struct virtio_pci_cap cap; + int ret; + + if (rte_eal_pci_map_device(dev)) { + PMD_INIT_LOG(DEBUG, "failed to map pci device!"); + return -1; + } + + ret = rte_eal_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST); + if (ret < 0) { + PMD_INIT_LOG(DEBUG, "failed to read pci capability list"); + return -1; + } + + while (pos) { + ret = rte_eal_pci_read_config(dev, &cap, sizeof(cap), pos); + if (ret < 0) { + PMD_INIT_LOG(ERR, + "failed to read pci cap at pos: %x", pos); + break; + } + + if (cap.cap_vndr != PCI_CAP_ID_VNDR) { + PMD_INIT_LOG(DEBUG, + "[%2x] skipping non VNDR cap id: %02x", + pos, cap.cap_vndr); + goto next; + } + + PMD_INIT_LOG(DEBUG, + "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u", + pos, cap.cfg_type, cap.bar, cap.offset, cap.length); + + switch (cap.cfg_type) { + case VIRTIO_PCI_CAP_COMMON_CFG: + hw->common_cfg = get_cfg_addr(dev, &cap); + break; + case VIRTIO_PCI_CAP_NOTIFY_CFG: + rte_eal_pci_read_config(dev, &hw->notify_off_multiplier, + 4, pos + sizeof(cap)); + hw->notify_base = get_cfg_addr(dev, &cap); + break; + case VIRTIO_PCI_CAP_DEVICE_CFG: + hw->dev_cfg = get_cfg_addr(dev, &cap); + break; + case VIRTIO_PCI_CAP_ISR_CFG: + hw->isr = get_cfg_addr(dev, &cap); + break; + } + +next: + pos = cap.cap_next; + } + + if (hw->common_cfg == NULL || hw->notify_base == NULL || + hw->dev_cfg == NULL || hw->isr == NULL) { + PMD_INIT_LOG(INFO, "no modern virtio pci device found."); + return -1; + } + + PMD_INIT_LOG(INFO, "found modern virtio pci device."); + + PMD_INIT_LOG(DEBUG, "common cfg mapped at: %p", hw->common_cfg); + PMD_INIT_LOG(DEBUG, "device cfg mapped at: %p", hw->dev_cfg); + PMD_INIT_LOG(DEBUG, "isr cfg mapped at: %p", hw->isr); + PMD_INIT_LOG(DEBUG, 
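/*
 * Sketch (assumed init-time ordering, not part of this patch):
 * vtpci_set_status() ORs each new bit into the current status, so the
 * usual virtio handshake looks like:
 */
vtpci_reset(hw);					/* status = 0 */
vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);		/* device seen */
vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);	/* driver attached */
/* ... vtpci_negotiate_features(hw, host_features) ... */
vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);	/* vtpci_reinit_complete() */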
"notify base: %p, notify off multiplier: %u", + hw->notify_base, hw->notify_off_multiplier); + + return 0; +} + +/* + * Return -1: + * if there is error mapping with VFIO/UIO. + * if port map error when driver type is KDRV_NONE. + * Return 1 if kernel driver is managing the device. + * Return 0 on success. + */ +int +vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw) +{ + hw->dev = dev; + + /* + * Try if we can succeed reading virtio pci caps, which exists + * only on modern pci device. If failed, we fallback to legacy + * virtio handling. + */ + if (virtio_read_caps(dev, hw) == 0) { + PMD_INIT_LOG(INFO, "modern virtio pci detected."); + hw->vtpci_ops = &modern_ops; + hw->modern = 1; + dev->driver->drv_flags |= RTE_PCI_DRV_INTR_LSC; + return 0; + } + + PMD_INIT_LOG(INFO, "trying with legacy virtio pci."); + if (legacy_virtio_resource_init(dev, hw) < 0) { + if (dev->kdrv == RTE_KDRV_UNKNOWN && + dev->devargs->type != RTE_DEVTYPE_WHITELISTED_PCI) { + PMD_INIT_LOG(INFO, + "skip kernel managed virtio device."); + return 1; + } + return -1; + } + + hw->vtpci_ops = &legacy_ops; + hw->use_msix = legacy_virtio_has_msix(&dev->addr); + hw->modern = 0; + + return 0; +} diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h new file mode 100644 index 00000000..b69785ea --- /dev/null +++ b/drivers/net/virtio/virtio_pci.h @@ -0,0 +1,314 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _VIRTIO_PCI_H_ +#define _VIRTIO_PCI_H_ + +#include <stdint.h> + +#include <rte_pci.h> +#include <rte_ethdev.h> + +struct virtqueue; + +/* VirtIO PCI vendor/device ID. */ +#define VIRTIO_PCI_VENDORID 0x1AF4 +#define VIRTIO_PCI_DEVICEID_MIN 0x1000 +#define VIRTIO_PCI_DEVICEID_MAX 0x103F + +/* VirtIO ABI version, this must match exactly. */ +#define VIRTIO_PCI_ABI_VERSION 0 + +/* + * VirtIO Header, located in BAR 0. 
+ */ +#define VIRTIO_PCI_HOST_FEATURES 0 /* host's supported features (32bit, RO)*/ +#define VIRTIO_PCI_GUEST_FEATURES 4 /* guest's supported features (32, RW) */ +#define VIRTIO_PCI_QUEUE_PFN 8 /* physical address of VQ (32, RW) */ +#define VIRTIO_PCI_QUEUE_NUM 12 /* number of ring entries (16, RO) */ +#define VIRTIO_PCI_QUEUE_SEL 14 /* current VQ selection (16, RW) */ +#define VIRTIO_PCI_QUEUE_NOTIFY 16 /* notify host regarding VQ (16, RW) */ +#define VIRTIO_PCI_STATUS 18 /* device status register (8, RW) */ +#define VIRTIO_PCI_ISR 19 /* interrupt status register, reading + * also clears the register (8, RO) */ +/* Only if MSIX is enabled: */ +#define VIRTIO_MSI_CONFIG_VECTOR 20 /* configuration change vector (16, RW) */ +#define VIRTIO_MSI_QUEUE_VECTOR 22 /* vector for selected VQ notifications + (16, RW) */ + +/* The bit of the ISR which indicates a device has an interrupt. */ +#define VIRTIO_PCI_ISR_INTR 0x1 +/* The bit of the ISR which indicates a device configuration change. */ +#define VIRTIO_PCI_ISR_CONFIG 0x2 +/* Vector value used to disable MSI for queue. */ +#define VIRTIO_MSI_NO_VECTOR 0xFFFF + +/* VirtIO device IDs. */ +#define VIRTIO_ID_NETWORK 0x01 +#define VIRTIO_ID_BLOCK 0x02 +#define VIRTIO_ID_CONSOLE 0x03 +#define VIRTIO_ID_ENTROPY 0x04 +#define VIRTIO_ID_BALLOON 0x05 +#define VIRTIO_ID_IOMEMORY 0x06 +#define VIRTIO_ID_9P 0x09 + +/* Status byte for guest to report progress. */ +#define VIRTIO_CONFIG_STATUS_RESET 0x00 +#define VIRTIO_CONFIG_STATUS_ACK 0x01 +#define VIRTIO_CONFIG_STATUS_DRIVER 0x02 +#define VIRTIO_CONFIG_STATUS_DRIVER_OK 0x04 +#define VIRTIO_CONFIG_STATUS_FEATURES_OK 0x08 +#define VIRTIO_CONFIG_STATUS_FAILED 0x80 + +/* + * Each virtqueue indirect descriptor list must be physically contiguous. + * To allow us to malloc(9) each list individually, limit the number + * supported to what will fit in one page. With 4KB pages, this is a limit + * of 256 descriptors. If there is ever a need for more, we can switch to + * contigmalloc(9) for the larger allocations, similar to what + * bus_dmamem_alloc(9) does. + * + * Note the sizeof(struct vring_desc) is 16 bytes. + */ +#define VIRTIO_MAX_INDIRECT ((int) (PAGE_SIZE / 16)) + +/* The feature bitmap for virtio net */ +#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */ +#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */ +#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */ +#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */ +#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */ +#define VIRTIO_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in. */ +#define VIRTIO_NET_F_GUEST_UFO 10 /* Guest can handle UFO in. */ +#define VIRTIO_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in. */ +#define VIRTIO_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */ +#define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */ +#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */ +#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. 
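/*
 * Sketch (assumption): reading the ISR both returns and clears it, so an
 * LSC interrupt handler is expected to look roughly like this:
 */
uint8_t isr = vtpci_isr(hw);

if (isr & VIRTIO_PCI_ISR_CONFIG)
	virtio_dev_link_update(dev, 0);	/* config change: refresh link state */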
*/ +#define VIRTIO_NET_F_STATUS 16 /* virtio_net_config.status available */ +#define VIRTIO_NET_F_CTRL_VQ 17 /* Control channel available */ +#define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */ +#define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */ +#define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */ +#define VIRTIO_NET_F_GUEST_ANNOUNCE 21 /* Guest can announce device on the + * network */ +#define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow + * Steering */ +#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */ + +/* Do we get callbacks when the ring is completely used, even if we've + * suppressed them? */ +#define VIRTIO_F_NOTIFY_ON_EMPTY 24 + +/* Can the device handle any descriptor layout? */ +#define VIRTIO_F_ANY_LAYOUT 27 + +/* We support indirect buffer descriptors */ +#define VIRTIO_RING_F_INDIRECT_DESC 28 + +#define VIRTIO_F_VERSION_1 32 + +/* + * Some VirtIO feature bits (currently bits 28 through 31) are + * reserved for the transport being used (eg. virtio_ring), the + * rest are per-device feature bits. + */ +#define VIRTIO_TRANSPORT_F_START 28 +#define VIRTIO_TRANSPORT_F_END 32 + +/* The Guest publishes the used index for which it expects an interrupt + * at the end of the avail ring. Host should ignore the avail->flags field. */ +/* The Host publishes the avail index for which it expects a kick + * at the end of the used ring. Guest should ignore the used->flags field. */ +#define VIRTIO_RING_F_EVENT_IDX 29 + +#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */ +#define VIRTIO_NET_S_ANNOUNCE 2 /* Announcement is needed */ + +/* + * Maximum number of virtqueues per device. + */ +#define VIRTIO_MAX_VIRTQUEUES 8 + +/* Common configuration */ +#define VIRTIO_PCI_CAP_COMMON_CFG 1 +/* Notifications */ +#define VIRTIO_PCI_CAP_NOTIFY_CFG 2 +/* ISR Status */ +#define VIRTIO_PCI_CAP_ISR_CFG 3 +/* Device specific configuration */ +#define VIRTIO_PCI_CAP_DEVICE_CFG 4 +/* PCI configuration access */ +#define VIRTIO_PCI_CAP_PCI_CFG 5 + +/* This is the PCI capability header: */ +struct virtio_pci_cap { + uint8_t cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */ + uint8_t cap_next; /* Generic PCI field: next ptr. */ + uint8_t cap_len; /* Generic PCI field: capability length */ + uint8_t cfg_type; /* Identifies the structure. */ + uint8_t bar; /* Where to find it. */ + uint8_t padding[3]; /* Pad to full dword. */ + uint32_t offset; /* Offset within bar. */ + uint32_t length; /* Length of the structure, in bytes. */ +}; + +struct virtio_pci_notify_cap { + struct virtio_pci_cap cap; + uint32_t notify_off_multiplier; /* Multiplier for queue_notify_off. */ +}; + +/* Fields in VIRTIO_PCI_CAP_COMMON_CFG: */ +struct virtio_pci_common_cfg { + /* About the whole device. */ + uint32_t device_feature_select; /* read-write */ + uint32_t device_feature; /* read-only */ + uint32_t guest_feature_select; /* read-write */ + uint32_t guest_feature; /* read-write */ + uint16_t msix_config; /* read-write */ + uint16_t num_queues; /* read-only */ + uint8_t device_status; /* read-write */ + uint8_t config_generation; /* read-only */ + + /* About a specific virtqueue. */ + uint16_t queue_select; /* read-write */ + uint16_t queue_size; /* read-write, power of 2. 
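/*
 * Sketch: the queue_select/queue_size pair above is how ring sizes are
 * discovered (see modern_get_queue_num()); a size of 0 means the queue
 * does not exist.
 */
io_write16(0, &hw->common_cfg->queue_select);
uint16_t qsize = io_read16(&hw->common_cfg->queue_size);	/* power of 2 */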
*/ + uint16_t queue_msix_vector; /* read-write */ + uint16_t queue_enable; /* read-write */ + uint16_t queue_notify_off; /* read-only */ + uint32_t queue_desc_lo; /* read-write */ + uint32_t queue_desc_hi; /* read-write */ + uint32_t queue_avail_lo; /* read-write */ + uint32_t queue_avail_hi; /* read-write */ + uint32_t queue_used_lo; /* read-write */ + uint32_t queue_used_hi; /* read-write */ +}; + +struct virtio_hw; + +struct virtio_pci_ops { + void (*read_dev_cfg)(struct virtio_hw *hw, size_t offset, + void *dst, int len); + void (*write_dev_cfg)(struct virtio_hw *hw, size_t offset, + const void *src, int len); + void (*reset)(struct virtio_hw *hw); + + uint8_t (*get_status)(struct virtio_hw *hw); + void (*set_status)(struct virtio_hw *hw, uint8_t status); + + uint64_t (*get_features)(struct virtio_hw *hw); + void (*set_features)(struct virtio_hw *hw, uint64_t features); + + uint8_t (*get_isr)(struct virtio_hw *hw); + + uint16_t (*set_config_irq)(struct virtio_hw *hw, uint16_t vec); + + uint16_t (*get_queue_num)(struct virtio_hw *hw, uint16_t queue_id); + void (*setup_queue)(struct virtio_hw *hw, struct virtqueue *vq); + void (*del_queue)(struct virtio_hw *hw, struct virtqueue *vq); + void (*notify_queue)(struct virtio_hw *hw, struct virtqueue *vq); +}; + +struct virtio_net_config; + +struct virtio_hw { + struct virtqueue *cvq; + struct rte_pci_ioport io; + uint64_t guest_features; + uint32_t max_tx_queues; + uint32_t max_rx_queues; + uint16_t vtnet_hdr_size; + uint8_t vlan_strip; + uint8_t use_msix; + uint8_t started; + uint8_t modern; + uint8_t mac_addr[ETHER_ADDR_LEN]; + uint32_t notify_off_multiplier; + uint8_t *isr; + uint16_t *notify_base; + struct rte_pci_device *dev; + struct virtio_pci_common_cfg *common_cfg; + struct virtio_net_config *dev_cfg; + const struct virtio_pci_ops *vtpci_ops; +}; + +/* + * This structure is just a reference to read + * net device specific config space; it just a chodu structure + * + */ +struct virtio_net_config { + /* The config defining mac address (if VIRTIO_NET_F_MAC) */ + uint8_t mac[ETHER_ADDR_LEN]; + /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */ + uint16_t status; + uint16_t max_virtqueue_pairs; +} __attribute__((packed)); + +/* + * How many bits to shift physical queue address written to QUEUE_PFN. + * 12 is historical, and due to x86 page size. + */ +#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12 + +/* The alignment to use between consumer and producer parts of vring. 
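/*
 * Sketch: device-specific fields are read through vtpci_read_dev_config()
 * with offsets into struct virtio_net_config above, e.g. fetching the MAC
 * address when VIRTIO_NET_F_MAC was negotiated:
 */
if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC))
	vtpci_read_dev_config(hw,
		offsetof(struct virtio_net_config, mac),
		&hw->mac_addr, ETHER_ADDR_LEN);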
*/ +#define VIRTIO_PCI_VRING_ALIGN 4096 + +static inline int +vtpci_with_feature(struct virtio_hw *hw, uint64_t bit) +{ + return (hw->guest_features & (1ULL << bit)) != 0; +} + +/* + * Function declaration from virtio_pci.c + */ +int vtpci_init(struct rte_pci_device *, struct virtio_hw *); +void vtpci_reset(struct virtio_hw *); + +void vtpci_reinit_complete(struct virtio_hw *); + +uint8_t vtpci_get_status(struct virtio_hw *); +void vtpci_set_status(struct virtio_hw *, uint8_t); + +uint64_t vtpci_negotiate_features(struct virtio_hw *, uint64_t); + +void vtpci_write_dev_config(struct virtio_hw *, size_t, const void *, int); + +void vtpci_read_dev_config(struct virtio_hw *, size_t, void *, int); + +uint8_t vtpci_isr(struct virtio_hw *); + +uint16_t vtpci_irq_config(struct virtio_hw *, uint16_t); + +#endif /* _VIRTIO_PCI_H_ */ diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h new file mode 100644 index 00000000..447760a8 --- /dev/null +++ b/drivers/net/virtio/virtio_ring.h @@ -0,0 +1,163 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _VIRTIO_RING_H_ +#define _VIRTIO_RING_H_ + +#include <stdint.h> + +#include <rte_common.h> + +/* This marks a buffer as continuing via the next field. */ +#define VRING_DESC_F_NEXT 1 +/* This marks a buffer as write-only (otherwise read-only). */ +#define VRING_DESC_F_WRITE 2 +/* This means the buffer contains a list of buffer descriptors. */ +#define VRING_DESC_F_INDIRECT 4 + +/* The Host uses this in used->flags to advise the Guest: don't kick me + * when you add a buffer. It's unreliable, so it's simply an + * optimization. Guest will still kick if it's out of buffers. */ +#define VRING_USED_F_NO_NOTIFY 1 +/* The Guest uses this in avail->flags to advise the Host: don't + * interrupt me when you consume a buffer. It's unreliable, so it's + * simply an optimization. */ +#define VRING_AVAIL_F_NO_INTERRUPT 1 + +/* VirtIO ring descriptors: 16 bytes. 
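/*
 * Sketch (assumption): suppressing host-to-guest interrupts on a ring is a
 * single flag write, which is what virtqueue_disable_intr() is expected to
 * do with the bit defined above:
 */
vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;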
+ * These can chain together via "next". */ +struct vring_desc { + uint64_t addr; /* Address (guest-physical). */ + uint32_t len; /* Length. */ + uint16_t flags; /* The flags as indicated above. */ + uint16_t next; /* We chain unused descriptors via this. */ +}; + +struct vring_avail { + uint16_t flags; + uint16_t idx; + uint16_t ring[0]; +}; + +/* id is a 16bit index. uint32_t is used here for ids for padding reasons. */ +struct vring_used_elem { + /* Index of start of used descriptor chain. */ + uint32_t id; + /* Total length of the descriptor chain which was written to. */ + uint32_t len; +}; + +struct vring_used { + uint16_t flags; + uint16_t idx; + struct vring_used_elem ring[0]; +}; + +struct vring { + unsigned int num; + struct vring_desc *desc; + struct vring_avail *avail; + struct vring_used *used; +}; + +/* The standard layout for the ring is a continuous chunk of memory which + * looks like this. We assume num is a power of 2. + * + * struct vring { + * // The actual descriptors (16 bytes each) + * struct vring_desc desc[num]; + * + * // A ring of available descriptor heads with free-running index. + * __u16 avail_flags; + * __u16 avail_idx; + * __u16 available[num]; + * __u16 used_event_idx; + * + * // Padding to the next align boundary. + * char pad[]; + * + * // A ring of used descriptor heads with free-running index. + * __u16 used_flags; + * __u16 used_idx; + * struct vring_used_elem used[num]; + * __u16 avail_event_idx; + * }; + * + * NOTE: for VirtIO PCI, align is 4096. + */ + +/* + * We publish the used event index at the end of the available ring, and vice + * versa. They are at the end for backwards compatibility. + */ +#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num]) +#define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num]) + +static inline size_t +vring_size(unsigned int num, unsigned long align) +{ + size_t size; + + size = num * sizeof(struct vring_desc); + size += sizeof(struct vring_avail) + (num * sizeof(uint16_t)); + size = RTE_ALIGN_CEIL(size, align); + size += sizeof(struct vring_used) + + (num * sizeof(struct vring_used_elem)); + return size; +} + +static inline void +vring_init(struct vring *vr, unsigned int num, uint8_t *p, + unsigned long align) +{ + vr->num = num; + vr->desc = (struct vring_desc *) p; + vr->avail = (struct vring_avail *) (p + + num * sizeof(struct vring_desc)); + vr->used = (void *) + RTE_ALIGN_CEIL((uintptr_t)(&vr->avail->ring[num]), align); +} + +/* + * The following is used with VIRTIO_RING_F_EVENT_IDX. + * Assuming a given event_idx value from the other size, if we have + * just incremented index from old to new_idx, should we trigger an + * event? + */ +static inline int +vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old) +{ + return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old); +} + +#endif /* _VIRTIO_RING_H_ */ diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c new file mode 100644 index 00000000..ef21d8e3 --- /dev/null +++ b/drivers/net/virtio/virtio_rxtx.c @@ -0,0 +1,940 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
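/*
 * Worked sketch for vring_size() above with num = 256, align = 4096:
 *   desc area : 256 * 16 = 4096 bytes
 *   avail ring: 4 + 256 * 2 = 516 bytes, 4612 so far, aligned up to 8192
 *   used ring : 4 + 256 * 8 = 2052 bytes, giving 10244 bytes overall
 * (the trailing event-idx words are not counted by this helper).
 */
size_t sz = vring_size(256, VIRTIO_PCI_VRING_ALIGN);	/* 10244 */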
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <errno.h> + +#include <rte_cycles.h> +#include <rte_memory.h> +#include <rte_memzone.h> +#include <rte_branch_prediction.h> +#include <rte_mempool.h> +#include <rte_malloc.h> +#include <rte_mbuf.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_prefetch.h> +#include <rte_string_fns.h> +#include <rte_errno.h> +#include <rte_byteorder.h> + +#include "virtio_logs.h" +#include "virtio_ethdev.h" +#include "virtio_pci.h" +#include "virtqueue.h" +#include "virtio_rxtx.h" + +#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP +#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len) +#else +#define VIRTIO_DUMP_PACKET(m, len) do { } while (0) +#endif + + +#define VIRTIO_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \ + ETH_TXQ_FLAGS_NOOFFLOADS) + +#ifdef RTE_MACHINE_CPUFLAG_SSSE3 +static int use_simple_rxtx; +#endif + +static void +vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx) +{ + struct vring_desc *dp, *dp_tail; + struct vq_desc_extra *dxp; + uint16_t desc_idx_last = desc_idx; + + dp = &vq->vq_ring.desc[desc_idx]; + dxp = &vq->vq_descx[desc_idx]; + vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs); + if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) { + while (dp->flags & VRING_DESC_F_NEXT) { + desc_idx_last = dp->next; + dp = &vq->vq_ring.desc[dp->next]; + } + } + dxp->ndescs = 0; + + /* + * We must append the existing free chain, if any, to the end of + * newly freed chain. If the virtqueue was completely used, then + * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above). 
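/*
 * Sketch (assumed application usage): the simple rx/tx path guarded by
 * VIRTIO_SIMPLE_FLAGS above is selected only when the TX queue is set up
 * with no multi-segment and no offload flags, and VIRTIO_NET_F_MRG_RXBUF
 * was not negotiated, e.g.:
 */
struct rte_eth_txconf txconf = {
	.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
};

rte_eth_tx_queue_setup(port_id, 0, 256, rte_socket_id(), &txconf);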
+ */ + if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) { + vq->vq_desc_head_idx = desc_idx; + } else { + dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx]; + dp_tail->next = desc_idx; + } + + vq->vq_desc_tail_idx = desc_idx_last; + dp->next = VQ_RING_DESC_CHAIN_END; +} + +static uint16_t +virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts, + uint32_t *len, uint16_t num) +{ + struct vring_used_elem *uep; + struct rte_mbuf *cookie; + uint16_t used_idx, desc_idx; + uint16_t i; + + /* Caller does the check */ + for (i = 0; i < num ; i++) { + used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1)); + uep = &vq->vq_ring.used->ring[used_idx]; + desc_idx = (uint16_t) uep->id; + len[i] = uep->len; + cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie; + + if (unlikely(cookie == NULL)) { + PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u\n", + vq->vq_used_cons_idx); + break; + } + + rte_prefetch0(cookie); + rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *)); + rx_pkts[i] = cookie; + vq->vq_used_cons_idx++; + vq_ring_free_chain(vq, desc_idx); + vq->vq_descx[desc_idx].cookie = NULL; + } + + return i; +} + +#ifndef DEFAULT_TX_FREE_THRESH +#define DEFAULT_TX_FREE_THRESH 32 +#endif + +/* Cleanup from completed transmits. */ +static void +virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num) +{ + uint16_t i, used_idx, desc_idx; + for (i = 0; i < num; i++) { + struct vring_used_elem *uep; + struct vq_desc_extra *dxp; + + used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1)); + uep = &vq->vq_ring.used->ring[used_idx]; + + desc_idx = (uint16_t) uep->id; + dxp = &vq->vq_descx[desc_idx]; + vq->vq_used_cons_idx++; + vq_ring_free_chain(vq, desc_idx); + + if (dxp->cookie != NULL) { + rte_pktmbuf_free(dxp->cookie); + dxp->cookie = NULL; + } + } +} + + +static inline int +virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie) +{ + struct vq_desc_extra *dxp; + struct virtio_hw *hw = vq->hw; + struct vring_desc *start_dp; + uint16_t needed = 1; + uint16_t head_idx, idx; + + if (unlikely(vq->vq_free_cnt == 0)) + return -ENOSPC; + if (unlikely(vq->vq_free_cnt < needed)) + return -EMSGSIZE; + + head_idx = vq->vq_desc_head_idx; + if (unlikely(head_idx >= vq->vq_nentries)) + return -EFAULT; + + idx = head_idx; + dxp = &vq->vq_descx[idx]; + dxp->cookie = (void *)cookie; + dxp->ndescs = needed; + + start_dp = vq->vq_ring.desc; + start_dp[idx].addr = + (uint64_t)(cookie->buf_physaddr + RTE_PKTMBUF_HEADROOM + - hw->vtnet_hdr_size); + start_dp[idx].len = + cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size; + start_dp[idx].flags = VRING_DESC_F_WRITE; + idx = start_dp[idx].next; + vq->vq_desc_head_idx = idx; + if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) + vq->vq_desc_tail_idx = idx; + vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed); + vq_update_avail_ring(vq, head_idx); + + return 0; +} + +static inline void +virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie, + uint16_t needed, int use_indirect, int can_push) +{ + struct vq_desc_extra *dxp; + struct vring_desc *start_dp; + uint16_t seg_num = cookie->nb_segs; + uint16_t head_idx, idx; + uint16_t head_size = txvq->hw->vtnet_hdr_size; + unsigned long offs; + + head_idx = txvq->vq_desc_head_idx; + idx = head_idx; + dxp = &txvq->vq_descx[idx]; + dxp->cookie = (void *)cookie; + dxp->ndescs = needed; + + start_dp = txvq->vq_ring.desc; + + if (can_push) { + /* put on zero'd transmit header (no offloads) */ + void *hdr = 
rte_pktmbuf_prepend(cookie, head_size); + + memset(hdr, 0, head_size); + } else if (use_indirect) { + /* setup tx ring slot to point to indirect + * descriptor list stored in reserved region. + * + * the first slot in indirect ring is already preset + * to point to the header in reserved region + */ + struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr; + + offs = idx * sizeof(struct virtio_tx_region) + + offsetof(struct virtio_tx_region, tx_indir); + + start_dp[idx].addr = txvq->virtio_net_hdr_mem + offs; + start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_desc); + start_dp[idx].flags = VRING_DESC_F_INDIRECT; + + /* loop below will fill in rest of the indirect elements */ + start_dp = txr[idx].tx_indir; + idx = 1; + } else { + /* setup first tx ring slot to point to header + * stored in reserved region. + */ + offs = idx * sizeof(struct virtio_tx_region) + + offsetof(struct virtio_tx_region, tx_hdr); + + start_dp[idx].addr = txvq->virtio_net_hdr_mem + offs; + start_dp[idx].len = txvq->hw->vtnet_hdr_size; + start_dp[idx].flags = VRING_DESC_F_NEXT; + idx = start_dp[idx].next; + } + + do { + start_dp[idx].addr = rte_mbuf_data_dma_addr(cookie); + start_dp[idx].len = cookie->data_len; + start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0; + idx = start_dp[idx].next; + } while ((cookie = cookie->next) != NULL); + + start_dp[idx].flags &= ~VRING_DESC_F_NEXT; + + if (use_indirect) + idx = txvq->vq_ring.desc[head_idx].next; + + txvq->vq_desc_head_idx = idx; + if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) + txvq->vq_desc_tail_idx = idx; + txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed); + vq_update_avail_ring(txvq, head_idx); +} + +static inline struct rte_mbuf * +rte_rxmbuf_alloc(struct rte_mempool *mp) +{ + struct rte_mbuf *m; + + m = __rte_mbuf_raw_alloc(mp); + __rte_mbuf_sanity_check_raw(m, 0); + + return m; +} + +static void +virtio_dev_vring_start(struct virtqueue *vq, int queue_type) +{ + struct rte_mbuf *m; + int i, nbufs, error, size = vq->vq_nentries; + struct vring *vr = &vq->vq_ring; + uint8_t *ring_mem = vq->vq_ring_virt_mem; + + PMD_INIT_FUNC_TRACE(); + + /* + * Reinitialise since virtio port might have been stopped and restarted + */ + memset(vq->vq_ring_virt_mem, 0, vq->vq_ring_size); + vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN); + vq->vq_used_cons_idx = 0; + vq->vq_desc_head_idx = 0; + vq->vq_avail_idx = 0; + vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1); + vq->vq_free_cnt = vq->vq_nentries; + memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries); + + vring_desc_init(vr->desc, size); + + /* + * Disable device(host) interrupting guest + */ + virtqueue_disable_intr(vq); + + /* Only rx virtqueue needs mbufs to be allocated at initialization */ + if (queue_type == VTNET_RQ) { + if (vq->mpool == NULL) + rte_exit(EXIT_FAILURE, + "Cannot allocate initial mbufs for rx virtqueue"); + + /* Allocate blank mbufs for the each rx descriptor */ + nbufs = 0; + error = ENOSPC; + +#ifdef RTE_MACHINE_CPUFLAG_SSSE3 + if (use_simple_rxtx) + for (i = 0; i < vq->vq_nentries; i++) { + vq->vq_ring.avail->ring[i] = i; + vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE; + } +#endif + memset(&vq->fake_mbuf, 0, sizeof(vq->fake_mbuf)); + for (i = 0; i < RTE_PMD_VIRTIO_RX_MAX_BURST; i++) + vq->sw_ring[vq->vq_nentries + i] = &vq->fake_mbuf; + + while (!virtqueue_full(vq)) { + m = rte_rxmbuf_alloc(vq->mpool); + if (m == NULL) + break; + + /****************************************** + * Enqueue allocated buffers * + 
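/*
 * Worked sketch for virtqueue_enqueue_recv_refill(): assuming the default
 * 128-byte RTE_PKTMBUF_HEADROOM and a 10-byte virtio_net_hdr (no mergeable
 * buffers), each RX descriptor starts 10 bytes before the packet data so
 * the host can write the header in place:
 *
 *   addr = buf_physaddr + 128 - 10
 *   len  = buf_len      - 128 + 10
 */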
*******************************************/ +#ifdef RTE_MACHINE_CPUFLAG_SSSE3 + if (use_simple_rxtx) + error = virtqueue_enqueue_recv_refill_simple(vq, m); + else +#endif + error = virtqueue_enqueue_recv_refill(vq, m); + if (error) { + rte_pktmbuf_free(m); + break; + } + nbufs++; + } + + vq_update_avail_idx(vq); + + PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs); + } else if (queue_type == VTNET_TQ) { +#ifdef RTE_MACHINE_CPUFLAG_SSSE3 + if (use_simple_rxtx) { + int mid_idx = vq->vq_nentries >> 1; + for (i = 0; i < mid_idx; i++) { + vq->vq_ring.avail->ring[i] = i + mid_idx; + vq->vq_ring.desc[i + mid_idx].next = i; + vq->vq_ring.desc[i + mid_idx].addr = + vq->virtio_net_hdr_mem + + offsetof(struct virtio_tx_region, tx_hdr); + vq->vq_ring.desc[i + mid_idx].len = + vq->hw->vtnet_hdr_size; + vq->vq_ring.desc[i + mid_idx].flags = + VRING_DESC_F_NEXT; + vq->vq_ring.desc[i].flags = 0; + } + for (i = mid_idx; i < vq->vq_nentries; i++) + vq->vq_ring.avail->ring[i] = i; + } +#endif + } +} + +void +virtio_dev_cq_start(struct rte_eth_dev *dev) +{ + struct virtio_hw *hw = dev->data->dev_private; + + if (hw->cvq) { + virtio_dev_vring_start(hw->cvq, VTNET_CQ); + VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq); + } +} + +void +virtio_dev_rxtx_start(struct rte_eth_dev *dev) +{ + /* + * Start receive and transmit vrings + * - Setup vring structure for all queues + * - Initialize descriptor for the rx vring + * - Allocate blank mbufs for the each rx descriptor + * + */ + int i; + + PMD_INIT_FUNC_TRACE(); + + /* Start rx vring. */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + virtio_dev_vring_start(dev->data->rx_queues[i], VTNET_RQ); + VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]); + } + + /* Start tx vring. */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + virtio_dev_vring_start(dev->data->tx_queues[i], VTNET_TQ); + VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]); + } +} + +int +virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + __rte_unused const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX; + struct virtqueue *vq; + int ret; + + PMD_INIT_FUNC_TRACE(); + ret = virtio_dev_queue_setup(dev, VTNET_RQ, queue_idx, vtpci_queue_idx, + nb_desc, socket_id, &vq); + if (ret < 0) { + PMD_INIT_LOG(ERR, "rvq initialization failed"); + return ret; + } + + /* Create mempool for rx mbuf allocation */ + vq->mpool = mp; + + dev->data->rx_queues[queue_idx] = vq; + +#ifdef RTE_MACHINE_CPUFLAG_SSSE3 + virtio_rxq_vec_setup(vq); +#endif + + return 0; +} + +void +virtio_dev_rx_queue_release(void *rxq) +{ + virtio_dev_queue_release(rxq); +} + +/* + * struct rte_eth_dev *dev: Used to update dev + * uint16_t nb_desc: Defaults to values read from config space + * unsigned int socket_id: Used to allocate memzone + * const struct rte_eth_txconf *tx_conf: Used to setup tx engine + * uint16_t queue_idx: Just used as an index in dev txq list + */ +int +virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX; + +#ifdef RTE_MACHINE_CPUFLAG_SSSE3 + struct virtio_hw *hw = dev->data->dev_private; +#endif + struct virtqueue *vq; + uint16_t tx_free_thresh; + int ret; + + PMD_INIT_FUNC_TRACE(); + + if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMS) + != ETH_TXQ_FLAGS_NOXSUMS) { + PMD_INIT_LOG(ERR, 
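/*
 * Sketch: virtio data queues are numbered pairwise, so ethdev RX queue N
 * maps to virtqueue 2N and TX queue N to virtqueue 2N + 1 (assumption:
 * VTNET_SQ_RQ_QUEUE_IDX == 0 and VTNET_SQ_TQ_QUEUE_IDX == 1, defined in
 * virtqueue.h).
 */
uint16_t rx_vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;	/* 0, 2, 4, ... */
uint16_t tx_vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;	/* 1, 3, 5, ... */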
"TX checksum offload not supported\n"); + return -EINVAL; + } + +#ifdef RTE_MACHINE_CPUFLAG_SSSE3 + /* Use simple rx/tx func if single segment and no offloads */ + if ((tx_conf->txq_flags & VIRTIO_SIMPLE_FLAGS) == VIRTIO_SIMPLE_FLAGS && + !vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) { + PMD_INIT_LOG(INFO, "Using simple rx/tx path"); + dev->tx_pkt_burst = virtio_xmit_pkts_simple; + dev->rx_pkt_burst = virtio_recv_pkts_vec; + use_simple_rxtx = 1; + } +#endif + + ret = virtio_dev_queue_setup(dev, VTNET_TQ, queue_idx, vtpci_queue_idx, + nb_desc, socket_id, &vq); + if (ret < 0) { + PMD_INIT_LOG(ERR, "rvq initialization failed"); + return ret; + } + + tx_free_thresh = tx_conf->tx_free_thresh; + if (tx_free_thresh == 0) + tx_free_thresh = + RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH); + + if (tx_free_thresh >= (vq->vq_nentries - 3)) { + RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the " + "number of TX entries minus 3 (%u)." + " (tx_free_thresh=%u port=%u queue=%u)\n", + vq->vq_nentries - 3, + tx_free_thresh, dev->data->port_id, queue_idx); + return -EINVAL; + } + + vq->vq_free_thresh = tx_free_thresh; + + dev->data->tx_queues[queue_idx] = vq; + return 0; +} + +void +virtio_dev_tx_queue_release(void *txq) +{ + virtio_dev_queue_release(txq); +} + +static void +virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m) +{ + int error; + /* + * Requeue the discarded mbuf. This should always be + * successful since it was just dequeued. + */ + error = virtqueue_enqueue_recv_refill(vq, m); + if (unlikely(error)) { + RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf"); + rte_pktmbuf_free(m); + } +} + +static void +virtio_update_packet_stats(struct virtqueue *vq, struct rte_mbuf *mbuf) +{ + uint32_t s = mbuf->pkt_len; + struct ether_addr *ea; + + if (s == 64) { + vq->size_bins[1]++; + } else if (s > 64 && s < 1024) { + uint32_t bin; + + /* count zeros, and offset into correct bin */ + bin = (sizeof(s) * 8) - __builtin_clz(s) - 5; + vq->size_bins[bin]++; + } else { + if (s < 64) + vq->size_bins[0]++; + else if (s < 1519) + vq->size_bins[6]++; + else if (s >= 1519) + vq->size_bins[7]++; + } + + ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *); + if (is_multicast_ether_addr(ea)) { + if (is_broadcast_ether_addr(ea)) + vq->broadcast++; + else + vq->multicast++; + } +} + +#define VIRTIO_MBUF_BURST_SZ 64 +#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc)) +uint16_t +virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct virtqueue *rxvq = rx_queue; + struct virtio_hw *hw; + struct rte_mbuf *rxm, *new_mbuf; + uint16_t nb_used, num, nb_rx; + uint32_t len[VIRTIO_MBUF_BURST_SZ]; + struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ]; + int error; + uint32_t i, nb_enqueued; + uint32_t hdr_size; + + nb_used = VIRTQUEUE_NUSED(rxvq); + + virtio_rmb(); + + num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts); + num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? 
num : VIRTIO_MBUF_BURST_SZ); + if (likely(num > DESC_PER_CACHELINE)) + num = num - ((rxvq->vq_used_cons_idx + num) % DESC_PER_CACHELINE); + + num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, num); + PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num); + + hw = rxvq->hw; + nb_rx = 0; + nb_enqueued = 0; + hdr_size = hw->vtnet_hdr_size; + + for (i = 0; i < num ; i++) { + rxm = rcv_pkts[i]; + + PMD_RX_LOG(DEBUG, "packet len:%d", len[i]); + + if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) { + PMD_RX_LOG(ERR, "Packet drop"); + nb_enqueued++; + virtio_discard_rxbuf(rxvq, rxm); + rxvq->errors++; + continue; + } + + rxm->port = rxvq->port_id; + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rxm->ol_flags = 0; + rxm->vlan_tci = 0; + + rxm->nb_segs = 1; + rxm->next = NULL; + rxm->pkt_len = (uint32_t)(len[i] - hdr_size); + rxm->data_len = (uint16_t)(len[i] - hdr_size); + + if (hw->vlan_strip) + rte_vlan_strip(rxm); + + VIRTIO_DUMP_PACKET(rxm, rxm->data_len); + + rx_pkts[nb_rx++] = rxm; + + rxvq->bytes += rx_pkts[nb_rx - 1]->pkt_len; + virtio_update_packet_stats(rxvq, rxm); + } + + rxvq->packets += nb_rx; + + /* Allocate new mbuf for the used descriptor */ + error = ENOSPC; + while (likely(!virtqueue_full(rxvq))) { + new_mbuf = rte_rxmbuf_alloc(rxvq->mpool); + if (unlikely(new_mbuf == NULL)) { + struct rte_eth_dev *dev + = &rte_eth_devices[rxvq->port_id]; + dev->data->rx_mbuf_alloc_failed++; + break; + } + error = virtqueue_enqueue_recv_refill(rxvq, new_mbuf); + if (unlikely(error)) { + rte_pktmbuf_free(new_mbuf); + break; + } + nb_enqueued++; + } + + if (likely(nb_enqueued)) { + vq_update_avail_idx(rxvq); + + if (unlikely(virtqueue_kick_prepare(rxvq))) { + virtqueue_notify(rxvq); + PMD_RX_LOG(DEBUG, "Notified\n"); + } + } + + return nb_rx; +} + +uint16_t +virtio_recv_mergeable_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct virtqueue *rxvq = rx_queue; + struct virtio_hw *hw; + struct rte_mbuf *rxm, *new_mbuf; + uint16_t nb_used, num, nb_rx; + uint32_t len[VIRTIO_MBUF_BURST_SZ]; + struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ]; + struct rte_mbuf *prev; + int error; + uint32_t i, nb_enqueued; + uint32_t seg_num; + uint16_t extra_idx; + uint32_t seg_res; + uint32_t hdr_size; + + nb_used = VIRTQUEUE_NUSED(rxvq); + + virtio_rmb(); + + PMD_RX_LOG(DEBUG, "used:%d\n", nb_used); + + hw = rxvq->hw; + nb_rx = 0; + i = 0; + nb_enqueued = 0; + seg_num = 0; + extra_idx = 0; + seg_res = 0; + hdr_size = hw->vtnet_hdr_size; + + while (i < nb_used) { + struct virtio_net_hdr_mrg_rxbuf *header; + + if (nb_rx == nb_pkts) + break; + + num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, 1); + if (num != 1) + continue; + + i++; + + PMD_RX_LOG(DEBUG, "dequeue:%d\n", num); + PMD_RX_LOG(DEBUG, "packet len:%d\n", len[0]); + + rxm = rcv_pkts[0]; + + if (unlikely(len[0] < hdr_size + ETHER_HDR_LEN)) { + PMD_RX_LOG(ERR, "Packet drop\n"); + nb_enqueued++; + virtio_discard_rxbuf(rxvq, rxm); + rxvq->errors++; + continue; + } + + header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)rxm->buf_addr + + RTE_PKTMBUF_HEADROOM - hdr_size); + seg_num = header->num_buffers; + + if (seg_num == 0) + seg_num = 1; + + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rxm->nb_segs = seg_num; + rxm->next = NULL; + rxm->ol_flags = 0; + rxm->vlan_tci = 0; + rxm->pkt_len = (uint32_t)(len[0] - hdr_size); + rxm->data_len = (uint16_t)(len[0] - hdr_size); + + rxm->port = rxvq->port_id; + rx_pkts[nb_rx] = rxm; + prev = rxm; + + seg_res = seg_num - 1; + + while (seg_res != 0) { + /* + * Get extra segments for current uncompleted 
packet. + */ + uint16_t rcv_cnt = + RTE_MIN(seg_res, RTE_DIM(rcv_pkts)); + if (likely(VIRTQUEUE_NUSED(rxvq) >= rcv_cnt)) { + uint32_t rx_num = + virtqueue_dequeue_burst_rx(rxvq, + rcv_pkts, len, rcv_cnt); + i += rx_num; + rcv_cnt = rx_num; + } else { + PMD_RX_LOG(ERR, + "No enough segments for packet.\n"); + nb_enqueued++; + virtio_discard_rxbuf(rxvq, rxm); + rxvq->errors++; + break; + } + + extra_idx = 0; + + while (extra_idx < rcv_cnt) { + rxm = rcv_pkts[extra_idx]; + + rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size; + rxm->next = NULL; + rxm->pkt_len = (uint32_t)(len[extra_idx]); + rxm->data_len = (uint16_t)(len[extra_idx]); + + if (prev) + prev->next = rxm; + + prev = rxm; + rx_pkts[nb_rx]->pkt_len += rxm->pkt_len; + extra_idx++; + }; + seg_res -= rcv_cnt; + } + + if (hw->vlan_strip) + rte_vlan_strip(rx_pkts[nb_rx]); + + VIRTIO_DUMP_PACKET(rx_pkts[nb_rx], + rx_pkts[nb_rx]->data_len); + + rxvq->bytes += rx_pkts[nb_rx]->pkt_len; + virtio_update_packet_stats(rxvq, rx_pkts[nb_rx]); + nb_rx++; + } + + rxvq->packets += nb_rx; + + /* Allocate new mbuf for the used descriptor */ + error = ENOSPC; + while (likely(!virtqueue_full(rxvq))) { + new_mbuf = rte_rxmbuf_alloc(rxvq->mpool); + if (unlikely(new_mbuf == NULL)) { + struct rte_eth_dev *dev + = &rte_eth_devices[rxvq->port_id]; + dev->data->rx_mbuf_alloc_failed++; + break; + } + error = virtqueue_enqueue_recv_refill(rxvq, new_mbuf); + if (unlikely(error)) { + rte_pktmbuf_free(new_mbuf); + break; + } + nb_enqueued++; + } + + if (likely(nb_enqueued)) { + vq_update_avail_idx(rxvq); + + if (unlikely(virtqueue_kick_prepare(rxvq))) { + virtqueue_notify(rxvq); + PMD_RX_LOG(DEBUG, "Notified"); + } + } + + return nb_rx; +} + +uint16_t +virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct virtqueue *txvq = tx_queue; + struct virtio_hw *hw = txvq->hw; + uint16_t hdr_size = hw->vtnet_hdr_size; + uint16_t nb_used, nb_tx; + int error; + + if (unlikely(nb_pkts < 1)) + return nb_pkts; + + PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts); + nb_used = VIRTQUEUE_NUSED(txvq); + + virtio_rmb(); + if (likely(nb_used > txvq->vq_nentries - txvq->vq_free_thresh)) + virtio_xmit_cleanup(txvq, nb_used); + + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + struct rte_mbuf *txm = tx_pkts[nb_tx]; + int can_push = 0, use_indirect = 0, slots, need; + + /* Do VLAN tag insertion */ + if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) { + error = rte_vlan_insert(&txm); + if (unlikely(error)) { + rte_pktmbuf_free(txm); + continue; + } + } + + /* optimize ring usage */ + if (vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) && + rte_mbuf_refcnt_read(txm) == 1 && + txm->nb_segs == 1 && + rte_pktmbuf_headroom(txm) >= hdr_size && + rte_is_aligned(rte_pktmbuf_mtod(txm, char *), + __alignof__(struct virtio_net_hdr_mrg_rxbuf))) + can_push = 1; + else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) && + txm->nb_segs < VIRTIO_MAX_TX_INDIRECT) + use_indirect = 1; + + /* How many main ring entries are needed to this Tx? + * any_layout => number of segments + * indirect => 1 + * default => number of segments + 1 + */ + slots = use_indirect ? 
1 : (txm->nb_segs + !can_push); + need = slots - txvq->vq_free_cnt; + + /* Positive value indicates it need free vring descriptors */ + if (unlikely(need > 0)) { + nb_used = VIRTQUEUE_NUSED(txvq); + virtio_rmb(); + need = RTE_MIN(need, (int)nb_used); + + virtio_xmit_cleanup(txvq, need); + need = slots - txvq->vq_free_cnt; + if (unlikely(need > 0)) { + PMD_TX_LOG(ERR, + "No free tx descriptors to transmit"); + break; + } + } + + /* Enqueue Packet buffers */ + virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect, can_push); + + txvq->bytes += txm->pkt_len; + virtio_update_packet_stats(txvq, txm); + } + + txvq->packets += nb_tx; + + if (likely(nb_tx)) { + vq_update_avail_idx(txvq); + + if (unlikely(virtqueue_kick_prepare(txvq))) { + virtqueue_notify(txvq); + PMD_TX_LOG(DEBUG, "Notified backend after xmit"); + } + } + + return nb_tx; +} diff --git a/drivers/net/virtio/virtio_rxtx.h b/drivers/net/virtio/virtio_rxtx.h new file mode 100644 index 00000000..a76c3e52 --- /dev/null +++ b/drivers/net/virtio/virtio_rxtx.h @@ -0,0 +1,41 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#define RTE_PMD_VIRTIO_RX_MAX_BURST 64 + +#ifdef RTE_MACHINE_CPUFLAG_SSSE3 +int virtio_rxq_vec_setup(struct virtqueue *rxq); + +int virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq, + struct rte_mbuf *m); +#endif diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c new file mode 100644 index 00000000..8f5293dd --- /dev/null +++ b/drivers/net/virtio/virtio_rxtx_simple.c @@ -0,0 +1,418 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <errno.h> + +#include <tmmintrin.h> + +#include <rte_cycles.h> +#include <rte_memory.h> +#include <rte_memzone.h> +#include <rte_branch_prediction.h> +#include <rte_mempool.h> +#include <rte_malloc.h> +#include <rte_mbuf.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_prefetch.h> +#include <rte_string_fns.h> +#include <rte_errno.h> +#include <rte_byteorder.h> + +#include "virtio_logs.h" +#include "virtio_ethdev.h" +#include "virtqueue.h" +#include "virtio_rxtx.h" + +#define RTE_VIRTIO_VPMD_RX_BURST 32 +#define RTE_VIRTIO_DESC_PER_LOOP 8 +#define RTE_VIRTIO_VPMD_RX_REARM_THRESH RTE_VIRTIO_VPMD_RX_BURST + +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +int __attribute__((cold)) +virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq, + struct rte_mbuf *cookie) +{ + struct vq_desc_extra *dxp; + struct vring_desc *start_dp; + uint16_t desc_idx; + + desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1); + dxp = &vq->vq_descx[desc_idx]; + dxp->cookie = (void *)cookie; + vq->sw_ring[desc_idx] = cookie; + + start_dp = vq->vq_ring.desc; + start_dp[desc_idx].addr = (uint64_t)((uintptr_t)cookie->buf_physaddr + + RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size); + start_dp[desc_idx].len = cookie->buf_len - + RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size; + + vq->vq_free_cnt--; + vq->vq_avail_idx++; + + return 0; +} + +static inline void +virtio_rxq_rearm_vec(struct virtqueue *rxvq) +{ + int i; + uint16_t desc_idx; + struct rte_mbuf **sw_ring; + struct vring_desc *start_dp; + int ret; + + desc_idx = rxvq->vq_avail_idx & (rxvq->vq_nentries - 1); + sw_ring = &rxvq->sw_ring[desc_idx]; + start_dp = &rxvq->vq_ring.desc[desc_idx]; + + ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring, + RTE_VIRTIO_VPMD_RX_REARM_THRESH); + if (unlikely(ret)) { + rte_eth_devices[rxvq->port_id].data->rx_mbuf_alloc_failed += + RTE_VIRTIO_VPMD_RX_REARM_THRESH; + return; + } + + for (i = 0; i < RTE_VIRTIO_VPMD_RX_REARM_THRESH; i++) { + uintptr_t p; + + p = (uintptr_t)&sw_ring[i]->rearm_data; + *(uint64_t *)p = rxvq->mbuf_initializer; + + start_dp[i].addr = + (uint64_t)((uintptr_t)sw_ring[i]->buf_physaddr + + RTE_PKTMBUF_HEADROOM - rxvq->hw->vtnet_hdr_size); + start_dp[i].len 
= sw_ring[i]->buf_len - + RTE_PKTMBUF_HEADROOM + rxvq->hw->vtnet_hdr_size; + } + + rxvq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH; + rxvq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH; + vq_update_avail_idx(rxvq); +} + +/* virtio vPMD receive routine, only accept(nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP) + * + * This routine is for non-mergeable RX, one desc for each guest buffer. + * This routine is based on the RX ring layout optimization. Each entry in the + * avail ring points to the desc with the same index in the desc ring and this + * will never be changed in the driver. + * + * - nb_pkts < RTE_VIRTIO_DESC_PER_LOOP, just return no packet + */ +uint16_t +virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct virtqueue *rxvq = rx_queue; + uint16_t nb_used; + uint16_t desc_idx; + struct vring_used_elem *rused; + struct rte_mbuf **sw_ring; + struct rte_mbuf **sw_ring_end; + uint16_t nb_pkts_received; + __m128i shuf_msk1, shuf_msk2, len_adjust; + + shuf_msk1 = _mm_set_epi8( + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, /* vlan tci */ + 5, 4, /* dat len */ + 0xFF, 0xFF, 5, 4, /* pkt len */ + 0xFF, 0xFF, 0xFF, 0xFF /* packet type */ + + ); + + shuf_msk2 = _mm_set_epi8( + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, /* vlan tci */ + 13, 12, /* dat len */ + 0xFF, 0xFF, 13, 12, /* pkt len */ + 0xFF, 0xFF, 0xFF, 0xFF /* packet type */ + ); + + /* Subtract the header length. + * In which case do we need the header length in used->len ? + */ + len_adjust = _mm_set_epi16( + 0, 0, + 0, + (uint16_t)-rxvq->hw->vtnet_hdr_size, + 0, (uint16_t)-rxvq->hw->vtnet_hdr_size, + 0, 0); + + if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP)) + return 0; + + nb_used = *(volatile uint16_t *)&rxvq->vq_ring.used->idx - + rxvq->vq_used_cons_idx; + + rte_compiler_barrier(); + + if (unlikely(nb_used == 0)) + return 0; + + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP); + nb_used = RTE_MIN(nb_used, nb_pkts); + + desc_idx = (uint16_t)(rxvq->vq_used_cons_idx & (rxvq->vq_nentries - 1)); + rused = &rxvq->vq_ring.used->ring[desc_idx]; + sw_ring = &rxvq->sw_ring[desc_idx]; + sw_ring_end = &rxvq->sw_ring[rxvq->vq_nentries]; + + _mm_prefetch((const void *)rused, _MM_HINT_T0); + + if (rxvq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) { + virtio_rxq_rearm_vec(rxvq); + if (unlikely(virtqueue_kick_prepare(rxvq))) + virtqueue_notify(rxvq); + } + + for (nb_pkts_received = 0; + nb_pkts_received < nb_used;) { + __m128i desc[RTE_VIRTIO_DESC_PER_LOOP / 2]; + __m128i mbp[RTE_VIRTIO_DESC_PER_LOOP / 2]; + __m128i pkt_mb[RTE_VIRTIO_DESC_PER_LOOP]; + + mbp[0] = _mm_loadu_si128((__m128i *)(sw_ring + 0)); + desc[0] = _mm_loadu_si128((__m128i *)(rused + 0)); + _mm_storeu_si128((__m128i *)&rx_pkts[0], mbp[0]); + + mbp[1] = _mm_loadu_si128((__m128i *)(sw_ring + 2)); + desc[1] = _mm_loadu_si128((__m128i *)(rused + 2)); + _mm_storeu_si128((__m128i *)&rx_pkts[2], mbp[1]); + + mbp[2] = _mm_loadu_si128((__m128i *)(sw_ring + 4)); + desc[2] = _mm_loadu_si128((__m128i *)(rused + 4)); + _mm_storeu_si128((__m128i *)&rx_pkts[4], mbp[2]); + + mbp[3] = _mm_loadu_si128((__m128i *)(sw_ring + 6)); + desc[3] = _mm_loadu_si128((__m128i *)(rused + 6)); + _mm_storeu_si128((__m128i *)&rx_pkts[6], mbp[3]); + + pkt_mb[1] = _mm_shuffle_epi8(desc[0], shuf_msk2); + pkt_mb[0] = _mm_shuffle_epi8(desc[0], shuf_msk1); + pkt_mb[1] = _mm_add_epi16(pkt_mb[1], len_adjust); + pkt_mb[0] = _mm_add_epi16(pkt_mb[0], len_adjust); + _mm_storeu_si128((void *)&rx_pkts[1]->rx_descriptor_fields1, + pkt_mb[1]); + _mm_storeu_si128((void 
*)&rx_pkts[0]->rx_descriptor_fields1, + pkt_mb[0]); + + pkt_mb[3] = _mm_shuffle_epi8(desc[1], shuf_msk2); + pkt_mb[2] = _mm_shuffle_epi8(desc[1], shuf_msk1); + pkt_mb[3] = _mm_add_epi16(pkt_mb[3], len_adjust); + pkt_mb[2] = _mm_add_epi16(pkt_mb[2], len_adjust); + _mm_storeu_si128((void *)&rx_pkts[3]->rx_descriptor_fields1, + pkt_mb[3]); + _mm_storeu_si128((void *)&rx_pkts[2]->rx_descriptor_fields1, + pkt_mb[2]); + + pkt_mb[5] = _mm_shuffle_epi8(desc[2], shuf_msk2); + pkt_mb[4] = _mm_shuffle_epi8(desc[2], shuf_msk1); + pkt_mb[5] = _mm_add_epi16(pkt_mb[5], len_adjust); + pkt_mb[4] = _mm_add_epi16(pkt_mb[4], len_adjust); + _mm_storeu_si128((void *)&rx_pkts[5]->rx_descriptor_fields1, + pkt_mb[5]); + _mm_storeu_si128((void *)&rx_pkts[4]->rx_descriptor_fields1, + pkt_mb[4]); + + pkt_mb[7] = _mm_shuffle_epi8(desc[3], shuf_msk2); + pkt_mb[6] = _mm_shuffle_epi8(desc[3], shuf_msk1); + pkt_mb[7] = _mm_add_epi16(pkt_mb[7], len_adjust); + pkt_mb[6] = _mm_add_epi16(pkt_mb[6], len_adjust); + _mm_storeu_si128((void *)&rx_pkts[7]->rx_descriptor_fields1, + pkt_mb[7]); + _mm_storeu_si128((void *)&rx_pkts[6]->rx_descriptor_fields1, + pkt_mb[6]); + + if (unlikely(nb_used <= RTE_VIRTIO_DESC_PER_LOOP)) { + if (sw_ring + nb_used <= sw_ring_end) + nb_pkts_received += nb_used; + else + nb_pkts_received += sw_ring_end - sw_ring; + break; + } else { + if (unlikely(sw_ring + RTE_VIRTIO_DESC_PER_LOOP >= + sw_ring_end)) { + nb_pkts_received += sw_ring_end - sw_ring; + break; + } else { + nb_pkts_received += RTE_VIRTIO_DESC_PER_LOOP; + + rx_pkts += RTE_VIRTIO_DESC_PER_LOOP; + sw_ring += RTE_VIRTIO_DESC_PER_LOOP; + rused += RTE_VIRTIO_DESC_PER_LOOP; + nb_used -= RTE_VIRTIO_DESC_PER_LOOP; + } + } + } + + rxvq->vq_used_cons_idx += nb_pkts_received; + rxvq->vq_free_cnt += nb_pkts_received; + rxvq->packets += nb_pkts_received; + return nb_pkts_received; +} + +#define VIRTIO_TX_FREE_THRESH 32 +#define VIRTIO_TX_MAX_FREE_BUF_SZ 32 +#define VIRTIO_TX_FREE_NR 32 +/* TODO: vq->tx_free_cnt could mean num of free slots so we could avoid shift */ +static inline void +virtio_xmit_cleanup(struct virtqueue *vq) +{ + uint16_t i, desc_idx; + int nb_free = 0; + struct rte_mbuf *m, *free[VIRTIO_TX_MAX_FREE_BUF_SZ]; + + desc_idx = (uint16_t)(vq->vq_used_cons_idx & + ((vq->vq_nentries >> 1) - 1)); + m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie; + m = __rte_pktmbuf_prefree_seg(m); + if (likely(m != NULL)) { + free[0] = m; + nb_free = 1; + for (i = 1; i < VIRTIO_TX_FREE_NR; i++) { + m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie; + m = __rte_pktmbuf_prefree_seg(m); + if (likely(m != NULL)) { + if (likely(m->pool == free[0]->pool)) + free[nb_free++] = m; + else { + rte_mempool_put_bulk(free[0]->pool, + (void **)free, nb_free); + free[0] = m; + nb_free = 1; + } + } + } + rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free); + } else { + for (i = 1; i < VIRTIO_TX_FREE_NR; i++) { + m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie; + m = __rte_pktmbuf_prefree_seg(m); + if (m != NULL) + rte_mempool_put(m->pool, m); + } + } + + vq->vq_used_cons_idx += VIRTIO_TX_FREE_NR; + vq->vq_free_cnt += (VIRTIO_TX_FREE_NR << 1); +} + +uint16_t +virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct virtqueue *txvq = tx_queue; + uint16_t nb_used; + uint16_t desc_idx; + struct vring_desc *start_dp; + uint16_t nb_tail, nb_commit; + int i; + uint16_t desc_idx_max = (txvq->vq_nentries >> 1) - 1; + + nb_used = VIRTQUEUE_NUSED(txvq); + rte_compiler_barrier(); + + if (nb_used >= 
VIRTIO_TX_FREE_THRESH) + virtio_xmit_cleanup(tx_queue); + + nb_commit = nb_pkts = RTE_MIN((txvq->vq_free_cnt >> 1), nb_pkts); + desc_idx = (uint16_t) (txvq->vq_avail_idx & desc_idx_max); + start_dp = txvq->vq_ring.desc; + nb_tail = (uint16_t) (desc_idx_max + 1 - desc_idx); + + if (nb_commit >= nb_tail) { + for (i = 0; i < nb_tail; i++) + txvq->vq_descx[desc_idx + i].cookie = tx_pkts[i]; + for (i = 0; i < nb_tail; i++) { + start_dp[desc_idx].addr = + rte_mbuf_data_dma_addr(*tx_pkts); + start_dp[desc_idx].len = (*tx_pkts)->pkt_len; + tx_pkts++; + desc_idx++; + } + nb_commit -= nb_tail; + desc_idx = 0; + } + for (i = 0; i < nb_commit; i++) + txvq->vq_descx[desc_idx + i].cookie = tx_pkts[i]; + for (i = 0; i < nb_commit; i++) { + start_dp[desc_idx].addr = rte_mbuf_data_dma_addr(*tx_pkts); + start_dp[desc_idx].len = (*tx_pkts)->pkt_len; + tx_pkts++; + desc_idx++; + } + + rte_compiler_barrier(); + + txvq->vq_free_cnt -= (uint16_t)(nb_pkts << 1); + txvq->vq_avail_idx += nb_pkts; + txvq->vq_ring.avail->idx = txvq->vq_avail_idx; + txvq->packets += nb_pkts; + + if (likely(nb_pkts)) { + if (unlikely(virtqueue_kick_prepare(txvq))) + virtqueue_notify(txvq); + } + + return nb_pkts; +} + +int __attribute__((cold)) +virtio_rxq_vec_setup(struct virtqueue *rxq) +{ + uintptr_t p; + struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */ + + mb_def.nb_segs = 1; + mb_def.data_off = RTE_PKTMBUF_HEADROOM; + mb_def.port = rxq->port_id; + rte_mbuf_refcnt_set(&mb_def, 1); + + /* prevent compiler reordering: rearm_data covers previous fields */ + rte_compiler_barrier(); + p = (uintptr_t)&mb_def.rearm_data; + rxq->mbuf_initializer = *(uint64_t *)p; + + return 0; +} diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c new file mode 100644 index 00000000..7f60e3ef --- /dev/null +++ b/drivers/net/virtio/virtqueue.c @@ -0,0 +1,72 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
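For illustration only (this sketch is not part of the patch): virtio_rxq_vec_setup() above caches mbuf_initializer so that the vector RX path can reset a freshly allocated mbuf with one 64-bit store instead of four separate field writes (see virtio_rxq_rearm_vec() earlier in this file). A minimal sketch of that pattern, assuming an rte_mbuf whose rearm_data marker covers the per-packet init fields; port_id and m are hypothetical placeholders:

    struct rte_mbuf mb_def = { .buf_addr = 0 };   /* zeroed template mbuf */
    uint64_t init;

    mb_def.nb_segs = 1;
    mb_def.data_off = RTE_PKTMBUF_HEADROOM;
    mb_def.port = port_id;                        /* port_id: hypothetical */
    rte_mbuf_refcnt_set(&mb_def, 1);
    rte_compiler_barrier();                       /* fields above sit behind rearm_data */
    init = *(uint64_t *)(uintptr_t)&mb_def.rearm_data;

    /* per received descriptor, one store re-initializes the whole group
     * (m: hypothetical freshly dequeued mbuf): */
    *(uint64_t *)(uintptr_t)&m->rearm_data = init;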
+ */ +#include <stdint.h> + +#include <rte_mbuf.h> + +#include "virtqueue.h" +#include "virtio_logs.h" +#include "virtio_pci.h" + +void +virtqueue_disable_intr(struct virtqueue *vq) +{ + /* + * Set VRING_AVAIL_F_NO_INTERRUPT to hint host + * not to interrupt when it consumes packets + * Note: this is only considered a hint to the host + */ + vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; +} + +/* + * Two types of mbuf to be cleaned: + * 1) mbuf that has been consumed by backend but not used by virtio. + * 2) mbuf that hasn't been consued by backend. + */ +struct rte_mbuf * +virtqueue_detatch_unused(struct virtqueue *vq) +{ + struct rte_mbuf *cookie; + int idx; + + if (vq != NULL) + for (idx = 0; idx < vq->vq_nentries; idx++) { + cookie = vq->vq_descx[idx].cookie; + if (cookie != NULL) { + vq->vq_descx[idx].cookie = NULL; + return cookie; + } + } + return NULL; +} diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h new file mode 100644 index 00000000..4e9239e0 --- /dev/null +++ b/drivers/net/virtio/virtqueue.h @@ -0,0 +1,344 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _VIRTQUEUE_H_ +#define _VIRTQUEUE_H_ + +#include <stdint.h> + +#include <rte_atomic.h> +#include <rte_memory.h> +#include <rte_memzone.h> +#include <rte_mempool.h> + +#include "virtio_pci.h" +#include "virtio_ring.h" +#include "virtio_logs.h" + +struct rte_mbuf; + +/* + * Per virtio_config.h in Linux. + * For virtio_pci on SMP, we don't need to order with respect to MMIO + * accesses through relaxed memory I/O windows, so smp_mb() et al are + * sufficient. 
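For illustration only (not part of the patch): virtqueue_detatch_unused() above returns one leftover mbuf per call and clears its cookie slot, so a teardown path would typically drain the ring by looping until it returns NULL. A sketch, with vq as a hypothetical queue being released:

    struct rte_mbuf *buf;

    while ((buf = virtqueue_detatch_unused(vq)) != NULL)
        rte_pktmbuf_free(buf);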
+ * + */ +#define virtio_mb() rte_smp_mb() +#define virtio_rmb() rte_smp_rmb() +#define virtio_wmb() rte_smp_wmb() + +#ifdef RTE_PMD_PACKET_PREFETCH +#define rte_packet_prefetch(p) rte_prefetch1(p) +#else +#define rte_packet_prefetch(p) do {} while(0) +#endif + +#define VIRTQUEUE_MAX_NAME_SZ 32 + +#define VTNET_SQ_RQ_QUEUE_IDX 0 +#define VTNET_SQ_TQ_QUEUE_IDX 1 +#define VTNET_SQ_CQ_QUEUE_IDX 2 + +enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 }; +/** + * The maximum virtqueue size is 2^15. Use that value as the end of + * descriptor chain terminator since it will never be a valid index + * in the descriptor table. This is used to verify we are correctly + * handling vq_free_cnt. + */ +#define VQ_RING_DESC_CHAIN_END 32768 + +/** + * Control the RX mode, ie. promiscuous, allmulti, etc... + * All commands require an "out" sg entry containing a 1 byte + * state value, zero = disable, non-zero = enable. Commands + * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature. + * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA. + */ +#define VIRTIO_NET_CTRL_RX 0 +#define VIRTIO_NET_CTRL_RX_PROMISC 0 +#define VIRTIO_NET_CTRL_RX_ALLMULTI 1 +#define VIRTIO_NET_CTRL_RX_ALLUNI 2 +#define VIRTIO_NET_CTRL_RX_NOMULTI 3 +#define VIRTIO_NET_CTRL_RX_NOUNI 4 +#define VIRTIO_NET_CTRL_RX_NOBCAST 5 + +/** + * Control the MAC + * + * The MAC filter table is managed by the hypervisor, the guest should + * assume the size is infinite. Filtering should be considered + * non-perfect, ie. based on hypervisor resources, the guest may + * received packets from sources not specified in the filter list. + * + * In addition to the class/cmd header, the TABLE_SET command requires + * two out scatterlists. Each contains a 4 byte count of entries followed + * by a concatenated byte stream of the ETH_ALEN MAC addresses. The + * first sg list contains unicast addresses, the second is for multicast. + * This functionality is present if the VIRTIO_NET_F_CTRL_RX feature + * is available. + * + * The ADDR_SET command requests one out scatterlist, it contains a + * 6 bytes MAC address. This functionality is present if the + * VIRTIO_NET_F_CTRL_MAC_ADDR feature is available. + */ +struct virtio_net_ctrl_mac { + uint32_t entries; + uint8_t macs[][ETHER_ADDR_LEN]; +} __attribute__((__packed__)); + +#define VIRTIO_NET_CTRL_MAC 1 + #define VIRTIO_NET_CTRL_MAC_TABLE_SET 0 + #define VIRTIO_NET_CTRL_MAC_ADDR_SET 1 + +/** + * Control VLAN filtering + * + * The VLAN filter table is controlled via a simple ADD/DEL interface. + * VLAN IDs not added may be filtered by the hypervisor. Del is the + * opposite of add. Both commands expect an out entry containing a 2 + * byte VLAN ID. VLAN filtering is available with the + * VIRTIO_NET_F_CTRL_VLAN feature bit. + */ +#define VIRTIO_NET_CTRL_VLAN 2 +#define VIRTIO_NET_CTRL_VLAN_ADD 0 +#define VIRTIO_NET_CTRL_VLAN_DEL 1 + +struct virtio_net_ctrl_hdr { + uint8_t class; + uint8_t cmd; +} __attribute__((packed)); + +typedef uint8_t virtio_net_ctrl_ack; + +#define VIRTIO_NET_OK 0 +#define VIRTIO_NET_ERR 1 + +#define VIRTIO_MAX_CTRL_DATA 2048 + +struct virtio_pmd_ctrl { + struct virtio_net_ctrl_hdr hdr; + virtio_net_ctrl_ack status; + uint8_t data[VIRTIO_MAX_CTRL_DATA]; +}; + +struct virtqueue { + struct virtio_hw *hw; /**< virtio_hw structure pointer. */ + const struct rte_memzone *mz; /**< mem zone to populate RX ring. */ + const struct rte_memzone *virtio_net_hdr_mz; /**< memzone to populate hdr. 
*/ + struct rte_mempool *mpool; /**< mempool for mbuf allocation */ + uint16_t queue_id; /**< DPDK queue index. */ + uint8_t port_id; /**< Device port identifier. */ + uint16_t vq_queue_index; /**< PCI queue index */ + + void *vq_ring_virt_mem; /**< linear address of vring*/ + unsigned int vq_ring_size; + phys_addr_t vq_ring_mem; /**< physical address of vring */ + + struct vring vq_ring; /**< vring keeping desc, used and avail */ + uint16_t vq_free_cnt; /**< num of desc available */ + uint16_t vq_nentries; /**< vring desc numbers */ + uint16_t vq_free_thresh; /**< free threshold */ + /** + * Head of the free chain in the descriptor table. If + * there are no free descriptors, this will be set to + * VQ_RING_DESC_CHAIN_END. + */ + uint16_t vq_desc_head_idx; + uint16_t vq_desc_tail_idx; + /** + * Last consumed descriptor in the used table, + * trails vq_ring.used->idx. + */ + uint16_t vq_used_cons_idx; + uint16_t vq_avail_idx; + uint64_t mbuf_initializer; /**< value to init mbufs. */ + phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */ + + struct rte_mbuf **sw_ring; /**< RX software ring. */ + /* dummy mbuf, for wraparound when processing RX ring. */ + struct rte_mbuf fake_mbuf; + + /* Statistics */ + uint64_t packets; + uint64_t bytes; + uint64_t errors; + uint64_t multicast; + uint64_t broadcast; + /* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */ + uint64_t size_bins[8]; + + uint16_t *notify_addr; + + struct vq_desc_extra { + void *cookie; + uint16_t ndescs; + } vq_descx[0]; +}; + +/* If multiqueue is provided by host, then we suppport it. */ +#define VIRTIO_NET_CTRL_MQ 4 +#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0 +#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1 +#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000 + +#define VIRTIO_NET_CTRL_MAC_ADDR_SET 1 + +/** + * This is the first element of the scatter-gather list. If you don't + * specify GSO or CSUM features, you can simply ignore the header. + */ +struct virtio_net_hdr { +#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /**< Use csum_start,csum_offset*/ + uint8_t flags; +#define VIRTIO_NET_HDR_GSO_NONE 0 /**< Not a GSO frame */ +#define VIRTIO_NET_HDR_GSO_TCPV4 1 /**< GSO frame, IPv4 TCP (TSO) */ +#define VIRTIO_NET_HDR_GSO_UDP 3 /**< GSO frame, IPv4 UDP (UFO) */ +#define VIRTIO_NET_HDR_GSO_TCPV6 4 /**< GSO frame, IPv6 TCP */ +#define VIRTIO_NET_HDR_GSO_ECN 0x80 /**< TCP has ECN set */ + uint8_t gso_type; + uint16_t hdr_len; /**< Ethernet + IP + tcp/udp hdrs */ + uint16_t gso_size; /**< Bytes to append to hdr_len per frame */ + uint16_t csum_start; /**< Position to start checksumming from */ + uint16_t csum_offset; /**< Offset after that to place checksum */ +}; + +/** + * This is the version of the header to use when the MRG_RXBUF + * feature has been negotiated. + */ +struct virtio_net_hdr_mrg_rxbuf { + struct virtio_net_hdr hdr; + uint16_t num_buffers; /**< Number of merged rx buffers */ +}; + +/* Region reserved to allow for transmit header and indirect ring */ +#define VIRTIO_MAX_TX_INDIRECT 8 +struct virtio_tx_region { + struct virtio_net_hdr_mrg_rxbuf tx_hdr; + struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT] + __attribute__((__aligned__(16))); +}; + +/* Chain all the descriptors in the ring with an END */ +static inline void +vring_desc_init(struct vring_desc *dp, uint16_t n) +{ + uint16_t i; + + for (i = 0; i < n - 1; i++) + dp[i].next = (uint16_t)(i + 1); + dp[i].next = VQ_RING_DESC_CHAIN_END; +} + +/** + * Tell the backend not to interrupt us. 
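For illustration only (not part of the patch): after vring_desc_init() above, each descriptor's next field points at the following slot and the last one holds VQ_RING_DESC_CHAIN_END, so the driver-side free chain can be walked without knowing its length in advance; starting from vq_desc_head_idx such a walk should visit exactly vq_free_cnt entries. A hypothetical helper sketching that walk:

    static inline uint16_t
    desc_chain_len(const struct vring_desc *desc, uint16_t head)
    {
        uint16_t n = 0;

        while (head != VQ_RING_DESC_CHAIN_END) {
            n++;
            head = desc[head].next;
        }
        return n;
    }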
+ */ +void virtqueue_disable_intr(struct virtqueue *vq); +/** + * Dump virtqueue internal structures, for debug purpose only. + */ +void virtqueue_dump(struct virtqueue *vq); +/** + * Get all mbufs to be freed. + */ +struct rte_mbuf *virtqueue_detatch_unused(struct virtqueue *vq); + +static inline int +virtqueue_full(const struct virtqueue *vq) +{ + return vq->vq_free_cnt == 0; +} + +#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx)) + +static inline void +vq_update_avail_idx(struct virtqueue *vq) +{ + virtio_wmb(); + vq->vq_ring.avail->idx = vq->vq_avail_idx; +} + +static inline void +vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx) +{ + uint16_t avail_idx; + /* + * Place the head of the descriptor chain into the next slot and make + * it usable to the host. The chain is made available now rather than + * deferring to virtqueue_notify() in the hopes that if the host is + * currently running on another CPU, we can keep it processing the new + * descriptor. + */ + avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1)); + vq->vq_ring.avail->ring[avail_idx] = desc_idx; + vq->vq_avail_idx++; +} + +static inline int +virtqueue_kick_prepare(struct virtqueue *vq) +{ + return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY); +} + +static inline void +virtqueue_notify(struct virtqueue *vq) +{ + /* + * Ensure updated avail->idx is visible to host. + * For virtio on IA, the notificaiton is through io port operation + * which is a serialization instruction itself. + */ + vq->hw->vtpci_ops->notify_queue(vq->hw, vq); +} + +#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP +#define VIRTQUEUE_DUMP(vq) do { \ + uint16_t used_idx, nused; \ + used_idx = (vq)->vq_ring.used->idx; \ + nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \ + PMD_INIT_LOG(DEBUG, \ + "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \ + " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \ + " avail.flags=0x%x; used.flags=0x%x", \ + (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \ + (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \ + (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \ + (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \ +} while (0) +#else +#define VIRTQUEUE_DUMP(vq) do { } while (0) +#endif + +#endif /* _VIRTQUEUE_H_ */ diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile new file mode 100644 index 00000000..4cf3b33b --- /dev/null +++ b/drivers/net/vmxnet3/Makefile @@ -0,0 +1,84 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2015 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
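A worked example of the free-running index arithmetic behind VIRTQUEUE_NUSED(), for illustration only: if used->idx has wrapped around to 3 while vq_used_cons_idx is still 65533, then (uint16_t)(3 - 65533) = 6, i.e. six used entries are pending; the unsigned 16-bit subtraction stays correct across wraparound, which is why both indices are kept free-running rather than masked to the ring size. In the same spirit, vq_update_avail_idx() issues virtio_wmb() before storing avail->idx, so the host can never observe the new index before the descriptor contents it publishes.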
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_vmxnet3_uio.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +ifeq ($(CC), icc) +# +# CFLAGS for icc +# +CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259 + +else ifeq ($(CC), clang) +# +# CFLAGS for clang +# +CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value +CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args + +else +# +# CFLAGS for gcc +# +ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1) +CFLAGS += -Wno-deprecated +endif +CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value +CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args + +endif + +VPATH += $(SRCDIR)/base + +EXPORT_MAP := rte_pmd_vmxnet3_version.map + +LIBABIVER := 1 + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3_ethdev.c + +# this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += lib/librte_eal lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += lib/librte_mempool lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += lib/librte_net + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/vmxnet3/base/README b/drivers/net/vmxnet3/base/README new file mode 100644 index 00000000..599a3661 --- /dev/null +++ b/drivers/net/vmxnet3/base/README @@ -0,0 +1,47 @@ +.. + BSD LICENSE + + Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +Intel VMXNET3 driver +==================== + +This directory contains the source code of the FreeBSD VMXNET3 driver released by VMware. +Of these, upt1_defs.h and vmxnet3_defs.h are introduced without any change. +The other four files, includeCheck.h, vmware_pack_begin.h, vmware_pack_end.h and vmxnet3_osdep.h, +are created to satisfy the needs of the two files above. + +Updating the driver +=================== + +NOTE: The source code in this directory should not be modified apart from +the following file(s): + + vmxnet3_osdep.h diff --git a/drivers/net/vmxnet3/base/upt1_defs.h b/drivers/net/vmxnet3/base/upt1_defs.h new file mode 100644 index 00000000..d9144e32 --- /dev/null +++ b/drivers/net/vmxnet3/base/upt1_defs.h @@ -0,0 +1,117 @@ +/********************************************************* + * Copyright (C) 2007 VMware, Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + *********************************************************/ + +/* upt1_defs.h + * + * Definitions for UPTv1 + * + * Some of the defs are duplicated in vmkapi_net_upt.h, because + * vmkapi_net_upt.h cannot distribute with OSS yet and vmkapi headers can + * only include vmkapi headers. Make sure they are kept in sync! 
+ */ + +#ifndef _UPT1_DEFS_H +#define _UPT1_DEFS_H + +#define UPT1_MAX_TX_QUEUES 64 +#define UPT1_MAX_RX_QUEUES 64 + +#define UPT1_MAX_INTRS (UPT1_MAX_TX_QUEUES + UPT1_MAX_RX_QUEUES) + +typedef +#include "vmware_pack_begin.h" +struct UPT1_TxStats { + uint64 TSOPktsTxOK; /* TSO pkts post-segmentation */ + uint64 TSOBytesTxOK; + uint64 ucastPktsTxOK; + uint64 ucastBytesTxOK; + uint64 mcastPktsTxOK; + uint64 mcastBytesTxOK; + uint64 bcastPktsTxOK; + uint64 bcastBytesTxOK; + uint64 pktsTxError; + uint64 pktsTxDiscard; +} +#include "vmware_pack_end.h" +UPT1_TxStats; + +typedef +#include "vmware_pack_begin.h" +struct UPT1_RxStats { + uint64 LROPktsRxOK; /* LRO pkts */ + uint64 LROBytesRxOK; /* bytes from LRO pkts */ + /* the following counters are for pkts from the wire, i.e., pre-LRO */ + uint64 ucastPktsRxOK; + uint64 ucastBytesRxOK; + uint64 mcastPktsRxOK; + uint64 mcastBytesRxOK; + uint64 bcastPktsRxOK; + uint64 bcastBytesRxOK; + uint64 pktsRxOutOfBuf; + uint64 pktsRxError; +} +#include "vmware_pack_end.h" +UPT1_RxStats; + +/* interrupt moderation level */ +#define UPT1_IML_NONE 0 /* no interrupt moderation */ +#define UPT1_IML_HIGHEST 7 /* least intr generated */ +#define UPT1_IML_ADAPTIVE 8 /* adpative intr moderation */ + +/* values for UPT1_RSSConf.hashFunc */ +#define UPT1_RSS_HASH_TYPE_NONE 0x0 +#define UPT1_RSS_HASH_TYPE_IPV4 0x01 +#define UPT1_RSS_HASH_TYPE_TCP_IPV4 0x02 +#define UPT1_RSS_HASH_TYPE_IPV6 0x04 +#define UPT1_RSS_HASH_TYPE_TCP_IPV6 0x08 + +#define UPT1_RSS_HASH_FUNC_NONE 0x0 +#define UPT1_RSS_HASH_FUNC_TOEPLITZ 0x01 + +#define UPT1_RSS_MAX_KEY_SIZE 40 +#define UPT1_RSS_MAX_IND_TABLE_SIZE 128 + +typedef +#include "vmware_pack_begin.h" +struct UPT1_RSSConf { + uint16 hashType; + uint16 hashFunc; + uint16 hashKeySize; + uint16 indTableSize; + uint8 hashKey[UPT1_RSS_MAX_KEY_SIZE]; + uint8 indTable[UPT1_RSS_MAX_IND_TABLE_SIZE]; +} +#include "vmware_pack_end.h" +UPT1_RSSConf; + +/* features */ +#define UPT1_F_RXCSUM 0x0001 /* rx csum verification */ +#define UPT1_F_RSS 0x0002 +#define UPT1_F_RXVLAN 0x0004 /* VLAN tag stripping */ +#define UPT1_F_LRO 0x0008 + +#endif diff --git a/drivers/net/vmxnet3/base/vmware_pack_begin.h b/drivers/net/vmxnet3/base/vmware_pack_begin.h new file mode 100644 index 00000000..860ec4c3 --- /dev/null +++ b/drivers/net/vmxnet3/base/vmware_pack_begin.h @@ -0,0 +1,32 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/drivers/net/vmxnet3/base/vmware_pack_end.h b/drivers/net/vmxnet3/base/vmware_pack_end.h new file mode 100644 index 00000000..860ec4c3 --- /dev/null +++ b/drivers/net/vmxnet3/base/vmware_pack_end.h @@ -0,0 +1,32 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/drivers/net/vmxnet3/base/vmxnet3_defs.h b/drivers/net/vmxnet3/base/vmxnet3_defs.h new file mode 100644 index 00000000..68ae8b6d --- /dev/null +++ b/drivers/net/vmxnet3/base/vmxnet3_defs.h @@ -0,0 +1,744 @@ +/********************************************************* + * Copyright (C) 2007 VMware, Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + *********************************************************/ + +/* + * vmxnet3_defs.h -- + * + * Definitions shared by device emulation and guest drivers for + * VMXNET3 NIC + */ + +#ifndef _VMXNET3_DEFS_H_ +#define _VMXNET3_DEFS_H_ + +#include "vmxnet3_osdep.h" +#include "upt1_defs.h" + +/* all registers are 32 bit wide */ +/* BAR 1 */ +#define VMXNET3_REG_VRRS 0x0 /* Vmxnet3 Revision Report Selection */ +#define VMXNET3_REG_UVRS 0x8 /* UPT Version Report Selection */ +#define VMXNET3_REG_DSAL 0x10 /* Driver Shared Address Low */ +#define VMXNET3_REG_DSAH 0x18 /* Driver Shared Address High */ +#define VMXNET3_REG_CMD 0x20 /* Command */ +#define VMXNET3_REG_MACL 0x28 /* MAC Address Low */ +#define VMXNET3_REG_MACH 0x30 /* MAC Address High */ +#define VMXNET3_REG_ICR 0x38 /* Interrupt Cause Register */ +#define VMXNET3_REG_ECR 0x40 /* Event Cause Register */ + +#define VMXNET3_REG_WSAL 0xF00 /* Wireless Shared Address Lo */ +#define VMXNET3_REG_WSAH 0xF08 /* Wireless Shared Address Hi */ +#define VMXNET3_REG_WCMD 0xF18 /* Wireless Command */ + +/* BAR 0 */ +#define VMXNET3_REG_IMR 0x0 /* Interrupt Mask Register */ +#define VMXNET3_REG_TXPROD 0x600 /* Tx Producer Index */ +#define VMXNET3_REG_RXPROD 0x800 /* Rx Producer Index for ring 1 */ +#define VMXNET3_REG_RXPROD2 0xA00 /* Rx Producer Index for ring 2 */ + +#define VMXNET3_PT_REG_SIZE 4096 /* BAR 0 */ +#define VMXNET3_VD_REG_SIZE 4096 /* BAR 1 */ + +/* + * The two Vmxnet3 MMIO Register PCI BARs (BAR 0 at offset 10h and BAR 1 at + * offset 14h) as well as the MSI-X BAR are combined into one PhysMem region: + * <-VMXNET3_PT_REG_SIZE-><-VMXNET3_VD_REG_SIZE-><-VMXNET3_MSIX_BAR_SIZE--> + * ------------------------------------------------------------------------- + * |Pass Thru Registers | Virtual Dev Registers | MSI-X Vector/PBA Table | + * ------------------------------------------------------------------------- + * VMXNET3_MSIX_BAR_SIZE is defined in "vmxnet3Int.h" + */ +#define VMXNET3_PHYSMEM_PAGES 4 + +#define VMXNET3_REG_ALIGN 8 /* All registers are 8-byte aligned. 
*/ +#define VMXNET3_REG_ALIGN_MASK 0x7 + +/* I/O Mapped access to registers */ +#define VMXNET3_IO_TYPE_PT 0 +#define VMXNET3_IO_TYPE_VD 1 +#define VMXNET3_IO_ADDR(type, reg) (((type) << 24) | ((reg) & 0xFFFFFF)) +#define VMXNET3_IO_TYPE(addr) ((addr) >> 24) +#define VMXNET3_IO_REG(addr) ((addr) & 0xFFFFFF) + +#ifndef __le16 +#define __le16 uint16 +#endif +#ifndef __le32 +#define __le32 uint32 +#endif +#ifndef __le64 +#define __le64 uint64 +#endif + +typedef enum { + VMXNET3_CMD_FIRST_SET = 0xCAFE0000, + VMXNET3_CMD_ACTIVATE_DEV = VMXNET3_CMD_FIRST_SET, + VMXNET3_CMD_QUIESCE_DEV, + VMXNET3_CMD_RESET_DEV, + VMXNET3_CMD_UPDATE_RX_MODE, + VMXNET3_CMD_UPDATE_MAC_FILTERS, + VMXNET3_CMD_UPDATE_VLAN_FILTERS, + VMXNET3_CMD_UPDATE_RSSIDT, + VMXNET3_CMD_UPDATE_IML, + VMXNET3_CMD_UPDATE_PMCFG, + VMXNET3_CMD_UPDATE_FEATURE, + VMXNET3_CMD_STOP_EMULATION, + VMXNET3_CMD_LOAD_PLUGIN, + VMXNET3_CMD_ACTIVATE_VF, + + VMXNET3_CMD_FIRST_GET = 0xF00D0000, + VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET, + VMXNET3_CMD_GET_STATS, + VMXNET3_CMD_GET_LINK, + VMXNET3_CMD_GET_PERM_MAC_LO, + VMXNET3_CMD_GET_PERM_MAC_HI, + VMXNET3_CMD_GET_DID_LO, + VMXNET3_CMD_GET_DID_HI, + VMXNET3_CMD_GET_DEV_EXTRA_INFO, + VMXNET3_CMD_GET_CONF_INTR, + VMXNET3_CMD_GET_ADAPTIVE_RING_INFO +} Vmxnet3_Cmd; + +/* Adaptive Ring Info Flags */ +#define VMXNET3_DISABLE_ADAPTIVE_RING 1 + +/* + * Little Endian layout of bitfields - + * Byte 0 : 7.....len.....0 + * Byte 1 : rsvd gen 13.len.8 + * Byte 2 : 5.msscof.0 ext1 dtype + * Byte 3 : 13...msscof...6 + * + * Big Endian layout of bitfields - + * Byte 0: 13...msscof...6 + * Byte 1 : 5.msscof.0 ext1 dtype + * Byte 2 : rsvd gen 13.len.8 + * Byte 3 : 7.....len.....0 + * + * Thus, le32_to_cpu on the dword will allow the big endian driver to read + * the bit fields correctly. And cpu_to_le32 will convert bitfields + * bit fields written by big endian driver to format required by device. 
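For illustration only (not part of the patch): the comment above notes that a big-endian guest must apply le32_to_cpu to the whole dword rather than rely on the C bitfield layout. A sketch of such an access, reading len and gen of a Tx descriptor through dword[2] of the Vmxnet3_GenericDesc union defined further down in this header; rte_le_to_cpu_32() from DPDK's rte_byteorder.h stands in for le32_to_cpu here:

    static inline void
    txd_read_len_gen(const Vmxnet3_GenericDesc *gd, uint32 *len, uint32 *gen)
    {
        /* dword[2] is the 32-bit word following 'addr': bits 0-13 carry len,
         * bit 14 the generation bit (VMXNET3_TXD_GEN_SHIFT), little endian. */
        uint32 w = rte_le_to_cpu_32(gd->dword[2]);

        *len = w & 0x3FFF;
        *gen = (w >> 14) & 0x1;
    }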
+ */ + +typedef +#include "vmware_pack_begin.h" +struct Vmxnet3_TxDesc { + __le64 addr; + +#ifdef __BIG_ENDIAN_BITFIELD + uint32 msscof:14; /* MSS, checksum offset, flags */ + uint32 ext1:1; + uint32 dtype:1; /* descriptor type */ + uint32 rsvd:1; + uint32 gen:1; /* generation bit */ + uint32 len:14; +#else + uint32 len:14; + uint32 gen:1; /* generation bit */ + uint32 rsvd:1; + uint32 dtype:1; /* descriptor type */ + uint32 ext1:1; + uint32 msscof:14; /* MSS, checksum offset, flags */ +#endif /* __BIG_ENDIAN_BITFIELD */ + +#ifdef __BIG_ENDIAN_BITFIELD + uint32 tci:16; /* Tag to Insert */ + uint32 ti:1; /* VLAN Tag Insertion */ + uint32 ext2:1; + uint32 cq:1; /* completion request */ + uint32 eop:1; /* End Of Packet */ + uint32 om:2; /* offload mode */ + uint32 hlen:10; /* header len */ +#else + uint32 hlen:10; /* header len */ + uint32 om:2; /* offload mode */ + uint32 eop:1; /* End Of Packet */ + uint32 cq:1; /* completion request */ + uint32 ext2:1; + uint32 ti:1; /* VLAN Tag Insertion */ + uint32 tci:16; /* Tag to Insert */ +#endif /* __BIG_ENDIAN_BITFIELD */ +} +#include "vmware_pack_end.h" +Vmxnet3_TxDesc; + +/* TxDesc.OM values */ +#define VMXNET3_OM_NONE 0 +#define VMXNET3_OM_CSUM 2 +#define VMXNET3_OM_TSO 3 + +/* fields in TxDesc we access w/o using bit fields */ +#define VMXNET3_TXD_EOP_SHIFT 12 +#define VMXNET3_TXD_CQ_SHIFT 13 +#define VMXNET3_TXD_GEN_SHIFT 14 +#define VMXNET3_TXD_EOP_DWORD_SHIFT 3 +#define VMXNET3_TXD_GEN_DWORD_SHIFT 2 + +#define VMXNET3_TXD_CQ (1 << VMXNET3_TXD_CQ_SHIFT) +#define VMXNET3_TXD_EOP (1 << VMXNET3_TXD_EOP_SHIFT) +#define VMXNET3_TXD_GEN (1 << VMXNET3_TXD_GEN_SHIFT) + +#define VMXNET3_TXD_GEN_SIZE 1 +#define VMXNET3_TXD_EOP_SIZE 1 + +#define VMXNET3_HDR_COPY_SIZE 128 + +typedef +#include "vmware_pack_begin.h" +struct Vmxnet3_TxDataDesc { + uint8 data[VMXNET3_HDR_COPY_SIZE]; +} +#include "vmware_pack_end.h" +Vmxnet3_TxDataDesc; + +#define VMXNET3_TCD_GEN_SHIFT 31 +#define VMXNET3_TCD_GEN_SIZE 1 +#define VMXNET3_TCD_TXIDX_SHIFT 0 +#define VMXNET3_TCD_TXIDX_SIZE 12 +#define VMXNET3_TCD_GEN_DWORD_SHIFT 3 + +typedef +#include "vmware_pack_begin.h" +struct Vmxnet3_TxCompDesc { + uint32 txdIdx:12; /* Index of the EOP TxDesc */ + uint32 ext1:20; + + __le32 ext2; + __le32 ext3; + + uint32 rsvd:24; + uint32 type:7; /* completion type */ + uint32 gen:1; /* generation bit */ +} +#include "vmware_pack_end.h" +Vmxnet3_TxCompDesc; + +typedef +#include "vmware_pack_begin.h" +struct Vmxnet3_RxDesc { + __le64 addr; + +#ifdef __BIG_ENDIAN_BITFIELD + uint32 gen:1; /* Generation bit */ + uint32 rsvd:15; + uint32 dtype:1; /* Descriptor type */ + uint32 btype:1; /* Buffer Type */ + uint32 len:14; +#else + uint32 len:14; + uint32 btype:1; /* Buffer Type */ + uint32 dtype:1; /* Descriptor type */ + uint32 rsvd:15; + uint32 gen:1; /* Generation bit */ +#endif + __le32 ext1; +} +#include "vmware_pack_end.h" +Vmxnet3_RxDesc; + +/* values of RXD.BTYPE */ +#define VMXNET3_RXD_BTYPE_HEAD 0 /* head only */ +#define VMXNET3_RXD_BTYPE_BODY 1 /* body only */ + +/* fields in RxDesc we access w/o using bit fields */ +#define VMXNET3_RXD_BTYPE_SHIFT 14 +#define VMXNET3_RXD_GEN_SHIFT 31 + +typedef +#include "vmware_pack_begin.h" +struct Vmxnet3_RxCompDesc { +#ifdef __BIG_ENDIAN_BITFIELD + uint32 ext2:1; + uint32 cnc:1; /* Checksum Not Calculated */ + uint32 rssType:4; /* RSS hash type used */ + uint32 rqID:10; /* rx queue/ring ID */ + uint32 sop:1; /* Start of Packet */ + uint32 eop:1; /* End of Packet */ + uint32 ext1:2; + uint32 rxdIdx:12; /* Index of the RxDesc */ +#else + uint32 
rxdIdx:12; /* Index of the RxDesc */ + uint32 ext1:2; + uint32 eop:1; /* End of Packet */ + uint32 sop:1; /* Start of Packet */ + uint32 rqID:10; /* rx queue/ring ID */ + uint32 rssType:4; /* RSS hash type used */ + uint32 cnc:1; /* Checksum Not Calculated */ + uint32 ext2:1; +#endif /* __BIG_ENDIAN_BITFIELD */ + + __le32 rssHash; /* RSS hash value */ + +#ifdef __BIG_ENDIAN_BITFIELD + uint32 tci:16; /* Tag stripped */ + uint32 ts:1; /* Tag is stripped */ + uint32 err:1; /* Error */ + uint32 len:14; /* data length */ +#else + uint32 len:14; /* data length */ + uint32 err:1; /* Error */ + uint32 ts:1; /* Tag is stripped */ + uint32 tci:16; /* Tag stripped */ +#endif /* __BIG_ENDIAN_BITFIELD */ + + +#ifdef __BIG_ENDIAN_BITFIELD + uint32 gen:1; /* generation bit */ + uint32 type:7; /* completion type */ + uint32 fcs:1; /* Frame CRC correct */ + uint32 frg:1; /* IP Fragment */ + uint32 v4:1; /* IPv4 */ + uint32 v6:1; /* IPv6 */ + uint32 ipc:1; /* IP Checksum Correct */ + uint32 tcp:1; /* TCP packet */ + uint32 udp:1; /* UDP packet */ + uint32 tuc:1; /* TCP/UDP Checksum Correct */ + uint32 csum:16; +#else + uint32 csum:16; + uint32 tuc:1; /* TCP/UDP Checksum Correct */ + uint32 udp:1; /* UDP packet */ + uint32 tcp:1; /* TCP packet */ + uint32 ipc:1; /* IP Checksum Correct */ + uint32 v6:1; /* IPv6 */ + uint32 v4:1; /* IPv4 */ + uint32 frg:1; /* IP Fragment */ + uint32 fcs:1; /* Frame CRC correct */ + uint32 type:7; /* completion type */ + uint32 gen:1; /* generation bit */ +#endif /* __BIG_ENDIAN_BITFIELD */ +} +#include "vmware_pack_end.h" +Vmxnet3_RxCompDesc; + +typedef +#include "vmware_pack_begin.h" +struct Vmxnet3_RxCompDescExt { + __le32 dword1; + uint8 segCnt; /* Number of aggregated packets */ + uint8 dupAckCnt; /* Number of duplicate Acks */ + __le16 tsDelta; /* TCP timestamp difference */ + __le32 dword2[2]; +} +#include "vmware_pack_end.h" +Vmxnet3_RxCompDescExt; + +/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */ +#define VMXNET3_RCD_TUC_SHIFT 16 +#define VMXNET3_RCD_IPC_SHIFT 19 + +/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.qword[1] */ +#define VMXNET3_RCD_TYPE_SHIFT 56 +#define VMXNET3_RCD_GEN_SHIFT 63 + +/* csum OK for TCP/UDP pkts over IP */ +#define VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | 1 << VMXNET3_RCD_IPC_SHIFT) + +/* value of RxCompDesc.rssType */ +#define VMXNET3_RCD_RSS_TYPE_NONE 0 +#define VMXNET3_RCD_RSS_TYPE_IPV4 1 +#define VMXNET3_RCD_RSS_TYPE_TCPIPV4 2 +#define VMXNET3_RCD_RSS_TYPE_IPV6 3 +#define VMXNET3_RCD_RSS_TYPE_TCPIPV6 4 + +/* a union for accessing all cmd/completion descriptors */ +typedef union Vmxnet3_GenericDesc { + __le64 qword[2]; + __le32 dword[4]; + __le16 word[8]; + Vmxnet3_TxDesc txd; + Vmxnet3_RxDesc rxd; + Vmxnet3_TxCompDesc tcd; + Vmxnet3_RxCompDesc rcd; + Vmxnet3_RxCompDescExt rcdExt; +} Vmxnet3_GenericDesc; + +#define VMXNET3_INIT_GEN 1 + +/* Max size of a single tx buffer */ +#define VMXNET3_MAX_TX_BUF_SIZE (1 << 14) + +/* # of tx desc needed for a tx buffer size */ +#define VMXNET3_TXD_NEEDED(size) (((size) + VMXNET3_MAX_TX_BUF_SIZE - 1) / VMXNET3_MAX_TX_BUF_SIZE) + +/* max # of tx descs for a non-tso pkt */ +#define VMXNET3_MAX_TXD_PER_PKT 16 + +/* Max size of a single rx buffer */ +#define VMXNET3_MAX_RX_BUF_SIZE ((1 << 14) - 1) +/* Minimum size of a type 0 buffer */ +#define VMXNET3_MIN_T0_BUF_SIZE 128 +#define VMXNET3_MAX_CSUM_OFFSET 1024 + +/* Ring base address alignment */ +#define VMXNET3_RING_BA_ALIGN 512 +#define VMXNET3_RING_BA_MASK (VMXNET3_RING_BA_ALIGN - 1) + +/* Ring size must be a 
multiple of 32 */ +#define VMXNET3_RING_SIZE_ALIGN 32 +#define VMXNET3_RING_SIZE_MASK (VMXNET3_RING_SIZE_ALIGN - 1) + +/* Max ring size */ +#define VMXNET3_TX_RING_MAX_SIZE 4096 +#define VMXNET3_TC_RING_MAX_SIZE 4096 +#define VMXNET3_RX_RING_MAX_SIZE 4096 +#define VMXNET3_RC_RING_MAX_SIZE 8192 + +/* a list of reasons for queue stop */ + +#define VMXNET3_ERR_NOEOP 0x80000000 /* cannot find the EOP desc of a pkt */ +#define VMXNET3_ERR_TXD_REUSE 0x80000001 /* reuse a TxDesc before tx completion */ +#define VMXNET3_ERR_BIG_PKT 0x80000002 /* too many TxDesc for a pkt */ +#define VMXNET3_ERR_DESC_NOT_SPT 0x80000003 /* descriptor type not supported */ +#define VMXNET3_ERR_SMALL_BUF 0x80000004 /* type 0 buffer too small */ +#define VMXNET3_ERR_STRESS 0x80000005 /* stress option firing in vmkernel */ +#define VMXNET3_ERR_SWITCH 0x80000006 /* mode switch failure */ +#define VMXNET3_ERR_TXD_INVALID 0x80000007 /* invalid TxDesc */ + +/* completion descriptor types */ +#define VMXNET3_CDTYPE_TXCOMP 0 /* Tx Completion Descriptor */ +#define VMXNET3_CDTYPE_RXCOMP 3 /* Rx Completion Descriptor */ +#define VMXNET3_CDTYPE_RXCOMP_LRO 4 /* Rx Completion Descriptor for LRO */ + +#define VMXNET3_GOS_BITS_UNK 0 /* unknown */ +#define VMXNET3_GOS_BITS_32 1 +#define VMXNET3_GOS_BITS_64 2 + +#define VMXNET3_GOS_TYPE_UNK 0 /* unknown */ +#define VMXNET3_GOS_TYPE_LINUX 1 +#define VMXNET3_GOS_TYPE_WIN 2 +#define VMXNET3_GOS_TYPE_SOLARIS 3 +#define VMXNET3_GOS_TYPE_FREEBSD 4 +#define VMXNET3_GOS_TYPE_PXE 5 + +/* All structures in DriverShared are padded to multiples of 8 bytes */ + +typedef +#include "vmware_pack_begin.h" +struct Vmxnet3_GOSInfo { +#ifdef __BIG_ENDIAN_BITFIELD + uint32 gosMisc: 10; /* other info about gos */ + uint32 gosVer: 16; /* gos version */ + uint32 gosType: 4; /* which guest */ + uint32 gosBits: 2; /* 32-bit or 64-bit? */ +#else + uint32 gosBits: 2; /* 32-bit or 64-bit? */ + uint32 gosType: 4; /* which guest */ + uint32 gosVer: 16; /* gos version */ + uint32 gosMisc: 10; /* other info about gos */ +#endif /* __BIG_ENDIAN_BITFIELD */ +} +#include "vmware_pack_end.h" +Vmxnet3_GOSInfo; + +typedef +#include "vmware_pack_begin.h" +struct Vmxnet3_DriverInfo { + __le32 version; /* driver version */ + Vmxnet3_GOSInfo gos; + __le32 vmxnet3RevSpt; /* vmxnet3 revision supported */ + __le32 uptVerSpt; /* upt version supported */ +} +#include "vmware_pack_end.h" +Vmxnet3_DriverInfo; + +#define VMXNET3_REV1_MAGIC 0xbabefee1 + +/* + * QueueDescPA must be 128 bytes aligned. It points to an array of + * Vmxnet3_TxQueueDesc followed by an array of Vmxnet3_RxQueueDesc. + * The number of Vmxnet3_TxQueueDesc/Vmxnet3_RxQueueDesc are specified by + * Vmxnet3_MiscConf.numTxQueues/numRxQueues, respectively. 
+ */ +#define VMXNET3_QUEUE_DESC_ALIGN 128 + +typedef +#include "vmware_pack_begin.h" +struct Vmxnet3_MiscConf { + Vmxnet3_DriverInfo driverInfo; + __le64 uptFeatures; + __le64 ddPA; /* driver data PA */ + __le64 queueDescPA; /* queue descriptor table PA */ + __le32 ddLen; /* driver data len */ + __le32 queueDescLen; /* queue descriptor table len, in bytes */ + __le32 mtu; + __le16 maxNumRxSG; + uint8 numTxQueues; + uint8 numRxQueues; + __le32 reserved[4]; +} +#include "vmware_pack_end.h" +Vmxnet3_MiscConf; + +typedef +#include "vmware_pack_begin.h" +struct Vmxnet3_TxQueueConf { + __le64 txRingBasePA; + __le64 dataRingBasePA; + __le64 compRingBasePA; + __le64 ddPA; /* driver data */ + __le64 reserved; + __le32 txRingSize; /* # of tx desc */ + __le32 dataRingSize; /* # of data desc */ + __le32 compRingSize; /* # of comp desc */ + __le32 ddLen; /* size of driver data */ + uint8 intrIdx; + uint8 _pad[7]; +} +#include "vmware_pack_end.h" +Vmxnet3_TxQueueConf; + +typedef +#include "vmware_pack_begin.h" +struct Vmxnet3_RxQueueConf { + __le64 rxRingBasePA[2]; + __le64 compRingBasePA; + __le64 ddPA; /* driver data */ + __le64 reserved; + __le32 rxRingSize[2]; /* # of rx desc */ + __le32 compRingSize; /* # of rx comp desc */ + __le32 ddLen; /* size of driver data */ + uint8 intrIdx; + uint8 _pad[7]; +} +#include "vmware_pack_end.h" +Vmxnet3_RxQueueConf; + +enum vmxnet3_intr_mask_mode { + VMXNET3_IMM_AUTO = 0, + VMXNET3_IMM_ACTIVE = 1, + VMXNET3_IMM_LAZY = 2 +}; + +enum vmxnet3_intr_type { + VMXNET3_IT_AUTO = 0, + VMXNET3_IT_INTX = 1, + VMXNET3_IT_MSI = 2, + VMXNET3_IT_MSIX = 3 +}; + +#define VMXNET3_MAX_TX_QUEUES 8 +#define VMXNET3_MAX_RX_QUEUES 16 +/* addition 1 for events */ +#define VMXNET3_MAX_INTRS 25 + +/* value of intrCtrl */ +#define VMXNET3_IC_DISABLE_ALL 0x1 /* bit 0 */ + +typedef +#include "vmware_pack_begin.h" +struct Vmxnet3_IntrConf { + Bool autoMask; + uint8 numIntrs; /* # of interrupts */ + uint8 eventIntrIdx; + uint8 modLevels[VMXNET3_MAX_INTRS]; /* moderation level for each intr */ + __le32 intrCtrl; + __le32 reserved[2]; +} +#include "vmware_pack_end.h" +Vmxnet3_IntrConf; + +/* one bit per VLAN ID, the size is in the units of uint32 */ +#define VMXNET3_VFT_SIZE (4096 / (sizeof(uint32) * 8)) + +typedef +#include "vmware_pack_begin.h" +struct Vmxnet3_QueueStatus { + Bool stopped; + uint8 _pad[3]; + __le32 error; +} +#include "vmware_pack_end.h" +Vmxnet3_QueueStatus; + +typedef +#include "vmware_pack_begin.h" +struct Vmxnet3_TxQueueCtrl { + __le32 txNumDeferred; + __le32 txThreshold; + __le64 reserved; +} +#include "vmware_pack_end.h" +Vmxnet3_TxQueueCtrl; + +typedef +#include "vmware_pack_begin.h" +struct Vmxnet3_RxQueueCtrl { + Bool updateRxProd; + uint8 _pad[7]; + __le64 reserved; +} +#include "vmware_pack_end.h" +Vmxnet3_RxQueueCtrl; + +#define VMXNET3_RXM_UCAST 0x01 /* unicast only */ +#define VMXNET3_RXM_MCAST 0x02 /* multicast passing the filters */ +#define VMXNET3_RXM_BCAST 0x04 /* broadcast only */ +#define VMXNET3_RXM_ALL_MULTI 0x08 /* all multicast */ +#define VMXNET3_RXM_PROMISC 0x10 /* promiscuous */ + +typedef +#include "vmware_pack_begin.h" +struct Vmxnet3_RxFilterConf { + __le32 rxMode; /* VMXNET3_RXM_xxx */ + __le16 mfTableLen; /* size of the multicast filter table */ + __le16 _pad1; + __le64 mfTablePA; /* PA of the multicast filters table */ + __le32 vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */ +} +#include "vmware_pack_end.h" +Vmxnet3_RxFilterConf; + +#define VMXNET3_PM_MAX_FILTERS 6 +#define VMXNET3_PM_MAX_PATTERN_SIZE 128 +#define VMXNET3_PM_MAX_MASK_SIZE 
(VMXNET3_PM_MAX_PATTERN_SIZE / 8) + +#define VMXNET3_PM_WAKEUP_MAGIC 0x01 /* wake up on magic pkts */ +#define VMXNET3_PM_WAKEUP_FILTER 0x02 /* wake up on pkts matching filters */ + +typedef +#include "vmware_pack_begin.h" +struct Vmxnet3_PM_PktFilter { + uint8 maskSize; + uint8 patternSize; + uint8 mask[VMXNET3_PM_MAX_MASK_SIZE]; + uint8 pattern[VMXNET3_PM_MAX_PATTERN_SIZE]; + uint8 pad[6]; +} +#include "vmware_pack_end.h" +Vmxnet3_PM_PktFilter; + +typedef +#include "vmware_pack_begin.h" +struct Vmxnet3_PMConf { + __le16 wakeUpEvents; /* VMXNET3_PM_WAKEUP_xxx */ + uint8 numFilters; + uint8 pad[5]; + Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS]; +} +#include "vmware_pack_end.h" +Vmxnet3_PMConf; + +typedef +#include "vmware_pack_begin.h" +struct Vmxnet3_VariableLenConfDesc { + __le32 confVer; + __le32 confLen; + __le64 confPA; +} +#include "vmware_pack_end.h" +Vmxnet3_VariableLenConfDesc; + +typedef +#include "vmware_pack_begin.h" +struct Vmxnet3_DSDevRead { + /* read-only region for device, read by dev in response to a SET cmd */ + Vmxnet3_MiscConf misc; + Vmxnet3_IntrConf intrConf; + Vmxnet3_RxFilterConf rxFilterConf; + Vmxnet3_VariableLenConfDesc rssConfDesc; + Vmxnet3_VariableLenConfDesc pmConfDesc; + Vmxnet3_VariableLenConfDesc pluginConfDesc; +} +#include "vmware_pack_end.h" +Vmxnet3_DSDevRead; + +typedef +#include "vmware_pack_begin.h" +struct Vmxnet3_TxQueueDesc { + Vmxnet3_TxQueueCtrl ctrl; + Vmxnet3_TxQueueConf conf; + /* Driver read after a GET command */ + Vmxnet3_QueueStatus status; + UPT1_TxStats stats; + uint8 _pad[88]; /* 128 aligned */ +} +#include "vmware_pack_end.h" +Vmxnet3_TxQueueDesc; + +typedef +#include "vmware_pack_begin.h" +struct Vmxnet3_RxQueueDesc { + Vmxnet3_RxQueueCtrl ctrl; + Vmxnet3_RxQueueConf conf; + /* Driver read after a GET command */ + Vmxnet3_QueueStatus status; + UPT1_RxStats stats; + uint8 _pad[88]; /* 128 aligned */ +} +#include "vmware_pack_end.h" +Vmxnet3_RxQueueDesc; + +typedef +#include "vmware_pack_begin.h" +struct Vmxnet3_DriverShared { + __le32 magic; + __le32 pad; /* make devRead start at 64-bit boundaries */ + Vmxnet3_DSDevRead devRead; + __le32 ecr; + __le32 reserved[5]; +} +#include "vmware_pack_end.h" +Vmxnet3_DriverShared; + +#define VMXNET3_ECR_RQERR (1 << 0) +#define VMXNET3_ECR_TQERR (1 << 1) +#define VMXNET3_ECR_LINK (1 << 2) +#define VMXNET3_ECR_DIC (1 << 3) +#define VMXNET3_ECR_DEBUG (1 << 4) + +/* flip the gen bit of a ring */ +#define VMXNET3_FLIP_RING_GEN(gen) ((gen) = (gen) ^ 0x1) + +/* only use this if moving the idx won't affect the gen bit */ +#define VMXNET3_INC_RING_IDX_ONLY(idx, ring_size) \ +do {\ + (idx)++;\ + if (UNLIKELY((idx) == (ring_size))) {\ + (idx) = 0;\ + }\ +} while (0) + +#define VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid) \ + vfTable[vid >> 5] |= (1 << (vid & 31)) +#define VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid) \ + vfTable[vid >> 5] &= ~(1 << (vid & 31)) + +#define VMXNET3_VFTABLE_ENTRY_IS_SET(vfTable, vid) \ + ((vfTable[vid >> 5] & (1 << (vid & 31))) != 0) + +#define VMXNET3_MAX_MTU 9000 +#define VMXNET3_MIN_MTU 60 + +#define VMXNET3_LINK_UP (10000 << 16 | 1) // 10 Gbps, up +#define VMXNET3_LINK_DOWN 0 + +#define VMXWIFI_DRIVER_SHARED_LEN 8192 + +#define VMXNET3_DID_PASSTHRU 0xFFFF + +#endif /* _VMXNET3_DEFS_H_ */ diff --git a/drivers/net/vmxnet3/base/vmxnet3_osdep.h b/drivers/net/vmxnet3/base/vmxnet3_osdep.h new file mode 100644 index 00000000..b6e3469c --- /dev/null +++ b/drivers/net/vmxnet3/base/vmxnet3_osdep.h @@ -0,0 +1,48 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel 
Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _VMXNET3_OSDEP_H +#define _VMXNET3_OSDEP_H + +typedef uint64_t uint64; +typedef uint32_t uint32; +typedef uint16_t uint16; +typedef uint8_t uint8; +typedef int bool; +typedef char Bool; + +#ifndef UNLIKELY +#define UNLIKELY(x) __builtin_expect((x),0) +#endif /* unlikely */ + +#endif /* _VMXNET3_OSDEP_H */ diff --git a/drivers/net/vmxnet3/rte_pmd_vmxnet3_version.map b/drivers/net/vmxnet3/rte_pmd_vmxnet3_version.map new file mode 100644 index 00000000..ef353984 --- /dev/null +++ b/drivers/net/vmxnet3/rte_pmd_vmxnet3_version.map @@ -0,0 +1,4 @@ +DPDK_2.0 { + + local: *; +}; diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c new file mode 100644 index 00000000..bd7a2bb7 --- /dev/null +++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c @@ -0,0 +1,958 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/queue.h> +#include <stdio.h> +#include <errno.h> +#include <stdint.h> +#include <string.h> +#include <unistd.h> +#include <stdarg.h> +#include <fcntl.h> +#include <inttypes.h> +#include <rte_byteorder.h> +#include <rte_common.h> +#include <rte_cycles.h> + +#include <rte_interrupts.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_pci.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_memory.h> +#include <rte_memzone.h> +#include <rte_eal.h> +#include <rte_alarm.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_atomic.h> +#include <rte_string_fns.h> +#include <rte_malloc.h> +#include <rte_dev.h> + +#include "base/vmxnet3_defs.h" + +#include "vmxnet3_ring.h" +#include "vmxnet3_logs.h" +#include "vmxnet3_ethdev.h" + +#define PROCESS_SYS_EVENTS 0 + +static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev); +static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev); +static int vmxnet3_dev_configure(struct rte_eth_dev *dev); +static int vmxnet3_dev_start(struct rte_eth_dev *dev); +static void vmxnet3_dev_stop(struct rte_eth_dev *dev); +static void vmxnet3_dev_close(struct rte_eth_dev *dev); +static void vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set); +static void vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev); +static void vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev); +static void vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev); +static void vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev); +static int vmxnet3_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete); +static void vmxnet3_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats); +static void vmxnet3_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static const uint32_t * +vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev); +static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vid, int on); +static void vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask); +static void vmxnet3_mac_addr_set(struct rte_eth_dev *dev, + struct ether_addr *mac_addr); + +#if PROCESS_SYS_EVENTS == 1 +static void vmxnet3_process_events(struct vmxnet3_hw *); +#endif +/* + * The set of PCI devices this driver supports + */ +static const struct rte_pci_id pci_id_vmxnet3_map[] = { + +#define RTE_PCI_DEV_ID_DECL_VMXNET3(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, +#include "rte_pci_dev_ids.h" + +{ .vendor_id = 0, /* sentinel */ }, +}; + +static const struct eth_dev_ops vmxnet3_eth_dev_ops = { + .dev_configure = vmxnet3_dev_configure, + .dev_start = vmxnet3_dev_start, + .dev_stop = vmxnet3_dev_stop, + .dev_close = vmxnet3_dev_close, + .promiscuous_enable = vmxnet3_dev_promiscuous_enable, + .promiscuous_disable = vmxnet3_dev_promiscuous_disable, + .allmulticast_enable = vmxnet3_dev_allmulticast_enable, + .allmulticast_disable = vmxnet3_dev_allmulticast_disable, + .link_update = 
vmxnet3_dev_link_update, + .stats_get = vmxnet3_dev_stats_get, + .mac_addr_set = vmxnet3_mac_addr_set, + .dev_infos_get = vmxnet3_dev_info_get, + .dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get, + .vlan_filter_set = vmxnet3_dev_vlan_filter_set, + .vlan_offload_set = vmxnet3_dev_vlan_offload_set, + .rx_queue_setup = vmxnet3_dev_rx_queue_setup, + .rx_queue_release = vmxnet3_dev_rx_queue_release, + .tx_queue_setup = vmxnet3_dev_tx_queue_setup, + .tx_queue_release = vmxnet3_dev_tx_queue_release, +}; + +static const struct rte_memzone * +gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size, + const char *post_string, int socket_id, uint16_t align) +{ + char z_name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *mz; + + snprintf(z_name, sizeof(z_name), "%s_%d_%s", + dev->driver->pci_drv.name, dev->data->port_id, post_string); + + mz = rte_memzone_lookup(z_name); + if (mz) + return mz; + + return rte_memzone_reserve_aligned(z_name, size, + socket_id, 0, align); +} + +/** + * Atomically reads the link status information from global + * structure rte_eth_dev. + * + * @param dev + * - Pointer to the structure rte_eth_dev to read from. + * - Pointer to the buffer to be saved with the link status. + * + * @return + * - On success, zero. + * - On failure, negative value. + */ + +static int +vmxnet3_dev_atomic_read_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = link; + struct rte_eth_link *src = &(dev->data->dev_link); + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +/** + * Atomically writes the link status information into global + * structure rte_eth_dev. + * + * @param dev + * - Pointer to the structure rte_eth_dev to write to. + * - Pointer to the buffer to be saved with the link status. + * + * @return + * - On success, zero. + * - On failure, negative value. + */ +static int +vmxnet3_dev_atomic_write_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = &(dev->data->dev_link); + struct rte_eth_link *src = link; + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +/* + * This function is based on vmxnet3_disable_intr() + */ +static void +vmxnet3_disable_intr(struct vmxnet3_hw *hw) +{ + int i; + + PMD_INIT_FUNC_TRACE(); + + hw->shared->devRead.intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL; + for (i = 0; i < VMXNET3_MAX_INTRS; i++) + VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1); +} + +/* + * It returns 0 on success. + */ +static int +eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev; + struct vmxnet3_hw *hw = eth_dev->data->dev_private; + uint32_t mac_hi, mac_lo, ver; + + PMD_INIT_FUNC_TRACE(); + + eth_dev->dev_ops = &vmxnet3_eth_dev_ops; + eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts; + eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts; + pci_dev = eth_dev->pci_dev; + + /* + * for secondary processes, we don't initialize any further as primary + * has already done this work. 
+ */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + rte_eth_copy_pci_info(eth_dev, pci_dev); + + /* Vendor and Device ID need to be set before init of shared code */ + hw->device_id = pci_dev->id.device_id; + hw->vendor_id = pci_dev->id.vendor_id; + hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr; + hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr; + + hw->num_rx_queues = 1; + hw->num_tx_queues = 1; + hw->bufs_per_pkt = 1; + + /* Check h/w version compatibility with driver. */ + ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS); + PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver); + if (ver & 0x1) + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS, 1); + else { + PMD_INIT_LOG(ERR, "Incompatible h/w version, should be 0x1"); + return -EIO; + } + + /* Check UPT version compatibility with driver. */ + ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS); + PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver); + if (ver & 0x1) + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1); + else { + PMD_INIT_LOG(ERR, "Incompatible UPT version."); + return -EIO; + } + + /* Getting MAC Address */ + mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL); + mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH); + memcpy(hw->perm_addr , &mac_lo, 4); + memcpy(hw->perm_addr+4, &mac_hi, 2); + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN * + VMXNET3_MAX_MAC_ADDRS, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, + "Failed to allocate %d bytes needed to store MAC addresses", + ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS); + return -ENOMEM; + } + /* Copy the permanent MAC address */ + ether_addr_copy((struct ether_addr *) hw->perm_addr, + ð_dev->data->mac_addrs[0]); + + PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x", + hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2], + hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]); + + /* Put device in Quiesce Mode */ + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV); + + /* allow untagged pkts */ + VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0); + + return 0; +} + +static int +eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev) +{ + struct vmxnet3_hw *hw = eth_dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + if (hw->adapter_stopped == 0) + vmxnet3_dev_close(eth_dev); + + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + + return 0; +} + +static struct eth_driver rte_vmxnet3_pmd = { + .pci_drv = { + .name = "rte_vmxnet3_pmd", + .id_table = pci_id_vmxnet3_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE, + }, + .eth_dev_init = eth_vmxnet3_dev_init, + .eth_dev_uninit = eth_vmxnet3_dev_uninit, + .dev_private_size = sizeof(struct vmxnet3_hw), +}; + +/* + * Driver initialization routine. + * Invoked once at EAL init time. + * Register itself as the [Poll Mode] Driver of Virtual PCI VMXNET3 devices. 
+ */ +static int +rte_vmxnet3_pmd_init(const char *name __rte_unused, const char *param __rte_unused) +{ + PMD_INIT_FUNC_TRACE(); + + rte_eth_driver_register(&rte_vmxnet3_pmd); + return 0; +} + +static int +vmxnet3_dev_configure(struct rte_eth_dev *dev) +{ + const struct rte_memzone *mz; + struct vmxnet3_hw *hw = dev->data->dev_private; + size_t size; + + PMD_INIT_FUNC_TRACE(); + + if (dev->data->nb_rx_queues > UINT8_MAX || + dev->data->nb_tx_queues > UINT8_MAX) + return -EINVAL; + + size = dev->data->nb_rx_queues * sizeof(struct Vmxnet3_TxQueueDesc) + + dev->data->nb_tx_queues * sizeof(struct Vmxnet3_RxQueueDesc); + + if (size > UINT16_MAX) + return -EINVAL; + + hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues; + hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues; + + /* + * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead + * on current socket + */ + mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared), + "shared", rte_socket_id(), 8); + + if (mz == NULL) { + PMD_INIT_LOG(ERR, "ERROR: Creating shared zone"); + return -ENOMEM; + } + memset(mz->addr, 0, mz->len); + + hw->shared = mz->addr; + hw->sharedPA = mz->phys_addr; + + /* + * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc + * on current socket + */ + mz = gpa_zone_reserve(dev, size, "queuedesc", + rte_socket_id(), VMXNET3_QUEUE_DESC_ALIGN); + if (mz == NULL) { + PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone"); + return -ENOMEM; + } + memset(mz->addr, 0, mz->len); + + hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr; + hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues); + + hw->queueDescPA = mz->phys_addr; + hw->queue_desc_len = (uint16_t)size; + + if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) { + + /* Allocate memory structure for UPT1_RSSConf and configure */ + mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf), "rss_conf", + rte_socket_id(), RTE_CACHE_LINE_SIZE); + if (mz == NULL) { + PMD_INIT_LOG(ERR, + "ERROR: Creating rss_conf structure zone"); + return -ENOMEM; + } + memset(mz->addr, 0, mz->len); + + hw->rss_conf = mz->addr; + hw->rss_confPA = mz->phys_addr; + } + + return 0; +} + +static void +vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr) +{ + uint32_t val; + + PMD_INIT_LOG(DEBUG, + "Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x", + addr[0], addr[1], addr[2], + addr[3], addr[4], addr[5]); + + val = *(const uint32_t *)addr; + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val); + + val = (addr[5] << 8) | addr[4]; + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val); +} + +static int +vmxnet3_setup_driver_shared(struct rte_eth_dev *dev) +{ + struct rte_eth_conf port_conf = dev->data->dev_conf; + struct vmxnet3_hw *hw = dev->data->dev_private; + uint32_t mtu = dev->data->mtu; + Vmxnet3_DriverShared *shared = hw->shared; + Vmxnet3_DSDevRead *devRead = &shared->devRead; + uint32_t i; + int ret; + + shared->magic = VMXNET3_REV1_MAGIC; + devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM; + + /* Setting up Guest OS information */ + devRead->misc.driverInfo.gos.gosBits = sizeof(void *) == 4 ? 
+ VMXNET3_GOS_BITS_32 : + VMXNET3_GOS_BITS_64; + devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX; + devRead->misc.driverInfo.vmxnet3RevSpt = 1; + devRead->misc.driverInfo.uptVerSpt = 1; + + devRead->misc.mtu = rte_le_to_cpu_32(mtu); + devRead->misc.queueDescPA = hw->queueDescPA; + devRead->misc.queueDescLen = hw->queue_desc_len; + devRead->misc.numTxQueues = hw->num_tx_queues; + devRead->misc.numRxQueues = hw->num_rx_queues; + + /* + * Set number of interrupts to 1 + * PMD disables all the interrupts but this is MUST to activate device + * It needs at least one interrupt for link events to handle + * So we'll disable it later after device activation if needed + */ + devRead->intrConf.numIntrs = 1; + devRead->intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL; + + for (i = 0; i < hw->num_tx_queues; i++) { + Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i]; + vmxnet3_tx_queue_t *txq = dev->data->tx_queues[i]; + + tqd->ctrl.txNumDeferred = 0; + tqd->ctrl.txThreshold = 1; + tqd->conf.txRingBasePA = txq->cmd_ring.basePA; + tqd->conf.compRingBasePA = txq->comp_ring.basePA; + tqd->conf.dataRingBasePA = txq->data_ring.basePA; + + tqd->conf.txRingSize = txq->cmd_ring.size; + tqd->conf.compRingSize = txq->comp_ring.size; + tqd->conf.dataRingSize = txq->data_ring.size; + tqd->conf.intrIdx = txq->comp_ring.intr_idx; + tqd->status.stopped = TRUE; + tqd->status.error = 0; + memset(&tqd->stats, 0, sizeof(tqd->stats)); + } + + for (i = 0; i < hw->num_rx_queues; i++) { + Vmxnet3_RxQueueDesc *rqd = &hw->rqd_start[i]; + vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i]; + + rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA; + rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA; + rqd->conf.compRingBasePA = rxq->comp_ring.basePA; + + rqd->conf.rxRingSize[0] = rxq->cmd_ring[0].size; + rqd->conf.rxRingSize[1] = rxq->cmd_ring[1].size; + rqd->conf.compRingSize = rxq->comp_ring.size; + rqd->conf.intrIdx = rxq->comp_ring.intr_idx; + rqd->status.stopped = TRUE; + rqd->status.error = 0; + memset(&rqd->stats, 0, sizeof(rqd->stats)); + } + + /* RxMode set to 0 of VMXNET3_RXM_xxx */ + devRead->rxFilterConf.rxMode = 0; + + /* Setting up feature flags */ + if (dev->data->dev_conf.rxmode.hw_ip_checksum) + devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM; + + if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) { + ret = vmxnet3_rss_configure(dev); + if (ret != VMXNET3_SUCCESS) + return ret; + + devRead->misc.uptFeatures |= VMXNET3_F_RSS; + devRead->rssConfDesc.confVer = 1; + devRead->rssConfDesc.confLen = sizeof(struct VMXNET3_RSSConf); + devRead->rssConfDesc.confPA = hw->rss_confPA; + } + + vmxnet3_dev_vlan_offload_set(dev, + ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK); + + vmxnet3_write_mac(hw, hw->perm_addr); + + return VMXNET3_SUCCESS; +} + +/* + * Configure device link speed and setup link. + * Must be called after eth_vmxnet3_dev_init. Other wise it might fail + * It returns 0 on success. 
+ */ +static int +vmxnet3_dev_start(struct rte_eth_dev *dev) +{ + int status, ret; + struct vmxnet3_hw *hw = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + ret = vmxnet3_setup_driver_shared(dev); + if (ret != VMXNET3_SUCCESS) + return ret; + + /* Exchange shared data with device */ + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, + VMXNET3_GET_ADDR_LO(hw->sharedPA)); + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, + VMXNET3_GET_ADDR_HI(hw->sharedPA)); + + /* Activate device by register write */ + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV); + status = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD); + + if (status != 0) { + PMD_INIT_LOG(ERR, "Device activation: UNSUCCESSFUL"); + return -1; + } + + /* Disable interrupts */ + vmxnet3_disable_intr(hw); + + /* + * Load RX queues with blank mbufs and update next2fill index for device + * Update RxMode of the device + */ + ret = vmxnet3_dev_rxtx_init(dev); + if (ret != VMXNET3_SUCCESS) { + PMD_INIT_LOG(ERR, "Device receive init: UNSUCCESSFUL"); + return ret; + } + + /* Setting proper Rx Mode and issue Rx Mode Update command */ + vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1); + + /* + * Don't need to handle events for now + */ +#if PROCESS_SYS_EVENTS == 1 + events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR); + PMD_INIT_LOG(DEBUG, "Reading events: 0x%X", events); + vmxnet3_process_events(hw); +#endif + return status; +} + +/* + * Stop device: disable rx and tx functions to allow for reconfiguring. + */ +static void +vmxnet3_dev_stop(struct rte_eth_dev *dev) +{ + struct rte_eth_link link; + struct vmxnet3_hw *hw = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + if (hw->adapter_stopped == 1) { + PMD_INIT_LOG(DEBUG, "Device already closed."); + return; + } + + /* disable interrupts */ + vmxnet3_disable_intr(hw); + + /* quiesce the device first */ + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV); + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0); + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0); + + /* reset the device */ + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV); + PMD_INIT_LOG(DEBUG, "Device reset."); + hw->adapter_stopped = 0; + + vmxnet3_dev_clear_queues(dev); + + /* Clear recorded link status */ + memset(&link, 0, sizeof(link)); + vmxnet3_dev_atomic_write_link_status(dev, &link); +} + +/* + * Reset and stop device. 
+ */ +static void +vmxnet3_dev_close(struct rte_eth_dev *dev) +{ + struct vmxnet3_hw *hw = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + vmxnet3_dev_stop(dev); + hw->adapter_stopped = 1; +} + +static void +vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + unsigned int i; + struct vmxnet3_hw *hw = dev->data->dev_private; + + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); + + RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES); + for (i = 0; i < hw->num_tx_queues; i++) { + struct UPT1_TxStats *txStats = &hw->tqd_start[i].stats; + + stats->q_opackets[i] = txStats->ucastPktsTxOK + + txStats->mcastPktsTxOK + + txStats->bcastPktsTxOK; + stats->q_obytes[i] = txStats->ucastBytesTxOK + + txStats->mcastBytesTxOK + + txStats->bcastBytesTxOK; + + stats->opackets += stats->q_opackets[i]; + stats->obytes += stats->q_obytes[i]; + stats->oerrors += txStats->pktsTxError + + txStats->pktsTxDiscard; + } + + RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES); + for (i = 0; i < hw->num_rx_queues; i++) { + struct UPT1_RxStats *rxStats = &hw->rqd_start[i].stats; + + stats->q_ipackets[i] = rxStats->ucastPktsRxOK + + rxStats->mcastPktsRxOK + + rxStats->bcastPktsRxOK; + + stats->q_ibytes[i] = rxStats->ucastBytesRxOK + + rxStats->mcastBytesRxOK + + rxStats->bcastBytesRxOK; + + stats->ipackets += stats->q_ipackets[i]; + stats->ibytes += stats->q_ibytes[i]; + + stats->q_errors[i] = rxStats->pktsRxError; + stats->ierrors += rxStats->pktsRxError; + stats->imcasts += rxStats->mcastPktsRxOK; + stats->rx_nombuf += rxStats->pktsRxOutOfBuf; + } +} + +static void +vmxnet3_dev_info_get(__attribute__((unused))struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES; + dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES; + dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM; + dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */ + dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS; + + dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP; + dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL; + + dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = VMXNET3_RX_RING_MAX_SIZE, + .nb_min = VMXNET3_DEF_RX_RING_SIZE, + .nb_align = 1, + }; + + dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = VMXNET3_TX_RING_MAX_SIZE, + .nb_min = VMXNET3_DEF_TX_RING_SIZE, + .nb_align = 1, + }; + + dev_info->rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + + dev_info->tx_offload_capa = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO; +} + +static const uint32_t * +vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == vmxnet3_recv_pkts) + return ptypes; + return NULL; +} + +static void +vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) +{ + struct vmxnet3_hw *hw = dev->data->dev_private; + + vmxnet3_write_mac(hw, mac_addr->addr_bytes); +} + +/* return 0 means link status changed, -1 means not changed */ +static int +vmxnet3_dev_link_update(struct rte_eth_dev *dev, __attribute__((unused)) int wait_to_complete) +{ + struct vmxnet3_hw *hw = dev->data->dev_private; + struct rte_eth_link old, link; + uint32_t ret; + + if (dev->data->dev_started == 0) + return -1; 
/* Link status doesn't change for stopped dev */ + + memset(&link, 0, sizeof(link)); + vmxnet3_dev_atomic_read_link_status(dev, &old); + + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); + ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD); + + if (ret & 0x1) { + link.link_status = ETH_LINK_UP; + link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_speed = ETH_SPEED_NUM_10G; + link.link_autoneg = ETH_LINK_SPEED_FIXED; + } + + vmxnet3_dev_atomic_write_link_status(dev, &link); + + return (old.link_status == link.link_status) ? -1 : 0; +} + +/* Updating rxmode through Vmxnet3_DriverShared structure in adapter */ +static void +vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set) { + + struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf; + + if (set) + rxConf->rxMode = rxConf->rxMode | feature; + else + rxConf->rxMode = rxConf->rxMode & (~feature); + + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE); +} + +/* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */ +static void +vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct vmxnet3_hw *hw = dev->data->dev_private; + uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable; + + memset(vf_table, 0, VMXNET3_VFT_TABLE_SIZE); + vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1); + + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_VLAN_FILTERS); +} + +/* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */ +static void +vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct vmxnet3_hw *hw = dev->data->dev_private; + uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable; + + memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE); + vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0); + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_VLAN_FILTERS); +} + +/* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */ +static void +vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct vmxnet3_hw *hw = dev->data->dev_private; + + vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 1); +} + +/* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */ +static void +vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct vmxnet3_hw *hw = dev->data->dev_private; + + vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0); +} + +/* Enable/disable filter on vlan */ +static int +vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on) +{ + struct vmxnet3_hw *hw = dev->data->dev_private; + struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf; + uint32_t *vf_table = rxConf->vfTable; + + /* save state for restore */ + if (on) + VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, vid); + else + VMXNET3_CLEAR_VFTABLE_ENTRY(hw->shadow_vfta, vid); + + /* don't change active filter if in promiscuous mode */ + if (rxConf->rxMode & VMXNET3_RXM_PROMISC) + return 0; + + /* set in hardware */ + if (on) + VMXNET3_SET_VFTABLE_ENTRY(vf_table, vid); + else + VMXNET3_CLEAR_VFTABLE_ENTRY(vf_table, vid); + + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_VLAN_FILTERS); + return 0; +} + +static void +vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + struct vmxnet3_hw *hw = dev->data->dev_private; + Vmxnet3_DSDevRead *devRead = &hw->shared->devRead; + uint32_t *vf_table = devRead->rxFilterConf.vfTable; + + if (mask & ETH_VLAN_STRIP_MASK) { 
+ if (dev->data->dev_conf.rxmode.hw_vlan_strip) + devRead->misc.uptFeatures |= UPT1_F_RXVLAN; + else + devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN; + + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_FEATURE); + } + + if (mask & ETH_VLAN_FILTER_MASK) { + if (dev->data->dev_conf.rxmode.hw_vlan_filter) + memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE); + else + memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE); + + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_VLAN_FILTERS); + } +} + +#if PROCESS_SYS_EVENTS == 1 +static void +vmxnet3_process_events(struct vmxnet3_hw *hw) +{ + uint32_t events = hw->shared->ecr; + + if (!events) { + PMD_INIT_LOG(ERR, "No events to process"); + return; + } + + /* + * ECR bits when written with 1b are cleared. Hence write + * events back to ECR so that the bits which were set will be reset. + */ + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events); + + /* Check if link state has changed */ + if (events & VMXNET3_ECR_LINK) + PMD_INIT_LOG(ERR, + "Process events in %s(): VMXNET3_ECR_LINK event", __func__); + + /* Check if there is an error on xmit/recv queues */ + if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_QUEUE_STATUS); + + if (hw->tqd_start->status.stopped) + PMD_INIT_LOG(ERR, "tq error 0x%x", + hw->tqd_start->status.error); + + if (hw->rqd_start->status.stopped) + PMD_INIT_LOG(ERR, "rq error 0x%x", + hw->rqd_start->status.error); + + /* Reset the device */ + /* Have to reset the device */ + } + + if (events & VMXNET3_ECR_DIC) + PMD_INIT_LOG(ERR, "Device implementation change event."); + + if (events & VMXNET3_ECR_DEBUG) + PMD_INIT_LOG(ERR, "Debug event generated by device."); + +} +#endif + +static struct rte_driver rte_vmxnet3_driver = { + .type = PMD_PDEV, + .init = rte_vmxnet3_pmd_init, +}; + +PMD_REGISTER_DRIVER(rte_vmxnet3_driver); diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h new file mode 100644 index 00000000..4f9d0bd2 --- /dev/null +++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h @@ -0,0 +1,185 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _VMXNET3_ETHDEV_H_ +#define _VMXNET3_ETHDEV_H_ + +#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER +#define VMXNET3_ASSERT(x) do { \ + if (!(x)) rte_panic("VMXNET3: %s\n", #x); \ +} while(0) +#else +#define VMXNET3_ASSERT(x) do { (void)(x); } while (0) +#endif + +#define VMXNET3_MAX_MAC_ADDRS 1 + +/* UPT feature to negotiate */ +#define VMXNET3_F_RXCSUM 0x0001 +#define VMXNET3_F_RSS 0x0002 +#define VMXNET3_F_RXVLAN 0x0004 +#define VMXNET3_F_LRO 0x0008 + +/* Hash Types supported by device */ +#define VMXNET3_RSS_HASH_TYPE_NONE 0x0 +#define VMXNET3_RSS_HASH_TYPE_IPV4 0x01 +#define VMXNET3_RSS_HASH_TYPE_TCP_IPV4 0x02 +#define VMXNET3_RSS_HASH_TYPE_IPV6 0x04 +#define VMXNET3_RSS_HASH_TYPE_TCP_IPV6 0x08 + +#define VMXNET3_RSS_HASH_FUNC_NONE 0x0 +#define VMXNET3_RSS_HASH_FUNC_TOEPLITZ 0x01 + +#define VMXNET3_RSS_MAX_KEY_SIZE 40 +#define VMXNET3_RSS_MAX_IND_TABLE_SIZE 128 + +#define VMXNET3_RSS_OFFLOAD_ALL ( \ + ETH_RSS_IPV4 | \ + ETH_RSS_NONFRAG_IPV4_TCP | \ + ETH_RSS_IPV6 | \ + ETH_RSS_NONFRAG_IPV6_TCP) + +/* RSS configuration structure - shared with device through GPA */ +typedef +struct VMXNET3_RSSConf { + uint16_t hashType; + uint16_t hashFunc; + uint16_t hashKeySize; + uint16_t indTableSize; + uint8_t hashKey[VMXNET3_RSS_MAX_KEY_SIZE]; + /* + * indTable is only element that can be changed without + * device quiesce-reset-update-activation cycle + */ + uint8_t indTable[VMXNET3_RSS_MAX_IND_TABLE_SIZE]; +} VMXNET3_RSSConf; + +typedef +struct vmxnet3_mf_table { + void *mfTableBase; /* Multicast addresses list */ + uint64_t mfTablePA; /* Physical address of the list */ + uint16_t num_addrs; /* number of multicast addrs */ +} vmxnet3_mf_table_t; + +struct vmxnet3_hw { + + uint8_t *hw_addr0; /* BAR0: PT-Passthrough Regs */ + uint8_t *hw_addr1; /* BAR1: VD-Virtual Device Regs */ + /* BAR2: MSI-X Regs */ + /* BAR3: Port IO */ + void *back; + + uint16_t device_id; + uint16_t vendor_id; + uint16_t subsystem_device_id; + uint16_t subsystem_vendor_id; + bool adapter_stopped; + + uint8_t perm_addr[ETHER_ADDR_LEN]; + uint8_t num_tx_queues; + uint8_t num_rx_queues; + uint8_t bufs_per_pkt; + + Vmxnet3_TxQueueDesc *tqd_start; /* start address of all tx queue desc */ + Vmxnet3_RxQueueDesc *rqd_start; /* start address of all rx queue desc */ + + Vmxnet3_DriverShared *shared; + uint64_t sharedPA; + + uint64_t queueDescPA; + uint16_t queue_desc_len; + + VMXNET3_RSSConf *rss_conf; + uint64_t rss_confPA; + vmxnet3_mf_table_t *mf_table; + uint32_t shadow_vfta[VMXNET3_VFT_SIZE]; +#define VMXNET3_VFT_TABLE_SIZE (VMXNET3_VFT_SIZE * sizeof(uint32_t)) +}; + +#define VMXNET3_GET_ADDR_LO(reg) ((uint32_t)(reg)) +#define VMXNET3_GET_ADDR_HI(reg) ((uint32_t)(((uint64_t)(reg)) >> 32)) + +/* Config space read/writes */ + +#define VMXNET3_PCI_REG(reg) (*((volatile uint32_t *)(reg))) + +static inline uint32_t vmxnet3_read_addr(volatile void *addr) +{ + return VMXNET3_PCI_REG(addr); +} + +#define VMXNET3_PCI_REG_WRITE(reg, value) do { \ + VMXNET3_PCI_REG((reg)) = (value); \ +} while(0) + 
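
The accessor macros above funnel every device access through a volatile 32-bit load or store on one of the two mapped BARs recorded in struct vmxnet3_hw (hw_addr0 for the pass-through registers, hw_addr1 for the virtual-device registers). A minimal sketch of how they compose, mirroring the DSAL/DSAH writes issued by vmxnet3_dev_start(); the helper name is hypothetical, and it assumes base/vmxnet3_defs.h has been included ahead of this header, as the driver's .c files do:

/*
 * Illustrative sketch only (not part of the driver): hand the DriverShared
 * physical address to the device by writing its low and high halves into
 * the BAR1 DSAL/DSAH registers, using the raw accessors defined above.
 */
static inline void
vmxnet3_set_shared_pa(struct vmxnet3_hw *hw, uint64_t shared_pa)
{
	/* BAR1 (hw_addr1) holds the virtual-device registers. */
	volatile uint32_t *dsal =
		(volatile uint32_t *)((char *)hw->hw_addr1 + VMXNET3_REG_DSAL);
	volatile uint32_t *dsah =
		(volatile uint32_t *)((char *)hw->hw_addr1 + VMXNET3_REG_DSAH);

	VMXNET3_PCI_REG_WRITE(dsal, VMXNET3_GET_ADDR_LO(shared_pa));
	VMXNET3_PCI_REG_WRITE(dsah, VMXNET3_GET_ADDR_HI(shared_pa));
}

The BAR0/BAR1 wrappers defined next fold exactly this base-pointer arithmetic into the macro, so callers such as vmxnet3_dev_start() only name the register offset.
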
+#define VMXNET3_PCI_BAR0_REG_ADDR(hw, reg) \ + ((volatile uint32_t *)((char *)(hw)->hw_addr0 + (reg))) +#define VMXNET3_READ_BAR0_REG(hw, reg) \ + vmxnet3_read_addr(VMXNET3_PCI_BAR0_REG_ADDR((hw), (reg))) +#define VMXNET3_WRITE_BAR0_REG(hw, reg, value) \ + VMXNET3_PCI_REG_WRITE(VMXNET3_PCI_BAR0_REG_ADDR((hw), (reg)), (value)) + +#define VMXNET3_PCI_BAR1_REG_ADDR(hw, reg) \ + ((volatile uint32_t *)((char *)(hw)->hw_addr1 + (reg))) +#define VMXNET3_READ_BAR1_REG(hw, reg) \ + vmxnet3_read_addr(VMXNET3_PCI_BAR1_REG_ADDR((hw), (reg))) +#define VMXNET3_WRITE_BAR1_REG(hw, reg, value) \ + VMXNET3_PCI_REG_WRITE(VMXNET3_PCI_BAR1_REG_ADDR((hw), (reg)), (value)) + +/* + * RX/TX function prototypes + */ + +void vmxnet3_dev_clear_queues(struct rte_eth_dev *dev); + +void vmxnet3_dev_rx_queue_release(void *rxq); +void vmxnet3_dev_tx_queue_release(void *txq); + +int vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool); +int vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); + +int vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev); + +int vmxnet3_rss_configure(struct rte_eth_dev *dev); + +uint16_t vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +#endif /* _VMXNET3_ETHDEV_H_ */ diff --git a/drivers/net/vmxnet3/vmxnet3_logs.h b/drivers/net/vmxnet3/vmxnet3_logs.h new file mode 100644 index 00000000..82639a08 --- /dev/null +++ b/drivers/net/vmxnet3/vmxnet3_logs.h @@ -0,0 +1,74 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _VMXNET3_LOGS_H_ +#define _VMXNET3_LOGS_H_ + +#ifdef RTE_LIBRTE_VMXNET3_DEBUG_INIT +#define PMD_INIT_LOG(level, fmt, args...) 
\ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") +#else +#define PMD_INIT_LOG(level, fmt, args...) do { } while(0) +#define PMD_INIT_FUNC_TRACE() do { } while(0) +#endif + +#ifdef RTE_LIBRTE_VMXNET3_DEBUG_RX +#define PMD_RX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_VMXNET3_DEBUG_TX +#define PMD_TX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_VMXNET3_DEBUG_TX_FREE +#define PMD_TX_FREE_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER +#define PMD_DRV_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_DRV_LOG(level, fmt, args...) do { } while(0) +#endif + +#endif /* _VMXNET3_LOGS_H_ */ diff --git a/drivers/net/vmxnet3/vmxnet3_ring.h b/drivers/net/vmxnet3/vmxnet3_ring.h new file mode 100644 index 00000000..69ff2ded --- /dev/null +++ b/drivers/net/vmxnet3/vmxnet3_ring.h @@ -0,0 +1,169 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _VMXNET3_RING_H_ +#define _VMXNET3_RING_H_ + +#define VMXNET3_RX_CMDRING_SIZE 2 + +#define VMXNET3_DRIVER_VERSION_NUM 0x01012000 + +/* Default ring size */ +#define VMXNET3_DEF_TX_RING_SIZE 512 +#define VMXNET3_DEF_RX_RING_SIZE 128 + +#define VMXNET3_SUCCESS 0 +#define VMXNET3_FAIL -1 + +#define TRUE 1 +#define FALSE 0 + + +typedef struct vmxnet3_buf_info { + uint16_t len; + struct rte_mbuf *m; + uint64_t bufPA; +} vmxnet3_buf_info_t; + +typedef struct vmxnet3_cmd_ring { + vmxnet3_buf_info_t *buf_info; + uint32_t size; + uint32_t next2fill; + uint32_t next2comp; + uint8_t gen; + uint8_t rid; + Vmxnet3_GenericDesc *base; + uint64_t basePA; +} vmxnet3_cmd_ring_t; + +static inline void +vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring) +{ + ring->next2fill++; + if (unlikely(ring->next2fill == ring->size)) { + ring->next2fill = 0; + ring->gen = (uint8_t)(ring->gen ^ 1); + } +} + +static inline void +vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring) +{ + VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size); +} + +static inline uint32_t +vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring) +{ + return (ring->next2comp > ring->next2fill ? 0 : ring->size) + + ring->next2comp - ring->next2fill - 1; +} + +static inline bool +vmxnet3_cmd_ring_desc_empty(struct vmxnet3_cmd_ring *ring) +{ + return ring->next2comp == ring->next2fill; +} + +typedef struct vmxnet3_comp_ring { + uint32_t size; + uint32_t next2proc; + uint8_t gen; + uint8_t intr_idx; + Vmxnet3_GenericDesc *base; + uint64_t basePA; +} vmxnet3_comp_ring_t; + +struct vmxnet3_data_ring { + struct Vmxnet3_TxDataDesc *base; + uint32_t size; + uint64_t basePA; +}; + +static inline void +vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring) +{ + ring->next2proc++; + if (unlikely(ring->next2proc == ring->size)) { + ring->next2proc = 0; + ring->gen = (uint8_t)(ring->gen ^ 1); + } +} + +struct vmxnet3_txq_stats { + uint64_t drop_total; /* # of pkts dropped by the driver, + * the counters below track droppings due to + * different reasons + */ + uint64_t drop_too_many_segs; + uint64_t drop_tso; + uint64_t tx_ring_full; +}; + +typedef struct vmxnet3_tx_queue { + struct vmxnet3_hw *hw; + struct vmxnet3_cmd_ring cmd_ring; + struct vmxnet3_comp_ring comp_ring; + struct vmxnet3_data_ring data_ring; + uint32_t qid; + struct Vmxnet3_TxQueueDesc *shared; + struct vmxnet3_txq_stats stats; + bool stopped; + uint16_t queue_id; /**< Device TX queue index. */ + uint8_t port_id; /**< Device port identifier. */ +} vmxnet3_tx_queue_t; + +struct vmxnet3_rxq_stats { + uint64_t drop_total; + uint64_t drop_err; + uint64_t drop_fcs; + uint64_t rx_buf_alloc_failure; +}; + +typedef struct vmxnet3_rx_queue { + struct rte_mempool *mp; + struct vmxnet3_hw *hw; + struct vmxnet3_cmd_ring cmd_ring[VMXNET3_RX_CMDRING_SIZE]; + struct vmxnet3_comp_ring comp_ring; + uint32_t qid1; + uint32_t qid2; + Vmxnet3_RxQueueDesc *shared; + struct rte_mbuf *start_seg; + struct rte_mbuf *last_seg; + struct vmxnet3_rxq_stats stats; + bool stopped; + uint16_t queue_id; /**< Device RX queue index. */ + uint8_t port_id; /**< Device port identifier. */ +} vmxnet3_rx_queue_t; + +#endif /* _VMXNET3_RING_H_ */ diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c new file mode 100644 index 00000000..4ac0456c --- /dev/null +++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c @@ -0,0 +1,1105 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. 
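A note on the command-ring helpers defined just above in vmxnet3_ring.h: vmxnet3_cmd_ring_desc_avail() deliberately keeps one descriptor unused so a full ring can be told apart from an empty one, and vmxnet3_cmd_ring_adv_next2fill() flips the generation bit every time the fill index wraps. A stand-alone sketch (not part of the driver; the ring size of 8 is arbitrary) that exercises the same arithmetic:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct toy_ring {
	uint32_t size;
	uint32_t next2fill;
	uint32_t next2comp;
	uint8_t  gen;
};

/* Same formula as vmxnet3_cmd_ring_desc_avail(): one slot stays unused. */
static uint32_t toy_desc_avail(const struct toy_ring *r)
{
	return (r->next2comp > r->next2fill ? 0 : r->size) +
	       r->next2comp - r->next2fill - 1;
}

/* Same wrap rule as vmxnet3_cmd_ring_adv_next2fill(). */
static void toy_adv_next2fill(struct toy_ring *r)
{
	if (++r->next2fill == r->size) {
		r->next2fill = 0;
		r->gen ^= 1;            /* generation flips on every wrap */
	}
}

int main(void)
{
	struct toy_ring r = { .size = 8, .gen = 1 };
	int i;

	assert(toy_desc_avail(&r) == 7);   /* empty ring: size - 1 usable */
	for (i = 0; i < 7; i++)
		toy_adv_next2fill(&r);
	assert(toy_desc_avail(&r) == 0);   /* "full" with one slot kept spare */

	r.next2comp = 3;                   /* pretend the device completed 3 */
	assert(toy_desc_avail(&r) == 3);

	for (i = 0; i < 3; i++)            /* fill again, wrapping past the end */
		toy_adv_next2fill(&r);
	assert(r.next2fill == 2 && r.gen == 0 && toy_desc_avail(&r) == 0);

	printf("ring index arithmetic behaves as expected\n");
	return 0;
}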
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/queue.h> + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <errno.h> +#include <stdint.h> +#include <stdarg.h> +#include <unistd.h> +#include <inttypes.h> + +#include <rte_byteorder.h> +#include <rte_common.h> +#include <rte_cycles.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_interrupts.h> +#include <rte_pci.h> +#include <rte_memory.h> +#include <rte_memzone.h> +#include <rte_launch.h> +#include <rte_eal.h> +#include <rte_per_lcore.h> +#include <rte_lcore.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_ring.h> +#include <rte_mempool.h> +#include <rte_malloc.h> +#include <rte_mbuf.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_prefetch.h> +#include <rte_ip.h> +#include <rte_udp.h> +#include <rte_tcp.h> +#include <rte_sctp.h> +#include <rte_string_fns.h> +#include <rte_errno.h> + +#include "base/vmxnet3_defs.h" +#include "vmxnet3_ring.h" + +#include "vmxnet3_logs.h" +#include "vmxnet3_ethdev.h" + +static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2}; + +static int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t*, uint8_t); +static void vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *); +#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED +static void vmxnet3_rxq_dump(struct vmxnet3_rx_queue *); +static void vmxnet3_txq_dump(struct vmxnet3_tx_queue *); +#endif + +static struct rte_mbuf * +rte_rxmbuf_alloc(struct rte_mempool *mp) +{ + struct rte_mbuf *m; + + m = __rte_mbuf_raw_alloc(mp); + __rte_mbuf_sanity_check_raw(m, 0); + return m; +} + +#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED +static void +vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq) +{ + uint32_t avail = 0; + + if (rxq == NULL) + return; + + PMD_RX_LOG(DEBUG, + "RXQ: cmd0 base : 0x%p cmd1 base : 0x%p comp ring base : 0x%p.", + rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base); + PMD_RX_LOG(DEBUG, + "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.", + (unsigned 
long)rxq->cmd_ring[0].basePA, + (unsigned long)rxq->cmd_ring[1].basePA, + (unsigned long)rxq->comp_ring.basePA); + + avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[0]); + PMD_RX_LOG(DEBUG, + "RXQ:cmd0: size=%u; free=%u; next2proc=%u; queued=%u", + (uint32_t)rxq->cmd_ring[0].size, avail, + rxq->comp_ring.next2proc, + rxq->cmd_ring[0].size - avail); + + avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[1]); + PMD_RX_LOG(DEBUG, "RXQ:cmd1 size=%u; free=%u; next2proc=%u; queued=%u", + (uint32_t)rxq->cmd_ring[1].size, avail, rxq->comp_ring.next2proc, + rxq->cmd_ring[1].size - avail); + +} + +static void +vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq) +{ + uint32_t avail = 0; + + if (txq == NULL) + return; + + PMD_TX_LOG(DEBUG, "TXQ: cmd base : 0x%p comp ring base : 0x%p data ring base : 0x%p.", + txq->cmd_ring.base, txq->comp_ring.base, txq->data_ring.base); + PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx data ring basePA : 0x%lx.", + (unsigned long)txq->cmd_ring.basePA, + (unsigned long)txq->comp_ring.basePA, + (unsigned long)txq->data_ring.basePA); + + avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring); + PMD_TX_LOG(DEBUG, "TXQ: size=%u; free=%u; next2proc=%u; queued=%u", + (uint32_t)txq->cmd_ring.size, avail, + txq->comp_ring.next2proc, txq->cmd_ring.size - avail); +} +#endif + +static void +vmxnet3_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring) +{ + while (ring->next2comp != ring->next2fill) { + /* No need to worry about tx desc ownership, device is quiesced by now. */ + vmxnet3_buf_info_t *buf_info = ring->buf_info + ring->next2comp; + + if (buf_info->m) { + rte_pktmbuf_free(buf_info->m); + buf_info->m = NULL; + buf_info->bufPA = 0; + buf_info->len = 0; + } + vmxnet3_cmd_ring_adv_next2comp(ring); + } +} + +static void +vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring) +{ + vmxnet3_cmd_ring_release_mbufs(ring); + rte_free(ring->buf_info); + ring->buf_info = NULL; +} + + +void +vmxnet3_dev_tx_queue_release(void *txq) +{ + vmxnet3_tx_queue_t *tq = txq; + + if (tq != NULL) { + /* Release the cmd_ring */ + vmxnet3_cmd_ring_release(&tq->cmd_ring); + } +} + +void +vmxnet3_dev_rx_queue_release(void *rxq) +{ + int i; + vmxnet3_rx_queue_t *rq = rxq; + + if (rq != NULL) { + /* Release both the cmd_rings */ + for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) + vmxnet3_cmd_ring_release(&rq->cmd_ring[i]); + } +} + +static void +vmxnet3_dev_tx_queue_reset(void *txq) +{ + vmxnet3_tx_queue_t *tq = txq; + struct vmxnet3_cmd_ring *ring = &tq->cmd_ring; + struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring; + struct vmxnet3_data_ring *data_ring = &tq->data_ring; + int size; + + if (tq != NULL) { + /* Release the cmd_ring mbufs */ + vmxnet3_cmd_ring_release_mbufs(&tq->cmd_ring); + } + + /* Tx vmxnet rings structure initialization*/ + ring->next2fill = 0; + ring->next2comp = 0; + ring->gen = VMXNET3_INIT_GEN; + comp_ring->next2proc = 0; + comp_ring->gen = VMXNET3_INIT_GEN; + + size = sizeof(struct Vmxnet3_TxDesc) * ring->size; + size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size; + size += sizeof(struct Vmxnet3_TxDataDesc) * data_ring->size; + + memset(ring->base, 0, size); +} + +static void +vmxnet3_dev_rx_queue_reset(void *rxq) +{ + int i; + vmxnet3_rx_queue_t *rq = rxq; + struct vmxnet3_cmd_ring *ring0, *ring1; + struct vmxnet3_comp_ring *comp_ring; + int size; + + if (rq != NULL) { + /* Release both the cmd_rings mbufs */ + for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) + vmxnet3_cmd_ring_release_mbufs(&rq->cmd_ring[i]); + } + + ring0 = &rq->cmd_ring[0]; + ring1 = 
&rq->cmd_ring[1]; + comp_ring = &rq->comp_ring; + + /* Rx vmxnet rings structure initialization */ + ring0->next2fill = 0; + ring1->next2fill = 0; + ring0->next2comp = 0; + ring1->next2comp = 0; + ring0->gen = VMXNET3_INIT_GEN; + ring1->gen = VMXNET3_INIT_GEN; + comp_ring->next2proc = 0; + comp_ring->gen = VMXNET3_INIT_GEN; + + size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size); + size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size; + + memset(ring0->base, 0, size); +} + +void +vmxnet3_dev_clear_queues(struct rte_eth_dev *dev) +{ + unsigned i; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i]; + + if (txq != NULL) { + txq->stopped = TRUE; + vmxnet3_dev_tx_queue_reset(txq); + } + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i]; + + if (rxq != NULL) { + rxq->stopped = TRUE; + vmxnet3_dev_rx_queue_reset(rxq); + } + } +} + +static int +vmxnet3_unmap_pkt(uint16_t eop_idx, vmxnet3_tx_queue_t *txq) +{ + int completed = 0; + struct rte_mbuf *mbuf; + + /* Release cmd_ring descriptor and free mbuf */ + VMXNET3_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1); + + mbuf = txq->cmd_ring.buf_info[eop_idx].m; + if (mbuf == NULL) + rte_panic("EOP desc does not point to a valid mbuf"); + rte_pktmbuf_free(mbuf); + + txq->cmd_ring.buf_info[eop_idx].m = NULL; + + while (txq->cmd_ring.next2comp != eop_idx) { + /* no out-of-order completion */ + VMXNET3_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0); + vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring); + completed++; + } + + /* Mark the txd for which tcd was generated as completed */ + vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring); + + return completed + 1; +} + +static void +vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq) +{ + int completed = 0; + vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring; + struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *) + (comp_ring->base + comp_ring->next2proc); + + while (tcd->gen == comp_ring->gen) { + completed += vmxnet3_unmap_pkt(tcd->txdIdx, txq); + + vmxnet3_comp_ring_adv_next2proc(comp_ring); + tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base + + comp_ring->next2proc); + } + + PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed); +} + +uint16_t +vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_tx; + vmxnet3_tx_queue_t *txq = tx_queue; + struct vmxnet3_hw *hw = txq->hw; + Vmxnet3_TxQueueCtrl *txq_ctrl = &txq->shared->ctrl; + uint32_t deferred = rte_le_to_cpu_32(txq_ctrl->txNumDeferred); + + if (unlikely(txq->stopped)) { + PMD_TX_LOG(DEBUG, "Tx queue is stopped."); + return 0; + } + + /* Free up the comp_descriptors aggressively */ + vmxnet3_tq_tx_complete(txq); + + nb_tx = 0; + while (nb_tx < nb_pkts) { + Vmxnet3_GenericDesc *gdesc; + vmxnet3_buf_info_t *tbi; + uint32_t first2fill, avail, dw2; + struct rte_mbuf *txm = tx_pkts[nb_tx]; + struct rte_mbuf *m_seg = txm; + int copy_size = 0; + bool tso = (txm->ol_flags & PKT_TX_TCP_SEG) != 0; + /* # of descriptors needed for a packet. */ + unsigned count = txm->nb_segs; + + avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring); + if (count > avail) { + /* Is command ring full? 
*/ + if (unlikely(avail == 0)) { + PMD_TX_LOG(DEBUG, "No free ring descriptors"); + txq->stats.tx_ring_full++; + txq->stats.drop_total += (nb_pkts - nb_tx); + break; + } + + /* Command ring is not full but cannot handle the + * multi-segmented packet. Let's try the next packet + * in this case. + */ + PMD_TX_LOG(DEBUG, "Running out of ring descriptors " + "(avail %d needed %d)", avail, count); + txq->stats.drop_total++; + if (tso) + txq->stats.drop_tso++; + rte_pktmbuf_free(txm); + nb_tx++; + continue; + } + + /* Drop non-TSO packet that is excessively fragmented */ + if (unlikely(!tso && count > VMXNET3_MAX_TXD_PER_PKT)) { + PMD_TX_LOG(ERR, "Non-TSO packet cannot occupy more than %d tx " + "descriptors. Packet dropped.", VMXNET3_MAX_TXD_PER_PKT); + txq->stats.drop_too_many_segs++; + txq->stats.drop_total++; + rte_pktmbuf_free(txm); + nb_tx++; + continue; + } + + if (txm->nb_segs == 1 && rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) { + struct Vmxnet3_TxDataDesc *tdd; + + tdd = txq->data_ring.base + txq->cmd_ring.next2fill; + copy_size = rte_pktmbuf_pkt_len(txm); + rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size); + } + + /* use the previous gen bit for the SOP desc */ + dw2 = (txq->cmd_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT; + first2fill = txq->cmd_ring.next2fill; + do { + /* Remember the transmit buffer for cleanup */ + tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill; + + /* NB: the following assumes that VMXNET3 maximum + * transmit buffer size (16K) is greater than + * maximum size of mbuf segment size. + */ + gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill; + if (copy_size) + gdesc->txd.addr = rte_cpu_to_le_64(txq->data_ring.basePA + + txq->cmd_ring.next2fill * + sizeof(struct Vmxnet3_TxDataDesc)); + else + gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg); + + gdesc->dword[2] = dw2 | m_seg->data_len; + gdesc->dword[3] = 0; + + /* move to the next2fill descriptor */ + vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring); + + /* use the right gen for non-SOP desc */ + dw2 = txq->cmd_ring.gen << VMXNET3_TXD_GEN_SHIFT; + } while ((m_seg = m_seg->next) != NULL); + + /* set the last buf_info for the pkt */ + tbi->m = txm; + /* Update the EOP descriptor */ + gdesc->dword[3] |= VMXNET3_TXD_EOP | VMXNET3_TXD_CQ; + + /* Add VLAN tag if present */ + gdesc = txq->cmd_ring.base + first2fill; + if (txm->ol_flags & PKT_TX_VLAN_PKT) { + gdesc->txd.ti = 1; + gdesc->txd.tci = txm->vlan_tci; + } + + if (tso) { + uint16_t mss = txm->tso_segsz; + + VMXNET3_ASSERT(mss > 0); + + gdesc->txd.hlen = txm->l2_len + txm->l3_len + txm->l4_len; + gdesc->txd.om = VMXNET3_OM_TSO; + gdesc->txd.msscof = mss; + + deferred += (rte_pktmbuf_pkt_len(txm) - gdesc->txd.hlen + mss - 1) / mss; + } else if (txm->ol_flags & PKT_TX_L4_MASK) { + gdesc->txd.om = VMXNET3_OM_CSUM; + gdesc->txd.hlen = txm->l2_len + txm->l3_len; + + switch (txm->ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_TCP_CKSUM: + gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct tcp_hdr, cksum); + break; + case PKT_TX_UDP_CKSUM: + gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct udp_hdr, dgram_cksum); + break; + default: + PMD_TX_LOG(WARNING, "requested cksum offload not supported %#llx", + txm->ol_flags & PKT_TX_L4_MASK); + abort(); + } + deferred++; + } else { + gdesc->txd.hlen = 0; + gdesc->txd.om = VMXNET3_OM_NONE; + gdesc->txd.msscof = 0; + deferred++; + } + + /* flip the GEN bit on the SOP */ + rte_compiler_barrier(); + gdesc->dword[2] ^= VMXNET3_TXD_GEN; + + txq_ctrl->txNumDeferred = rte_cpu_to_le_32(deferred); + nb_tx++; + 
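/*
 * Worked example (illustrative numbers, not driver code): for a TSO
 * packet the "deferred" counter above grows by the number of wire
 * segments, computed with the ceiling division
 * (pkt_len - hlen + mss - 1) / mss.  With pkt_len = 4014, hlen = 66 and
 * mss = 1448 the payload is 3948 bytes and (3948 + 1448 - 1) / 1448 = 3,
 * so three segments are accounted for; a non-TSO packet adds exactly one.
 * Once "deferred" reaches the txThreshold value in the shared queue
 * control area, the TXPROD doorbell after this loop is rung.
 */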
} + + PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", rte_le_to_cpu_32(txq_ctrl->txThreshold)); + + if (deferred >= rte_le_to_cpu_32(txq_ctrl->txThreshold)) { + txq_ctrl->txNumDeferred = 0; + /* Notify vSwitch that packets are available. */ + VMXNET3_WRITE_BAR0_REG(hw, (VMXNET3_REG_TXPROD + txq->queue_id * VMXNET3_REG_ALIGN), + txq->cmd_ring.next2fill); + } + + return nb_tx; +} + +/* + * Allocates mbufs and clusters. Post rx descriptors with buffer details + * so that device can receive packets in those buffers. + * Ring layout: + * Among the two rings, 1st ring contains buffers of type 0 and type1. + * bufs_per_pkt is set such that for non-LRO cases all the buffers required + * by a frame will fit in 1st ring (1st buf of type0 and rest of type1). + * 2nd ring contains buffers of type 1 alone. Second ring mostly be used + * only for LRO. + * + */ +static int +vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id) +{ + int err = 0; + uint32_t i = 0, val = 0; + struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id]; + + if (ring_id == 0) { + /* Usually: One HEAD type buf per packet + * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ? + * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD; + */ + + /* We use single packet buffer so all heads here */ + val = VMXNET3_RXD_BTYPE_HEAD; + } else { + /* All BODY type buffers for 2nd ring */ + val = VMXNET3_RXD_BTYPE_BODY; + } + + while (vmxnet3_cmd_ring_desc_avail(ring) > 0) { + struct Vmxnet3_RxDesc *rxd; + struct rte_mbuf *mbuf; + vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill]; + + rxd = (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill); + + /* Allocate blank mbuf for the current Rx Descriptor */ + mbuf = rte_rxmbuf_alloc(rxq->mp); + if (unlikely(mbuf == NULL)) { + PMD_RX_LOG(ERR, "Error allocating mbuf"); + rxq->stats.rx_buf_alloc_failure++; + err = ENOMEM; + break; + } + + /* + * Load mbuf pointer into buf_info[ring_size] + * buf_info structure is equivalent to cookie for virtio-virtqueue + */ + buf_info->m = mbuf; + buf_info->len = (uint16_t)(mbuf->buf_len - + RTE_PKTMBUF_HEADROOM); + buf_info->bufPA = + rte_mbuf_data_dma_addr_default(mbuf); + + /* Load Rx Descriptor with the buffer's GPA */ + rxd->addr = buf_info->bufPA; + + /* After this point rxd->addr MUST not be NULL */ + rxd->btype = val; + rxd->len = buf_info->len; + /* Flip gen bit at the end to change ownership */ + rxd->gen = ring->gen; + + vmxnet3_cmd_ring_adv_next2fill(ring); + i++; + } + + /* Return error only if no buffers are posted at present */ + if (vmxnet3_cmd_ring_desc_avail(ring) >= (ring->size - 1)) + return -err; + else + return i; +} + + +/* Receive side checksum and other offloads */ +static void +vmxnet3_rx_offload(const Vmxnet3_RxCompDesc *rcd, struct rte_mbuf *rxm) +{ + /* Check for hardware stripped VLAN tag */ + if (rcd->ts) { + rxm->ol_flags |= PKT_RX_VLAN_PKT; + rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci); + } + + /* Check for RSS */ + if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) { + rxm->ol_flags |= PKT_RX_RSS_HASH; + rxm->hash.rss = rcd->rssHash; + } + + /* Check packet type, checksum errors, etc. Only support IPv4 for now. 
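For context, a sketch of how an application might consume the per-mbuf metadata attached here (the stripped VLAN tag, the RSS hash, and the checksum flags set in the IPv4 branch just below). The port id, queue id and burst size are placeholders, and EAL/device initialization is assumed to have been done elsewhere:

#include <inttypes.h>
#include <stdio.h>

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static void poll_rx_once(uint8_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t i, nb;

	nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
	for (i = 0; i < nb; i++) {
		struct rte_mbuf *m = pkts[i];

		if (m->ol_flags & PKT_RX_VLAN_PKT)
			printf("VLAN tci %u stripped by hw\n", m->vlan_tci);
		if (m->ol_flags & PKT_RX_RSS_HASH)
			printf("RSS hash 0x%08" PRIx32 "\n", m->hash.rss);
		if (m->ol_flags & (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))
			printf("checksum flagged bad by hw\n");

		rte_pktmbuf_free(m);
	}
}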
*/ + if (rcd->v4) { + struct ether_hdr *eth = rte_pktmbuf_mtod(rxm, struct ether_hdr *); + struct ipv4_hdr *ip = (struct ipv4_hdr *)(eth + 1); + + if (((ip->version_ihl & 0xf) << 2) > (int)sizeof(struct ipv4_hdr)) + rxm->packet_type = RTE_PTYPE_L3_IPV4_EXT; + else + rxm->packet_type = RTE_PTYPE_L3_IPV4; + + if (!rcd->cnc) { + if (!rcd->ipc) + rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD; + + if ((rcd->tcp || rcd->udp) && !rcd->tuc) + rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD; + } + } +} + +/* + * Process the Rx Completion Ring of given vmxnet3_rx_queue + * for nb_pkts burst and return the number of packets received + */ +uint16_t +vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + uint16_t nb_rx; + uint32_t nb_rxd, idx; + uint8_t ring_idx; + vmxnet3_rx_queue_t *rxq; + Vmxnet3_RxCompDesc *rcd; + vmxnet3_buf_info_t *rbi; + Vmxnet3_RxDesc *rxd; + struct rte_mbuf *rxm = NULL; + struct vmxnet3_hw *hw; + + nb_rx = 0; + ring_idx = 0; + nb_rxd = 0; + idx = 0; + + rxq = rx_queue; + hw = rxq->hw; + + rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd; + + if (unlikely(rxq->stopped)) { + PMD_RX_LOG(DEBUG, "Rx queue is stopped."); + return 0; + } + + while (rcd->gen == rxq->comp_ring.gen) { + if (nb_rx >= nb_pkts) + break; + + idx = rcd->rxdIdx; + ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1); + rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx; + rbi = rxq->cmd_ring[ring_idx].buf_info + idx; + + PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx); + + VMXNET3_ASSERT(rcd->len <= rxd->len); + VMXNET3_ASSERT(rbi->m); + + /* Get the packet buffer pointer from buf_info */ + rxm = rbi->m; + + /* Clear descriptor associated buf_info to be reused */ + rbi->m = NULL; + rbi->bufPA = 0; + + /* Update the index that we received a packet */ + rxq->cmd_ring[ring_idx].next2comp = idx; + + /* For RCD with EOP set, check if there is frame error */ + if (unlikely(rcd->eop && rcd->err)) { + rxq->stats.drop_total++; + rxq->stats.drop_err++; + + if (!rcd->fcs) { + rxq->stats.drop_fcs++; + PMD_RX_LOG(ERR, "Recv packet dropped due to frame err."); + } + PMD_RX_LOG(ERR, "Error in received packet rcd#:%d rxd:%d", + (int)(rcd - (struct Vmxnet3_RxCompDesc *) + rxq->comp_ring.base), rcd->rxdIdx); + rte_pktmbuf_free_seg(rxm); + goto rcd_done; + } + + + /* Initialize newly received packet buffer */ + rxm->port = rxq->port_id; + rxm->nb_segs = 1; + rxm->next = NULL; + rxm->pkt_len = (uint16_t)rcd->len; + rxm->data_len = (uint16_t)rcd->len; + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rxm->ol_flags = 0; + rxm->vlan_tci = 0; + + /* + * If this is the first buffer of the received packet, + * set the pointer to the first mbuf of the packet + * Otherwise, update the total length and the number of segments + * of the current scattered packet, and update the pointer to + * the last mbuf of the current packet. + */ + if (rcd->sop) { + VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD); + + if (unlikely(rcd->len == 0)) { + VMXNET3_ASSERT(rcd->eop); + + PMD_RX_LOG(DEBUG, + "Rx buf was skipped. 
rxring[%d][%d])", + ring_idx, idx); + rte_pktmbuf_free_seg(rxm); + goto rcd_done; + } + + rxq->start_seg = rxm; + vmxnet3_rx_offload(rcd, rxm); + } else { + struct rte_mbuf *start = rxq->start_seg; + + VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY); + + start->pkt_len += rxm->data_len; + start->nb_segs++; + + rxq->last_seg->next = rxm; + } + rxq->last_seg = rxm; + + if (rcd->eop) { + rx_pkts[nb_rx++] = rxq->start_seg; + rxq->start_seg = NULL; + } + +rcd_done: + rxq->cmd_ring[ring_idx].next2comp = idx; + VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp, rxq->cmd_ring[ring_idx].size); + + /* It's time to allocate some new buf and renew descriptors */ + vmxnet3_post_rx_bufs(rxq, ring_idx); + if (unlikely(rxq->shared->ctrl.updateRxProd)) { + VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN), + rxq->cmd_ring[ring_idx].next2fill); + } + + /* Advance to the next descriptor in comp_ring */ + vmxnet3_comp_ring_adv_next2proc(&rxq->comp_ring); + + rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd; + nb_rxd++; + if (nb_rxd > rxq->cmd_ring[0].size) { + PMD_RX_LOG(ERR, + "Used up quota of receiving packets," + " relinquish control."); + break; + } + } + + return nb_rx; +} + +/* + * Create memzone for device rings. malloc can't be used as the physical address is + * needed. If the memzone is already created, then this function returns a ptr + * to the old one. + */ +static const struct rte_memzone * +ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name, + uint16_t queue_id, uint32_t ring_size, int socket_id) +{ + char z_name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *mz; + + snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d", + dev->driver->pci_drv.name, ring_name, + dev->data->port_id, queue_id); + + mz = rte_memzone_lookup(z_name); + if (mz) + return mz; + + return rte_memzone_reserve_aligned(z_name, ring_size, + socket_id, 0, VMXNET3_RING_BA_ALIGN); +} + +int +vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + __attribute__((unused)) const struct rte_eth_txconf *tx_conf) +{ + struct vmxnet3_hw *hw = dev->data->dev_private; + const struct rte_memzone *mz; + struct vmxnet3_tx_queue *txq; + struct vmxnet3_cmd_ring *ring; + struct vmxnet3_comp_ring *comp_ring; + struct vmxnet3_data_ring *data_ring; + int size; + + PMD_INIT_FUNC_TRACE(); + + if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP) != + ETH_TXQ_FLAGS_NOXSUMSCTP) { + PMD_INIT_LOG(ERR, "SCTP checksum offload not supported"); + return -EINVAL; + } + + txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue), RTE_CACHE_LINE_SIZE); + if (txq == NULL) { + PMD_INIT_LOG(ERR, "Can not allocate tx queue structure"); + return -ENOMEM; + } + + txq->queue_id = queue_idx; + txq->port_id = dev->data->port_id; + txq->shared = &hw->tqd_start[queue_idx]; + txq->hw = hw; + txq->qid = queue_idx; + txq->stopped = TRUE; + + ring = &txq->cmd_ring; + comp_ring = &txq->comp_ring; + data_ring = &txq->data_ring; + + /* Tx vmxnet ring length should be between 512-4096 */ + if (nb_desc < VMXNET3_DEF_TX_RING_SIZE) { + PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Min: %u", + VMXNET3_DEF_TX_RING_SIZE); + return -EINVAL; + } else if (nb_desc > VMXNET3_TX_RING_MAX_SIZE) { + PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Max: %u", + VMXNET3_TX_RING_MAX_SIZE); + return -EINVAL; + } else { + ring->size = nb_desc; + ring->size &= ~VMXNET3_RING_SIZE_MASK; + } + comp_ring->size = data_ring->size = ring->size; + + /* Tx vmxnet rings 
structure initialization*/ + ring->next2fill = 0; + ring->next2comp = 0; + ring->gen = VMXNET3_INIT_GEN; + comp_ring->next2proc = 0; + comp_ring->gen = VMXNET3_INIT_GEN; + + size = sizeof(struct Vmxnet3_TxDesc) * ring->size; + size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size; + size += sizeof(struct Vmxnet3_TxDataDesc) * data_ring->size; + + mz = ring_dma_zone_reserve(dev, "txdesc", queue_idx, size, socket_id); + if (mz == NULL) { + PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone"); + return -ENOMEM; + } + memset(mz->addr, 0, mz->len); + + /* cmd_ring initialization */ + ring->base = mz->addr; + ring->basePA = mz->phys_addr; + + /* comp_ring initialization */ + comp_ring->base = ring->base + ring->size; + comp_ring->basePA = ring->basePA + + (sizeof(struct Vmxnet3_TxDesc) * ring->size); + + /* data_ring initialization */ + data_ring->base = (Vmxnet3_TxDataDesc *)(comp_ring->base + comp_ring->size); + data_ring->basePA = comp_ring->basePA + + (sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size); + + /* cmd_ring0 buf_info allocation */ + ring->buf_info = rte_zmalloc("tx_ring_buf_info", + ring->size * sizeof(vmxnet3_buf_info_t), RTE_CACHE_LINE_SIZE); + if (ring->buf_info == NULL) { + PMD_INIT_LOG(ERR, "ERROR: Creating tx_buf_info structure"); + return -ENOMEM; + } + + /* Update the data portion with txq */ + dev->data->tx_queues[queue_idx] = txq; + + return 0; +} + +int +vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + __attribute__((unused)) const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + const struct rte_memzone *mz; + struct vmxnet3_rx_queue *rxq; + struct vmxnet3_hw *hw = dev->data->dev_private; + struct vmxnet3_cmd_ring *ring0, *ring1, *ring; + struct vmxnet3_comp_ring *comp_ring; + int size; + uint8_t i; + char mem_name[32]; + + PMD_INIT_FUNC_TRACE(); + + rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue), RTE_CACHE_LINE_SIZE); + if (rxq == NULL) { + PMD_INIT_LOG(ERR, "Can not allocate rx queue structure"); + return -ENOMEM; + } + + rxq->mp = mp; + rxq->queue_id = queue_idx; + rxq->port_id = dev->data->port_id; + rxq->shared = &hw->rqd_start[queue_idx]; + rxq->hw = hw; + rxq->qid1 = queue_idx; + rxq->qid2 = queue_idx + hw->num_rx_queues; + rxq->stopped = TRUE; + + ring0 = &rxq->cmd_ring[0]; + ring1 = &rxq->cmd_ring[1]; + comp_ring = &rxq->comp_ring; + + /* Rx vmxnet rings length should be between 256-4096 */ + if (nb_desc < VMXNET3_DEF_RX_RING_SIZE) { + PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Min: 256"); + return -EINVAL; + } else if (nb_desc > VMXNET3_RX_RING_MAX_SIZE) { + PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Max: 4096"); + return -EINVAL; + } else { + ring0->size = nb_desc; + ring0->size &= ~VMXNET3_RING_SIZE_MASK; + ring1->size = ring0->size; + } + + comp_ring->size = ring0->size + ring1->size; + + /* Rx vmxnet rings structure initialization */ + ring0->next2fill = 0; + ring1->next2fill = 0; + ring0->next2comp = 0; + ring1->next2comp = 0; + ring0->gen = VMXNET3_INIT_GEN; + ring1->gen = VMXNET3_INIT_GEN; + comp_ring->next2proc = 0; + comp_ring->gen = VMXNET3_INIT_GEN; + + size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size); + size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size; + + mz = ring_dma_zone_reserve(dev, "rxdesc", queue_idx, size, socket_id); + if (mz == NULL) { + PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone"); + return -ENOMEM; + } + memset(mz->addr, 0, mz->len); + + /* cmd_ring0 initialization */ + 
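/*
 * The single memzone reserved above is carved up in place: ring0
 * descriptors first, then ring1 descriptors, then the completion ring.
 * Each piece records both its virtual base and its physical base
 * (basePA) at the same offset, because the device is programmed with
 * guest-physical addresses while the driver walks the rings through the
 * virtual mapping.  The TX queue setup above lays out its command,
 * completion and data rings the same way.
 */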
ring0->base = mz->addr; + ring0->basePA = mz->phys_addr; + + /* cmd_ring1 initialization */ + ring1->base = ring0->base + ring0->size; + ring1->basePA = ring0->basePA + sizeof(struct Vmxnet3_RxDesc) * ring0->size; + + /* comp_ring initialization */ + comp_ring->base = ring1->base + ring1->size; + comp_ring->basePA = ring1->basePA + sizeof(struct Vmxnet3_RxDesc) * + ring1->size; + + /* cmd_ring0-cmd_ring1 buf_info allocation */ + for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) { + + ring = &rxq->cmd_ring[i]; + ring->rid = i; + snprintf(mem_name, sizeof(mem_name), "rx_ring_%d_buf_info", i); + + ring->buf_info = rte_zmalloc(mem_name, ring->size * sizeof(vmxnet3_buf_info_t), RTE_CACHE_LINE_SIZE); + if (ring->buf_info == NULL) { + PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure"); + return -ENOMEM; + } + } + + /* Update the data portion with rxq */ + dev->data->rx_queues[queue_idx] = rxq; + + return 0; +} + +/* + * Initializes Receive Unit + * Load mbufs in rx queue in advance + */ +int +vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev) +{ + struct vmxnet3_hw *hw = dev->data->dev_private; + + int i, ret; + uint8_t j; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < hw->num_rx_queues; i++) { + vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i]; + + for (j = 0; j < VMXNET3_RX_CMDRING_SIZE; j++) { + /* Passing 0 as alloc_num will allocate full ring */ + ret = vmxnet3_post_rx_bufs(rxq, j); + if (ret <= 0) { + PMD_INIT_LOG(ERR, "ERROR: Posting Rxq: %d buffers ring: %d", i, j); + return -ret; + } + /* Updating device with the index:next2fill to fill the mbufs for coming packets */ + if (unlikely(rxq->shared->ctrl.updateRxProd)) { + VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[j] + (rxq->queue_id * VMXNET3_REG_ALIGN), + rxq->cmd_ring[j].next2fill); + } + } + rxq->stopped = FALSE; + rxq->start_seg = NULL; + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i]; + + txq->stopped = FALSE; + } + + return 0; +} + +static uint8_t rss_intel_key[40] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, + 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, + 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA, +}; + +/* + * Configure RSS feature + */ +int +vmxnet3_rss_configure(struct rte_eth_dev *dev) +{ + struct vmxnet3_hw *hw = dev->data->dev_private; + struct VMXNET3_RSSConf *dev_rss_conf; + struct rte_eth_rss_conf *port_rss_conf; + uint64_t rss_hf; + uint8_t i, j; + + PMD_INIT_FUNC_TRACE(); + + dev_rss_conf = hw->rss_conf; + port_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf; + + /* loading hashFunc */ + dev_rss_conf->hashFunc = VMXNET3_RSS_HASH_FUNC_TOEPLITZ; + /* loading hashKeySize */ + dev_rss_conf->hashKeySize = VMXNET3_RSS_MAX_KEY_SIZE; + /* loading indTableSize : Must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128)*/ + dev_rss_conf->indTableSize = (uint16_t)(hw->num_rx_queues * 4); + + if (port_rss_conf->rss_key == NULL) { + /* Default hash key */ + port_rss_conf->rss_key = rss_intel_key; + } + + /* loading hashKey */ + memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key, dev_rss_conf->hashKeySize); + + /* loading indTable */ + for (i = 0, j = 0; i < dev_rss_conf->indTableSize; i++, j++) { + if (j == dev->data->nb_rx_queues) + j = 0; + dev_rss_conf->indTable[i] = j; + } + + /* loading hashType */ + dev_rss_conf->hashType = 0; + rss_hf = port_rss_conf->rss_hf & VMXNET3_RSS_OFFLOAD_ALL; + if (rss_hf & ETH_RSS_IPV4) + dev_rss_conf->hashType |= 
VMXNET3_RSS_HASH_TYPE_IPV4; + if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) + dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV4; + if (rss_hf & ETH_RSS_IPV6) + dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV6; + if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) + dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV6; + + return VMXNET3_SUCCESS; +} diff --git a/drivers/net/xenvirt/Makefile b/drivers/net/xenvirt/Makefile new file mode 100644 index 00000000..1d05b71b --- /dev/null +++ b/drivers/net/xenvirt/Makefile @@ -0,0 +1,63 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2014 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_xenvirt.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +LDLIBS += -lxenstore + +EXPORT_MAP := rte_eth_xenvirt_version.map + +LIBABIVER := 1 + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += rte_eth_xenvirt.c rte_mempool_gntalloc.c rte_xen_lib.c + +# +# Export include files +# +SYMLINK-y-include += rte_eth_xenvirt.h + +# this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += lib/librte_eal lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += lib/librte_mempool lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += lib/librte_net +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += lib/librte_cmdline + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/xenvirt/rte_eth_xenvirt.c b/drivers/net/xenvirt/rte_eth_xenvirt.c new file mode 100644 index 00000000..b9638d96 --- /dev/null +++ b/drivers/net/xenvirt/rte_eth_xenvirt.c @@ -0,0 +1,778 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
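Returning briefly to the vmxnet3 RSS setup that closes vmxnet3_rxtx.c above: the indirection table is filled round-robin over the configured RX queues, so consecutive hash buckets are spread evenly. A stand-alone illustration (queue count and table size are made up; the driver uses num_rx_queues * 4 entries, which, as its comment notes, must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint16_t ind_table_size = 12; /* illustrative; driver uses num_rx_queues * 4 */
	const uint16_t nb_rx_queues = 3;    /* illustrative */
	uint8_t ind_table[12];
	uint16_t i, j;

	/* Same round-robin fill as vmxnet3_rss_configure(). */
	for (i = 0, j = 0; i < ind_table_size; i++, j++) {
		if (j == nb_rx_queues)
			j = 0;
		ind_table[i] = (uint8_t)j;
	}

	for (i = 0; i < ind_table_size; i++)
		printf("hash bucket %2u -> rx queue %u\n", i, ind_table[i]);
	return 0;
}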
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <stdint.h> +#include <unistd.h> +#include <stdlib.h> +#include <string.h> +#include <sys/types.h> +#include <sys/mman.h> +#include <errno.h> +#include <sys/user.h> +#include <linux/binfmts.h> +#include <xen/xen-compat.h> +#if __XEN_LATEST_INTERFACE_VERSION__ < 0x00040200 +#include <xs.h> +#else +#include <xenstore.h> +#endif +#include <linux/virtio_ring.h> + +#include <rte_mbuf.h> +#include <rte_ethdev.h> +#include <rte_malloc.h> +#include <rte_memcpy.h> +#include <rte_string_fns.h> +#include <rte_dev.h> +#include <cmdline_parse.h> +#include <cmdline_parse_etheraddr.h> + +#include "rte_xen_lib.h" +#include "virtqueue.h" +#include "rte_eth_xenvirt.h" + +#define VQ_DESC_NUM 256 +#define VIRTIO_MBUF_BURST_SZ 64 + +/* virtio_idx is increased after new device is created.*/ +static int virtio_idx = 0; + +static const char *drivername = "xen virtio PMD"; + +static struct rte_eth_link pmd_link = { + .link_speed = ETH_SPEED_NUM_10G, + .link_duplex = ETH_LINK_FULL_DUPLEX, + .link_status = ETH_LINK_DOWN, + .link_autoneg = ETH_LINK_SPEED_FIXED +}; + +static void +eth_xenvirt_free_queues(struct rte_eth_dev *dev); + +static inline struct rte_mbuf * +rte_rxmbuf_alloc(struct rte_mempool *mp) +{ + struct rte_mbuf *m; + + m = __rte_mbuf_raw_alloc(mp); + __rte_mbuf_sanity_check_raw(m, 0); + + return m; +} + + +static uint16_t +eth_xenvirt_rx(void *q, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct virtqueue *rxvq = q; + struct rte_mbuf *rxm, *new_mbuf; + uint16_t nb_used, num; + uint32_t len[VIRTIO_MBUF_BURST_SZ]; + uint32_t i; + struct pmd_internals *pi = rxvq->internals; + + nb_used = VIRTQUEUE_NUSED(rxvq); + + rte_smp_rmb(); + num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts); + num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? 
num : VIRTIO_MBUF_BURST_SZ); + if (unlikely(num == 0)) return 0; + + num = virtqueue_dequeue_burst(rxvq, rx_pkts, len, num); + PMD_RX_LOG(DEBUG, "used:%d dequeue:%d\n", nb_used, num); + for (i = 0; i < num ; i ++) { + rxm = rx_pkts[i]; + PMD_RX_LOG(DEBUG, "packet len:%d\n", len[i]); + rxm->next = NULL; + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rxm->data_len = (uint16_t)(len[i] - sizeof(struct virtio_net_hdr)); + rxm->nb_segs = 1; + rxm->port = pi->port_id; + rxm->pkt_len = (uint32_t)(len[i] - sizeof(struct virtio_net_hdr)); + } + /* allocate new mbuf for the used descriptor */ + while (likely(!virtqueue_full(rxvq))) { + new_mbuf = rte_rxmbuf_alloc(rxvq->mpool); + if (unlikely(new_mbuf == NULL)) { + break; + } + if (unlikely(virtqueue_enqueue_recv_refill(rxvq, new_mbuf))) { + rte_pktmbuf_free_seg(new_mbuf); + break; + } + } + pi->eth_stats.ipackets += num; + return num; +} + +static uint16_t +eth_xenvirt_tx(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct virtqueue *txvq = tx_queue; + struct rte_mbuf *txm; + uint16_t nb_used, nb_tx, num, i; + int error; + uint32_t len[VIRTIO_MBUF_BURST_SZ]; + struct rte_mbuf *snd_pkts[VIRTIO_MBUF_BURST_SZ]; + struct pmd_internals *pi = txvq->internals; + + nb_tx = 0; + + if (unlikely(nb_pkts == 0)) + return 0; + + PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts); + nb_used = VIRTQUEUE_NUSED(txvq); + + rte_smp_rmb(); + + num = (uint16_t)(likely(nb_used <= VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ); + num = virtqueue_dequeue_burst(txvq, snd_pkts, len, num); + + for (i = 0; i < num ; i ++) { + /* mergable not supported, one segment only */ + rte_pktmbuf_free_seg(snd_pkts[i]); + } + + while (nb_tx < nb_pkts) { + if (likely(!virtqueue_full(txvq))) { + /* TODO drop tx_pkts if it contains multiple segments */ + txm = tx_pkts[nb_tx]; + error = virtqueue_enqueue_xmit(txvq, txm); + if (unlikely(error)) { + if (error == ENOSPC) + PMD_TX_LOG(ERR, "virtqueue_enqueue Free count = 0\n"); + else if (error == EMSGSIZE) + PMD_TX_LOG(ERR, "virtqueue_enqueue Free count < 1\n"); + else + PMD_TX_LOG(ERR, "virtqueue_enqueue error: %d\n", error); + break; + } + nb_tx++; + } else { + PMD_TX_LOG(ERR, "No free tx descriptors to transmit\n"); + /* virtqueue_notify not needed in our para-virt solution */ + break; + } + } + pi->eth_stats.opackets += nb_tx; + return nb_tx; +} + +static int +eth_dev_configure(struct rte_eth_dev *dev __rte_unused) +{ + RTE_LOG(ERR, PMD, "%s\n", __func__); + return 0; +} + +/* + * Create a shared page between guest and host. + * Host monitors this page if it is cleared on unmap, and then + * do necessary clean up. + */ +static void +gntalloc_vring_flag(int vtidx) +{ + char key_str[PATH_MAX]; + char val_str[PATH_MAX]; + uint32_t gref_tmp; + void *ptr; + + if (grefwatch_from_alloc(&gref_tmp, &ptr)) { + RTE_LOG(ERR, PMD, "grefwatch_from_alloc error\n"); + exit(0); + } + + *(uint8_t *)ptr = MAP_FLAG; + snprintf(val_str, sizeof(val_str), "%u", gref_tmp); + snprintf(key_str, sizeof(key_str), + DPDK_XENSTORE_PATH"%d"VRING_FLAG_STR, vtidx); + xenstore_write(key_str, val_str); +} + +/* + * Notify host this virtio device is started. + * Host could start polling this device. 
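One practical note on eth_xenvirt_tx() above: as its TODO remarks, each mbuf is enqueued as a single buffer, so multi-segment packets are not handled. A hedged sketch of a caller-side guard (the helper name is invented; a real application might instead coalesce the segments into one mbuf):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Drop (or otherwise handle) chained mbufs before handing the burst to
 * the xenvirt PMD, which only transmits single-segment packets. */
static uint16_t
tx_burst_single_seg(uint8_t port_id, uint16_t queue_id,
		    struct rte_mbuf **pkts, uint16_t nb)
{
	uint16_t i, kept = 0;

	for (i = 0; i < nb; i++) {
		if (pkts[i]->nb_segs == 1)
			pkts[kept++] = pkts[i];
		else
			rte_pktmbuf_free(pkts[i]);
	}

	return rte_eth_tx_burst(port_id, queue_id, pkts, kept);
}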
+ */ +static void +dev_start_notify(int vtidx) +{ + char key_str[PATH_MAX]; + char val_str[PATH_MAX]; + + RTE_LOG(INFO, PMD, "%s: virtio %d is started\n", __func__, vtidx); + gntalloc_vring_flag(vtidx); + + snprintf(key_str, sizeof(key_str), "%s%s%d", + DPDK_XENSTORE_PATH, EVENT_TYPE_START_STR, + vtidx); + snprintf(val_str, sizeof(val_str), "1"); + xenstore_write(key_str, val_str); +} + +/* + * Notify host this virtio device is stopped. + * Host could stop polling this device. + */ +static void +dev_stop_notify(int vtidx) +{ + RTE_SET_USED(vtidx); +} + + +static int +update_mac_address(struct ether_addr *mac_addrs, int vtidx) +{ + char key_str[PATH_MAX]; + char val_str[PATH_MAX]; + int rv; + + if (mac_addrs == NULL) { + RTE_LOG(ERR, PMD, "%s: NULL pointer mac specified\n", __func__); + return -1; + } + rv = snprintf(key_str, sizeof(key_str), + DPDK_XENSTORE_PATH"%d_ether_addr", vtidx); + if (rv == -1) + return rv; + rv = snprintf(val_str, sizeof(val_str), "%02x:%02x:%02x:%02x:%02x:%02x", + mac_addrs->addr_bytes[0], + mac_addrs->addr_bytes[1], + mac_addrs->addr_bytes[2], + mac_addrs->addr_bytes[3], + mac_addrs->addr_bytes[4], + mac_addrs->addr_bytes[5]); + if (rv == -1) + return rv; + if (xenstore_write(key_str, val_str)) + return rv; + return 0; +} + + +static int +eth_dev_start(struct rte_eth_dev *dev) +{ + struct virtqueue *rxvq = dev->data->rx_queues[0]; + struct virtqueue *txvq = dev->data->tx_queues[0]; + struct rte_mbuf *m; + struct pmd_internals *pi = (struct pmd_internals *)dev->data->dev_private; + int rv; + + dev->data->dev_link.link_status = ETH_LINK_UP; + while (!virtqueue_full(rxvq)) { + m = rte_rxmbuf_alloc(rxvq->mpool); + if (m == NULL) + break; + /* Enqueue allocated buffers. */ + if (virtqueue_enqueue_recv_refill(rxvq, m)) { + rte_pktmbuf_free_seg(m); + break; + } + } + + rxvq->internals = pi; + txvq->internals = pi; + + rv = update_mac_address(dev->data->mac_addrs, pi->virtio_idx); + if (rv) + return -1; + dev_start_notify(pi->virtio_idx); + + return 0; +} + +static void +eth_dev_stop(struct rte_eth_dev *dev) +{ + struct pmd_internals *pi = (struct pmd_internals *)dev->data->dev_private; + + dev->data->dev_link.link_status = ETH_LINK_DOWN; + dev_stop_notify(pi->virtio_idx); +} + +/* + * Notify host this virtio device is closed. + * Host could do necessary clean up to this device. 
+ */ +static void +eth_dev_close(struct rte_eth_dev *dev) +{ + eth_xenvirt_free_queues(dev); +} + +static void +eth_dev_info(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct pmd_internals *internals = dev->data->dev_private; + + RTE_SET_USED(internals); + dev_info->driver_name = drivername; + dev_info->max_mac_addrs = 1; + dev_info->max_rx_pktlen = (uint32_t)2048; + dev_info->max_rx_queues = (uint16_t)1; + dev_info->max_tx_queues = (uint16_t)1; + dev_info->min_rx_bufsize = 0; + dev_info->pci_dev = NULL; +} + +static void +eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct pmd_internals *internals = dev->data->dev_private; + if(stats) + rte_memcpy(stats, &internals->eth_stats, sizeof(*stats)); +} + +static void +eth_stats_reset(struct rte_eth_dev *dev) +{ + struct pmd_internals *internals = dev->data->dev_private; + /* Reset software totals */ + memset(&internals->eth_stats, 0, sizeof(internals->eth_stats)); +} + +static void +eth_queue_release(void *q) +{ + rte_free(q); +} + +static int +eth_link_update(struct rte_eth_dev *dev __rte_unused, + int wait_to_complete __rte_unused) +{ + return 0; +} + +/* + * Create shared vring between guest and host. + * Memory is allocated through grant alloc driver, so it is not physical continuous. + */ +static void * +gntalloc_vring_create(int queue_type, uint32_t size, int vtidx) +{ + char key_str[PATH_MAX] = {0}; + char val_str[PATH_MAX] = {0}; + void *va = NULL; + int pg_size; + uint32_t pg_num; + uint32_t *gref_arr = NULL; + phys_addr_t *pa_arr = NULL; + uint64_t start_index; + int rv; + + pg_size = getpagesize(); + size = RTE_ALIGN_CEIL(size, pg_size); + pg_num = size / pg_size; + + gref_arr = calloc(pg_num, sizeof(gref_arr[0])); + pa_arr = calloc(pg_num, sizeof(pa_arr[0])); + + if (gref_arr == NULL || pa_arr == NULL) { + RTE_LOG(ERR, PMD, "%s: calloc failed\n", __func__); + goto out; + } + + va = gntalloc(size, gref_arr, &start_index); + if (va == NULL) { + RTE_LOG(ERR, PMD, "%s: gntalloc failed\n", __func__); + goto out; + } + + if (get_phys_map(va, pa_arr, pg_num, pg_size)) + goto out; + + /* write in xenstore gref and pfn for each page of vring */ + if (grant_node_create(pg_num, gref_arr, pa_arr, val_str, sizeof(val_str))) { + gntfree(va, size, start_index); + va = NULL; + goto out; + } + + if (queue_type == VTNET_RQ) + rv = snprintf(key_str, sizeof(key_str), DPDK_XENSTORE_PATH"%d"RXVRING_XENSTORE_STR, vtidx); + else + rv = snprintf(key_str, sizeof(key_str), DPDK_XENSTORE_PATH"%d"TXVRING_XENSTORE_STR, vtidx); + if (rv == -1 || xenstore_write(key_str, val_str) == -1) { + gntfree(va, size, start_index); + va = NULL; + } +out: + free(pa_arr); + free(gref_arr); + + return va; +} + + + +static struct virtqueue * +virtio_queue_setup(struct rte_eth_dev *dev, int queue_type) +{ + struct virtqueue *vq = NULL; + uint16_t vq_size = VQ_DESC_NUM; + int i = 0; + char vq_name[VIRTQUEUE_MAX_NAME_SZ]; + size_t size; + struct vring *vr; + + /* Allocate memory for virtqueue. 
*/ + if (queue_type == VTNET_RQ) { + snprintf(vq_name, sizeof(vq_name), "port%d_rvq", + dev->data->port_id); + vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) + + vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE); + if (vq == NULL) { + RTE_LOG(ERR, PMD, "%s: unabled to allocate virtqueue\n", __func__); + return NULL; + } + memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name)); + } else if(queue_type == VTNET_TQ) { + snprintf(vq_name, sizeof(vq_name), "port%d_tvq", + dev->data->port_id); + vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) + + vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE); + if (vq == NULL) { + RTE_LOG(ERR, PMD, "%s: unabled to allocate virtqueue\n", __func__); + return NULL; + } + memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name)); + } + + memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name)); + + vq->vq_alignment = VIRTIO_PCI_VRING_ALIGN; + vq->vq_nentries = vq_size; + vq->vq_free_cnt = vq_size; + /* Calcuate vring size according to virtio spec */ + size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN); + vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN); + /* Allocate memory for virtio vring through gntalloc driver*/ + vq->vq_ring_virt_mem = gntalloc_vring_create(queue_type, vq->vq_ring_size, + ((struct pmd_internals *)dev->data->dev_private)->virtio_idx); + memset(vq->vq_ring_virt_mem, 0, vq->vq_ring_size); + vr = &vq->vq_ring; + vring_init(vr, vq_size, vq->vq_ring_virt_mem, vq->vq_alignment); + /* + * Locally maintained last consumed index, this idex trails + * vq_ring.used->idx. + */ + vq->vq_used_cons_idx = 0; + vq->vq_desc_head_idx = 0; + vq->vq_free_cnt = vq->vq_nentries; + memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries); + + /* Chain all the descriptors in the ring with an END */ + for (i = 0; i < vq_size - 1; i++) + vr->desc[i].next = (uint16_t)(i + 1); + vr->desc[i].next = VQ_RING_DESC_CHAIN_END; + + return vq; +} + +static int +eth_rx_queue_setup(struct rte_eth_dev *dev,uint16_t rx_queue_id, + uint16_t nb_rx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mb_pool) +{ + struct virtqueue *vq; + vq = dev->data->rx_queues[rx_queue_id] = virtio_queue_setup(dev, VTNET_RQ); + vq->mpool = mb_pool; + return 0; +} + +static int +eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + dev->data->tx_queues[tx_queue_id] = virtio_queue_setup(dev, VTNET_TQ); + return 0; +} + +static void +eth_xenvirt_free_queues(struct rte_eth_dev *dev) +{ + int i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + eth_queue_release(dev->data->rx_queues[i]); + dev->data->rx_queues[i] = NULL; + } + dev->data->nb_rx_queues = 0; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + eth_queue_release(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = NULL; + } + dev->data->nb_tx_queues = 0; +} + +static const struct eth_dev_ops ops = { + .dev_start = eth_dev_start, + .dev_stop = eth_dev_stop, + .dev_close = eth_dev_close, + .dev_configure = eth_dev_configure, + .dev_infos_get = eth_dev_info, + .rx_queue_setup = eth_rx_queue_setup, + .tx_queue_setup = eth_tx_queue_setup, + .rx_queue_release = eth_queue_release, + .tx_queue_release = eth_queue_release, + .link_update = eth_link_update, + .stats_get = eth_stats_get, + .stats_reset = eth_stats_reset, +}; + + +static int +rte_eth_xenvirt_parse_args(struct xenvirt_dict 
*dict, + const char *name, const char *params) +{ + int i; + char *pairs[RTE_ETH_XENVIRT_MAX_ARGS]; + int num_of_pairs; + char *pair[2]; + char *args; + int ret = -1; + + if (params == NULL) + return 0; + + args = rte_zmalloc(NULL, strlen(params) + 1, RTE_CACHE_LINE_SIZE); + if (args == NULL) { + RTE_LOG(ERR, PMD, "Couldn't parse %s device \n", name); + return -1; + } + rte_memcpy(args, params, strlen(params)); + + num_of_pairs = rte_strsplit(args, strnlen(args, MAX_ARG_STRLEN), + pairs, + RTE_ETH_XENVIRT_MAX_ARGS , + RTE_ETH_XENVIRT_PAIRS_DELIM); + + for (i = 0; i < num_of_pairs; i++) { + pair[0] = NULL; + pair[1] = NULL; + rte_strsplit(pairs[i], strnlen(pairs[i], MAX_ARG_STRLEN), + pair, 2, + RTE_ETH_XENVIRT_KEY_VALUE_DELIM); + + if (pair[0] == NULL || pair[1] == NULL || pair[0][0] == 0 + || pair[1][0] == 0) { + RTE_LOG(ERR, PMD, + "Couldn't parse %s device," + "wrong key or value \n", name); + goto err; + } + + if (!strncmp(pair[0], RTE_ETH_XENVIRT_MAC_PARAM, + sizeof(RTE_ETH_XENVIRT_MAC_PARAM))) { + if (cmdline_parse_etheraddr(NULL, + pair[1], + &dict->addr, + sizeof(dict->addr)) < 0) { + RTE_LOG(ERR, PMD, + "Invalid %s device ether address\n", + name); + goto err; + } + + dict->addr_valid = 1; + } + } + + ret = 0; +err: + rte_free(args); + return ret; +} + +enum dev_action { + DEV_CREATE, + DEV_ATTACH +}; + + +static int +eth_dev_xenvirt_create(const char *name, const char *params, + const unsigned numa_node, + enum dev_action action) +{ + struct rte_eth_dev_data *data = NULL; + struct pmd_internals *internals = NULL; + struct rte_eth_dev *eth_dev = NULL; + struct xenvirt_dict dict; + + memset(&dict, 0, sizeof(struct xenvirt_dict)); + + RTE_LOG(INFO, PMD, "Creating virtio rings backed ethdev on numa socket %u\n", + numa_node); + RTE_SET_USED(action); + + if (rte_eth_xenvirt_parse_args(&dict, name, params) < 0) { + RTE_LOG(ERR, PMD, "%s: Failed to parse ethdev parameters\n", __func__); + return -1; + } + + /* now do all data allocation - for eth_dev structure, dummy pci driver + * and internal (private) data + */ + data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node); + if (data == NULL) + goto err; + + internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node); + if (internals == NULL) + goto err; + + /* reserve an ethdev entry */ + eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL); + if (eth_dev == NULL) + goto err; + + data->dev_private = internals; + data->port_id = eth_dev->data->port_id; + data->nb_rx_queues = (uint16_t)1; + data->nb_tx_queues = (uint16_t)1; + data->dev_link = pmd_link; + data->mac_addrs = rte_zmalloc("xen_virtio", ETHER_ADDR_LEN, 0); + + if(dict.addr_valid) + memcpy(&data->mac_addrs->addr_bytes, &dict.addr, sizeof(struct ether_addr)); + else + eth_random_addr(&data->mac_addrs->addr_bytes[0]); + + eth_dev->data = data; + eth_dev->dev_ops = &ops; + + eth_dev->data->dev_flags = RTE_PCI_DRV_DETACHABLE; + eth_dev->data->kdrv = RTE_KDRV_NONE; + eth_dev->data->drv_name = drivername; + eth_dev->driver = NULL; + eth_dev->data->numa_node = numa_node; + + eth_dev->rx_pkt_burst = eth_xenvirt_rx; + eth_dev->tx_pkt_burst = eth_xenvirt_tx; + + internals->virtio_idx = virtio_idx++; + internals->port_id = eth_dev->data->port_id; + + return 0; + +err: + rte_free(data); + rte_free(internals); + + return -1; +} + + +static int +eth_dev_xenvirt_free(const char *name, const unsigned numa_node) +{ + struct rte_eth_dev *eth_dev = NULL; + + RTE_LOG(DEBUG, PMD, + "Free virtio rings backed ethdev on numa socket %u\n", + numa_node); + + /* find an ethdev entry 
*/ + eth_dev = rte_eth_dev_allocated(name); + if (eth_dev == NULL) + return -1; + + if (eth_dev->data->dev_started == 1) { + eth_dev_stop(eth_dev); + eth_dev_close(eth_dev); + } + + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + eth_dev->dev_ops = NULL; + + rte_free(eth_dev->data); + rte_free(eth_dev->data->dev_private); + rte_free(eth_dev->data->mac_addrs); + + virtio_idx--; + + return 0; +} + +/*TODO: Support multiple process model */ +static int +rte_pmd_xenvirt_devinit(const char *name, const char *params) +{ + if (virtio_idx == 0) { + if (xenstore_init() != 0) { + RTE_LOG(ERR, PMD, "%s: xenstore init failed\n", __func__); + return -1; + } + if (gntalloc_open() != 0) { + RTE_LOG(ERR, PMD, "%s: grant init failed\n", __func__); + return -1; + } + } + eth_dev_xenvirt_create(name, params, rte_socket_id(), DEV_CREATE); + return 0; +} + +static int +rte_pmd_xenvirt_devuninit(const char *name) +{ + eth_dev_xenvirt_free(name, rte_socket_id()); + + if (virtio_idx == 0) { + if (xenstore_uninit() != 0) + RTE_LOG(ERR, PMD, "%s: xenstore uninit failed\n", __func__); + + gntalloc_close(); + } + return 0; +} + +static struct rte_driver pmd_xenvirt_drv = { + .name = "eth_xenvirt", + .type = PMD_VDEV, + .init = rte_pmd_xenvirt_devinit, + .uninit = rte_pmd_xenvirt_devuninit, +}; + +PMD_REGISTER_DRIVER(pmd_xenvirt_drv); diff --git a/drivers/net/xenvirt/rte_eth_xenvirt.h b/drivers/net/xenvirt/rte_eth_xenvirt.h new file mode 100644 index 00000000..fc15a636 --- /dev/null +++ b/drivers/net/xenvirt/rte_eth_xenvirt.h @@ -0,0 +1,62 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _RTE_ETH_XENVIRT_H_ +#define _RTE_ETH_XENVIRT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include <rte_mempool.h> +#include <rte_ring.h> + +/** + * Creates mempool for xen virtio PMD. + * This function uses memzone_reserve to allocate memory for meta data, + * and uses grant alloc driver to allocate memory for data area. 
+ * The input parameters are exactly the same as rte_mempool_create. + */ +struct rte_mempool * +rte_mempool_gntalloc_create(const char *name, unsigned elt_num, unsigned elt_size, + unsigned cache_size, unsigned private_data_size, + rte_mempool_ctor_t *mp_init, void *mp_init_arg, + rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg, + int socket_id, unsigned flags); + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/xenvirt/rte_eth_xenvirt_version.map b/drivers/net/xenvirt/rte_eth_xenvirt_version.map new file mode 100644 index 00000000..dd636f72 --- /dev/null +++ b/drivers/net/xenvirt/rte_eth_xenvirt_version.map @@ -0,0 +1,7 @@ +DPDK_2.0 { + global: + + rte_mempool_gntalloc_create; + + local: *; +}; diff --git a/drivers/net/xenvirt/rte_mempool_gntalloc.c b/drivers/net/xenvirt/rte_mempool_gntalloc.c new file mode 100644 index 00000000..7bfbfda3 --- /dev/null +++ b/drivers/net/xenvirt/rte_mempool_gntalloc.c @@ -0,0 +1,295 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <stdint.h> +#include <unistd.h> +#include <stdlib.h> +#include <sys/mman.h> +#include <sys/ioctl.h> +#include <string.h> +#include <xen/sys/gntalloc.h> + +#include <rte_common.h> +#include <rte_mempool.h> +#include <rte_memory.h> +#include <rte_errno.h> + +#include "rte_xen_lib.h" +#include "rte_eth_xenvirt.h" + +struct _gntarr { + uint32_t gref; + phys_addr_t pa; + uint64_t index; + void *va; +}; + +struct _mempool_gntalloc_info { + struct rte_mempool *mp; + uint32_t pg_num; + uint32_t *gref_arr; + phys_addr_t *pa_arr; + void *va; + uint32_t mempool_idx; + uint64_t start_index; +}; + + +static rte_atomic32_t global_xenvirt_mempool_idx = RTE_ATOMIC32_INIT(-1); + +static int +compare(const void *p1, const void *p2) +{ + return ((const struct _gntarr *)p1)->pa - ((const struct _gntarr *)p2)->pa; +} + + +static struct _mempool_gntalloc_info +_create_mempool(const char *name, unsigned elt_num, unsigned elt_size, + unsigned cache_size, unsigned private_data_size, + rte_mempool_ctor_t *mp_init, void *mp_init_arg, + rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg, + int socket_id, unsigned flags) +{ + struct _mempool_gntalloc_info mgi; + struct rte_mempool *mp = NULL; + struct rte_mempool_objsz objsz; + uint32_t pg_num, rpg_num, pg_shift, pg_sz; + char *va, *orig_va, *uv; /* uv: from which, the pages could be freed */ + ssize_t sz, usz; /* usz: unused size */ + /* + * for each page allocated through xen_gntalloc driver, + * gref_arr:stores grant references, + * pa_arr: stores physical address, + * gnt_arr: stores all meta dat + */ + uint32_t *gref_arr = NULL; + phys_addr_t *pa_arr = NULL; + struct _gntarr *gnt_arr = NULL; + /* start index of the grant referances, used for dealloc*/ + uint64_t start_index; + uint32_t i, j; + int rv = 0; + struct ioctl_gntalloc_dealloc_gref arg; + + mgi.mp = NULL; + va = orig_va = uv = NULL; + pg_num = rpg_num = 0; + sz = 0; + + pg_sz = getpagesize(); + if (rte_is_power_of_2(pg_sz) == 0) { + goto out; + } + pg_shift = rte_bsf32(pg_sz); + + rte_mempool_calc_obj_size(elt_size, flags, &objsz); + sz = rte_mempool_xmem_size(elt_num, objsz.total_size, pg_shift); + pg_num = sz >> pg_shift; + + pa_arr = calloc(pg_num, sizeof(pa_arr[0])); + gref_arr = calloc(pg_num, sizeof(gref_arr[0])); + gnt_arr = calloc(pg_num, sizeof(gnt_arr[0])); + if ((gnt_arr == NULL) || (gref_arr == NULL) || (pa_arr == NULL)) + goto out; + + /* grant index is continuous in ascending order */ + orig_va = gntalloc(sz, gref_arr, &start_index); + if (orig_va == NULL) + goto out; + + get_phys_map(orig_va, pa_arr, pg_num, pg_sz); + for (i = 0; i < pg_num; i++) { + gnt_arr[i].index = start_index + i * pg_sz; + gnt_arr[i].gref = gref_arr[i]; + gnt_arr[i].pa = pa_arr[i]; + gnt_arr[i].va = RTE_PTR_ADD(orig_va, i * pg_sz); + } + qsort(gnt_arr, pg_num, sizeof(struct _gntarr), compare); + + va = get_xen_virtual(sz, pg_sz); + if (va == NULL) { + goto out; + } + + /* + * map one by one, as index isn't continuous now. + * pg_num VMAs, doesn't linux has a limitation on this? 
+ */ + for (i = 0; i < pg_num; i++) { + /* update gref_arr and pa_arr after sort */ + gref_arr[i] = gnt_arr[i].gref; + pa_arr[i] = gnt_arr[i].pa; + gnt_arr[i].va = mmap(va + i * pg_sz, pg_sz, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_FIXED, gntalloc_fd, gnt_arr[i].index); + if ((gnt_arr[i].va == MAP_FAILED) || (gnt_arr[i].va != (va + i * pg_sz))) { + RTE_LOG(ERR, PMD, "failed to map %d pages\n", i); + goto mmap_failed; + } + } + + /* + * Check that allocated size is big enough to hold elt_num + * objects and a calcualte how many bytes are actually required. + */ + usz = rte_mempool_xmem_usage(va, elt_num, objsz.total_size, pa_arr, pg_num, pg_shift); + if (usz < 0) { + mp = NULL; + i = pg_num; + goto mmap_failed; + } else { + /* unmap unused pages if any */ + uv = RTE_PTR_ADD(va, usz); + if ((usz = va + sz - uv) > 0) { + + RTE_LOG(ERR, PMD, + "%s(%s): unmap unused %zu of %zu " + "mmaped bytes @%p orig:%p\n", + __func__, name, usz, sz, uv, va); + munmap(uv, usz); + i = (sz - usz) / pg_sz; + for (; i < pg_num; i++) { + arg.count = 1; + arg.index = gnt_arr[i].index; + rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg); + if (rv) { + /* shouldn't fail here */ + RTE_LOG(ERR, PMD, "va=%p pa=%"PRIu64"x index=%"PRIu64" %s\n", + gnt_arr[i].va, + gnt_arr[i].pa, + arg.index, strerror(errno)); + rte_panic("gntdealloc failed when freeing pages\n"); + } + } + + rpg_num = (sz - usz) >> pg_shift; + } else + rpg_num = pg_num; + + mp = rte_mempool_xmem_create(name, elt_num, elt_size, + cache_size, private_data_size, + mp_init, mp_init_arg, + obj_init, obj_init_arg, + socket_id, flags, va, pa_arr, rpg_num, pg_shift); + + RTE_VERIFY(elt_num == mp->size); + } + mgi.mp = mp; + mgi.pg_num = rpg_num; + mgi.gref_arr = gref_arr; + mgi.pa_arr = pa_arr; + if (mp) + mgi.mempool_idx = rte_atomic32_add_return(&global_xenvirt_mempool_idx, 1); + mgi.start_index = start_index; + mgi.va = va; + + if (mp == NULL) { + i = pg_num; + goto mmap_failed; + } + +/* + * unmap only, without deallocate grant reference. + * unused pages have already been unmaped, + * unmap twice will fail, but it is safe. 
+ */ +mmap_failed: + for (j = 0; j < i; j++) { + if (gnt_arr[i].va) + munmap(gnt_arr[i].va, pg_sz); + } +out: + free(gnt_arr); + if (orig_va) + munmap(orig_va, sz); + if (mp == NULL) { + free(gref_arr); + free(pa_arr); + + /* some gref has already been de-allocated from the list in the driver, + * so dealloc one by one, and it is safe to deallocate twice + */ + if (orig_va) { + for (i = 0; i < pg_num; i++) { + arg.index = start_index + i * pg_sz; + rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, arg); + } + } + } + return mgi; +} + +struct rte_mempool * +rte_mempool_gntalloc_create(const char *name, unsigned elt_num, unsigned elt_size, + unsigned cache_size, unsigned private_data_size, + rte_mempool_ctor_t *mp_init, void *mp_init_arg, + rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg, + int socket_id, unsigned flags) +{ + int rv; + uint32_t i; + struct _mempool_gntalloc_info mgi; + struct ioctl_gntalloc_dealloc_gref arg; + int pg_sz = getpagesize(); + + mgi = _create_mempool(name, elt_num, elt_size, + cache_size, private_data_size, + mp_init, mp_init_arg, + obj_init, obj_init_arg, + socket_id, flags); + if (mgi.mp) { + rv = grant_gntalloc_mbuf_pool(mgi.mp, + mgi.pg_num, + mgi.gref_arr, + mgi.pa_arr, + mgi.mempool_idx); + free(mgi.gref_arr); + free(mgi.pa_arr); + if (rv == 0) + return mgi.mp; + /* + * in _create_mempool, unused pages have already been unmapped, deallocagted + * unmap and dealloc the remained ones here. + */ + munmap(mgi.va, pg_sz * mgi.pg_num); + for (i = 0; i < mgi.pg_num; i++) { + arg.index = mgi.start_index + i * pg_sz; + rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, arg); + } + return NULL; + } + return NULL; + + + +} diff --git a/drivers/net/xenvirt/rte_xen_lib.c b/drivers/net/xenvirt/rte_xen_lib.c new file mode 100644 index 00000000..de63cd30 --- /dev/null +++ b/drivers/net/xenvirt/rte_xen_lib.c @@ -0,0 +1,446 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <string.h> +#include <sys/types.h> +#include <fcntl.h> +#include <sys/mman.h> +#include <sys/ioctl.h> +#include <xen/xen-compat.h> +#if __XEN_LATEST_INTERFACE_VERSION__ < 0x00040200 +#include <xs.h> +#else +#include <xenstore.h> +#endif +#include <xen/sys/gntalloc.h> + +#include <rte_common.h> +#include <rte_string_fns.h> +#include <rte_malloc.h> + +#include "rte_xen_lib.h" + +/* + * The grant node format in xenstore for vring/mpool is: + * 0_rx_vring_gref = "gref1#, gref2#, gref3#" + * 0_mempool_gref = "gref1#, gref2#, gref3#" + * each gref# is a grant reference for a shared page. + * In each shared page, we store the grant_node_item items. + */ +struct grant_node_item { + uint32_t gref; + uint32_t pfn; +} __attribute__((packed)); + +/* fd for xen_gntalloc driver, used to allocate grant pages*/ +int gntalloc_fd = -1; + +/* xenstore path for local domain, now it is '/local/domain/domid/' */ +static char *dompath = NULL; +/* handle to xenstore read/write operations */ +static struct xs_handle *xs = NULL; +/* flag to indicate if xenstore cleanup is required */ +static bool is_xenstore_cleaned_up; + +/* + * Reserve a virtual address space. + * On success, returns the pointer. On failure, returns NULL. + */ +void * +get_xen_virtual(size_t size, size_t page_sz) +{ + void *addr; + uintptr_t aligned_addr; + + addr = mmap(NULL, size + page_sz, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, -1, 0); + if (addr == MAP_FAILED) { + RTE_LOG(ERR, PMD, "failed get a virtual area\n"); + return NULL; + } + + aligned_addr = RTE_ALIGN_CEIL((uintptr_t)addr, page_sz); + addr = (void *)(aligned_addr); + + return addr; +} + +/* + * Get the physical address for virtual memory starting at va. + */ +int +get_phys_map(void *va, phys_addr_t pa[], uint32_t pg_num, uint32_t pg_sz) +{ + int32_t fd, rc = 0; + uint32_t i, nb; + off_t ofs; + + ofs = (uintptr_t)va / pg_sz * sizeof(*pa); + nb = pg_num * sizeof(*pa); + + if ((fd = open(PAGEMAP_FNAME, O_RDONLY)) < 0 || + (rc = pread(fd, pa, nb, ofs)) < 0 || + (rc -= nb) != 0) { + RTE_LOG(ERR, PMD, "%s: failed read of %u bytes from \'%s\' " + "at offset %lu, error code: %d\n", + __func__, nb, PAGEMAP_FNAME, (unsigned long)ofs, errno); + rc = ENOENT; + } + + close(fd); + for (i = 0; i != pg_num; i++) + pa[i] = (pa[i] & PAGEMAP_PFN_MASK) * pg_sz; + + return rc; +} + +int +gntalloc_open(void) +{ + gntalloc_fd = open(XEN_GNTALLOC_FNAME, O_RDWR); + return (gntalloc_fd != -1) ? 
0 : -1; +} + +void +gntalloc_close(void) +{ + if (gntalloc_fd != -1) + close(gntalloc_fd); + gntalloc_fd = -1; +} + +void * +gntalloc(size_t size, uint32_t *gref, uint64_t *start_index) +{ + int page_size = getpagesize(); + uint32_t i, pg_num; + void *va; + int rv; + struct ioctl_gntalloc_alloc_gref *arg; + struct ioctl_gntalloc_dealloc_gref arg_d; + + if (size % page_size) { + RTE_LOG(ERR, PMD, "%s: %zu isn't multiple of page size\n", + __func__, size); + return NULL; + } + + pg_num = size / page_size; + arg = malloc(sizeof(*arg) + (pg_num - 1) * sizeof(uint32_t)); + if (arg == NULL) + return NULL; + arg->domid = DOM0_DOMID; + arg->flags = GNTALLOC_FLAG_WRITABLE; + arg->count = pg_num; + + rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_ALLOC_GREF, arg); + if (rv) { + RTE_LOG(ERR, PMD, "%s: ioctl error\n", __func__); + free(arg); + return NULL; + } + + va = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, gntalloc_fd, arg->index); + if (va == MAP_FAILED) { + RTE_LOG(ERR, PMD, "%s: mmap failed\n", __func__); + arg_d.count = pg_num; + arg_d.index = arg->index; + ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, arg_d); + free(arg); + return NULL; + } + + if (gref) { + for (i = 0; i < pg_num; i++) { + gref[i] = arg->gref_ids[i]; + } + } + if (start_index) + *start_index = arg->index; + + free(arg); + + return va; +} + +int +grefwatch_from_alloc(uint32_t *gref, void **pptr) +{ + int rv; + void *ptr; + int pg_size = getpagesize(); + struct ioctl_gntalloc_alloc_gref arg = { + .domid = DOM0_DOMID, + .flags = GNTALLOC_FLAG_WRITABLE, + .count = 1 + }; + struct ioctl_gntalloc_dealloc_gref arg_d; + struct ioctl_gntalloc_unmap_notify notify = { + .action = UNMAP_NOTIFY_CLEAR_BYTE + }; + + rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_ALLOC_GREF, &arg); + if (rv) { + RTE_LOG(ERR, PMD, "%s: ioctl error\n", __func__); + return -1; + } + + ptr = (void *)mmap(NULL, pg_size, PROT_READ|PROT_WRITE, MAP_SHARED, gntalloc_fd, arg.index); + arg_d.index = arg.index; + arg_d.count = 1; + if (ptr == MAP_FAILED) { + RTE_LOG(ERR, PMD, "%s: mmap failed\n", __func__); + ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg_d); + return -1; + } + if (pptr) + *pptr = ptr; + if (gref) + *gref = arg.gref_ids[0]; + + notify.index = arg.index; + rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_SET_UNMAP_NOTIFY, ¬ify); + if (rv) { + RTE_LOG(ERR, PMD, "%s: unmap notify failed\n", __func__); + munmap(ptr, pg_size); + ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg_d); + return -1; + } + + return 0; +} + +void +gntfree(void *va, size_t sz, uint64_t start_index) +{ + struct ioctl_gntalloc_dealloc_gref arg_d; + + if (va && sz) { + munmap(va, sz); + arg_d.count = sz / getpagesize(); + arg_d.index = start_index; + ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg_d); + } +} + +static int +xenstore_cleanup(void) +{ + char store_path[PATH_MAX] = {0}; + + if (snprintf(store_path, sizeof(store_path), + "%s%s", dompath, DPDK_XENSTORE_NODE) == -1) + return -1; + + if (xs_rm(xs, XBT_NULL, store_path) == false) { + RTE_LOG(ERR, PMD, "%s: failed cleanup node\n", __func__); + return -1; + } + + return 0; +} + +int +xenstore_init(void) +{ + unsigned int len, domid; + char *buf; + char *end; + + xs = xs_domain_open(); + if (xs == NULL) { + RTE_LOG(ERR, PMD,"%s: xs_domain_open failed\n", __func__); + return -1; + } + buf = xs_read(xs, XBT_NULL, "domid", &len); + if (buf == NULL) { + RTE_LOG(ERR, PMD, "%s: failed read domid\n", __func__); + return -1; + } + errno = 0; + domid = strtoul(buf, &end, 0); + if (errno != 0 || end == NULL || end == buf || domid == 0) + 
return -1; + + RTE_LOG(INFO, PMD, "retrieved dom ID = %d\n", domid); + + dompath = xs_get_domain_path(xs, domid); + if (dompath == NULL) + return -1; + + xs_transaction_start(xs); /* When to stop transaction */ + + if (is_xenstore_cleaned_up == 0) { + if (xenstore_cleanup()) + return -1; + is_xenstore_cleaned_up = 1; + } + + return 0; +} + +int +xenstore_uninit(void) +{ + xs_close(xs); + + if (is_xenstore_cleaned_up == 0) { + if (xenstore_cleanup()) + return -1; + is_xenstore_cleaned_up = 1; + } + free(dompath); + dompath = NULL; + + return 0; +} + +int +xenstore_write(const char *key_str, const char *val_str) +{ + char grant_path[PATH_MAX]; + int rv, len; + + if (xs == NULL) { + RTE_LOG(ERR, PMD, "%s: xenstore init failed\n", __func__); + return -1; + } + rv = snprintf(grant_path, sizeof(grant_path), "%s%s", dompath, key_str); + if (rv == -1) { + RTE_LOG(ERR, PMD, "%s: snprintf %s %s failed\n", + __func__, dompath, key_str); + return -1; + } + len = strnlen(val_str, PATH_MAX); + + if (xs_write(xs, XBT_NULL, grant_path, val_str, len) == false) { + RTE_LOG(ERR, PMD, "%s: xs_write failed\n", __func__); + return -1; + } + + return 0; +} + +int +grant_node_create(uint32_t pg_num, uint32_t *gref_arr, phys_addr_t *pa_arr, char *val_str, size_t str_size) +{ + uint64_t start_index; + int pg_size; + uint32_t pg_shift; + void *ptr = NULL; + uint32_t count, entries_per_pg; + uint32_t i, j = 0, k = 0; + uint32_t *gref_tmp; + int first = 1; + char tmp_str[PATH_MAX] = {0}; + int rv = -1; + + pg_size = getpagesize(); + if (rte_is_power_of_2(pg_size) == 0) { + return -1; + } + pg_shift = rte_bsf32(pg_size); + if (pg_size % sizeof(struct grant_node_item)) { + RTE_LOG(ERR, PMD, "pg_size isn't a multiple of grant node item\n"); + return -1; + } + + entries_per_pg = pg_size / sizeof(struct grant_node_item); + count = (pg_num + entries_per_pg - 1 ) / entries_per_pg; + gref_tmp = malloc(count * sizeof(uint32_t)); + if (gref_tmp == NULL) + return -1; + ptr = gntalloc(pg_size * count, gref_tmp, &start_index); + if (ptr == NULL) { + RTE_LOG(ERR, PMD, "%s: gntalloc error of %d pages\n", __func__, count); + free(gref_tmp); + return -1; + } + + while (j < pg_num) { + if (first) { + rv = snprintf(val_str, str_size, "%u", gref_tmp[k]); + first = 0; + } else { + snprintf(tmp_str, PATH_MAX, "%s", val_str); + rv = snprintf(val_str, str_size, "%s,%u", tmp_str, gref_tmp[k]); + } + k++; + if (rv == -1) + break; + + for (i = 0; i < entries_per_pg && j < pg_num ; i++) { + ((struct grant_node_item *)ptr)->gref = gref_arr[j]; + ((struct grant_node_item *)ptr)->pfn = pa_arr[j] >> pg_shift; + ptr = RTE_PTR_ADD(ptr, sizeof(struct grant_node_item)); + j++; + } + } + if (rv == -1) { + gntfree(ptr, pg_size * count, start_index); + } else + rv = 0; + free(gref_tmp); + return rv; +} + + +int +grant_gntalloc_mbuf_pool(struct rte_mempool *mpool, uint32_t pg_num, uint32_t *gref_arr, phys_addr_t *pa_arr, int mempool_idx) +{ + char key_str[PATH_MAX] = {0}; + char val_str[PATH_MAX] = {0}; + + if (grant_node_create(pg_num, gref_arr, pa_arr, val_str, sizeof(val_str))) { + return -1; + } + + if (snprintf(key_str, sizeof(key_str), + DPDK_XENSTORE_PATH"%d"MEMPOOL_XENSTORE_STR, mempool_idx) == -1) + return -1; + if (xenstore_write(key_str, val_str) == -1) + return -1; + + if (snprintf(key_str, sizeof(key_str), + DPDK_XENSTORE_PATH"%d"MEMPOOL_VA_XENSTORE_STR, mempool_idx) == -1) + return -1; + if (snprintf(val_str, sizeof(val_str), "%"PRIxPTR, (uintptr_t)mpool->elt_va_start) == -1) + return -1; + if (xenstore_write(key_str, val_str) == -1) + return 
-1; + + return 0; +} diff --git a/drivers/net/xenvirt/rte_xen_lib.h b/drivers/net/xenvirt/rte_xen_lib.h new file mode 100644 index 00000000..d973eacb --- /dev/null +++ b/drivers/net/xenvirt/rte_xen_lib.h @@ -0,0 +1,116 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _RTE_XEN_DUMMY_PMD_H +#define _RTE_XEN_DUMMY_PMD_H + +#include <stdint.h> + +#include <rte_common.h> +#include <rte_mempool.h> +#include <rte_ether.h> + +#define PAGEMAP_FNAME "/proc/self/pagemap" +#define XEN_GNTALLOC_FNAME "/dev/xen/gntalloc" +#define DPDK_XENSTORE_PATH "/control/dpdk/" +#define DPDK_XENSTORE_NODE "/control/dpdk" +/*format 0_mempool_gref = "1537,1524,1533" */ +#define MEMPOOL_XENSTORE_STR "_mempool_gref" +/*format 0_mempool_va = 0x80340000 */ +#define MEMPOOL_VA_XENSTORE_STR "_mempool_va" +/*format 0_rx_vring_gref = "1537,1524,1533" */ +#define RXVRING_XENSTORE_STR "_rx_vring_gref" +/*format 0_tx_vring_gref = "1537,1524,1533" */ +#define TXVRING_XENSTORE_STR "_tx_vring_gref" +#define VRING_FLAG_STR "_vring_flag" +/*format: event_type_start_0 = 1*/ +#define EVENT_TYPE_START_STR "event_type_start_" + +#define DOM0_DOMID 0 +/* + * the pfn (page frame number) are bits 0-54 (see pagemap.txt in linux + * Documentation). 
+ */ +#define PAGEMAP_PFN_BITS 54 +#define PAGEMAP_PFN_MASK RTE_LEN2MASK(PAGEMAP_PFN_BITS, phys_addr_t) + +#define MAP_FLAG 0xA5 + +#define RTE_ETH_XENVIRT_PAIRS_DELIM ';' +#define RTE_ETH_XENVIRT_KEY_VALUE_DELIM '=' +#define RTE_ETH_XENVIRT_MAX_ARGS 1 +#define RTE_ETH_XENVIRT_MAC_PARAM "mac" +struct xenvirt_dict { + uint8_t addr_valid; + struct ether_addr addr; +}; + +extern int gntalloc_fd; + +int +gntalloc_open(void); + +void +gntalloc_close(void); + +void * +gntalloc(size_t sz, uint32_t *gref, uint64_t *start_index); + +void +gntfree(void *va, size_t sz, uint64_t start_index); + +int +xenstore_init(void); + +int +xenstore_uninit(void); + +int +xenstore_write(const char *key_str, const char *val_str); + +int +get_phys_map(void *va, phys_addr_t pa[], uint32_t pg_num, uint32_t pg_sz); + +void * +get_xen_virtual(size_t size, size_t page_sz); + +int +grefwatch_from_alloc(uint32_t *gref, void **pptr); + + +int grant_node_create(uint32_t pg_num, uint32_t *gref_arr, phys_addr_t *pa_arr, char *val_str, size_t str_size); + +int +grant_gntalloc_mbuf_pool(struct rte_mempool *mpool, uint32_t pg_num, uint32_t *gref_arr, phys_addr_t *pa_arr, int mempool_idx); + +#endif diff --git a/drivers/net/xenvirt/virtio_logs.h b/drivers/net/xenvirt/virtio_logs.h new file mode 100644 index 00000000..d6c33f7b --- /dev/null +++ b/drivers/net/xenvirt/virtio_logs.h @@ -0,0 +1,70 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _VIRTIO_LOGS_H_ +#define _VIRTIO_LOGS_H_ + +#include <rte_log.h> + +#ifdef RTE_LIBRTE_VIRTIO_DEBUG_INIT +#define PMD_INIT_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") +#else +#define PMD_INIT_LOG(level, fmt, args...) do { } while(0) +#define PMD_INIT_FUNC_TRACE() do { } while(0) +#endif + +#ifdef RTE_LIBRTE_VIRTIO_DEBUG_RX +#define PMD_RX_LOG(level, fmt, args...) 
\ + RTE_LOG(level, PMD, "%s() rx: " fmt , __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_VIRTIO_DEBUG_TX +#define PMD_TX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s() tx: " fmt , __func__, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while(0) +#endif + + +#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DRIVER +#define PMD_DRV_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt , __func__, ## args) +#else +#define PMD_DRV_LOG(level, fmt, args...) do { } while(0) +#endif + +#endif /* _VIRTIO_LOGS_H_ */ diff --git a/drivers/net/xenvirt/virtqueue.h b/drivers/net/xenvirt/virtqueue.h new file mode 100644 index 00000000..350eae3e --- /dev/null +++ b/drivers/net/xenvirt/virtqueue.h @@ -0,0 +1,273 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _VIRTQUEUE_H_ +#define _VIRTQUEUE_H_ + +#include <stdint.h> +#include <linux/virtio_ring.h> +#include <linux/virtio_net.h> + +#include <rte_atomic.h> +#include <rte_memory.h> +#include <rte_memzone.h> +#include <rte_mempool.h> + +#include "virtio_logs.h" + +struct rte_mbuf; + +/* The alignment to use between consumer and producer parts of vring. */ +#define VIRTIO_PCI_VRING_ALIGN 4096 + +enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 }; + +/** + * The maximum virtqueue size is 2^15. Use that value as the end of + * descriptor chain terminator since it will never be a valid index + * in the descriptor table. This is used to verify we are correctly + * handling vq_free_cnt. + */ +#define VQ_RING_DESC_CHAIN_END 32768 + +#define VIRTQUEUE_MAX_NAME_SZ 32 + +struct pmd_internals { + struct rte_eth_stats eth_stats; + int port_id; + int virtio_idx; +}; + + +struct virtqueue { + char vq_name[VIRTQUEUE_MAX_NAME_SZ]; + struct rte_mempool *mpool; /**< mempool for mbuf allocation */ + uint16_t queue_id; /**< DPDK queue index. 
*/ + uint16_t vq_queue_index; /**< PCI queue index */ + uint8_t port_id; /**< Device port identifier. */ + + void *vq_ring_virt_mem; /**< virtual address of vring*/ + int vq_alignment; + int vq_ring_size; + + struct vring vq_ring; /**< vring keeping desc, used and avail */ + struct pmd_internals *internals; /**< virtio device internal info. */ + uint16_t vq_nentries; /**< vring desc numbers */ + uint16_t vq_desc_head_idx; + uint16_t vq_free_cnt; /**< num of desc available */ + uint16_t vq_used_cons_idx; /**< Last consumed desc in used table, trails vq_ring.used->idx*/ + + struct vq_desc_extra { + void *cookie; + uint16_t ndescs; + } vq_descx[0] __rte_cache_aligned; +}; + + +#ifdef RTE_LIBRTE_XENVIRT_DEBUG_DUMP +#define VIRTQUEUE_DUMP(vq) do { \ + uint16_t used_idx, nused; \ + used_idx = (vq)->vq_ring.used->idx; \ + nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \ + PMD_INIT_LOG(DEBUG, \ + "VQ: %s - size=%d; free=%d; used=%d; desc_head_idx=%d;" \ + " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \ + " avail.flags=0x%x; used.flags=0x%x\n", \ + (vq)->vq_name, (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \ + (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \ + (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \ + (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \ +} while (0) +#else +#define VIRTQUEUE_DUMP(vq) do { } while (0) +#endif + + +/** + * Dump virtqueue internal structures, for debug purpose only. + */ +void virtqueue_dump(struct virtqueue *vq); + +/** + * Get all mbufs to be freed. + */ +struct rte_mbuf * virtqueue_detatch_unused(struct virtqueue *vq); + +static inline int __attribute__((always_inline)) +virtqueue_full(const struct virtqueue *vq) +{ + return vq->vq_free_cnt == 0; +} + +#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx)) + +static inline void __attribute__((always_inline)) +vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx) +{ + uint16_t avail_idx; + /* + * Place the head of the descriptor chain into the next slot and make + * it usable to the host. The chain is made available now rather than + * deferring to virtqueue_notify() in the hopes that if the host is + * currently running on another CPU, we can keep it processing the new + * descriptor. + */ + avail_idx = (uint16_t)(vq->vq_ring.avail->idx & (vq->vq_nentries - 1)); + vq->vq_ring.avail->ring[avail_idx] = desc_idx; + rte_smp_wmb(); + vq->vq_ring.avail->idx++; +} + +static inline void __attribute__((always_inline)) +vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx) +{ + struct vring_desc *dp; + struct vq_desc_extra *dxp; + + dp = &vq->vq_ring.desc[desc_idx]; + dxp = &vq->vq_descx[desc_idx]; + vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs); + while (dp->flags & VRING_DESC_F_NEXT) { + dp = &vq->vq_ring.desc[dp->next]; + } + dxp->ndescs = 0; + + /* + * We must append the existing free chain, if any, to the end of + * newly freed chain. If the virtqueue was completely used, then + * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above). 
+ */ + dp->next = vq->vq_desc_head_idx; + vq->vq_desc_head_idx = desc_idx; +} + +static inline int __attribute__((always_inline)) +virtqueue_enqueue_recv_refill(struct virtqueue *rxvq, struct rte_mbuf *cookie) +{ + const uint16_t needed = 1; + const uint16_t head_idx = rxvq->vq_desc_head_idx; + struct vring_desc *start_dp = rxvq->vq_ring.desc; + struct vq_desc_extra *dxp; + + if (unlikely(rxvq->vq_free_cnt == 0)) + return -ENOSPC; + if (unlikely(rxvq->vq_free_cnt < needed)) + return -EMSGSIZE; + if (unlikely(head_idx >= rxvq->vq_nentries)) + return -EFAULT; + + dxp = &rxvq->vq_descx[head_idx]; + dxp->cookie = (void *)cookie; + dxp->ndescs = needed; + + start_dp[head_idx].addr = + (uint64_t) ((uintptr_t)cookie->buf_addr + RTE_PKTMBUF_HEADROOM - sizeof(struct virtio_net_hdr)); + start_dp[head_idx].len = cookie->buf_len - RTE_PKTMBUF_HEADROOM + sizeof(struct virtio_net_hdr); + start_dp[head_idx].flags = VRING_DESC_F_WRITE; + rxvq->vq_desc_head_idx = start_dp[head_idx].next; + rxvq->vq_free_cnt = (uint16_t)(rxvq->vq_free_cnt - needed); + vq_ring_update_avail(rxvq, head_idx); + + return 0; +} + +static inline int __attribute__((always_inline)) +virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie) +{ + + const uint16_t needed = 2; + struct vring_desc *start_dp = txvq->vq_ring.desc; + uint16_t head_idx = txvq->vq_desc_head_idx; + uint16_t idx = head_idx; + struct vq_desc_extra *dxp; + + if (unlikely(txvq->vq_free_cnt == 0)) + return -ENOSPC; + if (unlikely(txvq->vq_free_cnt < needed)) + return -EMSGSIZE; + if (unlikely(head_idx >= txvq->vq_nentries)) + return -EFAULT; + + dxp = &txvq->vq_descx[idx]; + dxp->cookie = (void *)cookie; + dxp->ndescs = needed; + + start_dp = txvq->vq_ring.desc; + start_dp[idx].addr = 0; +/* + * TODO: save one desc here? + */ + start_dp[idx].len = sizeof(struct virtio_net_hdr); + start_dp[idx].flags = VRING_DESC_F_NEXT; + start_dp[idx].addr = (uintptr_t)NULL; + idx = start_dp[idx].next; + start_dp[idx].addr = (uint64_t)rte_pktmbuf_mtod(cookie, uintptr_t); + start_dp[idx].len = cookie->data_len; + start_dp[idx].flags = 0; + idx = start_dp[idx].next; + txvq->vq_desc_head_idx = idx; + txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed); + vq_ring_update_avail(txvq, head_idx); + + return 0; +} + +static inline uint16_t __attribute__((always_inline)) +virtqueue_dequeue_burst(struct virtqueue *vq, struct rte_mbuf **rx_pkts, uint32_t *len, uint16_t num) +{ + struct vring_used_elem *uep; + struct rte_mbuf *cookie; + uint16_t used_idx, desc_idx; + uint16_t i; + /* Caller does the check */ + for (i = 0; i < num ; i ++) { + used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1)); + uep = &vq->vq_ring.used->ring[used_idx]; + desc_idx = (uint16_t) uep->id; + cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie; + if (unlikely(cookie == NULL)) { + PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u\n", + vq->vq_used_cons_idx); + RTE_LOG(ERR, PMD, "%s: inconsistent (%u, %u)\n", __func__, used_idx , desc_idx); + break; + } + len[i] = uep->len; + rx_pkts[i] = cookie; + vq->vq_used_cons_idx++; + vq_ring_free_chain(vq, desc_idx); + vq->vq_descx[desc_idx].cookie = NULL; + } + return i; +} + +#endif /* _VIRTQUEUE_H_ */ |
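
A few standalone sketches follow; they are not part of the commit above, and every concrete value in them (page counts, grant numbers, addresses) is made up for illustration. The first one covers the mapping loop in _create_mempool() together with get_xen_virtual(): a contiguous virtual window is reserved up front and then overlaid page by page with MAP_FIXED, so gntalloc pages that are scattered in machine-address space become virtually contiguous for the mempool. This is a minimal sketch of only that mmap mechanic, with anonymous mappings standing in for gntalloc_fd, so it demonstrates the address-space handling and not actual grant sharing.

/* Reserve one window, then overlay it page by page with MAP_FIXED. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t pg_sz = (size_t)sysconf(_SC_PAGESIZE);
	size_t pg_num = 4, sz = pg_num * pg_sz;	/* arbitrary example sizes */
	size_t i;

	/* Reserve sz plus one page so the start can be aligned up, in the
	 * same spirit as get_xen_virtual(); PROT_NONE marks it as a pure
	 * reservation. */
	char *raw = mmap(NULL, sz + pg_sz, PROT_NONE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (raw == MAP_FAILED)
		return 1;
	char *va = (char *)(((uintptr_t)raw + pg_sz - 1) &
			    ~(uintptr_t)(pg_sz - 1));

	/* Overlay each page of the window; the real driver maps gntalloc_fd
	 * at gnt_arr[i].index here instead of an anonymous page. */
	for (i = 0; i < pg_num; i++) {
		void *p = mmap(va + i * pg_sz, pg_sz, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
		if (p == MAP_FAILED || p != (void *)(va + i * pg_sz))
			return 1;
		memset(p, (int)i, pg_sz);	/* mark each page */
	}

	printf("window %p, %zu pages, markers: %d %d %d %d\n",
	       (void *)va, pg_num,
	       (int)va[0], (int)va[pg_sz],
	       (int)va[2 * pg_sz], (int)va[3 * pg_sz]);

	munmap(raw, sz + pg_sz);
	return 0;
}

The driver applies the same MAP_FIXED checks (failure or an address other than the requested one) before falling through to its unwind path.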
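get_phys_map() in rte_xen_lib.c resolves virtual addresses to page frame numbers by reading /proc/self/pagemap: one 64-bit entry per virtual page, with the PFN documented in bits 0-54 of each entry. Below is a small standalone sketch of that lookup using the same offset computation; note that since Linux 4.0 the PFN field reads back as zero without CAP_SYS_ADMIN, so the printed value is only meaningful when run with sufficient privilege.

/* Look up the page frame number of one page via /proc/self/pagemap. */
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define PFN_MASK ((UINT64_C(1) << 55) - 1)	/* bits 0-54 of a pagemap entry */

int main(void)
{
	long pg_sz = sysconf(_SC_PAGESIZE);
	void *va = NULL;
	uint64_t entry = 0;
	int fd;

	if (posix_memalign(&va, (size_t)pg_sz, (size_t)pg_sz) != 0)
		return 1;
	*(volatile char *)va = 0;	/* touch the page so a frame is assigned */

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0)
		return 1;

	/* One 8-byte entry per virtual page, indexed by va / page_size --
	 * the same offset computation as get_phys_map(). */
	off_t ofs = (off_t)((uintptr_t)va / (uintptr_t)pg_sz * sizeof(entry));
	if (pread(fd, &entry, sizeof(entry), ofs) != (ssize_t)sizeof(entry)) {
		close(fd);
		return 1;
	}
	close(fd);

	printf("va=%p pfn=0x%" PRIx64 " pa=0x%" PRIx64 "\n",
	       va, entry & PFN_MASK, (entry & PFN_MASK) * (uint64_t)pg_sz);
	free(va);
	return 0;
}

As in the last loop of get_phys_map(), multiplying the masked PFN by the page size yields the physical address the grant code hands to the backend.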
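grant_node_create() publishes the mempool's grant information in two layers: the (gref, pfn) pair of every mempool page is packed into scratch pages as struct grant_node_item records, and the grant references of those scratch pages are joined into a comma-separated string that grant_gntalloc_mbuf_pool() writes to xenstore under keys of the form <idx>_mempool_gref. The sketch below reproduces only the record packing and string building, with made-up grant numbers and no gntalloc or xenstore calls, so it is an illustration of the layout rather than the driver's code path.

/* Pack (gref, pfn) records and build the comma-separated gref string. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same on-page record layout as rte_xen_lib.c. */
struct grant_node_item {
	uint32_t gref;
	uint32_t pfn;
} __attribute__((packed));

int main(void)
{
	/* Hypothetical mempool pages: grant reference + physical address. */
	uint32_t gref_arr[] = { 871, 872, 873 };
	uint64_t pa_arr[]   = { 0x12345000ULL, 0x23456000ULL, 0x34567000ULL };
	unsigned pg_shift = 12;			/* 4 KiB pages */
	unsigned pg_num = 3;

	/* Hypothetical grant refs of the scratch page(s) holding the
	 * records; in the driver these come from gntalloc(). */
	uint32_t meta_gref[] = { 1537 };
	unsigned meta_num = 1;

	struct grant_node_item items[3];
	char val_str[128];
	size_t off = 0;
	unsigned i;

	/* One record per mempool page, as in the inner loop of
	 * grant_node_create(). */
	for (i = 0; i < pg_num; i++) {
		items[i].gref = gref_arr[i];
		items[i].pfn  = (uint32_t)(pa_arr[i] >> pg_shift);
	}

	/* Join the scratch-page grant refs into "gref1,gref2,...". */
	for (i = 0; i < meta_num; i++)
		off += snprintf(val_str + off, sizeof(val_str) - off,
				i ? ",%u" : "%u", meta_gref[i]);

	/* This key/value pair is what would land in xenstore. */
	printf("0%s = %s\n", "_mempool_gref", val_str);
	printf("record[0]: gref=%u pfn=0x%x (%zu bytes per record)\n",
	       items[0].gref, items[0].pfn, sizeof(struct grant_node_item));
	return 0;
}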
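The virtqueue helpers at the end of virtqueue.h (VIRTQUEUE_NUSED, vq_ring_update_avail, virtqueue_dequeue_burst) rely on the usual split-virtqueue convention: avail->idx, used->idx and vq_used_cons_idx are free-running 16-bit counters, and only the final ring access masks with (ring size - 1). The toy program below shows why that arithmetic stays correct across the 2^16 wrap, assuming a power-of-two ring size; 256 is an arbitrary choice here.

/* Free-running 16-bit virtqueue indices: subtract for occupancy, mask
 * with (size - 1) only when indexing a ring slot. */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 256			/* must be a power of two */

int main(void)
{
	uint16_t used_idx = 65534;	/* device-side free-running counter */
	uint16_t cons_idx = 65530;	/* driver-side consumed counter     */
	unsigned i;

	/* Entries outstanding, correct even when used_idx has wrapped. */
	uint16_t nused = (uint16_t)(used_idx - cons_idx);
	printf("nused = %u\n", (unsigned)nused);	/* prints 4 */

	/* Advance past the wrap: the counter overflows, the slot index is
	 * always taken modulo the ring size. */
	for (i = 0; i < 8; i++) {
		uint16_t slot = (uint16_t)(used_idx & (RING_SIZE - 1));
		printf("used_idx=%u -> ring slot %u\n",
		       (unsigned)used_idx, (unsigned)slot);
		used_idx++;
	}
	return 0;
}

Because the counters are only masked at the point of ring access, checks such as virtqueue_full() and VIRTQUEUE_NUSED() need no special wrap-around handling.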